path: root/fs/xfs/xfs_buf.c
author     Brian Foster <bfoster@redhat.com>    2021-01-22 16:48:19 -0800
committer  Darrick J. Wong <djwong@kernel.org>  2021-01-22 16:54:50 -0800
commit     10fb9ac1251fd0daa645c9e6a22270bfc72bd5e8 (patch)
tree       f1c6568a22fe94bd52dbdf4d11f8af06a90acc9a /fs/xfs/xfs_buf.c
parent     88a9e03beef22cc5fabea344f54b9a0dfe63de08 (diff)
xfs: rename xfs_wait_buftarg() to xfs_buftarg_drain()
xfs_wait_buftarg() is vaguely named and somewhat overloaded. Its primary purpose is to reclaim all buffers from the provided buffer target LRU. In preparation to refactor xfs_wait_buftarg() into serialization and LRU draining components, rename the function and associated helpers to something more descriptive. This patch has no functional changes, with the minor exception of renaming a tracepoint.

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
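For reference, the helper being renamed follows the kernel's list_lru isolate-callback pattern: each pass over the LRU either skips a buffer that is still held elsewhere or moves it onto a caller-supplied dispose list so it can be freed once the LRU lock is dropped. The sketch below is only a simplified illustration of that shape, assuming the standard list_lru_walk_cb signature; the example_drain_rele name is hypothetical, and the real xfs_buftarg_drain_rele() in the hunks below does additional buffer-state handling not shown here.

/*
 * Simplified sketch of a list_lru isolate callback in the style of
 * xfs_buftarg_drain_rele(): skip buffers that are still in use, otherwise
 * isolate them onto a private dispose list for release after the walk.
 */
static enum lru_status
example_drain_rele(
        struct list_head        *item,
        struct list_lru_one     *lru,
        spinlock_t              *lru_lock,
        void                    *arg)
{
        struct xfs_buf          *bp = container_of(item, struct xfs_buf, b_lru);
        struct list_head        *dispose = arg;

        /* Still held by someone else; leave it for a later pass. */
        if (atomic_read(&bp->b_hold) > 1)
                return LRU_SKIP;

        /* Trylock to avoid inverting lock order against lru_lock. */
        if (!spin_trylock(&bp->b_lock))
                return LRU_SKIP;

        /* Move the buffer onto the dispose list; the caller releases it. */
        list_lru_isolate_move(lru, item, dispose);
        spin_unlock(&bp->b_lock);
        return LRU_REMOVED;
}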
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--   fs/xfs/xfs_buf.c   12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f8400bbd6473..99ee8b0f499c 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -43,7 +43,7 @@ static kmem_zone_t *xfs_buf_zone;
* pag_buf_lock
* lru_lock
*
- * xfs_buftarg_wait_rele
+ * xfs_buftarg_drain_rele
* lru_lock
* b_lock (trylock due to inversion)
*
@@ -88,7 +88,7 @@ xfs_buf_vmap_len(
* because the corresponding decrement is deferred to buffer release. Buffers
* can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization purposes
- * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
+ * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
* in-flight buffers.
*
* Buffers that are never released (e.g., superblock, iclog buffers) must set
@@ -1786,7 +1786,7 @@ __xfs_buf_mark_corrupt(
* while freeing all the buffers only held by the LRU.
*/
static enum lru_status
-xfs_buftarg_wait_rele(
+xfs_buftarg_drain_rele(
struct list_head *item,
struct list_lru_one *lru,
spinlock_t *lru_lock,
@@ -1798,7 +1798,7 @@ xfs_buftarg_wait_rele(
if (atomic_read(&bp->b_hold) > 1) {
/* need to wait, so skip it this pass */
- trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+ trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
if (!spin_trylock(&bp->b_lock))
@@ -1816,7 +1816,7 @@ xfs_buftarg_wait_rele(
}
void
-xfs_wait_buftarg(
+xfs_buftarg_drain(
struct xfs_buftarg *btp)
{
LIST_HEAD(dispose);
@@ -1841,7 +1841,7 @@ xfs_wait_buftarg(
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
- list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+ list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
&dispose, LONG_MAX);
while (!list_empty(&dispose)) {
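Taken together, the hunks above show the renamed xfs_buftarg_drain() repeatedly walking the buffer target LRU until it is empty: each walk isolates reclaimable buffers onto a local dispose list, and the buffers are then released outside the LRU lock. Below is a rough sketch of that outer loop, reconstructed only from the hunks shown here; the example_buftarg_drain name is illustrative, and the write-failure warning plus the earlier in-flight I/O synchronization with unmount are elided.

/*
 * Rough shape of the drain loop after this patch: walk the LRU until it
 * is empty, isolating buffers onto a dispose list, then drop the last
 * reference on each buffer outside the LRU lock.
 */
static void
example_buftarg_drain(
        struct xfs_buftarg      *btp)
{
        LIST_HEAD(dispose);

        /* Loop until there is nothing left on the lru list. */
        while (list_lru_count(&btp->bt_lru)) {
                list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
                              &dispose, LONG_MAX);

                while (!list_empty(&dispose)) {
                        struct xfs_buf  *bp;

                        bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
                        list_del_init(&bp->b_lru);
                        /* ... warn about dirty/unwritten buffers here ... */
                        xfs_buf_rele(bp);
                }
        }
}

Buffers are freed off the private dispose list rather than inside the walk itself so that nothing is released while lru_lock is held, and passing LONG_MAX asks list_lru_walk() to scan the entire list in a single pass.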