author		Darrick J. Wong <djwong@kernel.org>	2021-10-22 15:31:05 -0700
committer	Darrick J. Wong <djwong@kernel.org>	2021-10-22 16:41:15 -0700
commit		b9872a3e43dcb62d05ee10f93ce45940e0674487
tree		d944f29a906b713c507b29d47cd9137858ab9df8 /fs/xfs/libxfs
parent		c5355cbaca02979360a5f1227ae3c4971222dc3d
xfs: allow queued AG intents to drain before scrubbing (scrub-drain-intents_2021-10-22)
Currently, online scrub isn't sufficiently careful about quiescing allocation groups before checking them. While scrub does take the AG header locks, it doesn't serialize against chains of AG update intents that are being processed concurrently. If there's a collision, cross-referencing between data structures (e.g. rmapbt and refcountbt) can yield false corruption events; if repair is running, this results in incorrect repairs.

Fix this by adding to the perag structure a count of active intents, and make scrub wait until there aren't any before continuing.

This is a little stupid, since transactions can queue intents without taking buffer locks, but we'll also wait for those transactions.

XXX: Should we instead have a per-AG rwsem that is taken as soon as the AG[IF] buffers are locked and stays held until the transaction commits or moves on to the next AG? Would we rather have a SIX lock, so that intents can take an IX lock and not have to upgrade to X until we actually want to make changes to that AG? Is that how those even work?

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
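To make the mechanism concrete, here is a minimal sketch of the drain helpers the message implies, assuming names like xfs_perag_intent_hold/rele and xfs_perag_wait_intents; the patch below only adds the counter and waitqueue fields, not these functions.

/* Bump the live intent count when a deferred item is queued. */
void
xfs_perag_intent_hold(
	struct xfs_perag	*pag)
{
	atomic_inc(&pag->pag_intents);
}

/* Drop the count; wake any waiting scrubber when the last intent is processed. */
void
xfs_perag_intent_rele(
	struct xfs_perag	*pag)
{
	if (atomic_dec_and_test(&pag->pag_intents))
		wake_up(&pag->pag_intents_wq);
}

/* Scrub would call this after taking the AG header locks. */
int
xfs_perag_wait_intents(
	struct xfs_perag	*pag)
{
	return wait_event_killable(pag->pag_intents_wq,
			atomic_read(&pag->pag_intents) == 0);
}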
Diffstat (limited to 'fs/xfs/libxfs')
 fs/xfs/libxfs/xfs_ag.c    |  6 ++++++
 fs/xfs/libxfs/xfs_ag.h    | 11 +++++++++++
 fs/xfs/libxfs/xfs_defer.c |  9 ++++++---
 fs/xfs/libxfs/xfs_defer.h |  3 ++-
 4 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index dc9d78fb7bac..b6a9f6dde55a 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -193,6 +193,9 @@ xfs_free_perag(
 		spin_unlock(&mp->m_perag_lock);
 		ASSERT(pag);
 		ASSERT(atomic_read(&pag->pag_ref) == 0);
+#ifdef CONFIG_XFS_ONLINE_SCRUB
+		ASSERT(atomic_read(&pag->pag_intents) == 0);
+#endif
 
 		cancel_delayed_work_sync(&pag->pag_blockgc_work);
 		xfs_iunlink_destroy(pag);
@@ -254,6 +257,9 @@ xfs_initialize_perag(
 		spin_lock_init(&pag->pag_state_lock);
 		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+#ifdef CONFIG_XFS_ONLINE_SCRUB
+		init_waitqueue_head(&pag->pag_intents_wq);
+#endif
 		init_waitqueue_head(&pag->pagb_wait);
 		pag->pagb_count = 0;
 		pag->pagb_tree = RB_ROOT;
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index c9e198e62b74..cfc51a5af74a 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -103,6 +103,17 @@ struct xfs_perag {
 	 * or have some other means to control concurrency.
 	 */
 	struct rhashtable	pagi_unlinked_hash;
+
+#ifdef CONFIG_XFS_ONLINE_SCRUB
+	/*
+	 * Counter of live intents.  We track the number of log intent items
+	 * that have been queued (but not yet processed) so that scrub can
+	 * detect the presence of other threads that are in the middle of
+	 * processing a chain of deferred items.
+	 */
+	atomic_t		pag_intents;
+	wait_queue_head_t	pag_intents_wq;
+#endif
 };
 
 int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
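Since the new fields exist only under CONFIG_XFS_ONLINE_SCRUB, a header-side sketch (an assumption, not part of this patch) shows how the hypothetical helpers above could compile away to no-ops when scrub is disabled, so callers need no #ifdefs:

#ifdef CONFIG_XFS_ONLINE_SCRUB
void xfs_perag_intent_hold(struct xfs_perag *pag);
void xfs_perag_intent_rele(struct xfs_perag *pag);
int xfs_perag_wait_intents(struct xfs_perag *pag);
#else
static inline void xfs_perag_intent_hold(struct xfs_perag *pag) { }
static inline void xfs_perag_intent_rele(struct xfs_perag *pag) { }
static inline int xfs_perag_wait_intents(struct xfs_perag *pag)
{
	return 0;
}
#endif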
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 3045ad184972..108d950b6f41 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -363,7 +363,8 @@ xfs_defer_cancel_list(
list_for_each_safe(pwi, n, &dfp->dfp_work) {
list_del(pwi);
dfp->dfp_count--;
- ops->cancel_item(pwi);
+ trace_xfs_defer_cancel_item(mp, dfp, pwi);
+ ops->cancel_item(mp, pwi);
}
ASSERT(dfp->dfp_count == 0);
kmem_free(dfp);
@@ -442,6 +443,7 @@ xfs_defer_finish_one(
 	list_for_each_safe(li, n, &dfp->dfp_work) {
 		list_del(li);
 		dfp->dfp_count--;
+		trace_xfs_defer_finish_item(tp->t_mountp, dfp, li);
 		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
 		if (error == -EAGAIN) {
 			/*
@@ -585,7 +587,7 @@ xfs_defer_add(
 	struct list_head		*li)
 {
 	struct xfs_defer_pending	*dfp = NULL;
-	const struct xfs_defer_op_type	*ops;
+	const struct xfs_defer_op_type	*ops = defer_op_types[type];
 
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);
@@ -598,7 +600,6 @@ xfs_defer_add(
 	if (!list_empty(&tp->t_dfops)) {
 		dfp = list_last_entry(&tp->t_dfops,
 				struct xfs_defer_pending, dfp_list);
-		ops = defer_op_types[dfp->dfp_type];
 		if (dfp->dfp_type != type ||
 		    (ops->max_items && dfp->dfp_count >= ops->max_items))
 			dfp = NULL;
@@ -616,6 +617,8 @@ xfs_defer_add(
 		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
 	}
 
 	list_add_tail(li, &dfp->dfp_work);
+	trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
+	ops->add_item(tp->t_mountp, li);
 	dfp->dfp_count++;
 }
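The new ->add_item hook gives each op type a chance to do per-item accounting at queue time. As an illustration only (this handler is not part of the patch), an extent-free item could use it to bump the per-AG intent count via the hypothetical helper sketched earlier:

STATIC void
xfs_extent_free_add_item(
	struct xfs_mount	*mp,
	const struct list_head	*item)
{
	struct xfs_extent_free_item	*xefi;
	struct xfs_perag		*pag;

	/* Find the AG that owns the extent and mark it as having a live intent. */
	xefi = container_of(item, struct xfs_extent_free_item, xefi_list);
	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, xefi->xefi_startblock));
	xfs_perag_intent_hold(pag);
	xfs_perag_put(pag);
}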
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index b4d23235931d..51e7c992d95e 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -55,7 +55,8 @@ struct xfs_defer_op_type {
 	int (*finish_item)(struct xfs_trans *tp, struct xfs_log_item *done,
 			struct list_head *item, struct xfs_btree_cur **state);
 	void (*finish_cleanup)(struct xfs_trans *tp,
 			struct xfs_btree_cur *state, int error);
-	void (*cancel_item)(struct list_head *item);
+	void (*cancel_item)(struct xfs_mount *mp, struct list_head *item);
+	void (*add_item)(struct xfs_mount *mp, const struct list_head *item);
 	unsigned int		max_items;
 };
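Passing the mount into ->cancel_item lets a handler undo whatever accounting ->add_item did when a chain is torn down. A sketch of the counterpart and of wiring both hooks into an op type table follows; the handler bodies are assumptions, and other table members are elided:

STATIC void
xfs_extent_free_cancel_item(
	struct xfs_mount	*mp,
	struct list_head	*item)
{
	struct xfs_extent_free_item	*xefi;
	struct xfs_perag		*pag;

	/* Drop the per-AG intent count taken in ->add_item. */
	xefi = container_of(item, struct xfs_extent_free_item, xefi_list);
	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, xefi->xefi_startblock));
	xfs_perag_intent_rele(pag);
	xfs_perag_put(pag);
	/* then free xefi as the existing handler already does */
}

const struct xfs_defer_op_type xfs_extent_free_defer_type = {
	.max_items	= XFS_EFI_MAX_FAST_EXTENTS,
	.add_item	= xfs_extent_free_add_item,
	.cancel_item	= xfs_extent_free_cancel_item,
	/* .create_intent, .finish_item, etc. unchanged */
};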