author		Darrick J. Wong <djwong@kernel.org>	2021-09-01 11:25:37 -0700
committer	Darrick J. Wong <djwong@kernel.org>	2021-09-17 18:55:29 -0700
commit		47a4c4899791dd29837d1149dffb70ad18f986fa (patch)
tree		7c249c521749662ac2ce4da1bcdf408017decd6e /fs/xfs/xfs_refcount_item.c
parent		803efaa936528ec6a35e4e521cddeb440236c9d0 (diff)
xfs: allow queued AG intents to drain before scrubbing (scrub-drain-intents_2021-09-17)
Currently, online scrub isn't sufficiently careful about quiescing
allocation groups before checking them. While scrub does take the AG
header locks, it doesn't serialize against chains of AG update intents
that are being processed concurrently. If there's a collision,
cross-referencing between data structures (e.g. rmapbt and refcountbt)
can yield false corruption events; if repair is running, this results
in incorrect repairs.

Fix this by adding a count of active intents to the perag structure and
making scrub wait until there aren't any before continuing. This is a
little stupid since transactions can queue intents without taking
buffer locks, but we'll also wait for those transactions.

XXX: Should we instead have a per-AG rwsem that is taken as soon as the
AG[IF] headers are locked and stays held until the transaction commits
or moves on to the next AG? Or would we rather have a six lock so that
intents can take an IX lock and don't have to upgrade to X until we
actually want to make changes to that AG? Is that even how those work?

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
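[Editor's sketch] The drain mechanism described above amounts to
per-AG reference counting: every deferred intent bumps a counter when
it is queued and drops it when it finishes or is cancelled, and scrub
sleeps until the counter reaches zero. The xfs_fs_bump_intents /
xfs_fs_drop_intents names and their (mount, realtime, startblock)
signature come from the call sites in the diff below; the pag_intents
counter, the waitqueue, and the realtime handling are illustrative
assumptions, not the patch's actual implementation:

/*
 * Sketch only: assumed per-AG fields and helper bodies.  The function
 * names and arguments match the call sites in this patch; everything
 * else is hypothetical.
 */
void
xfs_fs_bump_intents(
	struct xfs_mount	*mp,
	bool			isrt,
	xfs_fsblock_t		fsb)
{
	struct xfs_perag	*pag;

	/* Realtime extents would need separate accounting; ignored here. */
	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsb));
	atomic_inc(&pag->pag_intents);
	xfs_perag_put(pag);
}

void
xfs_fs_drop_intents(
	struct xfs_mount	*mp,
	bool			isrt,
	xfs_fsblock_t		fsb)
{
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsb));
	/* Wake any scrubber waiting for this AG to drain. */
	if (atomic_dec_and_test(&pag->pag_intents))
		wake_up(&pag->pag_intents_wq);
	xfs_perag_put(pag);
}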
Diffstat (limited to 'fs/xfs/xfs_refcount_item.c')
-rw-r--r--	fs/xfs/xfs_refcount_item.c	26
1 files changed, 26 insertions, 0 deletions
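[Editor's sketch] On the scrub side, the checker would take the AG
header locks and then wait for the in-flight intent count to drain
before cross-referencing. A hypothetical sketch under the same assumed
fields; neither the helper name nor the wait primitive appears in this
patch:

/* Hypothetical: sleep until all queued intents for this AG finish. */
static int
xchk_perag_drain_intents(
	struct xfs_perag	*pag)
{
	/* Returns 0 once drained, or -ERESTARTSYS if fatally signalled. */
	return wait_event_killable(pag->pag_intents_wq,
			atomic_read(&pag->pag_intents) == 0);
}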
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 006bd1086e50..f9d67cdf9c5d 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -364,9 +364,12 @@ xfs_refcount_update_finish_item(
 	struct xfs_btree_cur		**state)
 {
 	struct xfs_refcount_intent	*ri;
+	struct xfs_mount		*mp = tp->t_mountp;
+	xfs_fsblock_t			orig_startblock;
 	int				error;
 
 	ri = container_of(item, struct xfs_refcount_intent, ri_list);
+	orig_startblock = ri->ri_startblock;
 	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri,
 			state);
 
@@ -376,6 +379,13 @@ xfs_refcount_update_finish_item(
 			ri->ri_type == XFS_REFCOUNT_DECREASE);
 		return -EAGAIN;
 	}
+
+	/*
+	 * Drop our intent counter reference now that we've finished all the
+	 * work or failed.  Be careful to use the original startblock because
+	 * the finishing functions can update the intent state.
+	 */
+	xfs_fs_drop_intents(mp, ri->ri_realtime, orig_startblock);
 	kmem_free(ri);
 	return error;
 }
@@ -391,14 +401,29 @@ xfs_refcount_update_abort_intent(
 /* Cancel a deferred refcount update. */
 STATIC void
 xfs_refcount_update_cancel_item(
+	struct xfs_mount		*mp,
 	struct list_head		*item)
 {
 	struct xfs_refcount_intent	*ri;
 
 	ri = container_of(item, struct xfs_refcount_intent, ri_list);
+	xfs_fs_drop_intents(mp, ri->ri_realtime, ri->ri_startblock);
 	kmem_free(ri);
 }
 
+/* Add a deferred refcount update. */
+STATIC void
+xfs_refcount_update_add_item(
+	struct xfs_mount		*mp,
+	const struct list_head		*item)
+{
+	const struct xfs_refcount_intent	*ri;
+
+	/* Grab an intent counter reference for this intent item. */
+	ri = container_of(item, struct xfs_refcount_intent, ri_list);
+	xfs_fs_bump_intents(mp, ri->ri_realtime, ri->ri_startblock);
+}
+
 const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
 	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
 	.create_intent	= xfs_refcount_update_create_intent,
@@ -407,6 +432,7 @@ const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
 	.finish_item	= xfs_refcount_update_finish_item,
 	.finish_cleanup	= xfs_refcount_finish_one_cleanup,
 	.cancel_item	= xfs_refcount_update_cancel_item,
+	.add_item	= xfs_refcount_update_add_item,
 };
 
 /* Is this recovered CUI ok? */