path: root/fs/xfs/xfs_rmap_item.c
author		Darrick J. Wong <djwong@kernel.org>	2021-09-01 11:25:37 -0700
committer	Darrick J. Wong <djwong@kernel.org>	2021-09-17 18:55:29 -0700
commit		47a4c4899791dd29837d1149dffb70ad18f986fa (patch)
tree		7c249c521749662ac2ce4da1bcdf408017decd6e /fs/xfs/xfs_rmap_item.c
parent		803efaa936528ec6a35e4e521cddeb440236c9d0 (diff)
xfs: allow queued AG intents to drain before scrubbing (scrub-drain-intents_2021-09-17)
Currently, online scrub isn't sufficiently careful about quiescing allocation groups before checking them. While scrub does take the AG header locks, it doesn't serialize against chains of AG update intents that are being processed concurrently. If there's a collision, cross-referencing between data structures (e.g. rmapbt and refcountbt) can yield false corruption reports; if repair is running, this results in incorrect repairs.

Fix this by adding a count of active intents to the perag structure and making scrub wait until there aren't any before continuing. This is a little stupid, since transactions can queue intents without taking buffer locks, but we'll also wait for those transactions.

XXX: should we instead have a per-AG rwsem that gets taken as soon as the AG[IF] are locked and stays held until the transaction commits or moves on to the next AG? Or would we rather have a six lock, so that intents can take an IX lock and not have to upgrade to X until we actually want to make changes to that AG? Is that even how those work?

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
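A minimal sketch of the counting scheme described above, using hypothetical structure and function names. The patch's real helpers, xfs_fs_bump_intents() and xfs_fs_drop_intents(), appear in the diff below and take the mount, a realtime flag, and a start block rather than a per-AG pointer, resolving the target AG internally; this is only an illustration of the bump/drop/drain pattern, not the series' implementation.

/*
 * Hypothetical sketch: a per-AG counter of queued intent items plus a
 * waitqueue that scrub can sleep on until the count drains to zero.
 * All names here are invented for illustration.
 */
#include <linux/atomic.h>
#include <linux/wait.h>

struct xfs_ag_intents {
	atomic_t		nr_intents;	/* intent items queued against this AG */
	wait_queue_head_t	wait;		/* scrub sleeps here until the count drains */
};

/* Bump the count when a deferred work item is queued (.add_item). */
static inline void
xfs_ag_intents_bump(struct xfs_ag_intents *ai)
{
	atomic_inc(&ai->nr_intents);
}

/* Drop the count when an item is finished or cancelled (.finish_item / .cancel_item). */
static inline void
xfs_ag_intents_drop(struct xfs_ag_intents *ai)
{
	if (atomic_dec_and_test(&ai->nr_intents))
		wake_up(&ai->wait);
}

/* Scrub side: sleep until no intent items are queued against this AG. */
static inline int
xfs_ag_intents_drain(struct xfs_ag_intents *ai)
{
	return wait_event_killable(ai->wait,
			atomic_read(&ai->nr_intents) == 0);
}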
Diffstat (limited to 'fs/xfs/xfs_rmap_item.c')
-rw-r--r--	fs/xfs/xfs_rmap_item.c	25
1 file changed, 25 insertions, 0 deletions
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 6f115ca55746..5a3953088b5d 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -411,11 +411,19 @@ xfs_rmap_update_finish_item(
struct xfs_btree_cur **state)
{
struct xfs_rmap_intent *ri;
+ struct xfs_mount *mp = tp->t_mountp;
int error;
ri = container_of(item, struct xfs_rmap_intent, ri_list);
error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done), ri,
state);
+
+ /*
+ * Drop our intent counter reference now that we've finished all the
+ * work or failed. The finishing function doesn't update the intent
+ * state, so we need not preserve the original startblock.
+ */
+ xfs_fs_drop_intents(mp, ri->ri_realtime, ri->ri_bmap.br_startblock);
kmem_free(ri);
return error;
}
@@ -431,14 +439,30 @@ xfs_rmap_update_abort_intent(
/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
+ struct xfs_mount *mp,
struct list_head *item)
{
struct xfs_rmap_intent *ri;
ri = container_of(item, struct xfs_rmap_intent, ri_list);
+ xfs_fs_drop_intents(mp, ri->ri_realtime, ri->ri_bmap.br_startblock);
kmem_free(ri);
}
+/* Add a deferred rmap update. */
+STATIC void
+xfs_rmap_update_add_item(
+ struct xfs_mount *mp,
+ const struct list_head *item)
+{
+ const struct xfs_rmap_intent *ri;
+
+ ri = container_of(item, struct xfs_rmap_intent, ri_list);
+
+ /* Grab an intent counter reference for this intent item. */
+ xfs_fs_bump_intents(mp, ri->ri_realtime, ri->ri_bmap.br_startblock);
+}
+
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.max_items = XFS_RUI_MAX_FAST_EXTENTS,
.create_intent = xfs_rmap_update_create_intent,
@@ -447,6 +471,7 @@ const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.finish_item = xfs_rmap_update_finish_item,
.finish_cleanup = xfs_rmap_finish_one_cleanup,
.cancel_item = xfs_rmap_update_cancel_item,
+ .add_item = xfs_rmap_update_add_item,
};
/* Is this recovered RUI ok? */
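For completeness, a hypothetical sketch of the scrub-side counterpart the commit message describes, reusing the xfs_ag_intents sketch above. The function name and retry logic are assumptions, not part of this patch; as the XXX note points out, new intents can be queued again the instant the count reaches zero, so this only narrows the race window.

/*
 * Hypothetical scrub-side caller: quiesce an AG by waiting for every
 * queued intent item to be finished or cancelled before cross-referencing
 * its btrees.  Not part of this patch.
 */
static int
xchk_ag_quiesce_intents(struct xfs_ag_intents *ai)
{
	int	error;

	do {
		/* Sleep until all currently queued intents are retired. */
		error = xfs_ag_intents_drain(ai);
		if (error)
			return error;	/* interrupted by a fatal signal */

		/*
		 * A real caller would re-take the AG header locks here and
		 * recheck; new intents can be queued the moment the count
		 * reaches zero, which is the race the XXX note worries about.
		 */
	} while (atomic_read(&ai->nr_intents) != 0);

	return 0;
}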