author	Darrick J. Wong <djwong@kernel.org>	2021-09-01 11:25:37 -0700
committer	Darrick J. Wong <djwong@kernel.org>	2021-09-17 18:55:29 -0700
commit	47a4c4899791dd29837d1149dffb70ad18f986fa
tree	7c249c521749662ac2ce4da1bcdf408017decd6e /fs/xfs/xfs_mount.h
parent	803efaa936528ec6a35e4e521cddeb440236c9d0
xfs: allow queued AG intents to drain before scrubbing (scrub-drain-intents_2021-09-17)
Currently, online scrub isn't sufficiently careful about quiescing allocation groups before checking them. While scrub does take the AG header locks, it doesn't serialize against chains of AG update intents that are being processed concurrently. If there's a collision, cross-referencing between data structures (e.g. rmapbt and refcountbt) can yield false corruption events; if repair is running, this results in incorrect repairs.

Fix this by adding a count of active intents to the perag structure, and make scrub wait until there aren't any before continuing. This is a little stupid, since transactions can queue intents without taking buffer locks, so we'll end up waiting for those transactions too.

XXX: Should we instead have a per-AG rwsem that gets taken as soon as the AG[IF] are locked and stays held until the transaction commits or moves on to the next AG? Would we rather have a six lock so that intents can take an ix lock, and not have to upgrade to x until we actually want to make changes to that AG? Is that how those even work?

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
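As a sketch of the counter/waitqueue pattern described above (not code from this patch), the bump and drop sides would pair roughly like this; the struct xfs_perag field names pag_intents and pag_intents_wq are assumed here purely for illustration:

	/*
	 * Illustrative sketch only: count a live intent when it is queued,
	 * and wake any waiting scrubbers when the last one completes.
	 * Uses <linux/atomic.h> and <linux/wait.h> primitives.
	 */
	static inline void
	xfs_perag_bump_intents(
		struct xfs_perag	*pag)
	{
		atomic_inc(&pag->pag_intents);	/* hypothetical field */
	}

	static inline void
	xfs_perag_drop_intents(
		struct xfs_perag	*pag)
	{
		/* Wake waiters only when the last live intent finishes. */
		if (atomic_dec_and_test(&pag->pag_intents))
			wake_up(&pag->pag_intents_wq);
	}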
Diffstat (limited to 'fs/xfs/xfs_mount.h')
-rw-r--r--	fs/xfs/xfs_mount.h	32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a161aabd8438..62b8fe194219 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -263,6 +263,17 @@ typedef struct xfs_mount {
/* online nlink check stuff */
struct xfs_hook_chain m_nlink_mod_hooks;
#endif
+
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB) && IS_ENABLED(CONFIG_XFS_RT)
+ /*
+ * Counter of live intents. We track the number of log intent items
+ * that have been queued (but not yet processed) so that scrub can
+ * detect the presence of other threads that are in the middle of
+ * processing a chain of deferred items.
+ */
+ atomic_t m_rt_intents;
+ wait_queue_head_t m_rt_intents_wq;
+#endif
} xfs_mount_t;
/* Parameters for xfs_bumplink/droplink hook. */
@@ -580,4 +591,25 @@ int xfs_hook_add(struct xfs_hook_chain *chain, struct notifier_block *hook,
void xfs_hook_del(struct xfs_hook_chain *chain, struct notifier_block *hook);
int xfs_hook_call(struct xfs_hook_chain *chain, unsigned long val, void *priv);
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+# if IS_ENABLED(CONFIG_XFS_RT)
+int xfs_rt_wait_intents(struct xfs_mount *mp);
+# else
+# define xfs_rt_wait_intents(mp) (-ENOSYS)
+# endif /* CONFIG_XFS_RT */
+
+void xfs_fs_bump_intents(struct xfs_mount *mp, bool isrt, xfs_fsblock_t fsb);
+void xfs_fs_drop_intents(struct xfs_mount *mp, bool isrt, xfs_fsblock_t fsb);
+int xfs_perag_wait_intents(struct xfs_perag *pag);
+
+#else
+static inline void
+xfs_fs_bump_intents(struct xfs_mount *mp, bool isrt, xfs_fsblock_t fsb) { }
+static inline void
+xfs_fs_drop_intents(struct xfs_mount *mp, bool isrt, xfs_fsblock_t fsb) { }
+
+# define xfs_perag_wait_intents(pag) (-ENOSYS)
+# define xfs_rt_wait_intents(mp) (-ENOSYS)
+#endif /* CONFIG_XFS_ONLINE_SCRUB */
+
#endif /* __XFS_MOUNT_H__ */
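For reference, the wait side declared above might look like the sketch below. wait_event_killable(), xfs_perag_get()/xfs_perag_put(), and XFS_FSB_TO_AGNO() are real kernel/XFS interfaces, but the function bodies and the per-AG counter field are assumptions, not this patch's actual implementation:

	/* Sketch: sleep until every queued realtime intent has finished. */
	int
	xfs_rt_wait_intents(
		struct xfs_mount	*mp)
	{
		/* Returns 0, or -ERESTARTSYS if a fatal signal arrives. */
		return wait_event_killable(mp->m_rt_intents_wq,
				atomic_read(&mp->m_rt_intents) == 0);
	}

	/* Sketch: route the count bump to the rt volume or the owning AG. */
	void
	xfs_fs_bump_intents(
		struct xfs_mount	*mp,
		bool			isrt,
		xfs_fsblock_t		fsb)
	{
		struct xfs_perag	*pag;

		if (isrt) {
			atomic_inc(&mp->m_rt_intents);
			return;
		}

		pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsb));
		atomic_inc(&pag->pag_intents);	/* hypothetical field */
		xfs_perag_put(pag);
	}

Keeping the counter in the perag (rather than only at the mount) lets scrub drain one AG at a time instead of quiescing the whole filesystem, which matches the per-AG scope of the checks being serialized.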