summaryrefslogtreecommitdiff
path: root/fs/xfs/xfs_mount.c
diff options
context:
space:
mode:
authorDarrick J. Wong <djwong@kernel.org>2022-07-14 11:05:36 -0700
committerDarrick J. Wong <djwong@kernel.org>2022-10-14 14:16:26 -0700
commitd90e72e5a0314e4fc4162e06f1f3cbccada99df6 (patch)
treec873d37aeee2201f7c96737d72a5d9b20d9ec1b1 /fs/xfs/xfs_mount.c
parent68a312deb494321b85c6a468ac2fb7db7d7e8b93 (diff)
xfs: allow queued AG intents to drain before scrubbing
When a writer thread executes a chain of log intent items, the AG header buffer locks will cycle during a transaction roll to get from one intent item to the next in a chain. Although scrub takes all AG header buffer locks, this isn't sufficient to guard against scrub checking an AG while that writer thread is in the middle of finishing a chain because there's no higher level locking primitive guarding allocation groups. When there's a collision, cross-referencing between data structures (e.g. rmapbt and refcountbt) yields false corruption events; if repair is running, this results in incorrect repairs, which is catastrophic. Fix this by adding to the perag structure the count of active intents and make scrub wait until it has both AG header buffer locks and the intent counter reaches zero. This is a little stupid since transactions can queue intents without taking buffer locks, but it's not the end of the world for scrub to wait (in KILLABLE state) for those transactions. In the next patch we'll improve on this facility, but this patch provides the basic functionality. Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Diffstat (limited to 'fs/xfs/xfs_mount.c')
-rw-r--r--fs/xfs/xfs_mount.c76
1 file changed, 76 insertions, 0 deletions
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index f10c88cee116..6c84c6547a0b 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1385,3 +1385,79 @@ xfs_mod_delalloc(
percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
XFS_DELALLOC_BATCH);
}
+
+#ifdef CONFIG_XFS_DRAIN_INTENTS
+/*
+ * Increase the pending intent count.  Called for each queued log intent
+ * item so that scrub can observe that work is in flight for this drain.
+ */
+static inline void xfs_drain_bump(struct xfs_drain *dr)
+{
+ atomic_inc(&dr->dr_count);
+}
+
+/*
+ * Decrease the pending intent count, and wake any waiters, if appropriate.
+ *
+ * wq_has_sleeper() lets us skip the waitqueue lock in wake_up() when
+ * nobody is sleeping.  NOTE(review): wq_has_sleeper() requires a full
+ * barrier between the condition update and the sleeper check; the
+ * atomic_dec_and_test() is presumed to supply it — confirm against
+ * Documentation/atomic_t.txt.
+ */
+static inline void xfs_drain_drop(struct xfs_drain *dr)
+{
+ if (atomic_dec_and_test(&dr->dr_count) &&
+ wq_has_sleeper(&dr->dr_waiters))
+ wake_up(&dr->dr_waiters);
+}
+
+/*
+ * Wait for the pending intent count for a drain to hit zero.
+ *
+ * Callers must not hold any locks that would prevent intents from being
+ * finished.
+ *
+ * Sleeps in killable state: returns 0 once dr_count reaches zero, or
+ * -ERESTARTSYS if a fatal signal arrives first (per wait_event_killable).
+ */
+static inline int xfs_drain_wait(struct xfs_drain *dr)
+{
+ return wait_event_killable(dr->dr_waiters, !xfs_drain_busy(dr));
+}
+
+/*
+ * Add an item to the pending count.
+ *
+ * @fsb identifies which AG is being modified; that AG's perag intent
+ * count is bumped so that scrub will wait for the intent chain to
+ * finish before examining the AG.
+ */
+void
+xfs_fs_bump_intents(
+ struct xfs_mount *mp,
+ xfs_fsblock_t fsb)
+{
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsb));
+ trace_xfs_perag_bump_intents(pag, __return_address);
+ xfs_drain_bump(&pag->pag_intents);
+ xfs_perag_put(pag);
+}
+
+/*
+ * Remove an item from the pending count.
+ *
+ * Counterpart to xfs_fs_bump_intents() for the same @fsb; any scrub
+ * threads sleeping on this AG's drain are woken from within
+ * xfs_drain_drop() once the count hits zero.
+ */
+void
+xfs_fs_drop_intents(
+ struct xfs_mount *mp,
+ xfs_fsblock_t fsb)
+{
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsb));
+ trace_xfs_perag_drop_intents(pag, __return_address);
+ xfs_drain_drop(&pag->pag_intents);
+ xfs_perag_put(pag);
+}
+
+/*
+ * Wait for the pending intent count for AG metadata to hit zero.
+ * Callers must not hold any AG header buffers.
+ *
+ * Returns 0 when the AG's intent count reaches zero, or -ERESTARTSYS
+ * if the killable wait is interrupted by a fatal signal.
+ */
+int
+xfs_ag_drain_intents(
+ struct xfs_perag *pag)
+{
+ trace_xfs_perag_wait_intents(pag, __return_address);
+ return xfs_drain_wait(&pag->pag_intents);
+}
+
+/*
+ * Might someone else be processing intents for this AG?
+ *
+ * NOTE(review): this reads the counter without any lock, so the answer
+ * can go stale immediately — presumably callers hold the AG header
+ * buffer locks or tolerate staleness; confirm at the call sites.
+ */
+bool
+xfs_ag_intents_busy(
+ struct xfs_perag *pag)
+{
+ return xfs_drain_busy(&pag->pag_intents);
+}
+
+#endif /* CONFIG_XFS_DRAIN_INTENTS */