author     Darrick J. Wong <djwong@kernel.org>   2021-01-05 17:43:25 -0800
committer  Darrick J. Wong <djwong@kernel.org>   2021-08-25 22:25:51 -0700
commit     2cf0f9ecb562c7420f0474d6492e23493cbaf0d9 (patch)
tree       cc22dc91ab05f07f03cffe46e649e8ee21cf175e /fs/xfs/scrub
parent     b55e262b0a39e25c1b9c665dd8ef3df9107ae788 (diff)
xfs: remove the for_each_xbitmap_ helpers
Remove the for_each_xbitmap_ macros in favor of proper iterator functions.
We'll soon be switching this data structure over to an interval tree
implementation, which means that we can't allow callers to modify the
bitmap during iteration without telling us.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
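For illustration, here is a minimal stand-alone model of the callback-style iteration this patch introduces. The names (xbitmap, xbitmap_walk, xbitmap_walk_fn) mirror the patch, but the bodies below are userspace approximations written for this note, not the kernel code.

/*
 * Stand-alone sketch of the extent-walk pattern that replaces
 * for_each_xbitmap_extent().  Re-implemented for illustration; only the
 * names follow the patch.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct xbitmap_range {
	uint64_t		start;
	uint64_t		len;
};

/* Model bitmap: a fixed array of set-bit runs instead of a linked list. */
struct xbitmap {
	struct xbitmap_range	ranges[8];
	unsigned int		nr;
};

typedef int (*xbitmap_walk_fn)(uint64_t start, uint64_t len, void *priv);

/* Call fn for every run of set bits; stop at the first nonzero return. */
static int
xbitmap_walk(
	struct xbitmap		*bitmap,
	xbitmap_walk_fn		fn,
	void			*priv)
{
	unsigned int		i;
	int			error = 0;

	for (i = 0; i < bitmap->nr; i++) {
		error = fn(bitmap->ranges[i].start, bitmap->ranges[i].len,
				priv);
		if (error)
			break;
	}
	return error;
}

/* Example callback: print each extent, stop after seeing two of them. */
static int
print_extent(
	uint64_t		start,
	uint64_t		len,
	void			*priv)
{
	unsigned int		*seen = priv;

	printf("extent [%llu, %llu)\n", (unsigned long long)start,
			(unsigned long long)(start + len));
	if (++(*seen) == 2)
		return -ECANCELED;	/* stop the walk, not a real error */
	return 0;
}

int
main(void)
{
	struct xbitmap		bitmap = {
		.ranges	= { { 10, 4 }, { 32, 8 }, { 96, 2 } },
		.nr	= 3,
	};
	unsigned int		seen = 0;
	int			error;

	error = xbitmap_walk(&bitmap, print_extent, &seen);
	if (error == -ECANCELED)
		error = 0;	/* callers treat -ECANCELED as "done early" */
	return error;
}

In the patch itself the walk is driven off the bitmap's internal list (for_each_xbitmap_extent becomes private to bitmap.c), and the -ECANCELED convention is what xrep_agfl_fill uses to stop filling once the AGFL is full.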
Diffstat (limited to 'fs/xfs/scrub')
-rw-r--r--  fs/xfs/scrub/agheader_repair.c  74
-rw-r--r--  fs/xfs/scrub/bitmap.c           59
-rw-r--r--  fs/xfs/scrub/bitmap.h           22
-rw-r--r--  fs/xfs/scrub/repair.c           56
4 files changed, 144 insertions(+), 67 deletions(-)
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 83ef97aa1cab..58d9186d5cde 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -562,6 +562,40 @@ xrep_agfl_update_agf(
XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
}
+struct xrep_agfl_fill {
+ struct xbitmap used_extents;
+ struct xfs_scrub *sc;
+ __be32 *agfl_bno;
+ xfs_agblock_t flcount;
+ unsigned int fl_off;
+};
+
+/* Fill the AGFL with whatever blocks are in this extent. */
+static int
+xrep_agfl_fill(
+ uint64_t start,
+ uint64_t len,
+ void *priv)
+{
+ struct xrep_agfl_fill *af = priv;
+ struct xfs_scrub *sc = af->sc;
+ xfs_fsblock_t fsbno = start;
+
+ while (fsbno < start + len && af->fl_off < af->flcount)
+ af->agfl_bno[af->fl_off++] =
+ cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++));
+
+ trace_xrep_agfl_insert(sc->mp, sc->sa.pag->pag_agno,
+ XFS_FSB_TO_AGBNO(sc->mp, start), len);
+
+ xbitmap_set(&af->used_extents, start, fsbno - 1);
+
+ if (af->fl_off == af->flcount)
+ return -ECANCELED;
+
+ return 0;
+}
+
/* Write out a totally new AGFL. */
STATIC void
xrep_agfl_init_header(
@@ -570,13 +604,12 @@ xrep_agfl_init_header(
struct xbitmap *agfl_extents,
xfs_agblock_t flcount)
{
+ struct xrep_agfl_fill af = {
+ .sc = sc,
+ .flcount = flcount,
+ };
struct xfs_mount *mp = sc->mp;
- __be32 *agfl_bno;
- struct xbitmap_range *br;
- struct xbitmap_range *n;
struct xfs_agfl *agfl;
- xfs_agblock_t agbno;
- unsigned int fl_off;
ASSERT(flcount <= xfs_agfl_size(mp));
@@ -595,36 +628,15 @@ xrep_agfl_init_header(
* blocks than fit in the AGFL, they will be freed in a subsequent
* step.
*/
- fl_off = 0;
- agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
- for_each_xbitmap_extent(br, n, agfl_extents) {
- agbno = XFS_FSB_TO_AGBNO(mp, br->start);
-
- trace_xrep_agfl_insert(mp, sc->sa.pag->pag_agno, agbno,
- br->len);
-
- while (br->len > 0 && fl_off < flcount) {
- agfl_bno[fl_off] = cpu_to_be32(agbno);
- fl_off++;
- agbno++;
-
- /*
- * We've now used br->start by putting it in the AGFL,
- * so bump br so that we don't reap the block later.
- */
- br->start++;
- br->len--;
- }
-
- if (br->len)
- break;
- list_del(&br->list);
- kmem_free(br);
- }
+ xbitmap_init(&af.used_extents);
+ af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp),
+ xbitmap_walk(agfl_extents, xrep_agfl_fill, &af);
+ xbitmap_disunion(agfl_extents, &af.used_extents);
/* Write new AGFL to disk. */
xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
+ xbitmap_destroy(&af.used_extents);
}
/* Repair the AGFL. */
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 813b5f219113..e9fb101f339d 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -12,6 +12,9 @@
#include "xfs_btree.h"
#include "scrub/bitmap.h"
+#define for_each_xbitmap_extent(bex, n, bitmap) \
+ list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
+
/*
* Set a range of this bitmap. Caller must ensure the range is not set.
*
@@ -312,3 +315,59 @@ xbitmap_hweight(
return ret;
}
+
+/* Call a function for every run of set bits in this bitmap. */
+int
+xbitmap_walk(
+ struct xbitmap *bitmap,
+ xbitmap_walk_fn fn,
+ void *priv)
+{
+ struct xbitmap_range *bex, *n;
+ int error = 0;
+
+ for_each_xbitmap_extent(bex, n, bitmap) {
+ error = fn(bex->start, bex->len, priv);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+struct xbitmap_walk_bits {
+ xbitmap_walk_bits_fn fn;
+ void *priv;
+};
+
+/* Walk all the bits in a run. */
+static int
+xbitmap_walk_bits_in_run(
+ uint64_t start,
+ uint64_t len,
+ void *priv)
+{
+ struct xbitmap_walk_bits *wb = priv;
+ uint64_t i;
+ int error = 0;
+
+ for (i = start; i < start + len; i++) {
+ error = wb->fn(i, wb->priv);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+/* Call a function for every set bit in this bitmap. */
+int
+xbitmap_walk_bits(
+ struct xbitmap *bitmap,
+ xbitmap_walk_bits_fn fn,
+ void *priv)
+{
+ struct xbitmap_walk_bits wb = {.fn = fn, .priv = priv};
+
+ return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb);
+}
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index 900646b72de1..53601d281ffb 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -19,13 +19,6 @@ struct xbitmap {
void xbitmap_init(struct xbitmap *bitmap);
void xbitmap_destroy(struct xbitmap *bitmap);
-#define for_each_xbitmap_extent(bex, n, bitmap) \
- list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
-
-#define for_each_xbitmap_block(b, bex, n, bitmap) \
- list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
- for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
-
int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
int xbitmap_set_btcur_path(struct xbitmap *bitmap,
@@ -34,4 +27,19 @@ int xbitmap_set_btblocks(struct xbitmap *bitmap,
struct xfs_btree_cur *cur);
uint64_t xbitmap_hweight(struct xbitmap *bitmap);
+/*
+ * Return codes for the bitmap iterator functions are 0 to continue iterating,
+ * and non-zero to stop iterating. Any non-zero value will be passed up to the
+ * iteration caller. The special value -ECANCELED can be used to stop
+ * iteration, because neither bitmap iterator ever generates that error code on
+ * its own. Callers must not modify the bitmap while walking it.
+ */
+typedef int (*xbitmap_walk_fn)(uint64_t start, uint64_t len, void *priv);
+int xbitmap_walk(struct xbitmap *bitmap, xbitmap_walk_fn fn,
+ void *priv);
+
+typedef int (*xbitmap_walk_bits_fn)(uint64_t bit, void *priv);
+int xbitmap_walk_bits(struct xbitmap *bitmap, xbitmap_walk_bits_fn fn,
+ void *priv);
+
#endif /* __XFS_SCRUB_BITMAP_H__ */
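As a companion to the return-code comment added to bitmap.h above, the following stand-alone sketch shows a per-bit walker layered on a per-extent run, in the spirit of xbitmap_walk_bits() and its walk-bits-in-run helper. Names follow the patch but the code is illustrative only.

/*
 * Simplified analogue of xbitmap_walk_bits_in_run(): expand one run of set
 * bits into one callback per bit, propagating the first nonzero return.
 * Illustrative only; not the kernel implementation.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef int (*xbitmap_walk_bits_fn)(uint64_t bit, void *priv);

/* Walk every bit in the run [start, start + len). */
static int
walk_bits_in_run(
	uint64_t		start,
	uint64_t		len,
	xbitmap_walk_bits_fn	fn,
	void			*priv)
{
	uint64_t		i;
	int			error = 0;

	for (i = start; i < start + len; i++) {
		error = fn(i, priv);
		if (error)
			break;
	}
	return error;
}

/* Per-bit callback: stop the walk as soon as a target block is found. */
static int
find_block(
	uint64_t		bit,
	void			*priv)
{
	uint64_t		*target = priv;

	if (bit == *target)
		return -ECANCELED;	/* found it; cancel the iteration */
	return 0;
}

int
main(void)
{
	uint64_t		target = 37;
	int			error;

	/* Search the run [32, 48) for block 37. */
	error = walk_bits_in_run(32, 16, find_block, &target);
	printf("%s\n", error == -ECANCELED ? "found" : "not found");
	return 0;
}

In the patch, xrep_reap_extents() switches to this per-bit interface so that xrep_reap_block() is invoked once per block without callers open-coding for_each_xbitmap_block.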
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index a40f4851bd89..545fb47e771d 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -515,15 +515,21 @@ xrep_reap_invalidate_block(
xfs_trans_binval(sc->tp, bp);
}
+struct xrep_reap_state {
+ struct xfs_scrub *sc;
+ const struct xfs_owner_info *oinfo;
+ enum xfs_ag_resv_type resv;
+ unsigned int deferred;
+};
+
/* Dispose of a single block. */
STATIC int
xrep_reap_block(
- struct xfs_scrub *sc,
- xfs_fsblock_t fsbno,
- const struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type resv,
- unsigned int *deferred)
+ uint64_t fsbno,
+ void *priv)
{
+ struct xrep_reap_state *rs = priv;
+ struct xfs_scrub *sc = rs->sc;
struct xfs_btree_cur *cur;
xfs_agnumber_t agno;
xfs_agblock_t agbno;
@@ -543,7 +549,8 @@ xrep_reap_block(
cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, sc->sa.pag);
/* Can we find any other rmappings? */
- error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
+ error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo,
+ &has_other_rmap);
xfs_btree_del_cursor(cur, error);
if (error)
return error;
@@ -563,8 +570,8 @@ xrep_reap_block(
*/
if (has_other_rmap) {
error = xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
- 1, oinfo);
- } else if (resv == XFS_AG_RESV_AGFL) {
+ 1, rs->oinfo);
+ } else if (rs->resv == XFS_AG_RESV_AGFL) {
xrep_reap_invalidate_block(sc, fsbno);
error = xrep_put_freelist(sc, agbno);
} else {
@@ -576,14 +583,14 @@ xrep_reap_block(
* reservation.
*/
xrep_reap_invalidate_block(sc, fsbno);
- __xfs_bmap_add_free(sc->tp, fsbno, 1, oinfo, false);
- (*deferred)++;
- need_roll = *deferred > 100;
+ __xfs_bmap_add_free(sc->tp, fsbno, 1, rs->oinfo, false);
+ rs->deferred++;
+ need_roll = rs->deferred > 100;
}
if (error || !need_roll)
return error;
- *deferred = 0;
+ rs->deferred = 0;
return xrep_roll_ag_trans(sc);
}
@@ -595,26 +602,17 @@ xrep_reap_extents(
const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type)
{
- struct xbitmap_range *bmr;
- struct xbitmap_range *n;
- xfs_fsblock_t fsbno;
- unsigned int deferred = 0;
- int error = 0;
+ struct xrep_reap_state rs = {
+ .sc = sc,
+ .oinfo = oinfo,
+ .resv = type,
+ };
+ int error;
ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
- for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
- ASSERT(sc->ip != NULL ||
- XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.pag->pag_agno);
- trace_xrep_dispose_btree_extent(sc->mp,
- XFS_FSB_TO_AGNO(sc->mp, fsbno),
- XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
-
- error = xrep_reap_block(sc, fsbno, oinfo, type, &deferred);
- if (error)
- break;
- }
- if (error || deferred == 0)
+ error = xbitmap_walk_bits(bitmap, xrep_reap_block, &rs);
+ if (error || rs.deferred == 0)
return error;
return xrep_roll_ag_trans(sc);