author		Darrick J. Wong <djwong@kernel.org>	2022-11-16 19:18:11 -0800
committer	Darrick J. Wong <djwong@kernel.org>	2022-11-16 19:18:11 -0800
commit		823ca26a8f0740598ccabb2652a638eec150e4e9
tree		40b87933bca9746cf24ca4c6c8a2836693025bce
parent		f0c4d9fc9cc9462659728d168387191387e903cc
parent		b255fab0f80cc65a334fcd90cd278673cddbc988
Merge tag 'scrub-fix-ag-header-handling-6.2_2022-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.2-mergeA
xfs: fix handling of AG[IF] header buffers during scrub

While reading through the online fsck code, I noticed that the setup code for AG metadata scrubs will attach the AGI, the AGF, and the AGFL buffers to the transaction. It isn't necessary to hold the AGFL buffer, since any code that wants to do anything with the AGFL will need to hold the AGF to know which parts of the AGFL are active. Therefore, we only need to hold the AGFL when scrubbing the AGFL itself.

The second bug fixed by this patchset is one that I observed while testing online repair. When a buffer is held across a transaction roll, its buffer log item will be detached if the bli was clean before the roll. If we are holding the AG headers to maintain a lock on an AG, we then need to set the buffer type on the new bli to avoid confusing the logging code later.

There's also a bug fix for uninitialized memory in the directory scanner that didn't fit anywhere else.

This patchset finishes off by teaching the AGFL repair function to look for and discard crosslinked blocks instead of putting them back on the AGFL.

v23.2: Log the buffers before rolling the transaction to keep them moving forward in the log and avoid the bli falling off.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>

* tag 'scrub-fix-ag-header-handling-6.2_2022-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: make AGFL repair function avoid crosslinked blocks
  xfs: log the AGI/AGF buffers when rolling transactions during an AG repair
  xfs: don't track the AGFL buffer in the scrub AG context
  xfs: fully initialize xfs_da_args in xchk_directory_blocks
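Pulled out of the repair.c hunk below into one place, the hold/log/roll/rejoin pattern that repair now follows looks roughly like the sketch below. The function name xrep_roll_ag_trans_sketch is invented for illustration and error handling is simplified, but the xfs_ialloc_log_agi, xfs_alloc_log_agf, xfs_trans_bhold, xfs_trans_roll, and xfs_trans_bjoin calls are the ones the patch actually uses.

STATIC int
xrep_roll_ag_trans_sketch(
	struct xfs_scrub	*sc)
{
	int			error;

	/*
	 * Dirty the AG headers before the roll so each buffer keeps its
	 * buffer log item (and hence the bli type) across the commit,
	 * and hold them so the AG stays locked.
	 */
	if (sc->sa.agi_bp) {
		xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	}
	if (sc->sa.agf_bp) {
		xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	}

	/* Roll; the buffers stay locked whether or not this succeeds. */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Rejoin the held buffers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
	return 0;
}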
 fs/xfs/scrub/agheader.c        | 47
 fs/xfs/scrub/agheader_repair.c | 79
 fs/xfs/scrub/common.c          |  8
 fs/xfs/scrub/dir.c             | 10
 fs/xfs/scrub/repair.c          | 41
 fs/xfs/scrub/scrub.h           |  1
 6 files changed, 128 insertions(+), 58 deletions(-)
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index b7b838bd4ba4..af284baa6f4c 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -609,9 +609,16 @@ out:
/* AGFL */
struct xchk_agfl_info {
- unsigned int sz_entries;
+ /* Number of AGFL entries that the AGF claims are in use. */
+ unsigned int agflcount;
+
+ /* Number of AGFL entries that we found. */
unsigned int nr_entries;
+
+ /* Buffer to hold AGFL entries for extent checking. */
xfs_agblock_t *entries;
+
+ struct xfs_buf *agfl_bp;
struct xfs_scrub *sc;
};
@@ -641,10 +648,10 @@ xchk_agfl_block(
struct xfs_scrub *sc = sai->sc;
if (xfs_verify_agbno(sc->sa.pag, agbno) &&
- sai->nr_entries < sai->sz_entries)
+ sai->nr_entries < sai->agflcount)
sai->entries[sai->nr_entries++] = agbno;
else
- xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
+ xchk_block_set_corrupt(sc, sai->agfl_bp);
xchk_agfl_block_xref(sc, agbno);
@@ -696,19 +703,26 @@ int
xchk_agfl(
struct xfs_scrub *sc)
{
- struct xchk_agfl_info sai;
+ struct xchk_agfl_info sai = {
+ .sc = sc,
+ };
struct xfs_agf *agf;
xfs_agnumber_t agno = sc->sm->sm_agno;
- unsigned int agflcount;
unsigned int i;
int error;
+ /* Lock the AGF and AGI so that nobody can touch this AG. */
error = xchk_ag_read_headers(sc, agno, &sc->sa);
if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
- goto out;
+ return error;
if (!sc->sa.agf_bp)
return -EFSCORRUPTED;
- xchk_buffer_recheck(sc, sc->sa.agfl_bp);
+
+ /* Try to read the AGFL, and verify its structure if we get it. */
+ error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &sai.agfl_bp);
+ if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
+ return error;
+ xchk_buffer_recheck(sc, sai.agfl_bp);
xchk_agfl_xref(sc);
@@ -717,24 +731,21 @@ xchk_agfl(
/* Allocate buffer to ensure uniqueness of AGFL entries. */
agf = sc->sa.agf_bp->b_addr;
- agflcount = be32_to_cpu(agf->agf_flcount);
- if (agflcount > xfs_agfl_size(sc->mp)) {
+ sai.agflcount = be32_to_cpu(agf->agf_flcount);
+ if (sai.agflcount > xfs_agfl_size(sc->mp)) {
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out;
}
- memset(&sai, 0, sizeof(sai));
- sai.sc = sc;
- sai.sz_entries = agflcount;
- sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
- KM_MAYFAIL);
+ sai.entries = kvcalloc(sai.agflcount, sizeof(xfs_agblock_t),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!sai.entries) {
error = -ENOMEM;
goto out;
}
/* Check the blocks in the AGFL. */
- error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
- sc->sa.agfl_bp, xchk_agfl_block, &sai);
+ error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr, sai.agfl_bp,
+ xchk_agfl_block, &sai);
if (error == -ECANCELED) {
error = 0;
goto out_free;
@@ -742,7 +753,7 @@ xchk_agfl(
if (error)
goto out_free;
- if (agflcount != sai.nr_entries) {
+ if (sai.agflcount != sai.nr_entries) {
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out_free;
}
@@ -758,7 +769,7 @@ xchk_agfl(
}
out_free:
- kmem_free(sai.entries);
+ kvfree(sai.entries);
out:
return error;
}
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 1b0b4e243f77..82ceb60ea5fc 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -442,12 +442,18 @@ out_revert:
/* AGFL */
struct xrep_agfl {
+ /* Bitmap of alleged AGFL blocks that we're not going to add. */
+ struct xbitmap crossed;
+
/* Bitmap of other OWN_AG metadata blocks. */
struct xbitmap agmetablocks;
/* Bitmap of free space. */
struct xbitmap *freesp;
+ /* rmapbt cursor for finding crosslinked blocks */
+ struct xfs_btree_cur *rmap_cur;
+
struct xfs_scrub *sc;
};
@@ -477,6 +483,41 @@ xrep_agfl_walk_rmap(
return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
}
+/* Strike out the blocks that are cross-linked according to the rmapbt. */
+STATIC int
+xrep_agfl_check_extent(
+ struct xrep_agfl *ra,
+ uint64_t start,
+ uint64_t len)
+{
+ xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(ra->sc->mp, start);
+ xfs_agblock_t last_agbno = agbno + len - 1;
+ int error;
+
+ ASSERT(XFS_FSB_TO_AGNO(ra->sc->mp, start) == ra->sc->sa.pag->pag_agno);
+
+ while (agbno <= last_agbno) {
+ bool other_owners;
+
+ error = xfs_rmap_has_other_keys(ra->rmap_cur, agbno, 1,
+ &XFS_RMAP_OINFO_AG, &other_owners);
+ if (error)
+ return error;
+
+ if (other_owners) {
+ error = xbitmap_set(&ra->crossed, agbno, 1);
+ if (error)
+ return error;
+ }
+
+ if (xchk_should_terminate(ra->sc, &error))
+ return error;
+ agbno++;
+ }
+
+ return 0;
+}
+
/*
* Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
* which blocks belong to the AGFL.
@@ -496,44 +537,58 @@ xrep_agfl_collect_blocks(
struct xrep_agfl ra;
struct xfs_mount *mp = sc->mp;
struct xfs_btree_cur *cur;
+ struct xbitmap_range *br, *n;
int error;
ra.sc = sc;
ra.freesp = agfl_extents;
xbitmap_init(&ra.agmetablocks);
+ xbitmap_init(&ra.crossed);
/* Find all space used by the free space btrees & rmapbt. */
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
- if (error)
- goto err;
xfs_btree_del_cursor(cur, error);
+ if (error)
+ goto out_bmp;
/* Find all blocks currently being used by the bnobt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag, XFS_BTNUM_BNO);
error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
- if (error)
- goto err;
xfs_btree_del_cursor(cur, error);
+ if (error)
+ goto out_bmp;
/* Find all blocks currently being used by the cntbt. */
cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag, XFS_BTNUM_CNT);
error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
- if (error)
- goto err;
-
xfs_btree_del_cursor(cur, error);
+ if (error)
+ goto out_bmp;
/*
* Drop the freesp meta blocks that are in use by btrees.
* The remaining blocks /should/ be AGFL blocks.
*/
error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
- xbitmap_destroy(&ra.agmetablocks);
if (error)
- return error;
+ goto out_bmp;
+
+ /* Strike out the blocks that are cross-linked. */
+ ra.rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
+ for_each_xbitmap_extent(br, n, agfl_extents) {
+ error = xrep_agfl_check_extent(&ra, br->start, br->len);
+ if (error)
+ break;
+ }
+ xfs_btree_del_cursor(ra.rmap_cur, error);
+ if (error)
+ goto out_bmp;
+ error = xbitmap_disunion(agfl_extents, &ra.crossed);
+ if (error)
+ goto out_bmp;
/*
* Calculate the new AGFL size. If we found more blocks than fit in
@@ -541,11 +596,10 @@ xrep_agfl_collect_blocks(
*/
*flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
xfs_agfl_size(mp));
- return 0;
-err:
+out_bmp:
+ xbitmap_destroy(&ra.crossed);
xbitmap_destroy(&ra.agmetablocks);
- xfs_btree_del_cursor(cur, error);
return error;
}
@@ -697,7 +751,6 @@ xrep_agfl(
* freespace overflow to the freespace btrees.
*/
sc->sa.agf_bp = agf_bp;
- sc->sa.agfl_bp = agfl_bp;
error = xrep_roll_ag_trans(sc);
if (error)
goto err;
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 9bbbf20f401b..ad70f29233c3 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -424,10 +424,6 @@ xchk_ag_read_headers(
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
return error;
- error = xfs_alloc_read_agfl(sa->pag, sc->tp, &sa->agfl_bp);
- if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
- return error;
-
return 0;
}
@@ -515,10 +511,6 @@ xchk_ag_free(
struct xchk_ag *sa)
{
xchk_ag_btcur_free(sa);
- if (sa->agfl_bp) {
- xfs_trans_brelse(sc->tp, sa->agfl_bp);
- sa->agfl_bp = NULL;
- }
if (sa->agf_bp) {
xfs_trans_brelse(sc->tp, sa->agf_bp);
sa->agf_bp = NULL;
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 5c87800ab223..d1b0f23c2c59 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -666,7 +666,12 @@ xchk_directory_blocks(
struct xfs_scrub *sc)
{
struct xfs_bmbt_irec got;
- struct xfs_da_args args;
+ struct xfs_da_args args = {
+ .dp = sc->ip,
+ .whichfork = XFS_DATA_FORK,
+ .geo = sc->mp->m_dir_geo,
+ .trans = sc->tp,
+ };
struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
struct xfs_mount *mp = sc->mp;
xfs_fileoff_t leaf_lblk;
@@ -689,9 +694,6 @@ xchk_directory_blocks(
free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET);
/* Is this a block dir? */
- args.dp = sc->ip;
- args.geo = mp->m_dir_geo;
- args.trans = sc->tp;
error = xfs_dir2_isblock(&args, &is_block);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index c18bd039fce9..22335619c84e 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -121,32 +121,40 @@ xrep_roll_ag_trans(
{
int error;
- /* Keep the AG header buffers locked so we can keep going. */
- if (sc->sa.agi_bp)
+ /*
+ * Keep the AG header buffers locked while we roll the transaction.
+ * Ensure that both AG buffers are dirty and held when we roll the
+ * transaction so that they move forward in the log without losing the
+ * bli (and hence the bli type) when the transaction commits.
+ *
+ * Normal code would never hold clean buffers across a roll, but repair
+ * needs both buffers to maintain a total lock on the AG.
+ */
+ if (sc->sa.agi_bp) {
+ xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
- if (sc->sa.agf_bp)
+ }
+
+ if (sc->sa.agf_bp) {
+ xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
- if (sc->sa.agfl_bp)
- xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);
+ }
/*
- * Roll the transaction. We still own the buffer and the buffer lock
- * regardless of whether or not the roll succeeds. If the roll fails,
- * the buffers will be released during teardown on our way out of the
- * kernel. If it succeeds, we join them to the new transaction and
- * move on.
+ * Roll the transaction. We still hold the AG header buffers locked
+ * regardless of whether or not that succeeds. On failure, the buffers
+ * will be released during teardown on our way out of the kernel. If
+ * successful, join the buffers to the new transaction and move on.
*/
error = xfs_trans_roll(&sc->tp);
if (error)
return error;
- /* Join AG headers to the new transaction. */
+ /* Join the AG headers to the new transaction. */
if (sc->sa.agi_bp)
xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
if (sc->sa.agf_bp)
xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
- if (sc->sa.agfl_bp)
- xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);
return 0;
}
@@ -498,6 +506,7 @@ xrep_put_freelist(
struct xfs_scrub *sc,
xfs_agblock_t agbno)
{
+ struct xfs_buf *agfl_bp;
int error;
/* Make sure there's space on the freelist. */
@@ -516,8 +525,12 @@ xrep_put_freelist(
return error;
/* Put the block on the AGFL. */
+ error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
+ if (error)
+ return error;
+
error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
- sc->sa.agfl_bp, agbno, 0);
+ agfl_bp, agbno, 0);
if (error)
return error;
xfs_extent_busy_insert(sc->tp, sc->sa.pag, agbno, 1,
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index 3de5287e98d8..151567f88366 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -39,7 +39,6 @@ struct xchk_ag {
/* AG btree roots */
struct xfs_buf *agf_bp;
- struct xfs_buf *agfl_bp;
struct xfs_buf *agi_bp;
/* AG btrees */