From d8d6df4253adcdb5862a9410d962e9168b973c88 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:35:15 -0800 Subject: xfs: extern some btree ops structures Expose these static btree ops structures so that we can reference them in the AG initialization code in the next patch. Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig --- fs/xfs/libxfs/xfs_shared.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs/xfs/libxfs/xfs_shared.h') diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index 4220d3584c1b..518ea9456eba 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -43,6 +43,15 @@ extern const struct xfs_buf_ops xfs_sb_buf_ops; extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops; extern const struct xfs_buf_ops xfs_symlink_buf_ops; +/* btree ops */ +extern const struct xfs_btree_ops xfs_bnobt_ops; +extern const struct xfs_btree_ops xfs_cntbt_ops; +extern const struct xfs_btree_ops xfs_inobt_ops; +extern const struct xfs_btree_ops xfs_finobt_ops; +extern const struct xfs_btree_ops xfs_bmbt_ops; +extern const struct xfs_btree_ops xfs_refcountbt_ops; +extern const struct xfs_btree_ops xfs_rmapbt_ops; + /* log size calculation functions */ int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes); int xfs_log_calc_minimum_size(struct xfs_mount *); -- cgit v1.2.3 From ec793e690f801d97a7ae2a0d429fea1fee4d44aa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 22 Feb 2024 12:40:51 -0800 Subject: xfs: remove xfs_btnum_t The last checks for bc_btnum can be replaced with helpers that check the btree ops. This allows adding new btrees to XFS without having to update a global enum. Signed-off-by: Christoph Hellwig Reviewed-by: Darrick J. Wong [djwong: complete the ops predicates] Signed-off-by: Darrick J. 
Wong --- fs/xfs/libxfs/xfs_alloc.c | 6 +++--- fs/xfs/libxfs/xfs_alloc_btree.c | 12 ++++++------ fs/xfs/libxfs/xfs_bmap_btree.c | 4 ++-- fs/xfs/libxfs/xfs_btree.c | 4 ++-- fs/xfs/libxfs/xfs_btree.h | 11 ----------- fs/xfs/libxfs/xfs_ialloc.c | 2 +- fs/xfs/libxfs/xfs_ialloc_btree.c | 10 +++++----- fs/xfs/libxfs/xfs_refcount_btree.c | 5 ++--- fs/xfs/libxfs/xfs_rmap_btree.c | 2 +- fs/xfs/libxfs/xfs_shared.h | 35 +++++++++++++++++++++++++++++++++++ fs/xfs/libxfs/xfs_types.h | 9 --------- fs/xfs/scrub/btree.c | 10 ++++------ fs/xfs/scrub/ialloc.c | 6 +++--- fs/xfs/scrub/trace.h | 8 -------- fs/xfs/xfs_health.c | 2 +- fs/xfs/xfs_trace.h | 9 --------- 16 files changed, 65 insertions(+), 70 deletions(-) (limited to 'fs/xfs/libxfs/xfs_shared.h') diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 2b74aded4a2c..9da52e92172a 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -918,7 +918,7 @@ xfs_alloc_cur_check( bool busy; unsigned busy_gen = 0; bool deactivate = false; - bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO; + bool isbnobt = xfs_btree_is_bno(cur->bc_ops); *new = 0; @@ -4026,7 +4026,7 @@ xfs_alloc_query_range( union xfs_btree_irec high_brec = { .a = *high_rec }; struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn }; - ASSERT(cur->bc_btnum == XFS_BTNUM_BNO); + ASSERT(xfs_btree_is_bno(cur->bc_ops)); return xfs_btree_query_range(cur, &low_brec, &high_brec, xfs_alloc_query_range_helper, &query); } @@ -4040,7 +4040,7 @@ xfs_alloc_query_all( { struct xfs_alloc_query_range_info query; - ASSERT(cur->bc_btnum == XFS_BTNUM_BNO); + ASSERT(xfs_btree_is_bno(cur->bc_ops)); query.priv = priv; query.fn = fn; return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query); diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index 722863464289..885c7db5d6e7 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -51,7 +51,7 @@ xfs_allocbt_set_root( ASSERT(ptr->s != 0); - if (cur->bc_btnum == XFS_BTNUM_BNO) { + if (xfs_btree_is_bno(cur->bc_ops)) { agf->agf_bno_root = ptr->s; be32_add_cpu(&agf->agf_bno_level, inc); cur->bc_ag.pag->pagf_bno_level += inc; @@ -131,7 +131,7 @@ xfs_allocbt_update_lastrec( __be32 len; int numrecs; - ASSERT(cur->bc_btnum == XFS_BTNUM_CNT); + ASSERT(!xfs_btree_is_bno(cur->bc_ops)); switch (reason) { case LASTREC_UPDATE: @@ -241,7 +241,7 @@ xfs_allocbt_init_ptr_from_cur( ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno)); - if (cur->bc_btnum == XFS_BTNUM_BNO) + if (xfs_btree_is_bno(cur->bc_ops)) ptr->s = agf->agf_bno_root; else ptr->s = agf->agf_cnt_root; @@ -554,7 +554,7 @@ xfs_bnobt_init_cursor( { struct xfs_btree_cur *cur; - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BNO, &xfs_bnobt_ops, + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bnobt_ops, mp->m_alloc_maxlevels, xfs_allocbt_cur_cache); cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.agbp = agbp; @@ -580,7 +580,7 @@ xfs_cntbt_init_cursor( { struct xfs_btree_cur *cur; - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_CNT, &xfs_cntbt_ops, + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_cntbt_ops, mp->m_alloc_maxlevels, xfs_allocbt_cur_cache); cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.agbp = agbp; @@ -607,7 +607,7 @@ xfs_allocbt_commit_staged_btree( ASSERT(cur->bc_flags & XFS_BTREE_STAGING); - if (cur->bc_btnum == XFS_BTNUM_BNO) { + if (xfs_btree_is_bno(cur->bc_ops)) { agf->agf_bno_root = cpu_to_be32(afake->af_root); agf->agf_bno_level = cpu_to_be32(afake->af_levels); } else { diff --git 
a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index 25193551e95b..54fdf0df8ec3 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -574,8 +574,8 @@ xfs_bmbt_init_cursor( maxlevels = mp->m_bm_maxlevels[whichfork]; break; } - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops, - maxlevels, xfs_bmbt_cur_cache); + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bmbt_ops, maxlevels, + xfs_bmbt_cur_cache); cur->bc_ino.ip = ip; cur->bc_ino.whichfork = whichfork; cur->bc_bmap.allocated = 0; diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 278461d0f64d..769be61ad63f 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -454,7 +454,7 @@ xfs_btree_del_cursor( * zero, then we should be shut down or on our way to shutdown due to * cancelling a dirty transaction on error. */ - ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_bmap.allocated == 0 || + ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 || xfs_is_shutdown(cur->bc_mp) || error != 0); switch (cur->bc_ops->type) { @@ -3016,7 +3016,7 @@ xfs_btree_split( struct xfs_btree_split_args args; DECLARE_COMPLETION_ONSTACK(done); - if (cur->bc_btnum != XFS_BTNUM_BMAP || + if (!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_tp->t_highest_agno == NULLAGNUMBER) return __xfs_btree_split(cur, level, ptrp, key, curp, stat); diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h index 6e5fd0c06453..9a264ffee303 100644 --- a/fs/xfs/libxfs/xfs_btree.h +++ b/fs/xfs/libxfs/xfs_btree.h @@ -55,14 +55,6 @@ union xfs_btree_rec { #define XFS_LOOKUP_LE ((xfs_lookup_t)XFS_LOOKUP_LEi) #define XFS_LOOKUP_GE ((xfs_lookup_t)XFS_LOOKUP_GEi) -#define XFS_BTNUM_BNO ((xfs_btnum_t)XFS_BTNUM_BNOi) -#define XFS_BTNUM_CNT ((xfs_btnum_t)XFS_BTNUM_CNTi) -#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi) -#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi) -#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi) -#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi) -#define XFS_BTNUM_REFC ((xfs_btnum_t)XFS_BTNUM_REFCi) - struct xfs_btree_ops; uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops); @@ -272,7 +264,6 @@ struct xfs_btree_cur const struct xfs_btree_ops *bc_ops; struct kmem_cache *bc_cache; /* cursor cache */ unsigned int bc_flags; /* btree features - below */ - xfs_btnum_t bc_btnum; /* identifies which btree type */ union xfs_btree_irec bc_rec; /* current insert/search record value */ uint8_t bc_nlevels; /* number of levels in the tree */ uint8_t bc_maxlevels; /* maximum levels for this btree type */ @@ -726,7 +717,6 @@ static inline struct xfs_btree_cur * xfs_btree_alloc_cursor( struct xfs_mount *mp, struct xfs_trans *tp, - xfs_btnum_t btnum, const struct xfs_btree_ops *ops, uint8_t maxlevels, struct kmem_cache *cache) @@ -742,7 +732,6 @@ xfs_btree_alloc_cursor( cur->bc_ops = ops; cur->bc_tp = tp; cur->bc_mp = mp; - cur->bc_btnum = btnum; cur->bc_maxlevels = maxlevels; cur->bc_cache = cache; diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index e6decc37ff18..e5ac3e5430c4 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -2848,7 +2848,7 @@ xfs_ialloc_count_inodes( struct xfs_ialloc_count_inodes ci = {0}; int error; - ASSERT(cur->bc_btnum == XFS_BTNUM_INO); + ASSERT(xfs_btree_is_ino(cur->bc_ops)); error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci); if (error) return error; diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c 
index 9cb5da9be904..74f144b2db68 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -90,9 +90,9 @@ xfs_inobt_mod_blockcount( if (!xfs_has_inobtcounts(cur->bc_mp)) return; - if (cur->bc_btnum == XFS_BTNUM_FINO) + if (xfs_btree_is_fino(cur->bc_ops)) be32_add_cpu(&agi->agi_fblocks, howmuch); - else if (cur->bc_btnum == XFS_BTNUM_INO) + else be32_add_cpu(&agi->agi_iblocks, howmuch); xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS); } @@ -481,7 +481,7 @@ xfs_inobt_init_cursor( struct xfs_mount *mp = pag->pag_mount; struct xfs_btree_cur *cur; - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_INO, &xfs_inobt_ops, + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops, M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache); cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.agbp = agbp; @@ -507,7 +507,7 @@ xfs_finobt_init_cursor( struct xfs_mount *mp = pag->pag_mount; struct xfs_btree_cur *cur; - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_FINO, &xfs_finobt_ops, + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops, M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache); cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.agbp = agbp; @@ -535,7 +535,7 @@ xfs_inobt_commit_staged_btree( ASSERT(cur->bc_flags & XFS_BTREE_STAGING); - if (cur->bc_btnum == XFS_BTNUM_INO) { + if (xfs_btree_is_ino(cur->bc_ops)) { fields = XFS_AGI_ROOT | XFS_AGI_LEVEL; agi->agi_root = cpu_to_be32(afake->af_root); agi->agi_level = cpu_to_be32(afake->af_levels); diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index 6388a0c9b691..f93dae3db701 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c @@ -364,9 +364,8 @@ xfs_refcountbt_init_cursor( ASSERT(pag->pag_agno < mp->m_sb.sb_agcount); - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC, - &xfs_refcountbt_ops, mp->m_refc_maxlevels, - xfs_refcountbt_cur_cache); + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops, + mp->m_refc_maxlevels, xfs_refcountbt_cur_cache); cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_refc.nr_ops = 0; cur->bc_refc.shape_changes = 0; diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index abaf5e190e99..b1ecc061fdc9 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -518,7 +518,7 @@ xfs_rmapbt_init_cursor( { struct xfs_btree_cur *cur; - cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops, + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_ops, mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache); cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.agbp = agbp; diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index 518ea9456eba..6b8bc276d461 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -52,6 +52,41 @@ extern const struct xfs_btree_ops xfs_bmbt_ops; extern const struct xfs_btree_ops xfs_refcountbt_ops; extern const struct xfs_btree_ops xfs_rmapbt_ops; +static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_bnobt_ops; +} + +static inline bool xfs_btree_is_cnt(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_cntbt_ops; +} + +static inline bool xfs_btree_is_bmap(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_bmbt_ops; +} + +static inline bool xfs_btree_is_ino(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_inobt_ops; +} + +static inline bool xfs_btree_is_fino(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_finobt_ops; +} + +static inline bool 
xfs_btree_is_refcount(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_refcountbt_ops; +} + +static inline bool xfs_btree_is_rmap(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_rmapbt_ops; +} + /* log size calculation functions */ int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes); int xfs_log_calc_minimum_size(struct xfs_mount *); diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h index f577247b748d..76eb9e328835 100644 --- a/fs/xfs/libxfs/xfs_types.h +++ b/fs/xfs/libxfs/xfs_types.h @@ -116,15 +116,6 @@ typedef enum { { XFS_LOOKUP_LEi, "le" }, \ { XFS_LOOKUP_GEi, "ge" } -/* - * This enum is used in string mapping in xfs_trace.h and scrub/trace.h; - * please keep the TRACE_DEFINE_ENUMs for it up to date. - */ -typedef enum { - XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_RMAPi, XFS_BTNUM_BMAPi, - XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_REFCi, XFS_BTNUM_MAX -} xfs_btnum_t; - struct xfs_name { const unsigned char *name; int len; diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c index 1ec3339755b9..187d692a0b58 100644 --- a/fs/xfs/scrub/btree.c +++ b/fs/xfs/scrub/btree.c @@ -374,14 +374,12 @@ xchk_btree_check_block_owner( { xfs_agnumber_t agno; xfs_agblock_t agbno; - xfs_btnum_t btnum; bool init_sa; int error = 0; if (!bs->cur) return 0; - btnum = bs->cur->bc_btnum; agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr); agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr); @@ -404,11 +402,11 @@ xchk_btree_check_block_owner( * have to nullify it (to shut down further block owner checks) if * self-xref encounters problems. */ - if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO) + if (!bs->sc->sa.bno_cur && xfs_btree_is_bno(bs->cur->bc_ops)) bs->cur = NULL; xchk_xref_is_only_owned_by(bs->sc, agbno, 1, bs->oinfo); - if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP) + if (!bs->sc->sa.rmap_cur && xfs_btree_is_rmap(bs->cur->bc_ops)) bs->cur = NULL; out_free: @@ -447,7 +445,7 @@ xchk_btree_check_owner( * duplicate cursors. Therefore, save the buffer daddr for * later scanning. */ - if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) { + if (xfs_btree_is_bno(cur->bc_ops) || xfs_btree_is_rmap(cur->bc_ops)) { struct check_owner *co; co = kmalloc(sizeof(struct check_owner), XCHK_GFP_FLAGS); @@ -480,7 +478,7 @@ xchk_btree_check_iroot_minrecs( * existing filesystems, so instead we disable the check for data fork * bmap btrees when there's an attr fork. */ - if (bs->cur->bc_btnum == XFS_BTNUM_BMAP && + if (xfs_btree_is_bmap(bs->cur->bc_ops) && bs->cur->bc_ino.whichfork == XFS_DATA_FORK && xfs_inode_has_attr_fork(bs->sc->ip)) return false; diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c index 26d589e9ba1c..750d7b0cd25a 100644 --- a/fs/xfs/scrub/ialloc.c +++ b/fs/xfs/scrub/ialloc.c @@ -76,7 +76,7 @@ xchk_inobt_xref_finobt( int has_record; int error; - ASSERT(cur->bc_btnum == XFS_BTNUM_FINO); + ASSERT(xfs_btree_is_fino(cur->bc_ops)); error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record); if (error) @@ -179,7 +179,7 @@ xchk_finobt_xref_inobt( int has_record; int error; - ASSERT(cur->bc_btnum == XFS_BTNUM_INO); + ASSERT(xfs_btree_is_ino(cur->bc_ops)); error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record); if (error) @@ -514,7 +514,7 @@ xchk_iallocbt_rec_alignment( * Otherwise, we expect that the finobt record is aligned to the * cluster alignment as told by the superblock. 
*/ - if (bs->cur->bc_btnum == XFS_BTNUM_FINO) { + if (xfs_btree_is_fino(bs->cur->bc_ops)) { unsigned int imask; imask = min_t(unsigned int, XFS_INODES_PER_CHUNK, diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 2c2f99d8772c..b840f25c03d6 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -32,14 +32,6 @@ struct xchk_fscounters; * ring buffer. Somehow this was only worth mentioning in the ftrace sample * code. */ -TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi); -TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi); -TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi); -TRACE_DEFINE_ENUM(XFS_BTNUM_INOi); -TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi); -TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi); -TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi); - TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED); TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW); diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c index 9921b5d3f158..b28546b6fe34 100644 --- a/fs/xfs/xfs_health.c +++ b/fs/xfs/xfs_health.c @@ -532,7 +532,7 @@ xfs_btree_mark_sick( xfs_ag_mark_sick(cur->bc_ag.pag, cur->bc_ops->sick_mask); return; case XFS_BTREE_TYPE_INODE: - if (cur->bc_btnum == XFS_BTNUM_BMAP) { + if (xfs_btree_is_bmap(cur->bc_ops)) { xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork); return; diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 498b8922062a..e876a47f1427 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -2450,15 +2450,6 @@ DEFINE_DISCARD_EVENT(xfs_discard_toosmall); DEFINE_DISCARD_EVENT(xfs_discard_exclude); DEFINE_DISCARD_EVENT(xfs_discard_busy); -/* btree cursor events */ -TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi); -TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi); -TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi); -TRACE_DEFINE_ENUM(XFS_BTNUM_INOi); -TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi); -TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi); -TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi); - DECLARE_EVENT_CLASS(xfs_btree_cur_class, TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), TP_ARGS(cur, level, bp), -- cgit v1.2.3 From 4787fc802752c9b73b28ff18860c0560bf4337f2 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:43:39 -0800 Subject: xfs: create a shadow rmap btree during rmap repair Create an in-memory btree of rmap records instead of an array. This enables us to do live record collection instead of freezing the fs. Signed-off-by: Darrick J. 
Wong Reviewed-by: Christoph Hellwig --- fs/xfs/libxfs/xfs_rmap.c | 37 ++++--- fs/xfs/libxfs/xfs_rmap_btree.c | 150 +++++++++++++++++++++++++- fs/xfs/libxfs/xfs_rmap_btree.h | 6 ++ fs/xfs/libxfs/xfs_shared.h | 10 ++ fs/xfs/scrub/repair.c | 18 ++++ fs/xfs/scrub/repair.h | 2 + fs/xfs/scrub/rmap_repair.c | 234 +++++++++++++++++++++++++++++------------ fs/xfs/xfs_stats.c | 3 +- fs/xfs/xfs_stats.h | 1 + 9 files changed, 377 insertions(+), 84 deletions(-) (limited to 'fs/xfs/libxfs/xfs_shared.h') diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index 83bc84a87421..d9f1b336ec33 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -269,6 +269,16 @@ xfs_rmap_check_irec( return NULL; } +static inline xfs_failaddr_t +xfs_rmap_check_btrec( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *irec) +{ + if (xfs_btree_is_mem_rmap(cur->bc_ops)) + return xfs_rmap_check_irec(cur->bc_mem.pag, irec); + return xfs_rmap_check_irec(cur->bc_ag.pag, irec); +} + static inline int xfs_rmap_complain_bad_rec( struct xfs_btree_cur *cur, @@ -277,9 +287,13 @@ xfs_rmap_complain_bad_rec( { struct xfs_mount *mp = cur->bc_mp; - xfs_warn(mp, - "Reverse Mapping BTree record corruption in AG %d detected at %pS!", - cur->bc_ag.pag->pag_agno, fa); + if (xfs_btree_is_mem_rmap(cur->bc_ops)) + xfs_warn(mp, + "In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa); + else + xfs_warn(mp, + "Reverse Mapping BTree record corruption in AG %d detected at %pS!", + cur->bc_ag.pag->pag_agno, fa); xfs_warn(mp, "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x", irec->rm_owner, irec->rm_flags, irec->rm_startblock, @@ -307,7 +321,7 @@ xfs_rmap_get_rec( fa = xfs_rmap_btrec_to_irec(rec, irec); if (!fa) - fa = xfs_rmap_check_irec(cur->bc_ag.pag, irec); + fa = xfs_rmap_check_btrec(cur, irec); if (fa) return xfs_rmap_complain_bad_rec(cur, fa, irec); @@ -2404,15 +2418,12 @@ xfs_rmap_map_raw( { struct xfs_owner_info oinfo; - oinfo.oi_owner = rmap->rm_owner; - oinfo.oi_offset = rmap->rm_offset; - oinfo.oi_flags = 0; - if (rmap->rm_flags & XFS_RMAP_ATTR_FORK) - oinfo.oi_flags |= XFS_OWNER_INFO_ATTR_FORK; - if (rmap->rm_flags & XFS_RMAP_BMBT_BLOCK) - oinfo.oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK; + xfs_owner_info_pack(&oinfo, rmap->rm_owner, rmap->rm_offset, + rmap->rm_flags); - if (rmap->rm_flags || XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner)) + if ((rmap->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK | + XFS_RMAP_UNWRITTEN)) || + XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner)) return xfs_rmap_map(cur, rmap->rm_startblock, rmap->rm_blockcount, rmap->rm_flags & XFS_RMAP_UNWRITTEN, @@ -2442,7 +2453,7 @@ xfs_rmap_query_range_helper( fa = xfs_rmap_btrec_to_irec(rec, &irec); if (!fa) - fa = xfs_rmap_check_irec(cur->bc_ag.pag, &irec); + fa = xfs_rmap_check_btrec(cur, &irec); if (fa) return xfs_rmap_complain_bad_rec(cur, fa, &irec); diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index 815e34e295dd..9e759efa81cc 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -22,6 +22,8 @@ #include "xfs_extent_busy.h" #include "xfs_ag.h" #include "xfs_ag_resv.h" +#include "xfs_buf_mem.h" +#include "xfs_btree_mem.h" static struct kmem_cache *xfs_rmapbt_cur_cache; @@ -541,6 +543,151 @@ xfs_rmapbt_init_cursor( return cur; } +#ifdef CONFIG_XFS_BTREE_IN_MEM +static inline unsigned int +xfs_rmapbt_mem_block_maxrecs( + unsigned int blocklen, + bool leaf) +{ + if (leaf) + return blocklen / sizeof(struct xfs_rmap_rec); + return blocklen / + (2 * 
sizeof(struct xfs_rmap_key) + sizeof(__be64)); +} + +/* + * Validate an in-memory rmap btree block. Callers are allowed to generate an + * in-memory btree even if the ondisk feature is not enabled. + */ +static xfs_failaddr_t +xfs_rmapbt_mem_verify( + struct xfs_buf *bp) +{ + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); + xfs_failaddr_t fa; + unsigned int level; + unsigned int maxrecs; + + if (!xfs_verify_magic(bp, block->bb_magic)) + return __this_address; + + fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN); + if (fa) + return fa; + + level = be16_to_cpu(block->bb_level); + if (level >= xfs_rmapbt_maxlevels_ondisk()) + return __this_address; + + maxrecs = xfs_rmapbt_mem_block_maxrecs( + XFBNO_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN, level == 0); + return xfs_btree_memblock_verify(bp, maxrecs); +} + +static void +xfs_rmapbt_mem_rw_verify( + struct xfs_buf *bp) +{ + xfs_failaddr_t fa = xfs_rmapbt_mem_verify(bp); + + if (fa) + xfs_verifier_error(bp, -EFSCORRUPTED, fa); +} + +/* skip crc checks on in-memory btrees to save time */ +static const struct xfs_buf_ops xfs_rmapbt_mem_buf_ops = { + .name = "xfs_rmapbt_mem", + .magic = { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) }, + .verify_read = xfs_rmapbt_mem_rw_verify, + .verify_write = xfs_rmapbt_mem_rw_verify, + .verify_struct = xfs_rmapbt_mem_verify, +}; + +const struct xfs_btree_ops xfs_rmapbt_mem_ops = { + .name = "mem_rmap", + .type = XFS_BTREE_TYPE_MEM, + .geom_flags = XFS_BTGEO_OVERLAPPING, + + .rec_len = sizeof(struct xfs_rmap_rec), + /* Overlapping btree; 2 keys per pointer. */ + .key_len = 2 * sizeof(struct xfs_rmap_key), + .ptr_len = XFS_BTREE_LONG_PTR_LEN, + + .lru_refs = XFS_RMAP_BTREE_REF, + .statoff = XFS_STATS_CALC_INDEX(xs_rmap_mem_2), + + .dup_cursor = xfbtree_dup_cursor, + .set_root = xfbtree_set_root, + .alloc_block = xfbtree_alloc_block, + .free_block = xfbtree_free_block, + .get_minrecs = xfbtree_get_minrecs, + .get_maxrecs = xfbtree_get_maxrecs, + .init_key_from_rec = xfs_rmapbt_init_key_from_rec, + .init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec, + .init_rec_from_cur = xfs_rmapbt_init_rec_from_cur, + .init_ptr_from_cur = xfbtree_init_ptr_from_cur, + .key_diff = xfs_rmapbt_key_diff, + .buf_ops = &xfs_rmapbt_mem_buf_ops, + .diff_two_keys = xfs_rmapbt_diff_two_keys, + .keys_inorder = xfs_rmapbt_keys_inorder, + .recs_inorder = xfs_rmapbt_recs_inorder, + .keys_contiguous = xfs_rmapbt_keys_contiguous, +}; + +/* Create a cursor for an in-memory btree. */ +struct xfs_btree_cur * +xfs_rmapbt_mem_cursor( + struct xfs_perag *pag, + struct xfs_trans *tp, + struct xfbtree *xfbt) +{ + struct xfs_btree_cur *cur; + struct xfs_mount *mp = pag->pag_mount; + + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_mem_ops, + xfs_rmapbt_maxlevels_ondisk(), xfs_rmapbt_cur_cache); + cur->bc_mem.xfbtree = xfbt; + cur->bc_nlevels = xfbt->nlevels; + + cur->bc_mem.pag = xfs_perag_hold(pag); + return cur; +} + +/* Create an in-memory rmap btree. */ +int +xfs_rmapbt_mem_init( + struct xfs_mount *mp, + struct xfbtree *xfbt, + struct xfs_buftarg *btp, + xfs_agnumber_t agno) +{ + xfbt->owner = agno; + return xfbtree_init(mp, xfbt, btp, &xfs_rmapbt_mem_ops); +} + +/* Compute the max possible height for reverse mapping btrees in memory. 
*/ +static unsigned int +xfs_rmapbt_mem_maxlevels(void) +{ + unsigned int minrecs[2]; + unsigned int blocklen; + + blocklen = XFBNO_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN; + + minrecs[0] = xfs_rmapbt_mem_block_maxrecs(blocklen, true) / 2; + minrecs[1] = xfs_rmapbt_mem_block_maxrecs(blocklen, false) / 2; + + /* + * How tall can an in-memory rmap btree become if we filled the entire + * AG with rmap records? + */ + return xfs_btree_compute_maxlevels(minrecs, + XFS_MAX_AG_BYTES / sizeof(struct xfs_rmap_rec)); +} +#else +# define xfs_rmapbt_mem_maxlevels() (0) +#endif /* CONFIG_XFS_BTREE_IN_MEM */ + /* * Install a new reverse mapping btree root. Caller is responsible for * invalidating and freeing the old btree blocks. @@ -611,7 +758,8 @@ xfs_rmapbt_maxlevels_ondisk(void) * like if it consumes almost all the blocks in the AG due to maximal * sharing factor. */ - return xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS); + return max(xfs_btree_space_to_height(minrecs, XFS_MAX_CRC_AG_BLOCKS), + xfs_rmapbt_mem_maxlevels()); } /* Compute the maximum height of an rmap btree. */ diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h index 27536d7e14aa..eb90d89e8086 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.h +++ b/fs/xfs/libxfs/xfs_rmap_btree.h @@ -10,6 +10,7 @@ struct xfs_buf; struct xfs_btree_cur; struct xfs_mount; struct xbtree_afakeroot; +struct xfbtree; /* rmaps only exist on crc enabled filesystems */ #define XFS_RMAP_BLOCK_LEN XFS_BTREE_SBLOCK_CRC_LEN @@ -62,4 +63,9 @@ unsigned int xfs_rmapbt_maxlevels_ondisk(void); int __init xfs_rmapbt_init_cur_cache(void); void xfs_rmapbt_destroy_cur_cache(void); +struct xfs_btree_cur *xfs_rmapbt_mem_cursor(struct xfs_perag *pag, + struct xfs_trans *tp, struct xfbtree *xfbtree); +int xfs_rmapbt_mem_init(struct xfs_mount *mp, struct xfbtree *xfbtree, + struct xfs_buftarg *btp, xfs_agnumber_t agno); + #endif /* __XFS_RMAP_BTREE_H__ */ diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index 6b8bc276d461..cab49e7116ec 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -51,6 +51,7 @@ extern const struct xfs_btree_ops xfs_finobt_ops; extern const struct xfs_btree_ops xfs_bmbt_ops; extern const struct xfs_btree_ops xfs_refcountbt_ops; extern const struct xfs_btree_ops xfs_rmapbt_ops; +extern const struct xfs_btree_ops xfs_rmapbt_mem_ops; static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops) { @@ -87,6 +88,15 @@ static inline bool xfs_btree_is_rmap(const struct xfs_btree_ops *ops) return ops == &xfs_rmapbt_ops; } +#ifdef CONFIG_XFS_BTREE_IN_MEM +static inline bool xfs_btree_is_mem_rmap(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_rmapbt_mem_ops; +} +#else +# define xfs_btree_is_mem_rmap(...) 
(false) +#endif + /* log size calculation functions */ int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes); int xfs_log_calc_minimum_size(struct xfs_mount *); diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 0c56dafd9ae4..2645abddad78 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -31,12 +31,14 @@ #include "xfs_error.h" #include "xfs_reflink.h" #include "xfs_health.h" +#include "xfs_buf_mem.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" #include "scrub/repair.h" #include "scrub/bitmap.h" #include "scrub/stats.h" +#include "scrub/xfile.h" /* * Attempt to repair some metadata, if the metadata is corrupt and userspace @@ -1147,3 +1149,19 @@ xrep_metadata_inode_forks( return 0; } + +/* + * Set up an in-memory buffer cache so that we can use the xfbtree. Allocating + * a shmem file might take loks, so we cannot be in transaction context. Park + * our resources in the scrub context and let the teardown function take care + * of them at the right time. + */ +int +xrep_setup_xfbtree( + struct xfs_scrub *sc, + const char *descr) +{ + ASSERT(sc->tp == NULL); + + return xmbuf_alloc(sc->mp, descr, &sc->xmbtp); +} diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index c01e56799bd1..059b51027365 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -81,6 +81,8 @@ int xrep_ino_dqattach(struct xfs_scrub *sc); # define xrep_ino_dqattach(sc) (0) #endif /* CONFIG_XFS_QUOTA */ +int xrep_setup_xfbtree(struct xfs_scrub *sc, const char *descr); + int xrep_ino_ensure_extent_count(struct xfs_scrub *sc, int whichfork, xfs_extnum_t nextents); int xrep_reset_perag_resv(struct xfs_scrub *sc); diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c index 120efb49bbfe..6efca28e461d 100644 --- a/fs/xfs/scrub/rmap_repair.c +++ b/fs/xfs/scrub/rmap_repair.c @@ -12,6 +12,8 @@ #include "xfs_defer.h" #include "xfs_btree.h" #include "xfs_btree_staging.h" +#include "xfs_buf_mem.h" +#include "xfs_btree_mem.h" #include "xfs_bit.h" #include "xfs_log_format.h" #include "xfs_trans.h" @@ -121,33 +123,25 @@ * We use the 'xrep_rmap' prefix for all the rmap functions. */ -/* - * Packed rmap record. The ATTR/BMBT/UNWRITTEN flags are hidden in the upper - * bits of offset, just like the on-disk record. - */ -struct xrep_rmap_extent { - xfs_agblock_t startblock; - xfs_extlen_t blockcount; - uint64_t owner; - uint64_t offset; -} __packed; - /* Context for collecting rmaps */ struct xrep_rmap { /* new rmapbt information */ struct xrep_newbt new_btree; /* rmap records generated from primary metadata */ - struct xfarray *rmap_records; + struct xfbtree rmap_btree; struct xfs_scrub *sc; - /* get_records()'s position in the rmap record array. */ - xfarray_idx_t array_cur; + /* in-memory btree cursor for the xfs_btree_bload iteration */ + struct xfs_btree_cur *mcur; /* inode scan cursor */ struct xchk_iscan iscan; + /* Number of non-freespace records found. 
*/ + unsigned long long nr_records; + /* bnobt/cntbt contribution to btreeblks */ xfs_agblock_t freesp_btblocks; @@ -161,6 +155,14 @@ xrep_setup_ag_rmapbt( struct xfs_scrub *sc) { struct xrep_rmap *rr; + char *descr; + int error; + + descr = xchk_xfile_ag_descr(sc, "reverse mapping records"); + error = xrep_setup_xfbtree(sc, descr); + kfree(descr); + if (error) + return error; rr = kzalloc(sizeof(struct xrep_rmap), XCHK_GFP_FLAGS); if (!rr) @@ -204,11 +206,6 @@ xrep_rmap_stash( uint64_t offset, unsigned int flags) { - struct xrep_rmap_extent rre = { - .startblock = startblock, - .blockcount = blockcount, - .owner = owner, - }; struct xfs_rmap_irec rmap = { .rm_startblock = startblock, .rm_blockcount = blockcount, @@ -217,6 +214,7 @@ xrep_rmap_stash( .rm_flags = flags, }; struct xfs_scrub *sc = rr->sc; + struct xfs_btree_cur *mcur; int error = 0; if (xchk_should_terminate(sc, &error)) @@ -224,8 +222,17 @@ xrep_rmap_stash( trace_xrep_rmap_found(sc->mp, sc->sa.pag->pag_agno, &rmap); - rre.offset = xfs_rmap_irec_offset_pack(&rmap); - return xfarray_append(rr->rmap_records, &rre); + mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, &rr->rmap_btree); + error = xfs_rmap_map_raw(mcur, &rmap); + xfs_btree_del_cursor(mcur, error); + if (error) + goto out_cancel; + + return xfbtree_trans_commit(&rr->rmap_btree, sc->tp); + +out_cancel: + xfbtree_trans_cancel(&rr->rmap_btree, sc->tp); + return error; } struct xrep_rmap_stash_run { @@ -804,6 +811,24 @@ xrep_rmap_find_log_rmaps( sc->mp->m_sb.sb_logblocks, XFS_RMAP_OWN_LOG, 0, 0); } +/* Check and count all the records that we gathered. */ +STATIC int +xrep_rmap_check_record( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_rmap *rr = priv; + int error; + + error = xrep_rmap_check_mapping(rr->sc, rec); + if (error) + return error; + + rr->nr_records++; + return 0; +} + /* * Generate all the reverse-mappings for this AG, a list of the old rmapbt * blocks, and the new btreeblks count. Figure out if we have enough free @@ -817,6 +842,7 @@ xrep_rmap_find_rmaps( struct xfs_scrub *sc = rr->sc; struct xchk_ag *sa = &sc->sa; struct xfs_inode *ip; + struct xfs_btree_cur *mcur; int error; /* Find all the per-AG metadata. */ @@ -884,7 +910,29 @@ end_agscan: error = xchk_setup_fs(sc); if (error) return error; - return xchk_perag_drain_and_lock(sc); + error = xchk_perag_drain_and_lock(sc); + if (error) + return error; + + /* + * Now that we have everything locked again, we need to count the + * number of rmap records stashed in the btree. This should reflect + * all actively-owned space in the filesystem. At the same time, check + * all our records before we start building a new btree, which requires + * a bnobt cursor. + */ + mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree); + sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, + sc->sa.pag); + + rr->nr_records = 0; + error = xfs_rmap_query_all(mcur, xrep_rmap_check_record, rr); + + xfs_btree_del_cursor(sc->sa.bno_cur, error); + sc->sa.bno_cur = NULL; + xfs_btree_del_cursor(mcur, error); + + return error; } /* Section (II): Reserving space for new rmapbt and setting free space bitmap */ @@ -917,7 +965,6 @@ STATIC int xrep_rmap_try_reserve( struct xrep_rmap *rr, struct xfs_btree_cur *rmap_cur, - uint64_t nr_records, struct xagb_bitmap *freesp_blocks, uint64_t *blocks_reserved, bool *done) @@ -1001,7 +1048,7 @@ xrep_rmap_try_reserve( /* Compute how many blocks we'll need for all the rmaps. 
*/ error = xfs_btree_bload_compute_geometry(rmap_cur, - &rr->new_btree.bload, nr_records + freesp_records); + &rr->new_btree.bload, rr->nr_records + freesp_records); if (error) return error; @@ -1020,16 +1067,13 @@ xrep_rmap_reserve_space( struct xfs_btree_cur *rmap_cur) { struct xagb_bitmap freesp_blocks; /* AGBIT */ - uint64_t nr_records; /* NR */ uint64_t blocks_reserved = 0; bool done = false; int error; - nr_records = xfarray_length(rr->rmap_records); - /* Compute how many blocks we'll need for the rmaps collected so far. */ error = xfs_btree_bload_compute_geometry(rmap_cur, - &rr->new_btree.bload, nr_records); + &rr->new_btree.bload, rr->nr_records); if (error) return error; @@ -1046,8 +1090,8 @@ xrep_rmap_reserve_space( * Finish when we don't need more blocks. */ do { - error = xrep_rmap_try_reserve(rr, rmap_cur, nr_records, - &freesp_blocks, &blocks_reserved, &done); + error = xrep_rmap_try_reserve(rr, rmap_cur, &freesp_blocks, + &blocks_reserved, &done); if (error) goto out_bitmap; } while (!done); @@ -1108,28 +1152,25 @@ xrep_rmap_get_records( unsigned int nr_wanted, void *priv) { - struct xrep_rmap_extent rec; - struct xfs_rmap_irec *irec = &cur->bc_rec.r; struct xrep_rmap *rr = priv; union xfs_btree_rec *block_rec; unsigned int loaded; int error; for (loaded = 0; loaded < nr_wanted; loaded++, idx++) { - error = xfarray_load_next(rr->rmap_records, &rr->array_cur, - &rec); + int stat = 0; + + error = xfs_btree_increment(rr->mcur, 0, &stat); if (error) return error; - - irec->rm_startblock = rec.startblock; - irec->rm_blockcount = rec.blockcount; - irec->rm_owner = rec.owner; - if (xfs_rmap_irec_offset_unpack(rec.offset, irec) != NULL) + if (!stat) return -EFSCORRUPTED; - error = xrep_rmap_check_mapping(rr->sc, irec); + error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat); if (error) return error; + if (!stat) + return -EFSCORRUPTED; block_rec = xfs_btree_rec_addr(cur, idx, block); cur->bc_ops->init_rec_from_cur(cur, block_rec); @@ -1189,6 +1230,29 @@ xrep_rmap_alloc_vextent( return xfs_alloc_vextent_near_bno(args, alloc_hint); } + +/* Count the records in this btree. */ +STATIC int +xrep_rmap_count_records( + struct xfs_btree_cur *cur, + unsigned long long *nr) +{ + int running = 1; + int error; + + *nr = 0; + + error = xfs_btree_goto_left_edge(cur); + if (error) + return error; + + while (running && !(error = xfs_btree_increment(cur, 0, &running))) { + if (running) + (*nr)++; + } + + return error; +} /* * Use the collected rmap information to stage a new rmap btree. If this is * successful we'll return with the new btree root information logged to the @@ -1238,6 +1302,17 @@ xrep_rmap_build_new_tree( if (error) goto err_cur; + /* + * Count the rmapbt records again, because the space reservation + * for the rmapbt itself probably added more records to the btree. + */ + rr->mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, + &rr->rmap_btree); + + error = xrep_rmap_count_records(rr->mcur, &rr->nr_records); + if (error) + goto err_mcur; + /* * Due to btree slack factors, it's possible for a new btree to be one * level taller than the old btree. Update the incore btree height so @@ -1246,13 +1321,16 @@ xrep_rmap_build_new_tree( */ pag->pagf_repair_rmap_level = rr->new_btree.bload.btree_height; + /* + * Move the cursor to the left edge of the tree so that the first + * increment in ->get_records positions us at the first record. + */ + error = xfs_btree_goto_left_edge(rr->mcur); + if (error) + goto err_level; + /* Add all observed rmap records. 
*/ - rr->array_cur = XFARRAY_CURSOR_INIT; - sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, - sc->sa.pag); error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr); - xfs_btree_del_cursor(sc->sa.bno_cur, error); - sc->sa.bno_cur = NULL; if (error) goto err_level; @@ -1262,6 +1340,14 @@ xrep_rmap_build_new_tree( */ xfs_rmapbt_commit_staged_btree(rmap_cur, sc->tp, sc->sa.agf_bp); xfs_btree_del_cursor(rmap_cur, 0); + xfs_btree_del_cursor(rr->mcur, 0); + rr->mcur = NULL; + + /* + * Now that we've written the new btree to disk, we don't need to keep + * updating the in-memory btree. Abort the scan to stop live updates. + */ + xchk_iscan_abort(&rr->iscan); /* * The newly committed rmap recordset includes mappings for the blocks @@ -1285,6 +1371,8 @@ xrep_rmap_build_new_tree( err_level: pag->pagf_repair_rmap_level = 0; +err_mcur: + xfs_btree_del_cursor(rr->mcur, error); err_cur: xfs_btree_del_cursor(rmap_cur, error); err_newbt: @@ -1312,6 +1400,28 @@ xrep_rmap_find_freesp( rec->ar_blockcount); } +/* Record the free space we find, as part of cleaning out the btree. */ +STATIC int +xrep_rmap_find_gaps( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_rmap_find_gaps *rfg = priv; + int error; + + if (rec->rm_startblock > rfg->next_agbno) { + error = xagb_bitmap_set(&rfg->rmap_gaps, rfg->next_agbno, + rec->rm_startblock - rfg->next_agbno); + if (error) + return error; + } + + rfg->next_agbno = max_t(xfs_agblock_t, rfg->next_agbno, + rec->rm_startblock + rec->rm_blockcount); + return 0; +} + /* * Reap the old rmapbt blocks. Now that the rmapbt is fully rebuilt, we make * a list of gaps in the rmap records and a list of the extents mentioned in @@ -1328,30 +1438,19 @@ xrep_rmap_remove_old_tree( struct xfs_scrub *sc = rr->sc; struct xfs_agf *agf = sc->sa.agf_bp->b_addr; struct xfs_perag *pag = sc->sa.pag; + struct xfs_btree_cur *mcur; xfs_agblock_t agend; - xfarray_idx_t array_cur; int error; xagb_bitmap_init(&rfg.rmap_gaps); /* Compute free space from the new rmapbt. */ - foreach_xfarray_idx(rr->rmap_records, array_cur) { - struct xrep_rmap_extent rec; - - error = xfarray_load(rr->rmap_records, array_cur, &rec); - if (error) - goto out_bitmap; + mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree); - /* Record the free space we find. */ - if (rec.startblock > rfg.next_agbno) { - error = xagb_bitmap_set(&rfg.rmap_gaps, rfg.next_agbno, - rec.startblock - rfg.next_agbno); - if (error) - goto out_bitmap; - } - rfg.next_agbno = max_t(xfs_agblock_t, rfg.next_agbno, - rec.startblock + rec.blockcount); - } + error = xfs_rmap_query_all(mcur, xrep_rmap_find_gaps, &rfg); + xfs_btree_del_cursor(mcur, error); + if (error) + goto out_bitmap; /* Insert a record for space between the last rmap and EOAG. */ agend = be32_to_cpu(agf->agf_length); @@ -1402,14 +1501,11 @@ xrep_rmap_setup_scan( struct xrep_rmap *rr) { struct xfs_scrub *sc = rr->sc; - char *descr; int error; - /* Set up some storage */ - descr = xchk_xfile_ag_descr(sc, "reverse mapping records"); - error = xfarray_create(descr, 0, sizeof(struct xrep_rmap_extent), - &rr->rmap_records); - kfree(descr); + /* Set up in-memory rmap btree */ + error = xfs_rmapbt_mem_init(sc->mp, &rr->rmap_btree, sc->xmbtp, + sc->sa.pag->pag_agno); if (error) return error; @@ -1424,7 +1520,7 @@ xrep_rmap_teardown( struct xrep_rmap *rr) { xchk_iscan_teardown(&rr->iscan); - xfarray_destroy(rr->rmap_records); + xfbtree_destroy(&rr->rmap_btree); } /* Repair the rmap btree for some AG. 
*/ diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c index 90a77cd3ebad..5c6773628d69 100644 --- a/fs/xfs/xfs_stats.c +++ b/fs/xfs/xfs_stats.c @@ -50,7 +50,8 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf) { "ibt2", xfsstats_offset(xs_fibt_2) }, { "fibt2", xfsstats_offset(xs_rmap_2) }, { "rmapbt", xfsstats_offset(xs_refcbt_2) }, - { "refcntbt", xfsstats_offset(xs_qm_dqreclaims)}, + { "refcntbt", xfsstats_offset(xs_rmap_mem_2) }, + { "rmapbt_mem", xfsstats_offset(xs_qm_dqreclaims)}, /* we print both series of quota information together */ { "qm", xfsstats_offset(xs_xstrat_bytes)}, }; diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h index 43ffba74f045..3b50419d8bb9 100644 --- a/fs/xfs/xfs_stats.h +++ b/fs/xfs/xfs_stats.h @@ -125,6 +125,7 @@ struct __xfsstats { uint32_t xs_fibt_2[__XBTS_MAX]; uint32_t xs_rmap_2[__XBTS_MAX]; uint32_t xs_refcbt_2[__XBTS_MAX]; + uint32_t xs_rmap_mem_2[__XBTS_MAX]; uint32_t xs_qm_dqreclaims; uint32_t xs_qm_dqreclaim_misses; uint32_t xs_qm_dquot_dups; -- cgit v1.2.3 From 622d88e2ad7960b83af38dabf6b848a22a5a1c1f Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:45:01 -0800 Subject: xfs: move xfs_symlink_remote.c declarations to xfs_symlink_remote.h Move declarations for libxfs symlink functions into a separate header file like we do for most everything else. Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig --- fs/xfs/libxfs/xfs_bmap.c | 1 + fs/xfs/libxfs/xfs_inode_fork.c | 1 + fs/xfs/libxfs/xfs_shared.h | 13 ------------- fs/xfs/libxfs/xfs_symlink_remote.c | 2 +- fs/xfs/libxfs/xfs_symlink_remote.h | 22 ++++++++++++++++++++++ fs/xfs/scrub/inode_repair.c | 1 + fs/xfs/scrub/symlink.c | 1 + fs/xfs/xfs_symlink.c | 1 + 8 files changed, 28 insertions(+), 14 deletions(-) create mode 100644 fs/xfs/libxfs/xfs_symlink_remote.h (limited to 'fs/xfs/libxfs/xfs_shared.h') diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index d78e02a3b4d6..656c95a22f2e 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -38,6 +38,7 @@ #include "xfs_iomap.h" #include "xfs_health.h" #include "xfs_bmap_item.h" +#include "xfs_symlink_remote.h" struct kmem_cache *xfs_bmap_intent_cache; diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 3ab0ea133557..7d660a973909 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -26,6 +26,7 @@ #include "xfs_types.h" #include "xfs_errortag.h" #include "xfs_health.h" +#include "xfs_symlink_remote.h" struct kmem_cache *xfs_ifork_cache; diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index cab49e7116ec..dfd61fa8332e 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -182,19 +182,6 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp, #define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */ #define XFS_ICHGTIME_CREATE 0x4 /* inode create timestamp */ - -/* - * Symlink decoding/encoding functions - */ -int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen); -int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset, - uint32_t size, struct xfs_buf *bp); -bool xfs_symlink_hdr_ok(xfs_ino_t ino, uint32_t offset, - uint32_t size, struct xfs_buf *bp); -void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp, - struct xfs_inode *ip, struct xfs_ifork *ifp); -xfs_failaddr_t xfs_symlink_shortform_verify(void *sfp, int64_t size); - /* Computed inode geometry for the filesystem. 
*/ struct xfs_ino_geometry { /* Maximum inode count in this filesystem. */ diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c index 160aa20aa441..7c39cb0307d9 100644 --- a/fs/xfs/libxfs/xfs_symlink_remote.c +++ b/fs/xfs/libxfs/xfs_symlink_remote.c @@ -16,7 +16,7 @@ #include "xfs_trans.h" #include "xfs_buf_item.h" #include "xfs_log.h" - +#include "xfs_symlink_remote.h" /* * Each contiguous block has a header, so it is not just a simple pathlen diff --git a/fs/xfs/libxfs/xfs_symlink_remote.h b/fs/xfs/libxfs/xfs_symlink_remote.h new file mode 100644 index 000000000000..c6f621a0ec05 --- /dev/null +++ b/fs/xfs/libxfs/xfs_symlink_remote.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * Copyright (c) 2013 Red Hat, Inc. + * All Rights Reserved. + */ +#ifndef __XFS_SYMLINK_REMOTE_H +#define __XFS_SYMLINK_REMOTE_H + +/* + * Symlink decoding/encoding functions + */ +int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen); +int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset, + uint32_t size, struct xfs_buf *bp); +bool xfs_symlink_hdr_ok(xfs_ino_t ino, uint32_t offset, + uint32_t size, struct xfs_buf *bp); +void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp, + struct xfs_inode *ip, struct xfs_ifork *ifp); +xfs_failaddr_t xfs_symlink_shortform_verify(void *sfp, int64_t size); + +#endif /* __XFS_SYMLINK_REMOTE_H */ diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c index 7e859c412a5b..eab380e95ef4 100644 --- a/fs/xfs/scrub/inode_repair.c +++ b/fs/xfs/scrub/inode_repair.c @@ -37,6 +37,7 @@ #include "xfs_attr_leaf.h" #include "xfs_log_priv.h" #include "xfs_health.h" +#include "xfs_symlink_remote.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c index ddff86713df3..31df0866f2ee 100644 --- a/fs/xfs/scrub/symlink.c +++ b/fs/xfs/scrub/symlink.c @@ -13,6 +13,7 @@ #include "xfs_inode.h" #include "xfs_symlink.h" #include "xfs_health.h" +#include "xfs_symlink_remote.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/health.h" diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index fb8c57b9d13d..38f569d3f47a 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c @@ -24,6 +24,7 @@ #include "xfs_ialloc.h" #include "xfs_error.h" #include "xfs_health.h" +#include "xfs_symlink_remote.h" /* ----- Kernel only functions below ----- */ int -- cgit v1.2.3