Diffstat (limited to 'fs/xfs/libxfs')
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.c  130
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.h    3
2 files changed, 96 insertions, 37 deletions
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index e7c70fb181d9..00f6eca3413d 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -24,6 +24,7 @@
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_rtgroup.h"
struct kmem_cache *xfs_refcount_intent_cache;
@@ -40,6 +41,16 @@ STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
xfs_agblock_t agbno, xfs_extlen_t aglen);
+/* Return the maximum startblock number of the refcountbt. */
+static inline xfs_agblock_t
+xrefc_max_startblock(
+ struct xfs_btree_cur *cur)
+{
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC)
+ return cur->bc_mp->m_sb.sb_rgblocks;
+ return cur->bc_mp->m_sb.sb_agblocks;
+}
+
/*
* Look up the first record less than or equal to [bno, len] in the btree
* given by cur.
@@ -103,12 +114,22 @@ xfs_refcount_lookup_eq(
/* Convert on-disk record to in-core format. */
void
xfs_refcount_btrec_to_irec(
+ struct xfs_btree_cur *cur,
const union xfs_btree_rec *rec,
struct xfs_refcount_irec *irec)
{
__u32 start;
- start = be32_to_cpu(rec->refc.rc_startblock);
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC) {
+ start = be32_to_cpu(rec->rtrefc.rc_startblock);
+ irec->rc_blockcount = be32_to_cpu(rec->rtrefc.rc_blockcount);
+ irec->rc_refcount = be32_to_cpu(rec->rtrefc.rc_refcount);
+ } else {
+ start = be32_to_cpu(rec->refc.rc_startblock);
+ irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
+ irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
+ }
+
if (start & XFS_REFC_COWFLAG) {
start &= ~XFS_REFC_COWFLAG;
irec->rc_domain = XFS_RCDOM_COW;
@@ -117,8 +138,6 @@ xfs_refcount_btrec_to_irec(
}
irec->rc_startblock = start;
- irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
- irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
}
/*
@@ -131,15 +150,17 @@ xfs_refcount_get_rec(
int *stat)
{
struct xfs_mount *mp = cur->bc_mp;
- struct xfs_perag *pag = cur->bc_ag.pag;
union xfs_btree_rec *rec;
int error;
+ BUILD_BUG_ON(XFS_REFC_LEN_MAX != XFS_RTREFC_LEN_MAX);
+ BUILD_BUG_ON(XFS_REFC_COWFLAG != XFS_RTREFC_COWFLAG);
+
error = xfs_btree_get_rec(cur, &rec, stat);
if (error || !*stat)
return error;
- xfs_refcount_btrec_to_irec(rec, irec);
+ xfs_refcount_btrec_to_irec(cur, rec, irec);
if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX)
goto out_bad_rec;
@@ -150,12 +171,24 @@ xfs_refcount_get_rec(
goto out_bad_rec;
/* check for valid extent range, including overflow */
- if (!xfs_verify_agbno(pag, irec->rc_startblock))
- goto out_bad_rec;
- if (irec->rc_startblock > irec->rc_startblock + irec->rc_blockcount)
- goto out_bad_rec;
- if (!xfs_verify_agbno(pag, irec->rc_startblock + irec->rc_blockcount - 1))
- goto out_bad_rec;
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC) {
+ struct xfs_rtgroup *rtg = cur->bc_ino.rtg;
+
+ if (!xfs_verify_rgbext(rtg, irec->rc_startblock,
+ irec->rc_blockcount))
+ goto out_bad_rec;
+ } else {
+ struct xfs_perag *pag = cur->bc_ag.pag;
+
+ if (!xfs_verify_agbno(pag, irec->rc_startblock))
+ goto out_bad_rec;
+ if (irec->rc_startblock >
+ irec->rc_startblock + irec->rc_blockcount)
+ goto out_bad_rec;
+ if (!xfs_verify_agbno(pag,
+ irec->rc_startblock + irec->rc_blockcount - 1))
+ goto out_bad_rec;
+ }
if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX)
goto out_bad_rec;
@@ -164,9 +197,15 @@ xfs_refcount_get_rec(
return 0;
out_bad_rec:
- xfs_warn(mp,
- "Refcount BTree record corruption in AG %d detected!",
- pag->pag_agno);
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC) {
+ xfs_warn(mp,
+ "RT Refcount BTree record corruption in rtgroup %u detected!",
+ cur->bc_ino.rtg->rtg_rgno);
+ } else {
+ xfs_warn(mp,
+ "Refcount BTree record corruption in AG %d detected!",
+ cur->bc_ag.pag->pag_agno);
+ }
xfs_warn(mp,
"Start block 0x%x, block count 0x%x, references 0x%x",
irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount);
@@ -194,10 +233,15 @@ xfs_refcount_update(
if (irec->rc_domain == XFS_RCDOM_COW)
start |= XFS_REFC_COWFLAG;
- rec.refc.rc_startblock = cpu_to_be32(start);
- rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
- rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
-
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC) {
+ rec.rtrefc.rc_startblock = cpu_to_be32(start);
+ rec.rtrefc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
+ rec.rtrefc.rc_refcount = cpu_to_be32(irec->rc_refcount);
+ } else {
+ rec.refc.rc_startblock = cpu_to_be32(start);
+ rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
+ rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
+ }
error = xfs_btree_update(cur, &rec);
if (error)
trace_xfs_refcount_update_error(cur, error, _RET_IP_);
@@ -930,6 +974,15 @@ xfs_refcount_merge_extents(
return 0;
}
+static inline struct xbtree_refc *
+xrefc_btree_state(
+ struct xfs_btree_cur *cur)
+{
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC)
+ return &cur->bc_ino.refc;
+ return &cur->bc_ag.refc;
+}
+
/*
* XXX: This is a pretty hand-wavy estimate. The penalty for guessing
* true incorrectly is a shutdown FS; the penalty for guessing false
@@ -947,25 +1000,25 @@ xfs_refcount_still_have_space(
* to handle each of the shape changes to the refcount btree.
*/
overhead = xfs_allocfree_block_count(cur->bc_mp,
- cur->bc_ag.refc.shape_changes);
- overhead += cur->bc_mp->m_refc_maxlevels;
+ xrefc_btree_state(cur)->shape_changes);
+ overhead += cur->bc_maxlevels;
overhead *= cur->bc_mp->m_sb.sb_blocksize;
/*
* Only allow 2 refcount extent updates per transaction if the
* refcount continue update "error" has been injected.
*/
- if (cur->bc_ag.refc.nr_ops > 2 &&
+ if (xrefc_btree_state(cur)->nr_ops > 2 &&
XFS_TEST_ERROR(false, cur->bc_mp,
XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
return false;
- if (cur->bc_ag.refc.nr_ops == 0)
+ if (xrefc_btree_state(cur)->nr_ops == 0)
return true;
else if (overhead > cur->bc_tp->t_log_res)
return false;
return cur->bc_tp->t_log_res - overhead >
- cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
+ xrefc_btree_state(cur)->nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
/*
@@ -1000,7 +1053,7 @@ xfs_refcount_adjust_extents(
if (error)
goto out_error;
if (!found_rec || ext.rc_domain != XFS_RCDOM_SHARED) {
- ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
+ ext.rc_startblock = xrefc_max_startblock(cur);
ext.rc_blockcount = 0;
ext.rc_refcount = 0;
ext.rc_domain = XFS_RCDOM_SHARED;
@@ -1024,7 +1077,7 @@ xfs_refcount_adjust_extents(
* Either cover the hole (increment) or
* delete the range (decrement).
*/
- cur->bc_ag.refc.nr_ops++;
+ xrefc_btree_state(cur)->nr_ops++;
if (tmp.rc_refcount) {
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
@@ -1080,7 +1133,7 @@ xfs_refcount_adjust_extents(
goto skip;
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur, &ext);
- cur->bc_ag.refc.nr_ops++;
+ xrefc_btree_state(cur)->nr_ops++;
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
@@ -1167,7 +1220,7 @@ xfs_refcount_adjust(
if (shape_changed)
shape_changes++;
if (shape_changes)
- cur->bc_ag.refc.shape_changes++;
+ xrefc_btree_state(cur)->shape_changes++;
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen, adj);
@@ -1281,8 +1334,8 @@ xfs_refcount_finish_one(
*/
rcur = *pcur;
if (rcur != NULL && rcur->bc_ag.pag != pag) {
- nr_ops = rcur->bc_ag.refc.nr_ops;
- shape_changes = rcur->bc_ag.refc.shape_changes;
+ nr_ops = xrefc_btree_state(rcur)->nr_ops;
+ shape_changes = xrefc_btree_state(rcur)->shape_changes;
xfs_refcount_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
@@ -1294,8 +1347,8 @@ xfs_refcount_finish_one(
goto out_drop;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
- rcur->bc_ag.refc.nr_ops = nr_ops;
- rcur->bc_ag.refc.shape_changes = shape_changes;
+ xrefc_btree_state(rcur)->nr_ops = nr_ops;
+ xrefc_btree_state(rcur)->shape_changes = shape_changes;
}
*pcur = rcur;
@@ -1586,7 +1639,7 @@ xfs_refcount_adjust_cow_extents(
goto out_error;
}
if (!found_rec) {
- ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
+ ext.rc_startblock = xrefc_max_startblock(cur);
ext.rc_blockcount = 0;
ext.rc_refcount = 0;
ext.rc_domain = XFS_RCDOM_COW;
@@ -1785,9 +1838,14 @@ xfs_refcount_recover_extent(
{
struct list_head *debris = priv;
struct xfs_refcount_recovery *rr;
+ xfs_nlink_t refcount;
+
+ if (cur->bc_btnum == XFS_BTNUM_RTREFC)
+ refcount = be32_to_cpu(rec->rtrefc.rc_refcount);
+ else
+ refcount = be32_to_cpu(rec->refc.rc_refcount);
- if (XFS_IS_CORRUPT(cur->bc_mp,
- be32_to_cpu(rec->refc.rc_refcount) != 1)) {
+ if (XFS_IS_CORRUPT(cur->bc_mp, refcount != 1)) {
xfs_btree_mark_sick(cur);
return -EFSCORRUPTED;
}
@@ -1799,7 +1857,7 @@ xfs_refcount_recover_extent(
}
rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
- xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
+ xfs_refcount_btrec_to_irec(cur, rec, &rr->rr_rrec);
list_add_tail(&rr->rr_list, debris);
return 0;
@@ -1936,7 +1994,7 @@ xfs_refcount_query_range_helper(
struct xfs_refcount_query_range_info *query = priv;
struct xfs_refcount_irec irec;
- xfs_refcount_btrec_to_irec(rec, &irec);
+ xfs_refcount_btrec_to_irec(cur, rec, &irec);
return query->fn(cur, &irec, query->priv);
}
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 6271f95ce4e8..ea6a29de80b4 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -86,7 +86,8 @@ extern int xfs_refcount_scan_keyfill(struct xfs_btree_cur *cur,
enum xfs_rcext_domain domain, xfs_agblock_t bno,
xfs_extlen_t len, enum xfs_btree_keyfill *outcome);
union xfs_btree_rec;
-extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec,
+void xfs_refcount_btrec_to_irec(struct xfs_btree_cur *cur,
+ const union xfs_btree_rec *rec,
struct xfs_refcount_irec *irec);
extern int xfs_refcount_insert(struct xfs_btree_cur *cur,
struct xfs_refcount_irec *irec, int *stat);
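
For readers unfamiliar with the record decode that the patched xfs_refcount_btrec_to_irec() performs for both the AG and realtime record variants, here is a small standalone sketch (user-space C, not part of the patch): the high bit of the big-endian start block carries the COW-domain flag and is masked off before the start block is stored in the in-core record. All struct, constant, and helper names below are illustrative stand-ins for the real XFS definitions, not the kernel types themselves.

/*
 * Standalone illustration of decoding a big-endian on-disk refcount
 * record into an in-core form, splitting the COW-domain flag out of
 * the high bit of the start block.  Names are hypothetical stand-ins
 * for union xfs_btree_rec / struct xfs_refcount_irec.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl()/htonl() as be32 converters */

#define REFC_COWFLAG	(1U << 31)	/* mirrors XFS_REFC_COWFLAG */

struct refc_rec_disk {			/* on-disk layout, big-endian */
	uint32_t rc_startblock;
	uint32_t rc_blockcount;
	uint32_t rc_refcount;
};

struct refc_irec {			/* in-core layout, cpu-endian */
	uint32_t rc_startblock;
	uint32_t rc_blockcount;
	uint32_t rc_refcount;
	int	 rc_is_cow;		/* stands in for rc_domain */
};

static void btrec_to_irec(const struct refc_rec_disk *rec,
			  struct refc_irec *irec)
{
	uint32_t start = ntohl(rec->rc_startblock);

	irec->rc_is_cow = (start & REFC_COWFLAG) != 0;
	irec->rc_startblock = start & ~REFC_COWFLAG;
	irec->rc_blockcount = ntohl(rec->rc_blockcount);
	irec->rc_refcount = ntohl(rec->rc_refcount);
}

int main(void)
{
	/* A COW-domain record: start block 100, 8 blocks, refcount 1. */
	struct refc_rec_disk rec = {
		.rc_startblock = htonl(100 | REFC_COWFLAG),
		.rc_blockcount = htonl(8),
		.rc_refcount = htonl(1),
	};
	struct refc_irec irec;

	btrec_to_irec(&rec, &irec);
	printf("start=%u len=%u refcount=%u cow=%d\n",
	       irec.rc_startblock, irec.rc_blockcount,
	       irec.rc_refcount, irec.rc_is_cow);
	return 0;
}

Carrying the domain in the start block's high bit keeps the on-disk record compact; the in-core record stores the domain separately so later comparisons and range checks can use the unflagged start block directly.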