author     Darrick J. Wong <djwong@kernel.org>    2021-01-28 15:46:57 -0800
committer  Darrick J. Wong <djwong@kernel.org>    2021-03-25 17:08:53 -0700
commit     a251bf581d7ae7b24a1e5ffc5bca49cb71e71ff6 (patch)
tree       3c1f66c8faa305cb22c271706410d6c425fbecdc
parent     543fe2fda76e90a8440d96791f7dac4481b05ebe (diff)
xfs: fix chown with rt quota

Make chown's quota adjustments work with realtime files.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
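Before this change, xfs_qm_vop_chown() charged all of ip->i_d.di_nblocks to a single counter, picking XFS_TRANS_DQ_RTBCOUNT for realtime inodes and XFS_TRANS_DQ_BCOUNT otherwise, even though a realtime file's block count covers both its rt extents and any bmbt blocks that live on the data device. The core of the fix is to count the two kinds of blocks separately and adjust both dquot counters. Roughly (an illustrative sketch of the accounting, not the patch text; the real hunks follow below):

	xfs_filblks_t	dblocks, rblocks;

	/* Split the inode's block count into data-device and rt blocks. */
	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);

	/* Take both kinds of blocks away from the old owner's dquot... */
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
			-(xfs_qcnt_t)dblocks);
	xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
			-(xfs_qcnt_t)rblocks);

	/* ...and charge them to the new owner's dquot. */
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
	xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);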
-rw-r--r--  fs/xfs/xfs_qm.c     45
-rw-r--r--  fs/xfs/xfs_trans.c  32
2 files changed, 59 insertions, 18 deletions
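The xfs_trans.c hunk applies the same split to the quota reservation taken when the new dquots are attached: instead of reserving di_nblocks + i_delayed_blks as regular blocks, it counts data and rt blocks separately, folds the delalloc blocks into whichever pool the inode actually uses, and makes one reservation per block type. Roughly (a sketch under the same caveat, with the retry and error paths elided):

	xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
	if (XFS_IS_REALTIME_INODE(ip))
		rblocks += ip->i_delayed_blks;
	else
		dblocks += ip->i_delayed_blks;

	/* Data device blocks, plus one inode. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
			dblocks, 1, qflags);

	/* Realtime blocks; no second inode reservation. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
			rblocks, 0,
			XFS_QMOPT_RES_RTBLKS | (qflags & XFS_QMOPT_FORCE_RES));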
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index b1c7bfc7f34f..9889788e9ef5 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1206,8 +1206,8 @@ xfs_qm_dqusage_adjust(
void *data)
{
struct xfs_inode *ip;
- xfs_qcnt_t nblks;
- xfs_filblks_t rtblks = 0; /* total rt blks */
+ xfs_filblks_t nblks, rtblks;
+ unsigned int lock_mode;
int error;
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -1231,19 +1231,21 @@ xfs_qm_dqusage_adjust(
ASSERT(ip->i_delayed_blks == 0);
+ lock_mode = xfs_ilock_data_map_shared(ip);
if (XFS_IS_REALTIME_INODE(ip)) {
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ if (ifp->if_format == XFS_DINODE_FMT_BTREE &&
+ !(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
- if (error)
+ if (error) {
+ xfs_iunlock(ip, lock_mode);
goto error0;
+ }
}
-
- xfs_bmap_count_leaves(ifp, &rtblks);
}
-
- nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
+ xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
+ xfs_iunlock(ip, lock_mode);
/*
* Add the (disk blocks and inode) resources occupied by this
@@ -1853,9 +1855,8 @@ xfs_qm_vop_chown(
struct xfs_dquot *newdq)
{
struct xfs_dquot *prevdq;
- uint bfield = XFS_IS_REALTIME_INODE(ip) ?
- XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
-
+ xfs_filblks_t dblocks, rblocks;
+ bool isrt = XFS_IS_REALTIME_INODE(ip);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
@@ -1865,11 +1866,17 @@ xfs_qm_vop_chown(
ASSERT(prevdq);
ASSERT(prevdq != newdq);
- xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_d.di_nblocks));
+ xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
+
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT,
+ -(xfs_qcnt_t)dblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT,
+ -(xfs_qcnt_t)rblocks);
xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
/* the sparkling new dquot */
- xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_d.di_nblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks);
xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
@@ -1879,7 +1886,8 @@ xfs_qm_vop_chown(
* (having already bumped up the real counter) so that we don't have
* any reservation to give back when we commit.
*/
- xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
+ xfs_trans_mod_dquot(tp, newdq,
+ isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
-ip->i_delayed_blks);
/*
@@ -1891,8 +1899,13 @@ xfs_qm_vop_chown(
*/
tp->t_flags |= XFS_TRANS_DIRTY;
xfs_dqlock(prevdq);
- ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
- prevdq->q_blk.reserved -= ip->i_delayed_blks;
+ if (isrt) {
+ ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
+ prevdq->q_rtb.reserved -= ip->i_delayed_blks;
+ } else {
+ ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
+ prevdq->q_blk.reserved -= ip->i_delayed_blks;
+ }
xfs_dqunlock(prevdq);
/*
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e548d53c2091..09aa14579239 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1248,11 +1248,27 @@ retry:
gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
if (udqp || gdqp || pdqp) {
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_filblks_t dblocks, rblocks;
unsigned int qflags = XFS_QMOPT_RES_REGBLKS;
if (force)
qflags |= XFS_QMOPT_FORCE_RES;
+ if (XFS_IS_REALTIME_INODE(ip) &&
+ ifp->if_format == XFS_DINODE_FMT_BTREE &&
+ !(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
+ if (error)
+ goto out_cancel;
+ }
+
+ xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);
+ if (XFS_IS_REALTIME_INODE(ip))
+ rblocks += ip->i_delayed_blks;
+ else
+ dblocks += ip->i_delayed_blks;
+
/*
* Reserve enough quota to handle blocks on disk and reserved
* for a delayed allocation. We'll actually transfer the
@@ -1260,8 +1276,20 @@ retry:
* though that part is only semi-transactional.
*/
error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
- pdqp, ip->i_d.di_nblocks + ip->i_delayed_blks,
- 1, qflags);
+ pdqp, dblocks, 1, qflags);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error)
+ goto out_cancel;
+
+ /* Do the same for realtime. */
+ qflags = XFS_QMOPT_RES_RTBLKS | (qflags & XFS_QMOPT_FORCE_RES);
+ error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
+ pdqp, rblocks, 0, qflags);
if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
xfs_trans_cancel(tp);
xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);