author     Darrick J. Wong <darrick.wong@oracle.com>    2019-08-30 15:45:05 -0700
committer  Darrick J. Wong <darrick.wong@oracle.com>    2019-10-09 09:39:23 -0700
commit     055f78e8d716546e7eeb3733cc40e564ff3e6cdc
tree       e651cd13f4bbad8cde71ebff4b97b52b0b9a46bf
parent     f5af145228885fd3a3fb4cf391b70f52304ca651
xfs: flush speculative space allocations when we run out of quota
If a fs modification (creation, file write, reflink, etc.) is unable to
reserve enough quota to handle the modification, try clearing whatever
space the filesystem might have been hanging onto in the hopes of
speeding up the filesystem.  The flushing behavior will become
particularly important when we add deferred inode inactivation because
that will increase the amount of space that isn't actively tied to user
data.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
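Every reservation site touched below follows the same retry shape.  The
condensed sketch here is assembled from the xfs_alloc_file_space() hunk
and is not a literal excerpt: the locking, the error labels, and whether
xfs_inode_free_quota_blocks() is allowed to run a synchronous scan (its
second argument) vary from call site to call site.

	bool		cleared_space = false;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if ((error == -ENOSPC || error == -EDQUOT) && !cleared_space) {
		/*
		 * Quota reservation failed: drop the transaction and the
		 * ILOCK, flush speculative preallocations (EOF blocks and
		 * CoW blocks) charged to this inode's quotas, and retry
		 * the reservation exactly once.  The second argument is
		 * true only at call sites that do not hold the IOLOCK,
		 * where a synchronous scan is safe.
		 */
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		cleared_space = xfs_inode_free_quota_blocks(ip, false);
		if (cleared_space)
			goto retry;
		return error;
	}
	if (error)
		goto out_trans_cancel;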
 fs/xfs/xfs_bmap_util.c | 17
 fs/xfs/xfs_file.c      |  2
 fs/xfs/xfs_icache.c    |  9
 fs/xfs/xfs_icache.h    |  2
 fs/xfs/xfs_inode.c     | 17
 fs/xfs/xfs_ioctl.c     |  2
 fs/xfs/xfs_iomap.c     | 21
 fs/xfs/xfs_reflink.c   | 45
 fs/xfs/xfs_trace.c     |  1
 fs/xfs/xfs_trace.h     | 40
 10 files changed, 147 insertions(+), 9 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8088b99c2d16..b8e2bff30704 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -900,6 +900,7 @@ xfs_alloc_file_space(
*/
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
+ bool cleared_space = false;
/*
* Determine space reservations for data/realtime.
@@ -942,6 +943,7 @@ xfs_alloc_file_space(
/*
* Allocate and setup the transaction.
*/
+retry:
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
resrtextents, 0, &tp);
@@ -958,6 +960,20 @@ xfs_alloc_file_space(
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
0, quota_flag);
+ /*
+ * We weren't able to reserve enough quota to handle fallocate.
+ * Flush any disk space that was being held in the hopes of
+ * speeding up the filesystem. We hold the IOLOCK so we cannot
+ * do a synchronous scan.
+ */
+ if ((error == -ENOSPC || error == -EDQUOT) && !cleared_space) {
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ cleared_space = xfs_inode_free_quota_blocks(ip, false);
+ if (cleared_space)
+ goto retry;
+ goto error2;
+ }
if (error)
goto error1;
@@ -996,6 +1012,7 @@ error0: /* unlock inode, unreserve quota blocks, cancel trans */
error1: /* Just cancel transaction */
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+error2:
return error;
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 464453e3bee4..b196c00efa7a 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -656,7 +656,7 @@ write_retry:
*/
if (ret == -EDQUOT && !cleared_space) {
xfs_iunlock(ip, iolock);
- cleared_space = xfs_inode_free_quota_blocks(ip);
+ cleared_space = xfs_inode_free_quota_blocks(ip, true);
if (cleared_space)
goto write_retry;
iolock = 0;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index ae94b8ef7be9..6fc5cb54914f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1532,7 +1532,8 @@ xfs_icache_free_eofblocks(
*/
bool
xfs_inode_free_quota_blocks(
- struct xfs_inode *ip)
+ struct xfs_inode *ip,
+ bool sync)
{
struct xfs_eofblocks eofb = {0};
struct xfs_dquot *dq;
@@ -1542,7 +1543,9 @@ xfs_inode_free_quota_blocks(
* Run a sync scan to increase effectiveness and use the union filter to
* cover all applicable quotas in a single scan.
*/
- eofb.eof_flags = XFS_EOF_FLAGS_UNION | XFS_EOF_FLAGS_SYNC;
+ eofb.eof_flags = XFS_EOF_FLAGS_UNION;
+ if (sync)
+ eofb.eof_flags |= XFS_EOF_FLAGS_SYNC;
if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
dq = xfs_inode_dquot(ip, XFS_DQ_USER);
@@ -1574,6 +1577,8 @@ xfs_inode_free_quota_blocks(
if (!do_work)
return false;
+ trace_xfs_inode_free_quota_blocks(ip->i_mount, &eofb, _RET_IP_);
+
xfs_icache_free_eofblocks(ip->i_mount, &eofb);
xfs_icache_free_cowblocks(ip->i_mount, &eofb);
return true;
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index aaf44e841bec..a42a682cec0b 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -57,7 +57,7 @@ long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
-bool xfs_inode_free_quota_blocks(struct xfs_inode *ip);
+bool xfs_inode_free_quota_blocks(struct xfs_inode *ip, bool sync);
void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4d0a373a3229..a96e2ce6a5c5 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1135,6 +1135,7 @@ xfs_create(
struct xfs_dquot *gdqp = NULL;
struct xfs_dquot *pdqp = NULL;
struct xfs_trans_res *tres;
+ bool cleared_space = false;
uint resblks;
trace_xfs_create(dp, name);
@@ -1168,6 +1169,7 @@ xfs_create(
* the case we'll drop the one we have and get a more
* appropriate transaction later.
*/
+retry:
error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
if (error == -ENOSPC) {
/* flush outstanding delalloc blocks and retry */
@@ -1185,6 +1187,21 @@ xfs_create(
*/
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
pdqp, resblks, 1, 0);
+ /*
+ * We weren't able to reserve enough quota to handle adding the inode.
+ * Flush any disk space that was being held in the hopes of speeding up
+ * the filesystem.
+ */
+ if ((error == -EDQUOT || error == -ENOSPC) && !cleared_space) {
+ xfs_trans_cancel(tp);
+ if (unlock_dp_on_error)
+ xfs_iunlock(dp, XFS_ILOCK_EXCL);
+ unlock_dp_on_error = false;
+ cleared_space = xfs_inode_free_quota_blocks(dp, true);
+ if (cleared_space)
+ goto retry;
+ goto out_release_inode;
+ }
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index ad4c7eedda3d..404e6029b2fd 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -2434,6 +2434,8 @@ xfs_file_ioctl(
if (error)
return error;
+ trace_xfs_ioc_free_eofblocks(mp, &keofb, _RET_IP_);
+
return xfs_icache_free_eofblocks(mp, &keofb);
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index f780e223b118..d1b8618fa675 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -27,7 +27,7 @@
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
-
+#include "xfs_icache.h"
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
@@ -185,6 +185,7 @@ xfs_iomap_write_direct(
int error;
int lockmode;
int bmapi_flags = XFS_BMAPI_PREALLOC;
+ bool cleared_space = false;
uint tflags = 0;
rt = XFS_IS_REALTIME_INODE(ip);
@@ -258,6 +259,7 @@ xfs_iomap_write_direct(
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
}
}
+retry:
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
tflags, &tp);
if (error)
@@ -267,6 +269,23 @@ xfs_iomap_write_direct(
xfs_ilock(ip, lockmode);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
+ /*
+ * We weren't able to reserve enough quota for the direct write.
+ * Flush any disk space that was being held in the hopes of speeding up
+ * the filesystem. Historically, we expected callers to have
+ * preallocated all the space before a direct write, but this is not an
+ * absolute requirement. We still hold the IOLOCK so we cannot do a
+ * sync scan.
+ */
+ if ((error == -ENOSPC || error == -EDQUOT) && !cleared_space) {
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, lockmode);
+ lockmode = 0;
+ cleared_space = xfs_inode_free_quota_blocks(ip, false);
+ if (cleared_space)
+ goto retry;
+ goto out_unlock;
+ }
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 0f08153b4994..4ab483483a31 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -353,13 +353,14 @@ xfs_reflink_allocate_cow(
bool convert_now)
{
struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
- struct xfs_trans *tp;
- int nimaps, error = 0;
- bool found;
xfs_filblks_t resaligned;
xfs_extlen_t resblks = 0;
+ bool found;
+ bool cleared_space = false;
+ int nimaps, error = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (!ip->i_cowfp) {
@@ -378,6 +379,7 @@ xfs_reflink_allocate_cow(
resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
xfs_iunlock(ip, *lockmode);
+retry:
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
*lockmode = XFS_ILOCK_EXCL;
xfs_ilock(ip, *lockmode);
@@ -402,6 +404,23 @@ xfs_reflink_allocate_cow(
error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
XFS_QMOPT_RES_REGBLKS);
+ /*
+ * We weren't able to reserve enough quota to handle copy on write.
+ * Flush any disk space that was being held in the hopes of speeding up
+ * the filesystem. We potentially hold the IOLOCK so we cannot do a
+ * synchronous scan.
+ */
+ if ((error == -ENOSPC || error == -EDQUOT) && !cleared_space) {
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, *lockmode);
+ *lockmode = 0;
+ cleared_space = xfs_inode_free_quota_blocks(ip, false);
+ if (cleared_space)
+ goto retry;
+ *lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, *lockmode);
+ goto out;
+ }
if (error)
goto out_trans_cancel;
@@ -443,6 +462,7 @@ out_unreserve:
XFS_QMOPT_RES_REGBLKS);
out_trans_cancel:
xfs_trans_cancel(tp);
+out:
return error;
}
@@ -1005,6 +1025,7 @@ xfs_reflink_remap_extent(
xfs_filblks_t rlen;
xfs_filblks_t unmap_len;
xfs_off_t newlen;
+ bool cleared_space = false;
int error;
unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
@@ -1020,21 +1041,37 @@ xfs_reflink_remap_extent(
/* Start a rolling transaction to switch the mappings */
resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+retry:
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
if (error)
goto out;
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
/* If we're not just clearing space, then do we have enough quota? */
if (real_extent) {
error = xfs_trans_reserve_quota_nblks(tp, ip,
irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
+ /*
+ * We weren't able to reserve enough quota for the remapping.
+ * Flush any disk space that was being held in the hopes of
+ * speeding up the filesystem. We still hold the IOLOCK so we
+ * cannot do a sync scan.
+ */
+ if ((error == -ENOSPC || error == -EDQUOT) && !cleared_space) {
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ cleared_space = xfs_inode_free_quota_blocks(ip, false);
+ if (cleared_space)
+ goto retry;
+ goto out;
+ }
if (error)
goto out_cancel;
}
+ xfs_trans_ijoin(tp, ip, 0);
+
trace_xfs_reflink_remap(ip, irec->br_startoff,
irec->br_blockcount, irec->br_startblock);
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 9b5e58a92381..09c643643604 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -28,6 +28,7 @@
#include "xfs_log_recover.h"
#include "xfs_filestream.h"
#include "xfs_fsmap.h"
+#include "xfs_icache.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 6d7ba64b7a0f..37dbcef4f5c9 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -36,6 +36,7 @@ struct xfs_owner_info;
struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;
+struct xfs_eofblocks;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -3755,6 +3756,45 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->nr_records)
)
+DECLARE_EVENT_CLASS(xfs_eofblocks_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb,
+ unsigned long caller_ip),
+ TP_ARGS(mp, eofb, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(__u32, flags)
+ __field(uint32_t, uid)
+ __field(uint32_t, gid)
+ __field(prid_t, prid)
+ __field(__u64, min_file_size)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->flags = eofb->eof_flags;
+ __entry->uid = xfs_kuid_to_uid(eofb->eof_uid);
+ __entry->gid = xfs_kgid_to_gid(eofb->eof_gid);
+ __entry->prid = eofb->eof_prid;
+ __entry->min_file_size = eofb->eof_min_file_size;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->flags,
+ __entry->uid,
+ __entry->gid,
+ __entry->prid,
+ __entry->min_file_size,
+ (char *)__entry->caller_ip)
+);
+#define DEFINE_EOFBLOCKS_EVENT(name) \
+DEFINE_EVENT(xfs_eofblocks_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, eofb, caller_ip))
+DEFINE_EOFBLOCKS_EVENT(xfs_ioc_free_eofblocks);
+DEFINE_EOFBLOCKS_EVENT(xfs_inode_free_quota_blocks);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH