author    Darrick J. Wong <djwong@kernel.org>    2021-03-23 16:59:31 -0700
committer Darrick J. Wong <djwong@kernel.org>    2021-04-07 14:38:21 -0700
commit    7d88329e5b0fe636e63e2b1f078696bc85780442 (patch)
tree      5555d8aeb2fc81fbdbdb21ed60a2eff70474cf8c /fs
parent    2b156ff8c82eed24d2b06520923856946143ba17 (diff)
xfs: move the check for post-EOF mappings into xfs_can_free_eofblocks
Fix the weird split of responsibilities between xfs_can_free_eofblocks and
xfs_free_eofblocks by moving the chunk of code that looks for any actual
post-EOF space mappings from the second function into the first.

This clears the way for deferred inode inactivation to be able to decide if
an inode needs inactivation work before committing the released inode to the
inactivation code paths (vs. marking it for reclaim).

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
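[Editor's note: for illustration only, a rough sketch of how a later caller
might use the now self-contained predicate to decide whether a released inode
still needs work. This helper is hypothetical and not part of this patch; only
xfs_can_free_eofblocks() with the signature shown in the diff below is assumed.]

/*
 * Hypothetical sketch, not part of this patch: with the post-EOF mapping
 * lookup folded into xfs_can_free_eofblocks(), a caller can ask "does this
 * inode still have post-EOF blocks to trim?" without starting a transaction
 * or committing the inode to the inactivation path.
 */
static bool
xfs_inode_needs_eofblocks_trim(
	struct xfs_inode	*ip)
{
	/*
	 * During inactivation VFS_I(ip)->i_state has I_FREEING set, which
	 * satisfies the new locking assertion in xfs_can_free_eofblocks()
	 * without taking the iolock.  Pass force == true to match the
	 * "inode is going away" case.
	 */
	return xfs_can_free_eofblocks(ip, true);
}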
Diffstat (limited to 'fs')
-rw-r--r--  fs/xfs/xfs_bmap_util.c  145
-rw-r--r--  fs/xfs/xfs_inode.c       36
2 files changed, 99 insertions(+), 82 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 2b9991e5ea47..e79e3d1ff38d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -597,8 +597,24 @@ out_unlock:
* regular files that are marked preallocated or append-only.
*/
bool
-xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
+xfs_can_free_eofblocks(
+ struct xfs_inode *ip,
+ bool force)
{
+ struct xfs_bmbt_irec imap;
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t end_fsb;
+ xfs_fileoff_t last_fsb;
+ int nimaps = 1;
+ int error;
+
+ /*
+ * Caller must either hold the exclusive io lock; or be inactivating
+ * the inode, which guarantees there are no other users of the inode.
+ */
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
+ (VFS_I(ip)->i_state & I_FREEING));
+
/* prealloc/delalloc exists only on regular files */
if (!S_ISREG(VFS_I(ip)->i_mode))
return false;
@@ -624,7 +640,32 @@ xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
if (!force || ip->i_delayed_blks == 0)
return false;
- return true;
+ /*
+ * Do not try to free post-EOF blocks if EOF is beyond the end of the
+ * range supported by the page cache, because the truncation will loop
+ * forever.
+ */
+ end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
+ last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
+ if (last_fsb <= end_fsb)
+ return false;
+
+ /*
+ * Look up the mapping for the first block past EOF. If we can't find
+ * it, there's nothing to free.
+ */
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
+ 0);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ if (error || nimaps == 0)
+ return false;
+
+ /*
+ * If there's a real mapping there or there are delayed allocation
+ * reservations, then we have post-EOF blocks to try to free.
+ */
+ return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}
/*
@@ -637,78 +678,52 @@ xfs_free_eofblocks(
struct xfs_inode *ip)
{
struct xfs_trans *tp;
- int error;
- xfs_fileoff_t end_fsb;
- xfs_fileoff_t last_fsb;
- xfs_filblks_t map_len;
- int nimaps;
- struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
+ int error;
- /*
- * Figure out if there are any blocks beyond the end
- * of the file. If not, then there is nothing to do.
- */
- end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
- last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
- if (last_fsb <= end_fsb)
- return 0;
- map_len = last_fsb - end_fsb;
+ /* Attach the dquots to the inode up front. */
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ return error;
- nimaps = 1;
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ /* Wait on dio to ensure i_size has settled. */
+ inode_dio_wait(VFS_I(ip));
- /*
- * If there are blocks after the end of file, truncate the file to its
- * current size to free them up.
- */
- if (!error && (nimaps != 0) &&
- (imap.br_startblock != HOLESTARTBLOCK ||
- ip->i_delayed_blks)) {
- /*
- * Attach the dquots to the inode up front.
- */
- error = xfs_qm_dqattach(ip);
- if (error)
- return error;
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error) {
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ return error;
+ }
- /* wait on dio to ensure i_size has settled */
- inode_dio_wait(VFS_I(ip));
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
- &tp);
- if (error) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- return error;
- }
+ /*
+ * Do not update the on-disk file size. If we update the on-disk file
+ * size and then the system crashes before the contents of the file are
+ * flushed to disk then the files may be full of holes (ie NULL files
+ * bug).
+ */
+ error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
+ XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
+ if (error)
+ goto err_cancel;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_trans_commit(tp);
+ if (error)
+ goto out_unlock;
- /*
- * Do not update the on-disk file size. If we update the
- * on-disk file size and then the system crashes before the
- * contents of the file are flushed to disk then the files
- * may be full of holes (ie NULL files bug).
- */
- error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
- XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
- if (error) {
- /*
- * If we get an error at this point we simply don't
- * bother truncating the file.
- */
- xfs_trans_cancel(tp);
- } else {
- error = xfs_trans_commit(tp);
- if (!error)
- xfs_inode_clear_eofblocks_tag(ip);
- }
+ xfs_inode_clear_eofblocks_tag(ip);
+ goto out_unlock;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
+err_cancel:
+ /*
+ * If we get an error at this point we simply don't
+ * bother truncating the file.
+ */
+ xfs_trans_cancel(tp);
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e26dfcd4d241..fa2d377e2514 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1446,7 +1446,7 @@ xfs_release(
xfs_inode_t *ip)
{
xfs_mount_t *mp = ip->i_mount;
- int error;
+ int error = 0;
if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
return 0;
@@ -1482,8 +1482,16 @@ xfs_release(
if (VFS_I(ip)->i_nlink == 0)
return 0;
- if (xfs_can_free_eofblocks(ip, false)) {
+ /*
+ * If we can't get the iolock just skip truncating the blocks past EOF
+ * because we could deadlock with the mmap_lock otherwise. We'll get
+ * another chance to drop them once the last reference to the inode is
+ * dropped, so we'll never leak blocks permanently.
+ */
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
+ return 0;
+ if (xfs_can_free_eofblocks(ip, false)) {
/*
* Check if the inode is being opened, written and closed
* frequently and we have delayed allocation blocks outstanding
@@ -1499,26 +1507,20 @@ xfs_release(
* place.
*/
if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
- return 0;
- /*
- * If we can't get the iolock just skip truncating the blocks
- * past EOF because we could deadlock with the mmap_lock
- * otherwise. We'll get another chance to drop them once the
- * last reference to the inode is dropped, so we'll never leak
- * blocks permanently.
- */
- if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
- error = xfs_free_eofblocks(ip);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- if (error)
- return error;
- }
+ goto out_unlock;
+
+ error = xfs_free_eofblocks(ip);
+ if (error)
+ goto out_unlock;
/* delalloc blocks after truncation means it really is dirty */
if (ip->i_delayed_blks)
xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
}
- return 0;
+
+out_unlock:
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return error;
}
/*