author     Christoph Hellwig <hch@lst.de>                 2019-10-17 13:12:02 -0700
committer  Darrick J. Wong <darrick.wong@oracle.com>      2019-10-21 08:51:59 -0700
commit     2492a606b3d24ed901203bd4db19f46d112811c7 (patch)
tree       d6ea4d8307d72f325afdeafe1ae385014b56d4a3 /fs/xfs
parent     7684e2c4384d5d1f884b01ab8bff2369e4db0bff (diff)
xfs: initialize iomap->flags in xfs_bmbt_to_iomap
Currently we don't overwrite the flags field in the iomap in xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on the stack, but is harmful once we want to be able to reuse an iomap in the writeback code. Replace the shared parameter with a set of initial flags and thus ensure the flags field is always reinitialized.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
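For illustration only, not part of the commit: a minimal, self-contained sketch of the hazard the message describes. When a helper only ORs bits into a reused mapping's flags field, stale bits from the previous call leak into the next mapping; passing the initial flags as a parameter and assigning them unconditionally avoids that. The struct and flag names below are simplified stand-ins, not the kernel definitions.

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified stand-ins for the kernel types and flags (illustrative only). */
	#define DEMO_F_NEW    (1u << 0)
	#define DEMO_F_SHARED (1u << 1)

	struct demo_iomap {
		uint64_t addr;
		uint16_t flags;
	};

	/* Old pattern: only ORs in bits, so a reused struct keeps stale flags. */
	static void fill_old(struct demo_iomap *iomap, uint64_t addr, int shared)
	{
		iomap->addr = addr;
		if (shared)
			iomap->flags |= DEMO_F_SHARED;	/* flags never reset */
	}

	/* New pattern: caller passes the initial flags, field is always rewritten. */
	static void fill_new(struct demo_iomap *iomap, uint64_t addr, uint16_t flags)
	{
		iomap->addr = addr;
		iomap->flags = flags;			/* stale bits cannot leak */
	}

	int main(void)
	{
		struct demo_iomap iomap = { 0 };

		fill_old(&iomap, 1, 1);		/* first use: shared mapping */
		fill_old(&iomap, 2, 0);		/* reuse: DEMO_F_SHARED still set */
		printf("old: flags=%#x (stale)\n", iomap.flags);

		fill_new(&iomap, 1, DEMO_F_SHARED | DEMO_F_NEW);
		fill_new(&iomap, 2, 0);		/* reuse: flags correctly cleared */
		printf("new: flags=%#x\n", iomap.flags);
		return 0;
	}

This mirrors the shape of the change in the diff below: xfs_bmbt_to_iomap now assigns iomap->flags from its flags argument, and callers accumulate IOMAP_F_NEW, IOMAP_F_SHARED, and IOMAP_F_DIRTY into a local variable before the call.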
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/xfs_iomap.c   30
-rw-r--r--   fs/xfs/xfs_iomap.h    2
-rw-r--r--   fs/xfs/xfs_pnfs.c     2
3 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 32993c2acbd9..699bbb81b8a8 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -54,7 +54,7 @@ xfs_bmbt_to_iomap(
struct xfs_inode *ip,
struct iomap *iomap,
struct xfs_bmbt_irec *imap,
- bool shared)
+ u16 flags)
{
struct xfs_mount *mp = ip->i_mount;
@@ -79,12 +79,11 @@ xfs_bmbt_to_iomap(
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+ iomap->flags = flags;
if (xfs_ipincount(ip) &&
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
iomap->flags |= IOMAP_F_DIRTY;
- if (shared)
- iomap->flags |= IOMAP_F_SHARED;
return 0;
}
@@ -540,6 +539,7 @@ xfs_file_iomap_begin_delay(
struct xfs_iext_cursor icur, ccur;
xfs_fsblock_t prealloc_blocks = 0;
bool eof = false, cow_eof = false, shared = false;
+ u16 iomap_flags = 0;
int whichfork = XFS_DATA_FORK;
int error = 0;
@@ -707,7 +707,7 @@ retry:
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
*/
- iomap->flags |= IOMAP_F_NEW;
+ iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, whichfork,
whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
@@ -715,14 +715,17 @@ done:
if (imap.br_startoff > offset_fsb) {
xfs_trim_extent(&cmap, offset_fsb,
imap.br_startoff - offset_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap,
+ IOMAP_F_SHARED);
goto out_unlock;
}
/* ensure we only report blocks we have a reservation for */
xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
shared = true;
}
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
+ if (shared)
+ iomap_flags |= IOMAP_F_SHARED;
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -930,6 +933,7 @@ xfs_file_iomap_begin(
xfs_fileoff_t offset_fsb, end_fsb;
int nimaps = 1, error = 0;
bool shared = false;
+ u16 iomap_flags = 0;
unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
@@ -1045,7 +1049,7 @@ xfs_file_iomap_begin(
if (error)
return error;
- iomap->flags |= IOMAP_F_NEW;
+ iomap_flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
out_finish:
@@ -1055,8 +1059,10 @@ out_finish:
* there is no other metadata changes pending or have been made here.
*/
if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
- iomap->flags |= IOMAP_F_DIRTY;
- return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
+ iomap_flags |= IOMAP_F_DIRTY;
+ if (shared)
+ iomap_flags |= IOMAP_F_SHARED;
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_found:
ASSERT(nimaps);
@@ -1200,7 +1206,7 @@ xfs_seek_iomap_begin(
if (data_fsb < cow_fsb + cmap.br_blockcount)
end_fsb = min(end_fsb, data_fsb);
xfs_trim_extent(&cmap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
/*
* This is a COW extent, so we must probe the page cache
* because there could be dirty page cache being backed
@@ -1222,7 +1228,7 @@ xfs_seek_iomap_begin(
imap.br_state = XFS_EXT_NORM;
done:
xfs_trim_extent(&imap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
@@ -1268,7 +1274,7 @@ out_unlock:
if (error)
return error;
ASSERT(nimaps);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}
const struct iomap_ops xfs_xattr_iomap_ops = {
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 5c2f6aa6d78f..71d0ae460c44 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
- struct xfs_bmbt_irec *, bool shared);
+ struct xfs_bmbt_irec *, u16);
xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
static inline xfs_filblks_t
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index a339bd5fa260..9c96493be9e0 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -178,7 +178,7 @@ xfs_fs_map_blocks(
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
*device_generation = mp->m_generation;
return error;
out_unlock: