diff options
author | Darrick J. Wong <djwong@kernel.org> | 2022-07-14 11:06:01 -0700 |
---|---|---|
committer | Darrick J. Wong <djwong@kernel.org> | 2022-11-09 19:07:24 -0800 |
commit | de53333d8d107f84a4d29eed273fe098a2c91c7e (patch) | |
tree | 2b50339145ea73c86ac5aa95a389e36957c9520c /fs/xfs/scrub/xfile.c | |
parent | d110f042db8e051e9154a90dba22ad020519fd39 (diff) |
xfs: teach xfile to pass back direct-map pages to caller
Certain xfile array operations (such as sorting) can be sped up quite a
bit by allowing xfile users to grab a page to bulk-read the records
contained within it. Create helper methods to facilitate this.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Diffstat (limited to 'fs/xfs/scrub/xfile.c')
-rw-r--r-- | fs/xfs/scrub/xfile.c | 98 |
1 file changed, 98 insertions, 0 deletions
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index c0c34cc1db99..9b32d0dfaa71 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -316,3 +316,101 @@ xfile_stat(
 	statbuf->bytes = ks.blocks << SECTOR_SHIFT;
 	return 0;
 }
+
+/*
+ * Grab the (locked) page for a memory object.  The object cannot span a page
+ * boundary.  Returns 0 (and a locked page) if successful, -ENOTBLK if we
+ * cannot grab the page, or the usual negative errno.
+ */
+int
+xfile_obj_get_page(
+	struct xfile		*xf,
+	loff_t			pos,
+	unsigned int		len,
+	struct page		**pagep,
+	void			**fsdatap)
+{
+	struct inode		*inode = file_inode(xf->file);
+	struct address_space	*mapping = inode->i_mapping;
+	const struct address_space_operations *aops = mapping->a_ops;
+	struct page		*page = NULL;
+	void			*fsdata = NULL;
+	unsigned int		pflags;
+	int			error;
+
+	if (inode->i_sb->s_maxbytes - pos < len)
+		return -ENOMEM;
+	if (len > PAGE_SIZE - offset_in_page(pos))
+		return -ENOTBLK;
+
+	trace_xfile_obj_get_page(xf, pos, len);
+
+	pflags = memalloc_nofs_save();
+
+	/*
+	 * We call write_begin directly here to avoid all the freezer
+	 * protection lock-taking that happens in the normal path.  shmem
+	 * doesn't support fs freeze, but lockdep doesn't know that and will
+	 * trip over that.
+	 */
+	error = aops->write_begin(NULL, mapping, pos, len, &page, &fsdata);
+	if (error)
+		goto out_pflags;
+
+	/* We got the page, so make sure we push out EOF. */
+	if (i_size_read(inode) < pos + len)
+		i_size_write(inode, pos + len);
+
+	/*
+	 * If the page isn't up to date, fill it with zeroes before we hand it
+	 * to the caller and make sure the backing store will hold on to them.
+	 */
+	if (!PageUptodate(page)) {
+		void	*kaddr;
+
+		kaddr = kmap_local_page(page);
+		memset(kaddr, 0, PAGE_SIZE);
+		kunmap_local(kaddr);
+		SetPageUptodate(page);
+		set_page_dirty(page);
+	}
+
+	*pagep = page;
+	*fsdatap = fsdata;
+out_pflags:
+	memalloc_nofs_restore(pflags);
+	return error;
+}
+
+/*
+ * Release the (locked) page for a memory object.  The page must have been
+ * obtained by xfile_obj_get_page.  Returns 0 or a negative errno.
+ */
+int
+xfile_obj_put_page(
+	struct xfile		*xf,
+	loff_t			pos,
+	unsigned int		len,
+	struct page		*page,
+	void			*fsdata)
+{
+	struct inode		*inode = file_inode(xf->file);
+	struct address_space	*mapping = inode->i_mapping;
+	const struct address_space_operations *aops = mapping->a_ops;
+	unsigned int		pflags;
+	int			ret;
+
+	ASSERT(len <= PAGE_SIZE - offset_in_page(pos));
+
+	trace_xfile_obj_put_page(xf, pos, len);
+
+	pflags = memalloc_nofs_save();
+	ret = aops->write_end(NULL, mapping, pos, len, len, page, fsdata);
+	memalloc_nofs_restore(pflags);
+
+	if (ret < 0)
+		return ret;
+	if (ret != len)
+		return -EIO;
+	return 0;
+}