Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--  fs/f2fs/data.c  624
1 file changed, 313 insertions(+), 311 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 41addc605350..06b552a0aba2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -292,13 +292,11 @@ static void f2fs_read_end_io(struct bio *bio)
struct bio_post_read_ctx *ctx;
bool intask = in_task();
- iostat_update_and_unbind_ctx(bio, 0);
+ iostat_update_and_unbind_ctx(bio);
ctx = bio->bi_private;
- if (time_to_inject(sbi, FAULT_READ_IO)) {
- f2fs_show_injection_info(sbi, FAULT_READ_IO);
+ if (time_to_inject(sbi, FAULT_READ_IO))
bio->bi_status = BLK_STS_IOERR;
- }
if (bio->bi_status) {
f2fs_finish_read_bio(bio, intask);
@@ -332,13 +330,11 @@ static void f2fs_write_end_io(struct bio *bio)
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
- iostat_update_and_unbind_ctx(bio, 1);
+ iostat_update_and_unbind_ctx(bio);
sbi = bio->bi_private;
- if (time_to_inject(sbi, FAULT_WRITE_IO)) {
- f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
+ if (time_to_inject(sbi, FAULT_WRITE_IO))
bio->bi_status = BLK_STS_IOERR;
- }
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
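
The two end_io hunks above share one cleanup: time_to_inject() now logs the injection itself, so callers drop f2fs_show_injection_info() and the three-line block collapses to a single if. Below is a minimal userspace model of the resulting pattern; the names mirror the kernel's, but this is an illustrative sketch, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum blk_status { BLK_STS_OK = 0, BLK_STS_IOERR = 10 };

    /* models time_to_inject(): decide *and* log in one place */
    static bool time_to_inject(const char *type)
    {
        bool hit = (rand() % 100) == 0;   /* 1% injection rate, assumed */

        if (hit)
            fprintf(stderr, "inject %s fault\n", type);
        return hit;
    }

    int main(void)
    {
        enum blk_status status = BLK_STS_OK;

        if (time_to_inject("READ_IO"))    /* caller no longer logs */
            status = BLK_STS_IOERR;
        printf("bio status: %d\n", status);
        return 0;
    }
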
@@ -507,65 +503,66 @@ static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
return fscrypt_mergeable_bio(bio, inode, next_idx);
}
-static inline void __submit_bio(struct f2fs_sb_info *sbi,
- struct bio *bio, enum page_type type)
+void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ enum page_type type)
{
- if (!is_read_io(bio_op(bio))) {
- unsigned int start;
+ WARN_ON_ONCE(!is_read_io(bio_op(bio)));
+ trace_f2fs_submit_read_bio(sbi->sb, type, bio);
- if (type != DATA && type != NODE)
- goto submit_io;
+ iostat_update_submit_ctx(bio, type);
+ submit_bio(bio);
+}
- if (f2fs_lfs_mode(sbi) && current->plug)
- blk_finish_plug(current->plug);
+static void f2fs_align_write_bio(struct f2fs_sb_info *sbi, struct bio *bio)
+{
+ unsigned int start =
+ (bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS) % F2FS_IO_SIZE(sbi);
- if (!F2FS_IO_ALIGNED(sbi))
- goto submit_io;
+ if (start == 0)
+ return;
- start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
- start %= F2FS_IO_SIZE(sbi);
+ /* fill dummy pages */
+ for (; start < F2FS_IO_SIZE(sbi); start++) {
+ struct page *page =
+ mempool_alloc(sbi->write_io_dummy,
+ GFP_NOIO | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, !page);
- if (start == 0)
- goto submit_io;
+ lock_page(page);
- /* fill dummy pages */
- for (; start < F2FS_IO_SIZE(sbi); start++) {
- struct page *page =
- mempool_alloc(sbi->write_io_dummy,
- GFP_NOIO | __GFP_NOFAIL);
- f2fs_bug_on(sbi, !page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ set_page_private_dummy(page);
- lock_page(page);
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
+ f2fs_bug_on(sbi, 1);
+ }
+}
- zero_user_segment(page, 0, PAGE_SIZE);
- set_page_private_dummy(page);
+static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
+ enum page_type type)
+{
+ WARN_ON_ONCE(is_read_io(bio_op(bio)));
+
+ if (type == DATA || type == NODE) {
+ if (f2fs_lfs_mode(sbi) && current->plug)
+ blk_finish_plug(current->plug);
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
- f2fs_bug_on(sbi, 1);
+ if (F2FS_IO_ALIGNED(sbi)) {
+ f2fs_align_write_bio(sbi, bio);
+ /*
+ * In the NODE case, we lose next block address chain.
+ * So, we need to do checkpoint in f2fs_sync_file.
+ */
+ if (type == NODE)
+ set_sbi_flag(sbi, SBI_NEED_CP);
}
- /*
- * In the NODE case, we lose next block address chain. So, we
- * need to do checkpoint in f2fs_sync_file.
- */
- if (type == NODE)
- set_sbi_flag(sbi, SBI_NEED_CP);
}
-submit_io:
- if (is_read_io(bio_op(bio)))
- trace_f2fs_submit_read_bio(sbi->sb, type, bio);
- else
- trace_f2fs_submit_write_bio(sbi->sb, type, bio);
+ trace_f2fs_submit_write_bio(sbi->sb, type, bio);
iostat_update_submit_ctx(bio, type);
submit_bio(bio);
}
-void f2fs_submit_bio(struct f2fs_sb_info *sbi,
- struct bio *bio, enum page_type type)
-{
- __submit_bio(sbi, bio, type);
-}
-
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -573,12 +570,13 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
if (!io->bio)
return;
- if (is_read_io(fio->op))
+ if (is_read_io(fio->op)) {
trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
- else
+ f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
+ } else {
trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
-
- __submit_bio(io->sbi, io->bio, fio->type);
+ f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
+ }
io->bio = NULL;
}
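
__submit_bio() is split above into f2fs_submit_read_bio() and f2fs_submit_write_bio(), with the write-side IO-alignment padding factored into f2fs_align_write_bio(). The padding math is easy to misread; here is a standalone model of it, assuming 4 KiB blocks and an IO unit measured in blocks (names are illustrative).

    #include <stdio.h>

    #define BLKSIZE_BITS 12   /* 4 KiB blocks, as in F2FS_BLKSIZE_BITS */

    /* how many dummy blocks f2fs_align_write_bio() would append */
    static unsigned int dummy_blocks_needed(unsigned int bio_bytes,
                                            unsigned int io_size)
    {
        unsigned int start = (bio_bytes >> BLKSIZE_BITS) % io_size;

        return start ? io_size - start : 0;   /* aligned: pad nothing */
    }

    int main(void)
    {
        /* a 5-block bio with an 8-block IO unit needs 3 dummy pages */
        printf("%u\n", dummy_blocks_needed(5 << BLKSIZE_BITS, 8));
        return 0;
    }
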
@@ -655,6 +653,9 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
f2fs_down_write(&io->io_rwsem);
+ if (!io->bio)
+ goto unlock_out;
+
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
@@ -663,6 +664,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
}
__submit_merged_bio(io);
+unlock_out:
f2fs_up_write(&io->io_rwsem);
}
@@ -741,12 +743,15 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
}
if (fio->io_wbc && !is_read_io(fio->op))
- wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page) : WB_DATA_TYPE(fio->page));
- __submit_bio(fio->sbi, bio, fio->type);
+ if (is_read_io(bio_op(bio)))
+ f2fs_submit_read_bio(fio->sbi, bio, fio->type);
+ else
+ f2fs_submit_write_bio(fio->sbi, bio, fio->type);
return 0;
}
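
Note the accounting change in this hunk: wbc_account_cgroup_owner() now always takes fio->page. The page actually added to the bio can be an encrypted or compressed bounce page, and charging that would bill the wrong, kernel-owned context. A toy model of the distinction, with invented types:

    #include <stdio.h>

    struct page { int owner_cgroup; };

    static void account(struct page *owner, unsigned int bytes)
    {
        printf("charge %u bytes to cgroup %d\n", bytes, owner->owner_cgroup);
    }

    int main(void)
    {
        struct page original = { .owner_cgroup = 7 };
        struct page bounce   = { .owner_cgroup = 0 };  /* kernel-owned */
        struct page *bio_page = &bounce;   /* what the bio actually carries */

        (void)bio_page;
        account(&original, 4096);   /* the fix: always charge the original */
        return 0;
    }
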
@@ -848,7 +853,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
/* page can't be merged into bio; submit the bio */
del_bio_entry(be);
- __submit_bio(sbi, *bio, DATA);
+ f2fs_submit_write_bio(sbi, *bio, DATA);
break;
}
f2fs_up_write(&io->bio_list_lock);
@@ -911,7 +916,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
}
if (found)
- __submit_bio(sbi, target, DATA);
+ f2fs_submit_write_bio(sbi, target, DATA);
if (bio && *bio) {
bio_put(*bio);
*bio = NULL;
@@ -948,7 +953,7 @@ alloc_new:
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, WB_DATA_TYPE(page));
@@ -991,7 +996,7 @@ next:
bio_page = fio->page;
/* set submitted = true as a return value */
- fio->submitted = true;
+ fio->submitted = 1;
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
@@ -1007,7 +1012,7 @@ alloc_new:
(fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
- fio->retry = true;
+ fio->retry = 1;
goto skip;
}
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
@@ -1022,7 +1027,7 @@ alloc_new:
}
if (fio->io_wbc)
- wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
+ wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
io->last_block_in_bio = fio->new_blkaddr;
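
The fio->submitted, fio->retry, and fio->post_read assignments in these hunks switch from bool literals to 0/1, consistent with the fields becoming single-bit bitfields in struct f2fs_io_info. A compile-and-run sketch of the idiom (the real struct layout differs; this is only an assumption-labeled illustration):

    #include <stdio.h>

    struct io_flags {
        unsigned int submitted:1;
        unsigned int retry:1;
        unsigned int post_read:1;
    };

    int main(void)
    {
        struct io_flags f = { 0 };
        int required = 1;               /* e.g. f2fs_post_read_required() */

        f.submitted = 1;                /* write 1, not true */
        f.post_read = required ? 1 : 0; /* mirrors the patch's ?: */
        printf("%u %u %u\n", f.submitted, f.retry, f.post_read);
        return 0;
    }
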
@@ -1107,7 +1112,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
}
inc_page_count(sbi, F2FS_RD_DATA);
f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
- __submit_bio(sbi, bio, DATA);
+ f2fs_submit_read_bio(sbi, bio, DATA);
return 0;
}
@@ -1207,19 +1212,6 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
return err;
}
-int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
-{
- struct extent_info ei = {0, };
- struct inode *inode = dn->inode;
-
- if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
- dn->data_blkaddr = ei.blk + index - ei.fofs;
- return 0;
- }
-
- return f2fs_reserve_block(dn, index);
-}
-
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
blk_opf_t op_flags, bool for_write,
pgoff_t *next_pgofs)
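
f2fs_get_block() is removed above; its callers now open-code the same lookup-or-reserve step via f2fs_lookup_read_extent_cache_block() plus f2fs_reserve_block() (see f2fs_get_block_locked() later in this patch). A minimal sketch of the pattern with stand-in helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define NULL_ADDR 0u

    /* stand-in for f2fs_lookup_read_extent_cache_block(): pretend miss */
    static bool extent_cache_lookup(unsigned long index, unsigned int *blkaddr)
    {
        (void)index;
        *blkaddr = NULL_ADDR;
        return false;
    }

    /* stand-in for f2fs_reserve_block(): pretend allocation */
    static int reserve_block(unsigned long index, unsigned int *blkaddr)
    {
        *blkaddr = 1000 + (unsigned int)index;
        return 0;
    }

    static int get_block(unsigned long index, unsigned int *blkaddr)
    {
        if (extent_cache_lookup(index, blkaddr))
            return 0;                 /* cache hit: nothing to reserve */
        return reserve_block(index, blkaddr);
    }

    int main(void)
    {
        unsigned int blkaddr;
        int err = get_block(7, &blkaddr);

        printf("err=%d blkaddr=%u\n", err, blkaddr);
        return 0;
    }
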
@@ -1227,15 +1219,14 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
- struct extent_info ei = {0, };
int err;
page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
return ERR_PTR(-ENOMEM);
- if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
- dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
DATA_GENERIC_ENHANCE_READ)) {
err = -EFSCORRUPTED;
@@ -1432,13 +1423,12 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
return err;
dn->data_blkaddr = f2fs_data_blkaddr(dn);
- if (dn->data_blkaddr != NULL_ADDR)
- goto alloc;
-
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
- return err;
+ if (dn->data_blkaddr == NULL_ADDR) {
+ err = inc_valid_block_count(sbi, dn->inode, &count);
+ if (unlikely(err))
+ return err;
+ }
-alloc:
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
old_blkaddr = dn->data_blkaddr;
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
@@ -1452,19 +1442,91 @@ alloc:
return 0;
}
-void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
{
- if (flag == F2FS_GET_BLOCK_PRE_AIO) {
- if (lock)
- f2fs_down_read(&sbi->node_change);
- else
- f2fs_up_read(&sbi->node_change);
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ f2fs_down_read(&sbi->node_change);
+ else
+ f2fs_lock_op(sbi);
+}
+
+static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
+{
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ f2fs_up_read(&sbi->node_change);
+ else
+ f2fs_unlock_op(sbi);
+}
+
+int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ int err = 0;
+
+ f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
+ &dn->data_blkaddr))
+ err = f2fs_reserve_block(dn, index);
+ f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+
+ return err;
+}
+
+static int f2fs_map_no_dnode(struct inode *inode,
+ struct f2fs_map_blocks *map, struct dnode_of_data *dn,
+ pgoff_t pgoff)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ /*
+ * There is one exceptional case that read_node_page() may return
+ * -ENOENT due to filesystem has been shutdown or cp_error, return
+ * -EIO in that case.
+ */
+ if (map->m_may_create &&
+ (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
+ return -EIO;
+
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
+ if (map->m_next_extent)
+ *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
+ return 0;
+}
+
+static bool f2fs_map_blocks_cached(struct inode *inode,
+ struct f2fs_map_blocks *map, int flag)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int maxblocks = map->m_len;
+ pgoff_t pgoff = (pgoff_t)map->m_lblk;
+ struct extent_info ei = {};
+
+ if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
+ return false;
+
+ map->m_pblk = ei.blk + pgoff - ei.fofs;
+ map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
+ map->m_flags = F2FS_MAP_MAPPED;
+ if (map->m_next_extent)
+ *map->m_next_extent = pgoff + map->m_len;
+
+ /* for hardware encryption, but to avoid potential issue in future */
+ if (flag == F2FS_GET_BLOCK_DIO)
+ f2fs_wait_on_block_writeback_range(inode,
+ map->m_pblk, map->m_len);
+
+ if (f2fs_allow_multi_device_dio(sbi, flag)) {
+ int bidx = f2fs_target_device_index(sbi, map->m_pblk);
+ struct f2fs_dev_info *dev = &sbi->devs[bidx];
+
+ map->m_bdev = dev->bdev;
+ map->m_pblk -= dev->start_blk;
+ map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
} else {
- if (lock)
- f2fs_lock_op(sbi);
- else
- f2fs_unlock_op(sbi);
+ map->m_bdev = inode->i_sb->s_bdev;
}
+ return true;
}
/*
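
The extent-cache fast path of f2fs_map_blocks() moves into f2fs_map_blocks_cached() above, and the lock helper splits into the f2fs_map_lock()/f2fs_map_unlock() pair. The clamping arithmetic the fast path performs, worked through on concrete (illustrative) numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long fofs = 100, len = 50, blk = 9000; /* cached extent */
        unsigned long pgoff = 120, maxblocks = 200;     /* request */

        unsigned long m_pblk = blk + pgoff - fofs;      /* 9020 */
        unsigned long avail  = fofs + len - pgoff;      /* 30 blocks left */
        unsigned long m_len  = maxblocks < avail ? maxblocks : avail;

        printf("pblk=%lu len=%lu\n", m_pblk, m_len);    /* pblk=9020 len=30 */
        return 0;
    }
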
@@ -1472,8 +1534,7 @@ void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
* maps continuous logical blocks to physical blocks, and return such
* info via f2fs_map_blocks structure.
*/
-int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
- int create, int flag)
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
@@ -1483,14 +1544,17 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int err = 0, ofs = 1;
unsigned int ofs_in_node, last_ofs_in_node;
blkcnt_t prealloc;
- struct extent_info ei = {0, };
block_t blkaddr;
unsigned int start_pgofs;
int bidx = 0;
+ bool is_hole;
if (!maxblocks)
return 0;
+ if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
+ goto out;
+
map->m_bdev = inode->i_sb->s_bdev;
map->m_multidev_dio =
f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
@@ -1502,42 +1566,9 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
pgofs = (pgoff_t)map->m_lblk;
end = pgofs + maxblocks;
- if (!create && f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) {
- if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
- map->m_may_create)
- goto next_dnode;
-
- map->m_pblk = ei.blk + pgofs - ei.fofs;
- map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
- map->m_flags = F2FS_MAP_MAPPED;
- if (map->m_next_extent)
- *map->m_next_extent = pgofs + map->m_len;
-
- /* for hardware encryption, but to avoid potential issue in future */
- if (flag == F2FS_GET_BLOCK_DIO)
- f2fs_wait_on_block_writeback_range(inode,
- map->m_pblk, map->m_len);
-
- if (map->m_multidev_dio) {
- block_t blk_addr = map->m_pblk;
-
- bidx = f2fs_target_device_index(sbi, map->m_pblk);
-
- map->m_bdev = FDEV(bidx).bdev;
- map->m_pblk -= FDEV(bidx).start_blk;
- map->m_len = min(map->m_len,
- FDEV(bidx).end_blk + 1 - map->m_pblk);
-
- if (map->m_may_create)
- f2fs_update_device_state(sbi, inode->i_ino,
- blk_addr, map->m_len);
- }
- goto out;
- }
-
next_dnode:
if (map->m_may_create)
- f2fs_do_map_lock(sbi, flag, true);
+ f2fs_map_lock(sbi, flag);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1545,29 +1576,8 @@ next_dnode:
if (err) {
if (flag == F2FS_GET_BLOCK_BMAP)
map->m_pblk = 0;
-
- if (err == -ENOENT) {
- /*
- * There is one exceptional case that read_node_page()
- * may return -ENOENT due to filesystem has been
- * shutdown or cp_error, so force to convert error
- * number to EIO for such case.
- */
- if (map->m_may_create &&
- (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
- f2fs_cp_error(sbi))) {
- err = -EIO;
- goto unlock_out;
- }
-
- err = 0;
- if (map->m_next_pgofs)
- *map->m_next_pgofs =
- f2fs_get_next_page_offset(&dn, pgofs);
- if (map->m_next_extent)
- *map->m_next_extent =
- f2fs_get_next_page_offset(&dn, pgofs);
- }
+ if (err == -ENOENT)
+ err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
goto unlock_out;
}
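
The -ENOENT handling collapsed into f2fs_map_no_dnode() above preserves one subtlety: a missing dnode is normally just a hole (return 0), but a writer on a shut-down or checkpoint-errored filesystem must see -EIO. A compact model, reducing the two conditions to booleans:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int map_no_dnode(bool may_create, bool shutdown_or_cp_error)
    {
        if (may_create && shutdown_or_cp_error)
            return -EIO;   /* don't report a bogus hole to a writer */
        return 0;          /* genuine hole; caller records next offsets */
    }

    int main(void)
    {
        printf("%d %d\n", map_no_dnode(true, true),
               map_no_dnode(false, true));
        return 0;
    }
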
@@ -1578,78 +1588,76 @@ next_dnode:
next_block:
blkaddr = f2fs_data_blkaddr(&dn);
-
- if (__is_valid_data_blkaddr(blkaddr) &&
- !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
+ is_hole = !__is_valid_data_blkaddr(blkaddr);
+ if (!is_hole &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
err = -EFSCORRUPTED;
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
goto sync_out;
}
- if (__is_valid_data_blkaddr(blkaddr)) {
- /* use out-place-update for driect IO under LFS mode */
- if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
- map->m_may_create) {
+ /* use out-place-update for direct IO under LFS mode */
+ if (map->m_may_create &&
+ (is_hole || (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO))) {
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto sync_out;
+ }
+
+ switch (flag) {
+ case F2FS_GET_BLOCK_PRE_AIO:
+ if (blkaddr == NULL_ADDR) {
+ prealloc++;
+ last_ofs_in_node = dn.ofs_in_node;
+ }
+ break;
+ case F2FS_GET_BLOCK_PRE_DIO:
+ case F2FS_GET_BLOCK_DIO:
err = __allocate_data_block(&dn, map->m_seg_type);
if (err)
goto sync_out;
- blkaddr = dn.data_blkaddr;
+ if (flag == F2FS_GET_BLOCK_PRE_DIO)
+ file_need_truncate(inode);
set_inode_flag(inode, FI_APPEND_WRITE);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ err = -EIO;
+ goto sync_out;
}
- } else {
- if (create) {
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
- goto sync_out;
- }
- if (flag == F2FS_GET_BLOCK_PRE_AIO) {
- if (blkaddr == NULL_ADDR) {
- prealloc++;
- last_ofs_in_node = dn.ofs_in_node;
- }
- } else {
- WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
- flag != F2FS_GET_BLOCK_DIO);
- err = __allocate_data_block(&dn,
- map->m_seg_type);
- if (!err) {
- if (flag == F2FS_GET_BLOCK_PRE_DIO)
- file_need_truncate(inode);
- set_inode_flag(inode, FI_APPEND_WRITE);
- }
- }
- if (err)
- goto sync_out;
+
+ blkaddr = dn.data_blkaddr;
+ if (is_hole)
map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
- } else {
- if (f2fs_compressed_file(inode) &&
- f2fs_sanity_check_cluster(&dn) &&
- (flag != F2FS_GET_BLOCK_FIEMAP ||
- IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
- err = -EFSCORRUPTED;
- f2fs_handle_error(sbi,
- ERROR_CORRUPTED_CLUSTER);
- goto sync_out;
- }
- if (flag == F2FS_GET_BLOCK_BMAP) {
- map->m_pblk = 0;
- goto sync_out;
- }
- if (flag == F2FS_GET_BLOCK_PRECACHE)
- goto sync_out;
- if (flag == F2FS_GET_BLOCK_FIEMAP &&
- blkaddr == NULL_ADDR) {
- if (map->m_next_pgofs)
- *map->m_next_pgofs = pgofs + 1;
- goto sync_out;
- }
- if (flag != F2FS_GET_BLOCK_FIEMAP) {
- /* for defragment case */
+ } else if (is_hole) {
+ if (f2fs_compressed_file(inode) &&
+ f2fs_sanity_check_cluster(&dn) &&
+ (flag != F2FS_GET_BLOCK_FIEMAP ||
+ IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
+ err = -EFSCORRUPTED;
+ f2fs_handle_error(sbi,
+ ERROR_CORRUPTED_CLUSTER);
+ goto sync_out;
+ }
+
+ switch (flag) {
+ case F2FS_GET_BLOCK_PRECACHE:
+ goto sync_out;
+ case F2FS_GET_BLOCK_BMAP:
+ map->m_pblk = 0;
+ goto sync_out;
+ case F2FS_GET_BLOCK_FIEMAP:
+ if (blkaddr == NULL_ADDR) {
if (map->m_next_pgofs)
*map->m_next_pgofs = pgofs + 1;
goto sync_out;
}
+ break;
+ default:
+ /* for defragment case */
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
+ goto sync_out;
}
}
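
The nested if/else chain in next_block becomes the switch above. For the read-only hole case (m_may_create clear), the behavior now reads as a small dispatch table; a sketch, using invented constants, of when the scan stops at a hole:

    #include <stdio.h>

    enum map_flag { GET_BLOCK_DEFAULT, GET_BLOCK_BMAP, GET_BLOCK_PRECACHE,
                    GET_BLOCK_FIEMAP };

    /* returns 1 when the caller should stop scanning at this hole */
    static int handle_hole(enum map_flag flag, int is_reserved /* NEW_ADDR */)
    {
        switch (flag) {
        case GET_BLOCK_PRECACHE:
            return 1;                   /* sync_out */
        case GET_BLOCK_BMAP:
            return 1;                   /* m_pblk = 0, then sync_out */
        case GET_BLOCK_FIEMAP:
            return is_reserved ? 0 : 1; /* only true holes stop fiemap */
        default:
            return 1;                   /* e.g. defragment: stop */
        }
    }

    int main(void)
    {
        printf("%d %d\n", handle_hole(GET_BLOCK_FIEMAP, 1),
               handle_hole(GET_BLOCK_BMAP, 0));
        return 0;
    }
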
@@ -1660,9 +1668,9 @@ next_block:
bidx = f2fs_target_device_index(sbi, blkaddr);
if (map->m_len == 0) {
- /* preallocated unwritten block should be mapped for fiemap. */
+ /* reserved delalloc block should be mapped for fiemap. */
if (blkaddr == NEW_ADDR)
- map->m_flags |= F2FS_MAP_UNWRITTEN;
+ map->m_flags |= F2FS_MAP_DELALLOC;
map->m_flags |= F2FS_MAP_MAPPED;
map->m_pblk = blkaddr;
@@ -1721,7 +1729,7 @@ skip:
f2fs_put_dnode(&dn);
if (map->m_may_create) {
- f2fs_do_map_lock(sbi, flag, false);
+ f2fs_map_unlock(sbi, flag);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
@@ -1767,11 +1775,11 @@ sync_out:
f2fs_put_dnode(&dn);
unlock_out:
if (map->m_may_create) {
- f2fs_do_map_lock(sbi, flag, false);
+ f2fs_map_unlock(sbi, flag);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
- trace_f2fs_map_blocks(inode, map, create, flag, err);
+ trace_f2fs_map_blocks(inode, map, flag, err);
return err;
}
@@ -1793,7 +1801,7 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
while (map.m_lblk < last_lblk) {
map.m_len = last_lblk - map.m_lblk;
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
if (err || map.m_len == 0)
return false;
map.m_lblk += map.m_len;
@@ -1967,7 +1975,7 @@ next:
map.m_len = cluster_size - count_in_cluster;
}
- ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
+ ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
if (ret)
goto out;
@@ -1984,7 +1992,7 @@ next:
compr_appended = false;
/* In a case of compressed cluster, append this to the last extent */
- if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
+ if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
!(map.m_flags & F2FS_MAP_FLAGS))) {
compr_appended = true;
goto skip_fill;
@@ -2030,7 +2038,7 @@ skip_fill:
compr_cluster = false;
size += blks_to_bytes(inode, 1);
}
- } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
+ } else if (map.m_flags & F2FS_MAP_DELALLOC) {
flags = FIEMAP_EXTENT_UNWRITTEN;
}
@@ -2099,7 +2107,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
map->m_lblk = block_in_file;
map->m_len = last_block - block_in_file;
- ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
+ ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
if (ret)
goto out;
got_it:
@@ -2136,7 +2144,7 @@ zero_out:
*last_block_in_bio, block_nr) ||
!f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
@@ -2283,7 +2291,7 @@ skip_reading_dnode:
*last_block_in_bio, blkaddr) ||
!f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
submit_and_realloc:
- __submit_bio(sbi, bio, DATA);
+ f2fs_submit_read_bio(sbi, bio, DATA);
bio = NULL;
}
@@ -2377,7 +2385,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- /* there are remained comressed pages, submit them */
+ /* there are remained compressed pages, submit them */
if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
@@ -2444,7 +2452,7 @@ next_page:
#endif
}
if (bio)
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
return ret;
}
@@ -2530,34 +2538,29 @@ static inline bool check_inplace_update_policy(struct inode *inode,
struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int policy = SM_I(sbi)->ipu_policy;
- if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) &&
- is_inode_flag_set(inode, FI_OPU_WRITE))
+ if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
+ is_inode_flag_set(inode, FI_OPU_WRITE))
return false;
- if (policy & (0x1 << F2FS_IPU_FORCE))
+ if (IS_F2FS_IPU_FORCE(sbi))
return true;
- if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
+ if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
return true;
- if (policy & (0x1 << F2FS_IPU_UTIL) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
- if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
/*
* IPU for rewrite async pages
*/
- if (policy & (0x1 << F2FS_IPU_ASYNC) &&
- fio && fio->op == REQ_OP_WRITE &&
- !(fio->op_flags & REQ_SYNC) &&
- !IS_ENCRYPTED(inode))
+ if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
+ !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
return true;
/* this is only set during fdatasync */
- if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(inode, FI_NEED_IPU))
+ if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
return true;
if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
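
check_inplace_update_policy() now uses the IS_F2FS_IPU_* wrappers instead of open-coded `policy & (0x1 << ...)` tests. The underlying mechanism is unchanged: each policy is one bit in SM_I(sbi)->ipu_policy. A self-contained model of that bitmask test (bit positions here are illustrative; see fs/f2fs/segment.h for the real ones):

    #include <stdbool.h>
    #include <stdio.h>

    enum { F2FS_IPU_FORCE, F2FS_IPU_SSR, F2FS_IPU_UTIL, F2FS_IPU_SSR_UTIL };

    static bool ipu_policy_set(unsigned int policy, int which)
    {
        return policy & (1u << which);
    }

    int main(void)
    {
        unsigned int policy = (1u << F2FS_IPU_SSR) | (1u << F2FS_IPU_UTIL);

        printf("force=%d ssr=%d\n",
               ipu_policy_set(policy, F2FS_IPU_FORCE),
               ipu_policy_set(policy, F2FS_IPU_SSR));
        return 0;
    }
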
@@ -2635,7 +2638,6 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
struct page *page = fio->page;
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
- struct extent_info ei = {0, };
struct node_info ni;
bool ipu_force = false;
int err = 0;
@@ -2647,9 +2649,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
set_new_dnode(&dn, inode, NULL, NULL, 0);
if (need_inplace_update(fio) &&
- f2fs_lookup_read_extent_cache(inode, page->index, &ei)) {
- fio->old_blkaddr = ei.blk + page->index - ei.fofs;
-
+ f2fs_lookup_read_extent_cache_block(inode, page->index,
+ &fio->old_blkaddr)) {
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
DATA_GENERIC_ENHANCE)) {
f2fs_handle_error(fio->sbi,
@@ -2699,7 +2700,6 @@ got_it:
goto out_writepage;
set_page_writeback(page);
- ClearPageError(page);
f2fs_put_dnode(&dn);
if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
@@ -2735,7 +2735,6 @@ got_it:
goto out_writepage;
set_page_writeback(page);
- ClearPageError(page);
if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
@@ -2780,10 +2779,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.old_blkaddr = NULL_ADDR,
.page = page,
.encrypted_page = NULL,
- .submitted = false,
+ .submitted = 0,
.compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
- .post_read = f2fs_post_read_required(inode),
+ .post_read = f2fs_post_read_required(inode) ? 1 : 0,
.io_type = io_type,
.io_wbc = wbc,
.bio = bio,
@@ -2792,7 +2791,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
trace_f2fs_writepage(page, DATA);
- /* we should bypass data pages to proceed the kworkder jobs */
+ /* we should bypass data pages to proceed the kworker jobs */
if (unlikely(f2fs_cp_error(sbi))) {
mapping_set_error(page->mapping, -EIO);
/*
@@ -2904,14 +2903,14 @@ out:
}
if (submitted)
- *submitted = fio.submitted ? 1 : 0;
+ *submitted = fio.submitted;
return 0;
redirty_out:
redirty_page_for_writepage(wbc, page);
/*
- * pageout() in MM traslates EAGAIN, so calls handle_write_error()
+ * pageout() in MM translates EAGAIN, so calls handle_write_error()
* -> mapping_set_error() -> set_bit(AS_EIO, ...).
* file_write_and_wait_range() will see EIO error, which is critical
* to return value of fsync() followed by atomic_write failure to user.
@@ -2945,7 +2944,7 @@ out:
}
/*
- * This function was copied from write_cche_pages from mm/page-writeback.c.
+ * This function was copied from write_cache_pages from mm/page-writeback.c.
* The major change is making write step of cold data page separately from
* warm/hot data page.
*/
@@ -3354,9 +3353,8 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
struct dnode_of_data dn;
struct page *ipage;
bool locked = false;
- struct extent_info ei = {0, };
+ int flag = F2FS_GET_BLOCK_PRE_AIO;
int err = 0;
- int flag;
/*
* If a whole page is being written and we already preallocated all the
@@ -3366,14 +3364,13 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
return 0;
/* f2fs_lock_op avoids race between write CP and convert_inline_page */
- if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
- flag = F2FS_GET_BLOCK_DEFAULT;
- else
- flag = F2FS_GET_BLOCK_PRE_AIO;
-
- if (f2fs_has_inline_data(inode) ||
- (pos & PAGE_MASK) >= i_size_read(inode)) {
- f2fs_do_map_lock(sbi, flag, true);
+ if (f2fs_has_inline_data(inode)) {
+ if (pos + len > MAX_INLINE_DATA(inode))
+ flag = F2FS_GET_BLOCK_DEFAULT;
+ f2fs_map_lock(sbi, flag);
+ locked = true;
+ } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+ f2fs_map_lock(sbi, flag);
locked = true;
}
@@ -3393,40 +3390,40 @@ restart:
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_page_private_inline(ipage);
- } else {
- err = f2fs_convert_inline_page(&dn, page);
- if (err)
- goto out;
- if (dn.data_blkaddr == NULL_ADDR)
- err = f2fs_get_block(&dn, index);
- }
- } else if (locked) {
- err = f2fs_get_block(&dn, index);
- } else {
- if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
- dn.data_blkaddr = ei.blk + index - ei.fofs;
- } else {
- /* hole case */
- err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
- if (err || dn.data_blkaddr == NULL_ADDR) {
- f2fs_put_dnode(&dn);
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
- true);
- WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
- locked = true;
- goto restart;
- }
+ goto out;
}
+ err = f2fs_convert_inline_page(&dn, page);
+ if (err || dn.data_blkaddr != NULL_ADDR)
+ goto out;
}
- /* convert_inline_page can make node_changed */
- *blk_addr = dn.data_blkaddr;
- *node_changed = dn.node_changed;
+ if (!f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
+ if (locked) {
+ err = f2fs_reserve_block(&dn, index);
+ goto out;
+ }
+
+ /* hole case */
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (!err && dn.data_blkaddr != NULL_ADDR)
+ goto out;
+ f2fs_put_dnode(&dn);
+ f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
+ WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
+ locked = true;
+ goto restart;
+ }
out:
+ if (!err) {
+ /* convert_inline_page can make node_changed */
+ *blk_addr = dn.data_blkaddr;
+ *node_changed = dn.node_changed;
+ }
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
- f2fs_do_map_lock(sbi, flag, false);
+ f2fs_map_unlock(sbi, flag);
return err;
}
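
prepare_write_begin() above keeps its optimistic probe-then-lock shape but untangles the flow: look up without the mapping lock first; on a hole, take the lock and restart so the block reservation happens safely. The retry skeleton, reduced to a runnable toy:

    #include <stdbool.h>
    #include <stdio.h>

    static bool mapping_known(unsigned long index)
    {
        return index % 2;      /* fake extent-cache lookup */
    }

    static int get_or_reserve(unsigned long index)
    {
        bool locked = false;

    restart:
        if (mapping_known(index))
            return 0;          /* fast path, lock never taken */
        if (locked)
            return 1;          /* would reserve a block here */
        locked = true;         /* hole: upgrade to locked, retry */
        goto restart;
    }

    int main(void)
    {
        printf("%d %d\n", get_or_reserve(3), get_or_reserve(4));
        return 0;
    }
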
@@ -3435,7 +3432,6 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
{
struct dnode_of_data dn;
struct page *ipage;
- struct extent_info ei = {0, };
int err = 0;
ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
@@ -3444,9 +3440,8 @@ static int __find_data_block(struct inode *inode, pgoff_t index,
set_new_dnode(&dn, inode, ipage, ipage, 0);
- if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
- dn.data_blkaddr = ei.blk + index - ei.fofs;
- } else {
+ if (!f2fs_lookup_read_extent_cache_block(inode, index,
+ &dn.data_blkaddr)) {
/* hole case */
err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err) {
@@ -3467,7 +3462,7 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,
struct page *ipage;
int err = 0;
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
ipage = f2fs_get_node_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
@@ -3476,14 +3471,16 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,
}
set_new_dnode(&dn, inode, ipage, ipage, 0);
- err = f2fs_get_block(&dn, index);
+ if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
+ &dn.data_blkaddr))
+ err = f2fs_reserve_block(&dn, index);
*blk_addr = dn.data_blkaddr;
*node_changed = dn.node_changed;
f2fs_put_dnode(&dn);
unlock_out:
- f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
return err;
}
@@ -3729,6 +3726,7 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
}
}
+ clear_page_private_reference(&folio->page);
clear_page_private_gcing(&folio->page);
if (test_opt(sbi, COMPRESS_CACHE) &&
@@ -3754,6 +3752,7 @@ bool f2fs_release_folio(struct folio *folio, gfp_t wait)
clear_page_private_data(&folio->page);
}
+ clear_page_private_reference(&folio->page);
clear_page_private_gcing(&folio->page);
folio_detach_private(folio);
@@ -3835,7 +3834,7 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
map.m_next_pgofs = NULL;
map.m_seg_type = NO_CHECK_TYPE;
- if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
+ if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
blknr = map.m_pblk;
}
out:
@@ -3943,7 +3942,7 @@ retry:
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
+ ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
if (ret)
goto out;
@@ -4168,8 +4167,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (flags & IOMAP_WRITE)
map.m_may_create = true;
- err = f2fs_map_blocks(inode, &map, flags & IOMAP_WRITE,
- F2FS_GET_BLOCK_DIO);
+ err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
if (err)
return err;
@@ -4182,20 +4180,24 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
*/
map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
- if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
- iomap->length = blks_to_bytes(inode, map.m_len);
- if (map.m_flags & F2FS_MAP_MAPPED) {
- iomap->type = IOMAP_MAPPED;
- iomap->flags |= IOMAP_F_MERGED;
- } else {
- iomap->type = IOMAP_UNWRITTEN;
- }
- if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
- return -EINVAL;
+ /*
+ * We should never see delalloc or compressed extents here based on
+ * prior flushing and checks.
+ */
+ if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
+ return -EINVAL;
+ if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
+ return -EINVAL;
+ if (map.m_pblk != NULL_ADDR) {
+ iomap->length = blks_to_bytes(inode, map.m_len);
+ iomap->type = IOMAP_MAPPED;
+ iomap->flags |= IOMAP_F_MERGED;
iomap->bdev = map.m_bdev;
iomap->addr = blks_to_bytes(inode, map.m_pblk);
} else {
+ if (flags & IOMAP_WRITE)
+ return -ENOTBLK;
iomap->length = blks_to_bytes(inode, next_pgofs) -
iomap->offset;
iomap->type = IOMAP_HOLE;