Diffstat (limited to 'fs/bcachefs/fs-io.c')
-rw-r--r--  fs/bcachefs/fs-io.c  1474
1 file changed, 701 insertions, 773 deletions
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 8f0b2a745064..df2f317f5443 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -35,6 +35,81 @@
#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
+struct nocow_flush {
+ struct closure *cl;
+ struct bch_dev *ca;
+ struct bio bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+ struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+ closure_put(bio->cl);
+ percpu_ref_put(&bio->ca->io_ref);
+ bio_put(&bio->bio);
+}
+
+static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct closure *cl)
+{
+ struct nocow_flush *bio;
+ struct bch_dev *ca;
+ struct bch_devs_mask devs;
+ unsigned dev;
+
+ dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+ if (dev == BCH_SB_MEMBERS_MAX)
+ return;
+
+ devs = inode->ei_devs_need_flush;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+ for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+ REQ_OP_FLUSH,
+ GFP_KERNEL,
+ &c->nocow_flush_bioset),
+ struct nocow_flush, bio);
+ bio->cl = cl;
+ bio->ca = ca;
+ bio->bio.bi_end_io = nocow_flush_endio;
+ closure_bio_submit(&bio->bio, cl);
+ }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+ closure_sync(&cl);
+
+ return 0;
+}
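A note on the pattern above: bio_alloc_bioset() hands back the bio embedded at the tail of struct nocow_flush, and nocow_flush_endio() recovers the wrapper with container_of(). That only works if the bioset was initialized with enough front padding for the fields ahead of the bio; a minimal sketch of the required setup (the real init call lives elsewhere in bcachefs, so treat this as an assumption about its shape):

	/* sketch: front_pad makes room for nocow_flush's leading fields */
	if (bioset_init(&c->nocow_flush_bioset, 1,
			offsetof(struct nocow_flush, bio), 0))
		return -ENOMEM;

Each flush bio submitted via closure_bio_submit() also holds a ref on the caller's closure, dropped in nocow_flush_endio(); that is what lets the synchronous wrapper above simply closure_sync() on a stack closure.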
+
+static inline bool bio_full(struct bio *bio, unsigned len)
+{
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+ return true;
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return true;
+ return false;
+}
+
static inline struct address_space *faults_disabled_mapping(void)
{
return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
@@ -56,7 +131,6 @@ struct quota_res {
};
struct bch_writepage_io {
- struct closure cl;
struct bch_inode_info *inode;
/* must be last: */
@@ -64,11 +138,14 @@ struct bch_writepage_io {
};
struct dio_write {
- struct completion done;
struct kiocb *req;
+ struct address_space *mapping;
+ struct bch_inode_info *inode;
struct mm_struct *mm;
unsigned loop:1,
+ extending:1,
sync:1,
+ flush:1,
free_iov:1;
struct quota_res quota_res;
u64 written;
@@ -89,7 +166,7 @@ struct dio_read {
};
/* pagecache_block must be held */
-static int write_invalidate_inode_pages_range(struct address_space *mapping,
+static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
loff_t start, loff_t end)
{
int ret;
@@ -121,28 +198,33 @@ static int write_invalidate_inode_pages_range(struct address_space *mapping,
#ifdef CONFIG_BCACHEFS_QUOTA
-static void bch2_quota_reservation_put(struct bch_fs *c,
- struct bch_inode_info *inode,
- struct quota_res *res)
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
{
- if (!res->sectors)
- return;
-
- mutex_lock(&inode->ei_quota_lock);
BUG_ON(res->sectors > inode->ei_quota_reserved);
bch2_quota_acct(c, inode->ei_qid, Q_SPC,
-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
inode->ei_quota_reserved -= res->sectors;
- mutex_unlock(&inode->ei_quota_lock);
-
res->sectors = 0;
}
+static void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
+{
+ if (res->sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_quota_reservation_put(c, inode, res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+}
+
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
- unsigned sectors,
+ u64 sectors,
bool check_enospc)
{
int ret;
@@ -161,11 +243,13 @@ static int bch2_quota_reservation_add(struct bch_fs *c,
#else
+static void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
+
static void bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
- struct quota_res *res)
-{
-}
+ struct quota_res *res) {}
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
@@ -216,14 +300,13 @@ int __must_check bch2_write_inode_size(struct bch_fs *c,
return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
-static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+static void __i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
struct quota_res *quota_res, s64 sectors)
{
- if (!sectors)
- return;
-
- mutex_lock(&inode->ei_quota_lock);
- BUG_ON((s64) inode->v.i_blocks + sectors < 0);
+ bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
+ "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
+ inode->ei_inode.bi_sectors);
inode->v.i_blocks += sectors;
#ifdef CONFIG_BCACHEFS_QUOTA
@@ -237,7 +320,16 @@ static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
}
#endif
- mutex_unlock(&inode->ei_quota_lock);
+}
+
+static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
+{
+ if (sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, quota_res, sectors);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
}
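The reason for splitting __bch2_quota_reservation_put() and __i_sectors_acct() out of their locked wrappers is that the O_DIRECT write completion path, added further down in this patch, needs to do both under a single ei_quota_lock acquisition:

	/* from bch2_dio_write_end(), later in this patch: */
	if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
		__bch2_quota_reservation_put(c, inode, &dio->quota_res);
		mutex_unlock(&inode->ei_quota_lock);
	}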
/* page state: */
@@ -285,28 +377,13 @@ static inline struct bch_page_state *bch2_page_state(struct page *page)
/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
- struct bch_page_state *s = __bch2_page_state(page);
-
- if (!s)
- return;
-
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(s);
+ kfree(detach_page_private(page));
}
static void bch2_page_state_release(struct page *page)
{
- struct bch_page_state *s = bch2_page_state(page);
-
- if (!s)
- return;
-
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(s);
+ EBUG_ON(!PageLocked(page));
+ __bch2_page_state_release(page);
}
/* for newly allocated pages: */
@@ -320,13 +397,7 @@ static struct bch_page_state *__bch2_page_state_create(struct page *page,
return NULL;
spin_lock_init(&s->lock);
- /*
- * migrate_page_move_mapping() assumes that pages with private data
- * have their count elevated by 1.
- */
- get_page(page);
- set_page_private(page, (unsigned long) s);
- SetPagePrivate(page);
+ attach_page_private(page, s);
return s;
}
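attach_page_private()/detach_page_private() are the generic page-cache helpers (mainline since around v5.8) for exactly the refcount-and-flag dance the removed lines open-coded. To a first approximation, attach does:

	/* simplified equivalent of attach_page_private(page, data): */
	get_page(page);		/* migration assumes private pages hold a ref */
	set_page_private(page, (unsigned long) data);
	SetPagePrivate(page);

detach_page_private() reverses this and returns the old private pointer, or NULL if the page had none, which is why __bch2_page_state_release() can pass the result straight to kfree().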
@@ -336,11 +407,11 @@ static struct bch_page_state *bch2_page_state_create(struct page *page,
return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}
-static unsigned bkey_to_sector_state(const struct bkey *k)
+static unsigned bkey_to_sector_state(struct bkey_s_c k)
{
- if (k->type == KEY_TYPE_reservation)
+ if (bkey_extent_is_reservation(k))
return SECTOR_RESERVED;
- if (bkey_extent_is_allocation(k))
+ if (bkey_extent_is_allocation(k.k))
return SECTOR_ALLOCATED;
return SECTOR_UNALLOCATED;
}
@@ -391,7 +462,7 @@ retry:
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, ret) {
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k.k);
+ unsigned state = bkey_to_sector_state(k);
while (pg_idx < nr_pages) {
struct page *page = pages[pg_idx];
@@ -418,7 +489,7 @@ retry:
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
@@ -431,7 +502,7 @@ static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
struct bio_vec bv;
unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
- unsigned state = bkey_to_sector_state(k.k);
+ unsigned state = bkey_to_sector_state(k);
bio_for_each_segment(bv, bio, iter)
__bch2_page_state_set(bv.bv_page, bv.bv_offset >> 9,
@@ -443,22 +514,20 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
{
pgoff_t index = start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
- struct pagevec pvec;
+ struct folio_batch fbatch;
+ unsigned i, j;
if (end <= start)
return;
- pagevec_init(&pvec);
-
- do {
- unsigned nr_pages, i, j;
+ folio_batch_init(&fbatch);
- nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
- &index, end_index);
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
unsigned pg_offset = max(start, pg_start) - pg_start;
unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
struct bch_page_state *s;
@@ -467,8 +536,8 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
BUG_ON(pg_offset >= PAGE_SECTORS);
BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
- lock_page(page);
- s = bch2_page_state(page);
+ folio_lock(folio);
+ s = bch2_page_state(&folio->page);
if (s) {
spin_lock(&s->lock);
@@ -477,10 +546,11 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
spin_unlock(&s->lock);
}
- unlock_page(page);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
- } while (index <= end_index);
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
}
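The pagevec_lookup_range() loop becomes the standard filemap_get_folios() idiom: the lookup advances the index itself and returns the number of folios found, so termination and the final partial batch fall out naturally. A generic sketch of the shape (not patch code):

	struct folio_batch fbatch;
	unsigned i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, end_index, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			/* per-folio work, under folio_lock() as needed */
		}
		folio_batch_release(&fbatch);	/* drop the lookup's refs */
		cond_resched();
	}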
static void mark_pagecache_reserved(struct bch_inode_info *inode,
@@ -489,23 +559,21 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
pgoff_t index = start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
- struct pagevec pvec;
+ struct folio_batch fbatch;
s64 i_sectors_delta = 0;
+ unsigned i, j;
if (end <= start)
return;
- pagevec_init(&pvec);
-
- do {
- unsigned nr_pages, i, j;
+ folio_batch_init(&fbatch);
- nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
- &index, end_index);
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
- u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
unsigned pg_offset = max(start, pg_start) - pg_start;
unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
struct bch_page_state *s;
@@ -514,8 +582,8 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
BUG_ON(pg_offset >= PAGE_SECTORS);
BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
- lock_page(page);
- s = bch2_page_state(page);
+ folio_lock(folio);
+ s = bch2_page_state(&folio->page);
if (s) {
spin_lock(&s->lock);
@@ -534,10 +602,11 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
spin_unlock(&s->lock);
}
- unlock_page(page);
+ folio_unlock(folio);
}
- pagevec_release(&pvec);
- } while (index <= end_index);
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
@@ -617,7 +686,7 @@ static void bch2_page_reservation_put(struct bch_fs *c,
static int bch2_page_reservation_get(struct bch_fs *c,
struct bch_inode_info *inode, struct page *page,
struct bch2_page_reservation *res,
- unsigned offset, unsigned len, bool check_enospc)
+ unsigned offset, unsigned len)
{
struct bch_page_state *s = bch2_page_state_create(page, 0);
unsigned i, disk_sectors = 0, quota_sectors = 0;
@@ -637,19 +706,14 @@ static int bch2_page_reservation_get(struct bch_fs *c,
}
if (disk_sectors) {
- ret = bch2_disk_reservation_add(c, &res->disk,
- disk_sectors,
- !check_enospc
- ? BCH_DISK_RESERVATION_NOFAIL
- : 0);
+ ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
if (unlikely(ret))
return ret;
}
if (quota_sectors) {
ret = bch2_quota_reservation_add(c, inode, &res->quota,
- quota_sectors,
- check_enospc);
+ quota_sectors, true);
if (unlikely(ret)) {
struct disk_reservation tmp = {
.sectors = disk_sectors
@@ -748,7 +812,7 @@ static void bch2_set_page_dirty(struct bch_fs *c,
i_sectors_acct(c, inode, &res->quota, dirty_sectors);
if (!PageDirty(page))
- __set_page_dirty_nobuffers(page);
+ filemap_dirty_folio(inode->v.i_mapping, page_folio(page));
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
@@ -766,25 +830,25 @@ vm_fault_t bch2_page_fault(struct vm_fault *vmf)
if (fdm > mapping) {
struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
- if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
+ if (bch2_pagecache_add_tryget(inode))
goto got_lock;
- bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);
+ bch2_pagecache_block_put(fdm_host);
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
+ bch2_pagecache_add_put(inode);
- bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);
+ bch2_pagecache_block_get(fdm_host);
/* Signal that lock has been dropped: */
set_fdm_dropped_locks();
return VM_FAULT_SIGBUS;
}
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
got_lock:
ret = filemap_fault(vmf);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
return ret;
}
@@ -812,7 +876,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
* a write_invalidate_inode_pages_range() that works without dropping
* page lock before invalidating page
*/
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
lock_page(page);
isize = i_size_read(&inode->v);
@@ -833,7 +897,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
}
}
- if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
+ if (bch2_page_reservation_get(c, inode, page, &res, 0, len)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
goto out;
@@ -845,68 +909,37 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
wait_for_stable_page(page);
ret = VM_FAULT_LOCKED;
out:
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
sb_end_pagefault(inode->v.i_sb);
return ret;
}
-void bch2_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
- if (offset || length < PAGE_SIZE)
+ if (offset || length < folio_size(folio))
return;
- bch2_clear_page_bits(page);
+ bch2_clear_page_bits(&folio->page);
}
-int bch2_releasepage(struct page *page, gfp_t gfp_mask)
+bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
- if (PageDirty(page))
- return 0;
+ if (folio_test_dirty(folio) || folio_test_writeback(folio))
+ return false;
- bch2_clear_page_bits(page);
- return 1;
+ bch2_clear_page_bits(&folio->page);
+ return true;
}
-#ifdef CONFIG_MIGRATION
-int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- int ret;
-
- EBUG_ON(!PageLocked(page));
- EBUG_ON(!PageLocked(newpage));
-
- ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
- if (ret != MIGRATEPAGE_SUCCESS)
- return ret;
-
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
- get_page(newpage);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- SetPagePrivate(newpage);
- }
-
- if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
- else
- migrate_page_states(newpage, page);
- return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
/* readpage(s): */
static void bch2_readpages_end_io(struct bio *bio)
{
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_segment_all(bv, bio, iter) {
struct page *page = bv->bv_page;
if (!bio->bi_status) {
@@ -925,31 +958,29 @@ struct readpages_iter {
struct address_space *mapping;
struct page **pages;
unsigned nr_pages;
- unsigned nr_added;
unsigned idx;
pgoff_t offset;
};
static int readpages_iter_init(struct readpages_iter *iter,
- struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+ struct readahead_control *ractl)
{
+ unsigned i, nr_pages = readahead_count(ractl);
+
memset(iter, 0, sizeof(*iter));
- iter->mapping = mapping;
- iter->offset = list_last_entry(pages, struct page, lru)->index;
+ iter->mapping = ractl->mapping;
+ iter->offset = readahead_index(ractl);
+ iter->nr_pages = nr_pages;
iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!iter->pages)
return -ENOMEM;
- while (!list_empty(pages)) {
- struct page *page = list_last_entry(pages, struct page, lru);
-
- __bch2_page_state_create(page, __GFP_NOFAIL);
-
- iter->pages[iter->nr_pages++] = page;
- list_del(&page->lru);
+ nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
+ for (i = 0; i < nr_pages; i++) {
+ __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
+ put_page(iter->pages[i]);
}
return 0;
@@ -957,41 +988,9 @@ static int readpages_iter_init(struct readpages_iter *iter,
static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
- struct page *page;
- unsigned i;
- int ret;
-
- BUG_ON(iter->idx > iter->nr_added);
- BUG_ON(iter->nr_added > iter->nr_pages);
-
- if (iter->idx < iter->nr_added)
- goto out;
-
- while (1) {
- if (iter->idx == iter->nr_pages)
- return NULL;
-
- ret = add_to_page_cache_lru_vec(iter->mapping,
- iter->pages + iter->nr_added,
- iter->nr_pages - iter->nr_added,
- iter->offset + iter->nr_added,
- GFP_NOFS);
- if (ret > 0)
- break;
-
- page = iter->pages[iter->nr_added];
- iter->idx++;
- iter->nr_added++;
-
- __bch2_page_state_release(page);
- put_page(page);
- }
-
- iter->nr_added += ret;
+ if (iter->idx >= iter->nr_pages)
+ return NULL;
- for (i = iter->idx; i < iter->nr_added; i++)
- put_page(iter->pages[i]);
-out:
EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
return iter->pages[iter->idx];
@@ -1029,11 +1028,8 @@ static void readpage_bio_extend(struct readpages_iter *iter,
if (!get_more)
break;
- rcu_read_lock();
- page = radix_tree_lookup(&iter->mapping->i_pages, page_offset);
- rcu_read_unlock();
-
- if (page && !radix_tree_exceptional_entry(page))
+ page = xa_load(&iter->mapping->i_pages, page_offset);
+ if (page && !xa_is_value(page))
break;
page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
@@ -1098,10 +1094,9 @@ retry:
* read_extent -> io_time_reset may cause a transaction restart
* without returning an error, we need to check for that here:
*/
- if (!bch2_trans_relock(trans)) {
- ret = -EINTR;
+ ret = bch2_trans_relock(trans);
+ if (ret)
break;
- }
bch2_btree_iter_set_pos(&iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
@@ -1126,8 +1121,6 @@ retry:
sectors = min(sectors, k.k->size - offset_into_extent);
- bch2_trans_unlock(trans);
-
if (readpages_iter)
readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
extent_partial_reads_expensive(k));
@@ -1156,11 +1149,13 @@ retry:
err:
bch2_trans_iter_exit(trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (ret) {
- bch_err_inum_ratelimited(c, inum.inum,
+ bch_err_inum_offset_ratelimited(c,
+ iter.pos.inode,
+ iter.pos.offset << 9,
"read error %i from btree lookup", ret);
rbio->bio.bi_status = BLK_STS_IOERR;
bio_endio(&rbio->bio);
@@ -1169,37 +1164,38 @@ err:
bch2_bkey_buf_exit(&sk, c);
}
-int bch2_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+void bch2_readahead(struct readahead_control *ractl)
{
- struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
+ struct bch_io_opts opts;
struct btree_trans trans;
struct page *page;
struct readpages_iter readpages_iter;
int ret;
- ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
bch2_trans_init(&trans, c, 0, 0);
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
while ((page = readpage_iter_next(&readpages_iter))) {
pgoff_t index = readpages_iter.offset + readpages_iter.idx;
unsigned n = min_t(unsigned,
readpages_iter.nr_pages -
readpages_iter.idx,
- BIO_MAX_PAGES);
+ BIO_MAX_VECS);
struct bch_read_bio *rbio =
- rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
+ rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+ GFP_NOFS, &c->bio_read),
opts);
readpages_iter.idx++;
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
rbio->bio.bi_end_io = bch2_readpages_end_io;
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
@@ -1208,12 +1204,10 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
&readpages_iter);
}
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
bch2_trans_exit(&trans);
kfree(readpages_iter.pages);
-
- return 0;
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
@@ -1223,7 +1217,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
bch2_page_state_create(page, __GFP_NOFAIL);
- bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
+ rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
rbio->bio.bi_iter.bi_sector =
(sector_t) page->index << PAGE_SECTORS_SHIFT;
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
@@ -1233,20 +1227,6 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
bch2_trans_exit(&trans);
}
-int bch2_readpage(struct file *file, struct page *page)
-{
- struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
- struct bch_read_bio *rbio;
-
- rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
- rbio->bio.bi_end_io = bch2_readpages_end_io;
-
- __bchfs_readpage(c, rbio, inode_inum(inode), page);
- return 0;
-}
-
static void bch2_read_single_page_end_io(struct bio *bio)
{
complete(bio->bi_private);
@@ -1258,11 +1238,14 @@ static int bch2_read_single_page(struct page *page,
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_read_bio *rbio;
+ struct bch_io_opts opts;
int ret;
DECLARE_COMPLETION_ONSTACK(done);
- rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
- io_opts(c, &inode->ei_inode));
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
+ opts);
rbio->bio.bi_private = &done;
rbio->bio.bi_end_io = bch2_read_single_page_end_io;
@@ -1279,6 +1262,16 @@ static int bch2_read_single_page(struct page *page,
return 0;
}
+int bch2_read_folio(struct file *file, struct folio *folio)
+{
+ struct page *page = &folio->page;
+ int ret;
+
+ ret = bch2_read_single_page(page, page->mapping);
+ folio_unlock(folio);
+ return bch2_err_class(ret);
+}
+
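Taken together, these hunks move bcachefs onto the folio-era address_space hooks: ->readpage becomes ->read_folio, ->readpages becomes ->readahead, and ->writepage is dropped further down in favor of ->writepages only. The resulting ops table presumably ends up looking something like this (the actual definition lives in fs.c; the field wiring here is an assumption):

static const struct address_space_operations bch_address_space_operations = {
	.read_folio		= bch2_read_folio,
	.readahead		= bch2_readahead,
	.writepages		= bch2_writepages,
	.dirty_folio		= filemap_dirty_folio,
	.write_begin		= bch2_write_begin,
	.write_end		= bch2_write_end,
	.invalidate_folio	= bch2_invalidate_folio,
	.release_folio		= bch2_release_folio,
};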
/* writepages: */
struct bch_writepage_state {
@@ -1289,55 +1282,47 @@ struct bch_writepage_state {
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
struct bch_inode_info *inode)
{
- return (struct bch_writepage_state) {
- .opts = io_opts(c, &inode->ei_inode)
- };
-}
+ struct bch_writepage_state ret = { 0 };
-static void bch2_writepage_io_free(struct closure *cl)
-{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
-
- bio_put(&io->op.wbio.bio);
+ bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
+ return ret;
}
-static void bch2_writepage_io_done(struct closure *cl)
+static void bch2_writepage_io_done(struct bch_write_op *op)
{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
+ struct bch_writepage_io *io =
+ container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
+ struct bvec_iter_all iter;
struct bio_vec *bvec;
- unsigned i, j;
-
- up(&io->op.c->io_in_flight);
+ unsigned i;
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
SetPageError(bvec->bv_page);
- mapping_set_error(io->inode->v.i_mapping, -EIO);
+ mapping_set_error(bvec->bv_page->mapping, -EIO);
s = __bch2_page_state(bvec->bv_page);
spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
+ for (i = 0; i < PAGE_SECTORS; i++)
+ s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
s = __bch2_page_state(bvec->bv_page);
spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
+ for (i = 0; i < PAGE_SECTORS; i++)
+ s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
@@ -1346,7 +1331,7 @@ static void bch2_writepage_io_done(struct closure *cl)
* racing with fallocate can cause us to add fewer sectors than
* expected - but we shouldn't add more sectors than expected:
*/
- WARN_ON(io->op.i_sectors_delta > 0);
+ WARN_ON_ONCE(io->op.i_sectors_delta > 0);
/*
* (error (due to going RO) halfway through a page can screw that up
@@ -1361,25 +1346,22 @@ static void bch2_writepage_io_done(struct closure *cl)
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
if (atomic_dec_and_test(&s->write_count))
end_page_writeback(bvec->bv_page);
}
- closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
+ bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
struct bch_writepage_io *io = w->io;
- down(&io->op.c->io_in_flight);
-
w->io = NULL;
- closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
- continue_at(&io->cl, bch2_writepage_io_done, NULL);
+ closure_call(&io->op.cl, bch2_write, NULL, NULL);
}
/*
@@ -1395,13 +1377,13 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
{
struct bch_write_op *op;
- w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES,
+ w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+ REQ_OP_WRITE,
+ GFP_NOFS,
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
- closure_init(&w->io->cl, NULL);
w->io->inode = inode;
-
op = &w->io->op;
bch2_write_op_init(op, c, w->opts);
op->target = w->opts.foreground_target;
@@ -1410,6 +1392,8 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
op->subvol = inode->ei_subvol;
op->pos = POS(inode->v.i_ino, sector);
+ op->end_io = bch2_writepage_io_done;
+ op->devs_need_flush = &inode->ei_devs_need_flush;
op->wbio.bio.bi_iter.bi_sector = sector;
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
@@ -1515,9 +1499,9 @@ do_io:
if (w->io &&
(w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.wbio.bio) ||
+ bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
- (BIO_MAX_PAGES * PAGE_SIZE) ||
+ (BIO_MAX_VECS * PAGE_SIZE) ||
bio_end_sector(&w->io->op.wbio.bio) != sector))
bch2_writepage_do_io(w);
@@ -1532,8 +1516,13 @@ do_io:
sectors << 9, offset << 9));
/* Check for writing past i_size: */
- WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
- round_up(i_size, block_bytes(c)));
+ WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+ round_up(i_size, block_bytes(c)) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+ "writing past i_size: %llu > %llu (unrounded %llu)\n",
+ bio_end_sector(&w->io->op.wbio.bio) << 9,
+ round_up(i_size, block_bytes(c)),
+ i_size);
w->io->op.res.sectors += reserved_sectors;
w->io->op.i_sectors_delta -= dirty_sectors;
@@ -1561,27 +1550,13 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
if (w.io)
bch2_writepage_do_io(&w);
blk_finish_plug(&plug);
- return ret;
-}
-
-int bch2_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
- struct bch_writepage_state w =
- bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
- int ret;
-
- ret = __bch2_writepage(page, wbc, &w);
- if (w.io)
- bch2_writepage_do_io(&w);
-
- return ret;
+ return bch2_err_class(ret);
}
/* buffered writes: */
int bch2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
@@ -1599,9 +1574,9 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
bch2_page_reservation_init(c, inode, res);
*fsdata = res;
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
- page = grab_cache_page_write_begin(mapping, index, flags);
+ page = grab_cache_page_write_begin(mapping, index);
if (!page)
goto err_unlock;
@@ -1631,11 +1606,10 @@ out:
if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate) {
ret = bch2_page_state_set(c, inode_inum(inode), &page, 1);
if (ret)
- goto out;
+ goto err;
}
- ret = bch2_page_reservation_get(c, inode, page, res,
- offset, len, true);
+ ret = bch2_page_reservation_get(c, inode, page, res, offset, len);
if (ret) {
if (!PageUptodate(page)) {
/*
@@ -1657,10 +1631,10 @@ err:
put_page(page);
*pagep = NULL;
err_unlock:
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
kfree(res);
*fsdata = NULL;
- return ret;
+ return bch2_err_class(ret);
}
int bch2_write_end(struct file *file, struct address_space *mapping,
@@ -1701,7 +1675,7 @@ int bch2_write_end(struct file *file, struct address_space *mapping,
unlock_page(page);
put_page(page);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
bch2_page_reservation_put(c, inode, res);
kfree(res);
@@ -1732,7 +1706,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
bch2_page_reservation_init(c, inode, &res);
for (i = 0; i < nr_pages; i++) {
- pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
+ pages[i] = grab_cache_page_write_begin(mapping, index + i);
if (!pages[i]) {
nr_pages = i;
if (!i) {
@@ -1776,10 +1750,21 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
goto out;
}
+ /*
+ * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+ * supposed to write as much as we have disk space for.
+ *
+ * On failure here we should still write out a partial page if
+ * we aren't completely out of disk space - we don't do that
+ * yet:
+ */
ret = bch2_page_reservation_get(c, inode, page, &res,
- pg_offset, pg_len, true);
- if (ret)
- goto out;
+ pg_offset, pg_len);
+ if (unlikely(ret)) {
+ if (!reserved)
+ goto out;
+ break;
+ }
reserved += pg_len;
}
@@ -1788,13 +1773,13 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
for (i = 0; i < nr_pages; i++)
flush_dcache_page(pages[i]);
- while (copied < len) {
+ while (copied < reserved) {
struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
- unsigned pg_len = min_t(unsigned, len - copied,
+ unsigned pg_len = min_t(unsigned, reserved - copied,
PAGE_SIZE - pg_offset);
- unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
- iter, pg_offset, pg_len);
+ unsigned pg_copied = copy_page_from_iter_atomic(page,
+ pg_offset, pg_len, iter);
if (!pg_copied)
break;
@@ -1807,7 +1792,6 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
}
flush_dcache_page(page);
- iov_iter_advance(iter, pg_copied);
copied += pg_copied;
if (pg_copied != pg_len)
@@ -1860,7 +1844,7 @@ static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
ssize_t written = 0;
int ret = 0;
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
do {
unsigned offset = pos & (PAGE_SIZE - 1);
@@ -1877,11 +1861,11 @@ again:
* to check that the address is actually valid, when atomic
* usercopies are used, below.
*/
- if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
bytes = min_t(unsigned long, iov_iter_count(iter),
PAGE_SIZE - offset);
- if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
ret = -EFAULT;
break;
}
@@ -1918,25 +1902,13 @@ again:
balance_dirty_pages_ratelimited(mapping);
} while (iov_iter_count(iter));
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
return written ? written : ret;
}
/* O_DIRECT reads */
-static void bio_release_pages(struct bio *bio, bool mark_dirty)
-{
- struct bio_vec *bvec;
- unsigned i;
-
- bio_for_each_segment_all(bvec, bio, i) {
- if (mark_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- put_page(bvec->bv_page);
- }
-}
-
static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
if (check_dirty) {
@@ -1951,7 +1923,7 @@ static void bch2_dio_read_complete(struct closure *cl)
{
struct dio_read *dio = container_of(cl, struct dio_read, cl);
- dio->req->ki_complete(dio->req, dio->ret, 0);
+ dio->req->ki_complete(dio->req, dio->ret);
bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}
@@ -1979,7 +1951,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
struct file *file = req->ki_filp;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
+ struct bch_io_opts opts;
struct dio_read *dio;
struct bio *bio;
loff_t offset = req->ki_pos;
@@ -1987,6 +1959,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
size_t shorten;
ssize_t ret;
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
if ((offset|iter->count) & (block_bytes(c) - 1))
return -EINVAL;
@@ -1999,8 +1973,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
iter->count -= shorten;
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
&c->dio_read_bioset);
bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -2034,12 +2010,14 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
goto start;
while (iter->count) {
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
&c->bio_read);
bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
- bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC;
bio->bi_iter.bi_sector = offset >> 9;
bio->bi_private = dio;
@@ -2089,11 +2067,13 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (iocb->ki_flags & IOCB_DIRECT) {
struct blk_plug plug;
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
- if (ret < 0)
- return ret;
+ if (unlikely(mapping->nrpages)) {
+ ret = filemap_write_and_wait_range(mapping,
+ iocb->ki_pos,
+ iocb->ki_pos + count - 1);
+ if (ret < 0)
+ goto out;
+ }
file_accessed(file);
@@ -2104,12 +2084,12 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (ret >= 0)
iocb->ki_pos += ret;
} else {
- bch2_pagecache_add_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_get(inode);
ret = generic_file_read_iter(iocb, iter);
- bch2_pagecache_add_put(&inode->ei_pagecache_lock);
+ bch2_pagecache_add_put(inode);
}
-
- return ret;
+out:
+ return bch2_err_class(ret);
}
/* O_DIRECT writes */
@@ -2137,7 +2117,7 @@ retry:
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, err) {
- if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
+ if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
break;
if (k.k->p.snapshot != snapshot ||
@@ -2151,39 +2131,172 @@ retry:
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (err == -EINTR)
+ if (bch2_err_matches(err, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
return err ? false : ret;
}
+static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
+
+ return bch2_check_range_allocated(c, inode_inum(inode),
+ dio->op.pos.offset, bio_sectors(bio),
+ dio->op.opts.data_replicas,
+ dio->op.opts.compression != 0);
+}
+
static void bch2_dio_write_loop_async(struct bch_write_op *);
+static __always_inline long bch2_dio_write_done(struct dio_write *dio);
+
+static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
+{
+ struct iovec *iov = dio->inline_vecs;
+
+ if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
+ iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+ GFP_KERNEL);
+ if (unlikely(!iov))
+ return -ENOMEM;
+
+ dio->free_iov = true;
+ }
-static long bch2_dio_write_loop(struct dio_write *dio)
+ memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.iov = iov;
+ return 0;
+}
+
+static void bch2_dio_write_flush_done(struct closure *cl)
+{
+ struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+ struct bch_fs *c = dio->op.c;
+
+ closure_debug_destroy(cl);
+
+ dio->op.error = bch2_journal_error(&c->journal);
+
+ bch2_dio_write_done(dio);
+}
+
+static noinline void bch2_dio_write_flush(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ dio->flush = 0;
+
+ closure_init(&dio->op.cl, NULL);
+
+ if (!dio->op.error) {
+ ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
+ if (ret) {
+ dio->op.error = ret;
+ } else {
+ bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+ bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+ }
+ }
+
+ if (dio->sync) {
+ closure_sync(&dio->op.cl);
+ closure_debug_destroy(&dio->op.cl);
+ } else {
+ continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
+ }
+}
+
+static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
- bool kthread = (current->flags & PF_KTHREAD) != 0;
struct kiocb *req = dio->req;
- struct address_space *mapping = req->ki_filp->f_mapping;
- struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_inode_info *inode = dio->inode;
+ bool sync = dio->sync;
+ long ret;
+
+ if (unlikely(dio->flush)) {
+ bch2_dio_write_flush(dio);
+ if (!sync)
+ return -EIOCBQUEUED;
+ }
+
+ bch2_pagecache_block_put(inode);
+
+ if (dio->free_iov)
+ kfree(dio->iter.iov);
+
+ ret = dio->op.error ?: ((long) dio->written << 9);
+ bio_put(&dio->op.wbio.bio);
+
+ /* inode->i_dio_count is our ref on inode and thus bch_fs */
+ inode_dio_end(&inode->v);
+
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+
+ if (!sync) {
+ req->ki_complete(req, ret);
+ ret = -EIOCBQUEUED;
+ }
+ return ret;
+}
+
+static __always_inline void bch2_dio_write_end(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
struct bio *bio = &dio->op.wbio.bio;
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i, unaligned, iter_count;
+
+ req->ki_pos += (u64) dio->op.written << 9;
+ dio->written += dio->op.written;
+
+ if (dio->extending) {
+ spin_lock(&inode->v.i_lock);
+ if (req->ki_pos > inode->v.i_size)
+ i_size_write(&inode->v, req->ki_pos);
+ spin_unlock(&inode->v.i_lock);
+ }
+
+ if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
+ __bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+
+ if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+ bio_for_each_segment_all(bv, bio, iter)
+ put_page(bv->bv_page);
+
+ if (unlikely(dio->op.error))
+ set_bit(EI_INODE_ERROR, &inode->ei_flags);
+}
+
+static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct address_space *mapping = dio->mapping;
+ struct bch_inode_info *inode = dio->inode;
+ struct bch_io_opts opts;
+ struct bio *bio = &dio->op.wbio.bio;
+ unsigned unaligned, iter_count;
bool sync = dio->sync, dropped_locks;
long ret;
- if (dio->loop)
- goto loop;
-
- down(&c->io_in_flight);
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
while (1) {
iter_count = dio->iter.count;
- if (kthread)
- use_mm(dio->mm);
- BUG_ON(current->faults_disabled_mapping);
+ EBUG_ON(current->faults_disabled_mapping);
current->faults_disabled_mapping = mapping;
ret = bio_iov_iter_get_pages(bio, &dio->iter);
@@ -2191,8 +2304,6 @@ static long bch2_dio_write_loop(struct dio_write *dio)
dropped_locks = fdm_dropped_locks();
current->faults_disabled_mapping = NULL;
- if (kthread)
- unuse_mm(dio->mm);
/*
* If the fault handler returned an error but also signalled
@@ -2229,113 +2340,93 @@ static long bch2_dio_write_loop(struct dio_write *dio)
goto err;
}
- bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
- dio->op.end_io = bch2_dio_write_loop_async;
+ bch2_write_op_init(&dio->op, c, opts);
+ dio->op.end_io = sync
+ ? NULL
+ : bch2_dio_write_loop_async;
dio->op.target = dio->op.opts.foreground_target;
dio->op.write_point = writepoint_hashed((unsigned long) current);
dio->op.nr_replicas = dio->op.opts.data_replicas;
dio->op.subvol = inode->ei_subvol;
dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+ dio->op.devs_need_flush = &inode->ei_devs_need_flush;
- if ((req->ki_flags & IOCB_DSYNC) &&
- !c->opts.journal_flush_disabled)
- dio->op.flags |= BCH_WRITE_FLUSH;
+ if (sync)
+ dio->op.flags |= BCH_WRITE_SYNC;
dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+ ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
+ bio_sectors(bio), true);
+ if (unlikely(ret))
+ goto err;
+
ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
dio->op.opts.data_replicas, 0);
if (unlikely(ret) &&
- !bch2_check_range_allocated(c, inode_inum(inode),
- dio->op.pos.offset, bio_sectors(bio),
- dio->op.opts.data_replicas,
- dio->op.opts.compression != 0))
+ !bch2_dio_write_check_allocated(dio))
goto err;
task_io_account_write(bio->bi_iter.bi_size);
- if (!dio->sync && !dio->loop && dio->iter.count) {
- struct iovec *iov = dio->inline_vecs;
-
- if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
- iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
- GFP_KERNEL);
- if (unlikely(!iov)) {
- dio->sync = sync = true;
- goto do_io;
- }
-
- dio->free_iov = true;
- }
+ if (unlikely(dio->iter.count) &&
+ !dio->sync &&
+ !dio->loop &&
+ bch2_dio_write_copy_iov(dio))
+ dio->sync = sync = true;
- memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
- dio->iter.iov = iov;
- }
-do_io:
dio->loop = true;
closure_call(&dio->op.cl, bch2_write, NULL, NULL);
- if (sync)
- wait_for_completion(&dio->done);
- else
+ if (!sync)
return -EIOCBQUEUED;
-loop:
- i_sectors_acct(c, inode, &dio->quota_res,
- dio->op.i_sectors_delta);
- req->ki_pos += (u64) dio->op.written << 9;
- dio->written += dio->op.written;
- spin_lock(&inode->v.i_lock);
- if (req->ki_pos > inode->v.i_size)
- i_size_write(&inode->v, req->ki_pos);
- spin_unlock(&inode->v.i_lock);
+ bch2_dio_write_end(dio);
- bio_for_each_segment_all(bv, bio, i)
- put_page(bv->bv_page);
- bio->bi_vcnt = 0;
-
- if (dio->op.error) {
- set_bit(EI_INODE_ERROR, &inode->ei_flags);
+ if (likely(!dio->iter.count) || dio->op.error)
break;
- }
- if (!dio->iter.count)
- break;
+ bio_reset(bio, NULL, REQ_OP_WRITE);
+ }
+out:
+ return bch2_dio_write_done(dio);
+err:
+ dio->op.error = ret;
- bio_reset(bio);
- reinit_completion(&dio->done);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+ struct bvec_iter_all iter;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, iter)
+ put_page(bv->bv_page);
}
- ret = dio->op.error ?: ((long) dio->written << 9);
-err:
- up(&c->io_in_flight);
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ goto out;
+}
- if (dio->free_iov)
- kfree(dio->iter.iov);
-
- bio_for_each_segment_all(bv, bio, i)
- put_page(bv->bv_page);
- bio_put(bio);
+static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
+{
+ struct mm_struct *mm = dio->mm;
- /* inode->i_dio_count is our ref on inode and thus bch_fs */
- inode_dio_end(&inode->v);
+ bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
- if (!sync) {
- req->ki_complete(req, ret, 0);
- ret = -EIOCBQUEUED;
- }
- return ret;
+ if (mm)
+ kthread_use_mm(mm);
+ bch2_dio_write_loop(dio);
+ if (mm)
+ kthread_unuse_mm(mm);
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
struct dio_write *dio = container_of(op, struct dio_write, op);
- if (dio->sync)
- complete(&dio->done);
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error)
+ bch2_dio_write_done(dio);
else
- bch2_dio_write_loop(dio);
+ bch2_dio_write_continue(dio);
}
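With completions driven by op.end_io rather than a struct completion, the rewritten dio write path is a small state machine. A sketch of the control flow (editorial summary of the code above, not part of the patch):

/*
 * bch2_direct_write()
 *   -> bch2_dio_write_loop(): pin user pages, set up dio->op, submit
 *      via bch2_write()
 *        sync:  loop runs inline, then bch2_dio_write_done()
 *        async: return -EIOCBQUEUED; write completion re-enters at
 *   bch2_dio_write_loop_async()
 *     -> bch2_dio_write_end(): i_size update, quota/i_sectors
 *        accounting, page refs
 *     -> iter drained or error: bch2_dio_write_done(), detouring
 *        through bch2_dio_write_flush() first if this was an O_DSYNC
 *        write (dio->flush)
 *     -> otherwise: bch2_dio_write_continue(), which reattaches the
 *        submitter's mm and runs bch2_dio_write_loop() again
 */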
static noinline
@@ -2373,7 +2464,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
goto err;
inode_dio_begin(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
extending = req->ki_pos + iter->count > inode->v.i_size;
if (!extending) {
@@ -2381,30 +2472,33 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
locked = false;
}
- bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_WRITE,
+ GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
- init_completion(&dio->done);
dio->req = req;
+ dio->mapping = mapping;
+ dio->inode = inode;
dio->mm = current->mm;
dio->loop = false;
+ dio->extending = extending;
dio->sync = is_sync_kiocb(req) || extending;
+ dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
dio->free_iov = false;
dio->quota_res.sectors = 0;
dio->written = 0;
dio->iter = *iter;
+ dio->op.c = c;
- ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
- iter->count >> 9, true);
- if (unlikely(ret))
- goto err_put_bio;
-
- ret = write_invalidate_inode_pages_range(mapping,
- req->ki_pos,
- req->ki_pos + iter->count - 1);
- if (unlikely(ret))
- goto err_put_bio;
+ if (unlikely(mapping->nrpages)) {
+ ret = write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter->count - 1);
+ if (unlikely(ret))
+ goto err_put_bio;
+ }
ret = bch2_dio_write_loop(dio);
err:
@@ -2412,8 +2506,7 @@ err:
inode_unlock(&inode->v);
return ret;
err_put_bio:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ bch2_pagecache_block_put(inode);
bio_put(bio);
inode_dio_end(&inode->v);
goto err;
@@ -2425,8 +2518,10 @@ ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct bch_inode_info *inode = file_bch_inode(file);
ssize_t ret;
- if (iocb->ki_flags & IOCB_DIRECT)
- return bch2_direct_write(iocb, from);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = bch2_direct_write(iocb, from);
+ goto out;
+ }
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(&inode->v);
@@ -2453,8 +2548,8 @@ unlock:
if (ret > 0)
ret = generic_write_sync(iocb, ret);
-
- return ret;
+out:
+ return bch2_err_class(ret);
}
/* fsync: */
@@ -2463,19 +2558,21 @@ unlock:
* inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
* insert trigger: look up the btree inode instead
*/
-static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
+static int bch2_flush_inode(struct bch_fs *c,
+ struct bch_inode_info *inode)
{
- struct bch_inode_unpacked inode;
+ struct bch_inode_unpacked u;
int ret;
if (c->opts.journal_flush_disabled)
return 0;
- ret = bch2_inode_find_by_inum(c, inum, &inode);
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
if (ret)
return ret;
- return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
+ return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -2486,9 +2583,9 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = file_write_and_wait_range(file, start, end);
ret2 = sync_inode_metadata(&inode->v, 1);
- ret3 = bch2_flush_inode(c, inode_inum(inode));
+ ret3 = bch2_flush_inode(c, inode);
- return ret ?: ret2 ?: ret3;
+ return bch2_err_class(ret ?: ret2 ?: ret3);
}
/* truncate: */
@@ -2510,19 +2607,15 @@ retry:
if (ret)
goto err;
- for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
- if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
- break;
-
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
if (bkey_extent_is_data(k.k)) {
ret = 1;
break;
}
- }
start = iter.pos;
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
@@ -2558,8 +2651,8 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
* page
*/
ret = range_has_data(c, inode->ei_subvol,
- POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
- POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
if (ret <= 0)
return ret;
@@ -2622,7 +2715,7 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
* redirty the full page:
*/
page_mkclean(page);
- __set_page_dirty_nobuffers(page);
+ filemap_dirty_folio(mapping, page_folio(page));
unlock:
unlock_page(page);
put_page(page);
@@ -2669,7 +2762,7 @@ static int bch2_extend(struct user_namespace *mnt_userns,
truncate_setsize(&inode->v, iattr->ia_size);
- return bch2_setattr_nonsize(inode, iattr);
+ return bch2_setattr_nonsize(mnt_userns, inode, iattr);
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
@@ -2714,7 +2807,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
}
inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
if (ret)
@@ -2729,8 +2822,10 @@ int bch2_truncate(struct user_namespace *mnt_userns,
if (ret)
goto err;
- WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
- inode->v.i_size < inode_u.bi_size);
+ WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+ inode->v.i_size < inode_u.bi_size,
+ "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
+ (u64) inode->v.i_size, inode_u.bi_size);
if (iattr->ia_size > inode->v.i_size) {
ret = bch2_extend(mnt_userns, inode, &inode_u, iattr);
@@ -2779,9 +2874,11 @@ int bch2_truncate(struct user_namespace *mnt_userns,
U64_MAX, &i_sectors_delta);
i_sectors_acct(c, inode, NULL, i_sectors_delta);
- WARN_ON(!inode->v.i_size && inode->v.i_blocks &&
- !bch2_journal_error(&c->journal));
-
+ bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
+ !bch2_journal_error(&c->journal), c,
+ "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks,
+ inode->ei_inode.bi_sectors);
if (unlikely(ret))
goto err;
@@ -2789,10 +2886,10 @@ int bch2_truncate(struct user_namespace *mnt_userns,
ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
mutex_unlock(&inode->ei_update_lock);
- ret = bch2_setattr_nonsize(inode, iattr);
+ ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
err:
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- return ret;
+ bch2_pagecache_block_put(inode);
+ return bch2_err_class(ret);
}
/* fallocate: */
@@ -2823,7 +2920,7 @@ static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len
truncate_pagecache_range(&inode->v, offset, end - 1);
- if (block_start < block_end ) {
+ if (block_start < block_end) {
s64 i_sectors_delta = 0;
ret = bch2_fpunch(c, inode_inum(inode),
@@ -2910,7 +3007,8 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
bch2_trans_copy_iter(&dst, &src);
bch2_trans_copy_iter(&del, &src);
- while (ret == 0 || ret == -EINTR) {
+ while (ret == 0 ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(c, 0);
struct bkey_i delete;
@@ -2936,7 +3034,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
k = insert
? bch2_btree_iter_peek_prev(&src)
- : bch2_btree_iter_peek(&src);
+ : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
if ((ret = bkey_err(k)))
continue;
@@ -2944,13 +3042,13 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
break;
if (insert &&
- bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
+ bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
break;
reassemble:
bch2_bkey_buf_reassemble(&copy, c, k);
if (insert &&
- bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
+ bkey_lt(bkey_start_pos(k.k), move_pos))
bch2_cut_front(move_pos, copy.k);
copy.k->k.p.offset += shift >> 9;
@@ -2960,7 +3058,7 @@ reassemble:
if (ret)
continue;
- if (bkey_cmp(atomic_end, copy.k->k.p)) {
+ if (!bkey_eq(atomic_end, copy.k->k.p)) {
if (insert) {
move_pos = atomic_end;
move_pos.offset -= shift >> 9;
@@ -2978,13 +3076,7 @@ reassemble:
next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
- if (copy.k->k.size == k.k->size) {
- /*
- * If we're moving the entire extent, we can skip
- * running triggers:
- */
- trigger_flags |= BTREE_TRIGGER_NORUN;
- } else {
+ if (copy.k->k.size != k.k->size) {
/* We might end up splitting compressed extents: */
unsigned nr_ptrs =
bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
@@ -3035,20 +3127,19 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
struct btree_trans trans;
struct btree_iter iter;
struct bpos end_pos = POS(inode->v.i_ino, end_sector);
- unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
+ struct bch_io_opts opts;
int ret = 0;
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
POS(inode->v.i_ino, start_sector),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
+ while (!ret && bkey_lt(iter.pos, end_pos)) {
s64 i_sectors_delta = 0;
- struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
- struct bkey_i_reservation reservation;
struct bkey_s_c k;
unsigned sectors;
u32 snapshot;
@@ -3067,8 +3158,8 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
goto bkey_err;
/* already reserved */
- if (k.k->type == KEY_TYPE_reservation &&
- bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
+ if (bkey_extent_is_reservation(k) &&
+ bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
bch2_btree_iter_advance(&iter);
continue;
}
@@ -3079,16 +3170,12 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
continue;
}
- bkey_reservation_init(&reservation.k_i);
- reservation.k.type = KEY_TYPE_reservation;
- reservation.k.p = k.k->p;
- reservation.k.size = k.k->size;
-
- bch2_cut_front(iter.pos, &reservation.k_i);
- bch2_cut_back(end_pos, &reservation.k_i);
+ /*
+ * XXX: for nocow mode, we should promote shared extents to
+ * unshared here
+ */
- sectors = reservation.k.size;
- reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
+ sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
if (!bkey_extent_is_allocation(k.k)) {
ret = bch2_quota_reservation_add(c, inode,
@@ -3098,34 +3185,23 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
goto bkey_err;
}
- if (reservation.v.nr_replicas < replicas ||
- bch2_bkey_sectors_compressed(k)) {
- ret = bch2_disk_reservation_get(c, &disk_res, sectors,
- replicas, 0);
- if (unlikely(ret))
- goto bkey_err;
-
- reservation.v.nr_replicas = disk_res.nr_replicas;
- }
-
- ret = bch2_extent_update(&trans, inode_inum(inode), &iter,
- &reservation.k_i,
- &disk_res, NULL,
- 0, &i_sectors_delta, true);
+ ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter,
+ sectors, opts, &i_sectors_delta,
+ writepoint_hashed((unsigned long) current));
if (ret)
goto bkey_err;
+
i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
bkey_err:
bch2_quota_reservation_put(c, inode, &quota_res);
- bch2_disk_reservation_put(c, &disk_res);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
}
bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
- if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
+ if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
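The deleted loop body above open-coded a KEY_TYPE_reservation insert; that logic now moves behind bch2_extent_fallocate(), with bpos_min() clamping the current key to the fallocate range when computing sectors. What follows is an illustrative-only reconstruction of roughly what such a helper folds together, pieced from the code deleted here; the real implementation, added elsewhere in this series, also handles unwritten extents for nocow mode:

static int extent_fallocate_sketch(struct btree_trans *trans,
				   subvol_inum inum,
				   struct btree_iter *iter,
				   unsigned sectors,
				   struct bch_io_opts opts,
				   s64 *i_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct disk_reservation disk_res = { 0 };
	struct bkey_i_reservation res;
	int ret;

	/* reservation extent covering [iter->pos, iter->pos + sectors) */
	bkey_reservation_init(&res.k_i);
	res.k.p		= iter->pos;
	res.k.p.offset += sectors;
	res.k.size	= sectors;

	ret = bch2_disk_reservation_get(c, &disk_res, sectors,
					opts.data_replicas, 0);
	if (unlikely(ret))
		return ret;

	res.v.nr_replicas = disk_res.nr_replicas;

	ret = bch2_extent_update(trans, inum, iter, &res.k_i, &disk_res,
				 NULL, 0, i_sectors_delta, true);
	bch2_disk_reservation_put(c, &disk_res);
	return ret;
}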
@@ -3176,7 +3252,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
* so that the VFS cache i_size is consistent with the btree i_size:
*/
if (ret &&
- !(ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)))
+ !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
return ret;
if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
@@ -3204,12 +3280,16 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
long ret;
- if (!percpu_ref_tryget(&c->writes))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
return -EROFS;
inode_lock(&inode->v);
inode_dio_wait(&inode->v);
- bch2_pagecache_block_get(&inode->ei_pagecache_lock);
+ bch2_pagecache_block_get(inode);
+
+ ret = file_modified(file);
+ if (ret)
+ goto err;
if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
ret = bchfs_fallocate(inode, mode, offset, len);
@@ -3221,242 +3301,65 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
ret = bchfs_fcollapse_finsert(inode, offset, len, false);
else
ret = -EOPNOTSUPP;
-
-
- bch2_pagecache_block_put(&inode->ei_pagecache_lock);
+err:
+ bch2_pagecache_block_put(inode);
inode_unlock(&inode->v);
- percpu_ref_put(&c->writes);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
- return ret;
+ return bch2_err_class(ret);
}
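bch2_write_ref_tryget()/bch2_write_ref_put() replace the bare percpu_ref on c->writes, so each subsystem that holds the filesystem writeable is accounted under its own enum constant (BCH_WRITE_REF_fallocate here), making "who is blocking read-only?" answerable. A plausible shape for the helpers, assuming a debug build keeps one counter per ref while a release build keeps the old percpu_ref; this is a sketch only, and the real put path also wakes the going-read-only waiter when the last ref drops:

static inline bool bch2_write_ref_tryget(struct bch_fs *c,
					 enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
	return !test_bit(BCH_FS_GOING_RO, &c->flags) &&
		atomic_long_inc_not_zero(&c->writes[ref]);
#else
	return percpu_ref_tryget(&c->writes);
#endif
}

static inline void bch2_write_ref_put(struct bch_fs *c,
				      enum bch_write_ref ref)
{
#ifdef BCH_WRITE_REF_DEBUG
	BUG_ON(atomic_long_dec_return(&c->writes[ref]) < 0);
#else
	percpu_ref_put(&c->writes);
#endif
}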
-static int generic_access_check_limits(struct file *file, loff_t pos,
- loff_t *count)
-{
- struct inode *inode = file->f_mapping->host;
- loff_t max_size = inode->i_sb->s_maxbytes;
-
- if (!(file->f_flags & O_LARGEFILE))
- max_size = MAX_NON_LFS;
-
- if (unlikely(pos >= max_size))
- return -EFBIG;
- *count = min(*count, max_size - pos);
- return 0;
-}
-
-static int generic_write_check_limits(struct file *file, loff_t pos,
- loff_t *count)
-{
- loff_t limit = rlimit(RLIMIT_FSIZE);
-
- if (limit != RLIM_INFINITY) {
- if (pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- *count = min(*count, limit - pos);
- }
-
- return generic_access_check_limits(file, pos, count);
-}
-
-static int generic_remap_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *req_count, unsigned int remap_flags)
+/*
+ * Take a quota reservation for unallocated blocks in a given file range.
+ * Does not check pagecache.
+ */
+static int quota_reserve_range(struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 start, u64 end)
{
- struct inode *inode_in = file_in->f_mapping->host;
- struct inode *inode_out = file_out->f_mapping->host;
- uint64_t count = *req_count;
- uint64_t bcount;
- loff_t size_in, size_out;
- loff_t bs = inode_out->i_sb->s_blocksize;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot;
+ u64 sectors = end - start;
+ u64 pos = start;
int ret;
- /* The start of both ranges must be aligned to an fs block. */
- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
- return -EINVAL;
-
- /* Ensure offsets don't wrap. */
- if (pos_in + count < pos_in || pos_out + count < pos_out)
- return -EINVAL;
-
- size_in = i_size_read(inode_in);
- size_out = i_size_read(inode_out);
-
- /* Dedupe requires both ranges to be within EOF. */
- if ((remap_flags & REMAP_FILE_DEDUP) &&
- (pos_in >= size_in || pos_in + count > size_in ||
- pos_out >= size_out || pos_out + count > size_out))
- return -EINVAL;
-
- /* Ensure the infile range is within the infile. */
- if (pos_in >= size_in)
- return -EINVAL;
- count = min(count, size_in - (uint64_t)pos_in);
-
- ret = generic_access_check_limits(file_in, pos_in, &count);
- if (ret)
- return ret;
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
- ret = generic_write_check_limits(file_out, pos_out, &count);
+ ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot);
if (ret)
- return ret;
-
- /*
- * If the user wanted us to link to the infile's EOF, round up to the
- * next block boundary for this check.
- *
- * Otherwise, make sure the count is also block-aligned, having
- * already confirmed the starting offsets' block alignment.
- */
- if (pos_in + count == size_in) {
- bcount = ALIGN(size_in, bs) - pos_in;
- } else {
- if (!IS_ALIGNED(count, bs))
- count = ALIGN_DOWN(count, bs);
- bcount = count;
- }
-
- /* Don't allow overlapped cloning within the same file. */
- if (inode_in == inode_out &&
- pos_out + bcount > pos_in &&
- pos_out < pos_in + bcount)
- return -EINVAL;
-
- /*
- * We shortened the request but the caller can't deal with that, so
- * bounce the request back to userspace.
- */
- if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
- return -EINVAL;
-
- *req_count = count;
- return 0;
-}
-
-static int generic_remap_check_len(struct inode *inode_in,
- struct inode *inode_out,
- loff_t pos_out,
- loff_t *len,
- unsigned int remap_flags)
-{
- u64 blkmask = i_blocksize(inode_in) - 1;
- loff_t new_len = *len;
-
- if ((*len & blkmask) == 0)
- return 0;
-
- if ((remap_flags & REMAP_FILE_DEDUP) ||
- pos_out + *len < i_size_read(inode_out))
- new_len &= ~blkmask;
-
- if (new_len == *len)
- return 0;
-
- if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
- *len = new_len;
- return 0;
- }
-
- return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
-}
-
-static int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *len, unsigned int remap_flags)
-{
- struct inode *inode_in = file_inode(file_in);
- struct inode *inode_out = file_inode(file_out);
- bool same_inode = (inode_in == inode_out);
- int ret;
-
- /* Don't touch certain kinds of inodes */
- if (IS_IMMUTABLE(inode_out))
- return -EPERM;
-
- if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
- return -ETXTBSY;
-
- /* Don't reflink dirs, pipes, sockets... */
- if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
- return -EISDIR;
- if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
- return -EINVAL;
-
- /* Zero length dedupe exits immediately; reflink goes to EOF. */
- if (*len == 0) {
- loff_t isize = i_size_read(inode_in);
+ goto err;
- if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
- return 0;
- if (pos_in > isize)
- return -EINVAL;
- *len = isize - pos_in;
- if (*len == 0)
- return 0;
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, pos, snapshot), 0);
+
+ while (!(ret = btree_trans_too_many_iters(&trans)) &&
+ (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+ !(ret = bkey_err(k))) {
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+ bch2_btree_iter_advance(&iter);
}
+ pos = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
- /* Check that we don't violate system file offset limits. */
- ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
- remap_flags);
- if (ret)
- return ret;
-
- /* Wait for the completion of any pending IOs on both files */
- inode_dio_wait(inode_in);
- if (!same_inode)
- inode_dio_wait(inode_out);
-
- ret = filemap_write_and_wait_range(inode_in->i_mapping,
- pos_in, pos_in + *len - 1);
- if (ret)
- return ret;
-
- ret = filemap_write_and_wait_range(inode_out->i_mapping,
- pos_out, pos_out + *len - 1);
- if (ret)
- return ret;
-
- /*
- * Check that the extents are the same.
- */
- if (remap_flags & REMAP_FILE_DEDUP) {
- bool is_same = false;
-
- ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
- inode_out, pos_out, *len, &is_same);
- if (ret)
- return ret;
- if (!is_same)
- return -EBADE;
- }
+ bch2_trans_exit(&trans);
- ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
- remap_flags);
if (ret)
return ret;
- /* If can't alter the file contents, we're done. */
- if (!(remap_flags & REMAP_FILE_DEDUP)) {
- /* Update the timestamps, since we can alter file contents. */
- if (!(file_out->f_mode & FMODE_NOCMTIME)) {
- ret = file_update_time(file_out);
- if (ret)
- return ret;
- }
-
- /*
- * Clear the security bits if the process is not being run by
- * root. This keeps people from modifying setuid and setgid
- * binaries.
- */
- ret = file_remove_privs(file_out);
- if (ret)
- return ret;
- }
-
- return 0;
+ return bch2_quota_reservation_add(c, inode, res, sectors, true);
}
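quota_reserve_range() above is built on the standard bcachefs transaction-retry idiom, which is also why the bare -EINTR checks elsewhere in this patch become bch2_err_matches(ret, BCH_ERR_transaction_restart): a restart must loop back to bch2_trans_begin() rather than escape to the caller. Schematically, with do_stuff() a hypothetical stand-in for the transactional work:

int example_op(struct bch_fs *c)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	ret = do_stuff(&trans);		/* hypothetical transactional work */

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;		/* locks were dropped; redo the pass */

	bch2_trans_exit(&trans);
	return ret;
}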
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
@@ -3466,6 +3369,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
struct bch_inode_info *src = file_bch_inode(file_src);
struct bch_inode_info *dst = file_bch_inode(file_dst);
struct bch_fs *c = src->v.i_sb->s_fs_info;
+ struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
u64 aligned_len;
loff_t ret = 0;
@@ -3486,8 +3390,6 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
- file_update_time(file_dst);
-
inode_dio_wait(&src->v);
inode_dio_wait(&dst->v);
@@ -3504,6 +3406,13 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
if (ret)
goto err;
+ ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+ (pos_dst + aligned_len) >> 9);
+ if (ret)
+ goto err;
+
+ file_update_time(file_dst);
+
mark_pagecache_unallocated(src, pos_src >> 9,
(pos_src + aligned_len) >> 9);
@@ -3520,8 +3429,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
*/
ret = min((u64) ret << 9, (u64) len);
- /* XXX get a quota reservation */
- i_sectors_acct(c, dst, NULL, i_sectors_delta);
+ i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
spin_lock(&dst->v.i_lock);
if (pos_dst + ret > dst->v.i_size)
@@ -3530,18 +3438,19 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
IS_SYNC(file_inode(file_dst)))
- ret = bch2_flush_inode(c, inode_inum(dst));
+ ret = bch2_flush_inode(c, dst);
err:
+ bch2_quota_reservation_put(c, dst, &quota_res);
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
- return ret;
+ return bch2_err_class(ret);
}
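Note the reordering in bch2_remap_file_range(): the quota reservation (resolving the old "XXX get a quota reservation" comment) is taken first, and file_update_time() moves after it, so timestamps are only touched once the operation can no longer fail for quota reasons; the err path then drops whatever was reserved. As throughout this file, byte offsets become 512-byte sectors via a 9-bit shift:

/* byte range [pos_dst, pos_dst + aligned_len) as 512-byte sectors */
u64 start_sector = pos_dst >> 9;		/* e.g. 8192 >> 9 == 16 */
u64 end_sector   = (pos_dst + aligned_len) >> 9;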
/* fseek: */
-static int page_data_offset(struct page *page, unsigned offset)
+static int folio_data_offset(struct folio *folio, unsigned offset)
{
- struct bch_page_state *s = bch2_page_state(page);
+ struct bch_page_state *s = bch2_page_state(&folio->page);
unsigned i;
if (s)
@@ -3556,36 +3465,38 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
loff_t start_offset,
loff_t end_offset)
{
- struct address_space *mapping = vinode->i_mapping;
- struct page *page;
+ struct folio_batch fbatch;
pgoff_t start_index = start_offset >> PAGE_SHIFT;
pgoff_t end_index = end_offset >> PAGE_SHIFT;
pgoff_t index = start_index;
+ unsigned i;
loff_t ret;
int offset;
- while (index <= end_index) {
- if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
- lock_page(page);
+ folio_batch_init(&fbatch);
- offset = page_data_offset(page,
- page->index == start_index
+ while (filemap_get_folios(vinode->i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ folio_lock(folio);
+ offset = folio_data_offset(folio,
+ folio->index == start_index
? start_offset & (PAGE_SIZE - 1)
: 0);
if (offset >= 0) {
- ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
+ ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
offset,
start_offset, end_offset);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
return ret;
}
-
- unlock_page(page);
- put_page(page);
- } else {
- break;
+ folio_unlock(folio);
}
+ folio_batch_release(&fbatch);
+ cond_resched();
}
return end_offset;
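The rewritten bch2_seek_pagecache_data() swaps the one-page find_get_pages_range() loop for the folio batch API. The skeleton of that pattern, with the per-folio inspection elided (filemap_get_folios() advances the index past the folios it returns, and folio_batch_release() drops their references):

struct folio_batch fbatch;
pgoff_t index = start_index;
unsigned i;

folio_batch_init(&fbatch);

while (filemap_get_folios(mapping, &index, end_index, &fbatch)) {
	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i];

		folio_lock(folio);
		/* ... examine folio state ... */
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();		/* batches can cover a lot of pagecache */
}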
@@ -3615,11 +3526,11 @@ retry:
if (ret)
goto err;
- for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
- if (k.k->p.inode != inode->v.i_ino) {
- break;
- } else if (bkey_extent_is_data(k.k)) {
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ POS(inode->v.i_ino, U64_MAX),
+ 0, k, ret) {
+ if (bkey_extent_is_data(k.k)) {
next_data = max(offset, bkey_start_offset(k.k) << 9);
break;
} else if (k.k->p.offset >> 9 > isize)
@@ -3627,7 +3538,7 @@ retry:
}
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
@@ -3742,7 +3653,7 @@ retry:
}
bch2_trans_iter_exit(&trans, &iter);
err:
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
@@ -3757,22 +3668,31 @@ err:
loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
+ loff_t ret;
+
switch (whence) {
case SEEK_SET:
case SEEK_CUR:
case SEEK_END:
- return generic_file_llseek(file, offset, whence);
+ ret = generic_file_llseek(file, offset, whence);
+ break;
case SEEK_DATA:
- return bch2_seek_data(file, offset);
+ ret = bch2_seek_data(file, offset);
+ break;
case SEEK_HOLE:
- return bch2_seek_hole(file, offset);
+ ret = bch2_seek_hole(file, offset);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return -EINVAL;
+ return bch2_err_class(ret);
}
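Restructuring bch2_llseek() into a single exit lets every return pass through bch2_err_class(), which maps bcachefs-private error codes back to the standard errno they descend from before they reach the VFS. A rough sketch of the idea, assuming a parent-code table along the lines of errcode.c:

int bch2_err_class(int err)
{
	unsigned e;

	if (err >= 0)
		return err;

	/* walk a private code up to its standard-errno ancestor */
	e = -err;
	while (e >= BCH_ERR_START && bch2_errcode_parents[e - BCH_ERR_START])
		e = bch2_errcode_parents[e - BCH_ERR_START];

	return -e;
}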
void bch2_fs_fsio_exit(struct bch_fs *c)
{
+ bioset_exit(&c->nocow_flush_bioset);
bioset_exit(&c->dio_write_bioset);
bioset_exit(&c->dio_read_bioset);
bioset_exit(&c->writepage_bioset);
@@ -3786,14 +3706,22 @@ int bch2_fs_fsio_init(struct bch_fs *c)
if (bioset_init(&c->writepage_bioset,
4, offsetof(struct bch_writepage_io, op.wbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->dio_read_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+ if (bioset_init(&c->dio_read_bioset,
4, offsetof(struct dio_read, rbio.bio),
- BIOSET_NEED_BVECS) ||
- bioset_init(&c->dio_write_bioset,
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+ if (bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
BIOSET_NEED_BVECS))
- ret = -ENOMEM;
+ return -BCH_ERR_ENOMEM_dio_write_bioset_init;
+
+ if (bioset_init(&c->nocow_flush_bioset,
+ 1, offsetof(struct nocow_flush, bio), 0))
+ return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
pr_verbose_init(c->opts, "ret %i", ret);
return ret;
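Two things are worth noting in the init/exit changes. The front-pad passed to bioset_init() (offsetof(struct dio_write, op.wbio.bio) and friends) means a bio allocated from the set sits at a fixed offset inside its per-IO wrapper struct, recoverable with container_of(); and splitting the ||-chained initialization into separate checks turns an anonymous -ENOMEM into a named code identifying which bioset failed. Illustrative allocation against the dio_read set, with bdev and nr_vecs as stand-ins for caller context:

struct dio_read *dio =
	container_of(bio_alloc_bioset(bdev, nr_vecs, REQ_OP_READ,
				      GFP_KERNEL, &c->dio_read_bioset),
		     struct dio_read, rbio.bio);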