summaryrefslogtreecommitdiff
path: root/libbcachefs/fs-io.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-07-10 20:31:34 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-07-15 17:36:15 -0400
commitc8bec83e307f28751c433ba1d3f648429fb5a34c (patch)
tree6e70e0cf8f25117f706214d86a0689ee8495dca0 /libbcachefs/fs-io.c
parent1c156d5c4667c1c2e2949b229dfef75696196d35 (diff)
Update bcachefs sources to e14d7c7195 bcachefs: Compression levels
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'libbcachefs/fs-io.c')
-rw-r--r--  libbcachefs/fs-io.c  85
1 file changed, 62 insertions(+), 23 deletions(-)
diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index a8060052..6b691b2b 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -35,6 +35,8 @@
#include <trace/events/writeback.h>
+static void bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned);
+
struct folio_vec {
struct folio *fv_folio;
size_t fv_offset;
@@ -1972,7 +1974,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
darray_for_each(folios, fi) {
struct folio *f = *fi;
u64 f_len = min(end, folio_end_pos(f)) - f_pos;
- unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
+ unsigned f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
if (!f_copied) {
folios_trunc(&folios, fi);
@@ -3373,6 +3375,8 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
struct quota_res quota_res = { 0 };
struct bkey_s_c k;
unsigned sectors;
+ bool is_allocation;
+ u64 hole_start, hole_end;
u32 snapshot;
bch2_trans_begin(&trans);
@@ -3388,6 +3392,10 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if ((ret = bkey_err(k)))
goto bkey_err;
+ hole_start = iter.pos.offset;
+ hole_end = bpos_min(k.k->p, end_pos).offset;
+ is_allocation = bkey_extent_is_allocation(k.k);
+
/* already reserved */
if (bkey_extent_is_reservation(k) &&
bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
@@ -3401,17 +3409,26 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
continue;
}
- /*
- * XXX: for nocow mode, we should promote shared extents to
- * unshared here
- */
+ if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+ ret = drop_locks_do(&trans,
+ (bch2_clamp_data_hole(&inode->v,
+ &hole_start,
+ &hole_end,
+ opts.data_replicas), 0));
+ bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+
+ if (ret)
+ goto bkey_err;
+
+ if (hole_start == hole_end)
+ continue;
+ }
- sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
+ sectors = hole_end - hole_start;
- if (!bkey_extent_is_allocation(k.k)) {
+ if (!is_allocation) {
ret = bch2_quota_reservation_add(c, inode,
- &quota_res,
- sectors, true);
+ &quota_res, sectors, true);
if (unlikely(ret))
goto bkey_err;
}
@@ -3423,15 +3440,15 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
goto bkey_err;
i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
+
+ drop_locks_do(&trans,
+ (mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
bkey_err:
bch2_quota_reservation_put(c, inode, &quota_res);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
}
- bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
- mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
-
if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
@@ -3679,14 +3696,16 @@ err:
/* fseek: */
-static int folio_data_offset(struct folio *folio, loff_t pos)
+static int folio_data_offset(struct folio *folio, loff_t pos,
+ unsigned min_replicas)
{
struct bch_folio *s = bch2_folio(folio);
unsigned i, sectors = folio_sectors(folio);
if (s)
for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
- if (s->s[i].state >= SECTOR_dirty)
+ if (s->s[i].state >= SECTOR_dirty &&
+ s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
return i << SECTOR_SHIFT;
return -1;
@@ -3694,7 +3713,8 @@ static int folio_data_offset(struct folio *folio, loff_t pos)
static loff_t bch2_seek_pagecache_data(struct inode *vinode,
loff_t start_offset,
- loff_t end_offset)
+ loff_t end_offset,
+ unsigned min_replicas)
{
struct folio_batch fbatch;
pgoff_t start_index = start_offset >> PAGE_SHIFT;
@@ -3713,7 +3733,8 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
folio_lock(folio);
offset = folio_data_offset(folio,
- max(folio_pos(folio), start_offset));
+ max(folio_pos(folio), start_offset),
+ min_replicas);
if (offset >= 0) {
ret = clamp(folio_pos(folio) + offset,
start_offset, end_offset);
@@ -3775,7 +3796,7 @@ err:
if (next_data > offset)
next_data = bch2_seek_pagecache_data(&inode->v,
- offset, next_data);
+ offset, next_data, 0);
if (next_data >= isize)
return -ENXIO;
@@ -3783,7 +3804,8 @@ err:
return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}
-static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
+static bool folio_hole_offset(struct address_space *mapping, loff_t *offset,
+ unsigned min_replicas)
{
struct folio *folio;
struct bch_folio *s;
@@ -3800,7 +3822,8 @@ static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
sectors = folio_sectors(folio);
for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
- if (s->s[i].state < SECTOR_dirty) {
+ if (s->s[i].state < SECTOR_dirty ||
+ s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
*offset = max(*offset,
folio_pos(folio) + (i << SECTOR_SHIFT));
goto unlock;
@@ -3815,18 +3838,34 @@ unlock:
static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
loff_t start_offset,
- loff_t end_offset)
+ loff_t end_offset,
+ unsigned min_replicas)
{
struct address_space *mapping = vinode->i_mapping;
loff_t offset = start_offset;
while (offset < end_offset &&
- !folio_hole_offset(mapping, &offset))
+ !folio_hole_offset(mapping, &offset, min_replicas))
;
return min(offset, end_offset);
}
+static void bch2_clamp_data_hole(struct inode *inode,
+ u64 *hole_start,
+ u64 *hole_end,
+ unsigned min_replicas)
+{
+ *hole_start = bch2_seek_pagecache_hole(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+
+ if (*hole_start == *hole_end)
+ return;
+
+ *hole_end = bch2_seek_pagecache_data(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas) >> 9;
+}
+
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
@@ -3856,12 +3895,12 @@ retry:
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE);
+ offset, MAX_LFS_FILESIZE, 0);
break;
} else if (!bkey_extent_is_data(k.k)) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9);
+ k.k->p.offset << 9, 0);
if (next_hole < k.k->p.offset << 9)
break;