summaryrefslogtreecommitdiff
path: root/fs/bcachefs/reflink.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-08-30 15:18:31 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:09:11 -0400
commit67e0dd8f0d8b4bf09098c4692abcb43a20089dff (patch)
tree8ba50f2d86b09cae23a39a02982abff3524e2f45 /fs/bcachefs/reflink.c
parent8f54337dc6825f323f7761c182d98efdd180ce70 (diff)
bcachefs: btree_path
This splits btree_iter into two components: btree_iter is now the externally visible componont, and it points to a btree_path which is now reference counted. This means we no longer have to clone iterators up front if they might be mutated - btree_path can be shared by multiple iterators, and cloned if an iterator would mutate a shared btree_path. This will help us use iterators more efficiently, as well as slimming down the main long lived state in btree_trans, and significantly cleans up the logic for iterator lifetimes. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/reflink.c')
-rw-r--r--fs/bcachefs/reflink.c76
1 file changed, 37 insertions, 39 deletions
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 3d9c5c5b0eba..576cfbccf5b5 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -116,7 +116,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
struct bkey_i *orig)
{
struct bch_fs *c = trans->c;
- struct btree_iter *reflink_iter;
+ struct btree_iter reflink_iter = { NULL };
struct bkey_s_c k;
struct bkey_i *r_v;
struct bkey_i_reflink_p *r_p;
@@ -129,8 +129,8 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
for_each_btree_key(trans, reflink_iter, BTREE_ID_reflink,
POS(0, c->reflink_hint),
BTREE_ITER_INTENT|BTREE_ITER_SLOTS, k, ret) {
- if (reflink_iter->pos.inode) {
- bch2_btree_iter_set_pos(reflink_iter, POS_MIN);
+ if (reflink_iter.pos.inode) {
+ bch2_btree_iter_set_pos(&reflink_iter, POS_MIN);
continue;
}
@@ -142,7 +142,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
goto err;
/* rewind iter to start of hole, if necessary: */
- bch2_btree_iter_set_pos_to_extent_start(reflink_iter);
+ bch2_btree_iter_set_pos_to_extent_start(&reflink_iter);
r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
ret = PTR_ERR_OR_ZERO(r_v);
@@ -151,7 +151,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bkey_init(&r_v->k);
r_v->k.type = bkey_type_to_indirect(&orig->k);
- r_v->k.p = reflink_iter->pos;
+ r_v->k.p = reflink_iter.pos;
bch2_key_resize(&r_v->k, orig->k.size);
r_v->k.version = orig->k.version;
@@ -161,7 +161,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
*refcount = 0;
memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));
- ret = bch2_trans_update(trans, reflink_iter, r_v, 0);
+ ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
if (ret)
goto err;
@@ -172,9 +172,8 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, 0);
err:
- if (!IS_ERR(reflink_iter))
- c->reflink_hint = reflink_iter->pos.offset;
- bch2_trans_iter_put(trans, reflink_iter);
+ c->reflink_hint = reflink_iter.pos.offset;
+ bch2_trans_iter_exit(trans, &reflink_iter);
return ret;
}
@@ -184,7 +183,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
struct bkey_s_c k;
int ret;
- for_each_btree_key_continue(iter, 0, k, ret) {
+ for_each_btree_key_continue(*iter, 0, k, ret) {
if (bkey_cmp(iter->pos, end) >= 0)
break;
@@ -203,7 +202,7 @@ s64 bch2_remap_range(struct bch_fs *c,
u64 new_i_size, s64 *i_sectors_delta)
{
struct btree_trans trans;
- struct btree_iter *dst_iter, *src_iter;
+ struct btree_iter dst_iter, src_iter;
struct bkey_s_c src_k;
struct bkey_buf new_dst, new_src;
struct bpos dst_end = dst_start, src_end = src_start;
@@ -223,13 +222,13 @@ s64 bch2_remap_range(struct bch_fs *c,
bch2_bkey_buf_init(&new_src);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096);
- src_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, src_start,
- BTREE_ITER_INTENT);
- dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, dst_start,
- BTREE_ITER_INTENT);
+ bch2_trans_iter_init(&trans, &src_iter, BTREE_ID_extents, src_start,
+ BTREE_ITER_INTENT);
+ bch2_trans_iter_init(&trans, &dst_iter, BTREE_ID_extents, dst_start,
+ BTREE_ITER_INTENT);
while ((ret == 0 || ret == -EINTR) &&
- bkey_cmp(dst_iter->pos, dst_end) < 0) {
+ bkey_cmp(dst_iter.pos, dst_end) < 0) {
struct disk_reservation disk_res = { 0 };
bch2_trans_begin(&trans);
@@ -239,31 +238,31 @@ s64 bch2_remap_range(struct bch_fs *c,
break;
}
- dst_done = dst_iter->pos.offset - dst_start.offset;
+ dst_done = dst_iter.pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
- bch2_btree_iter_set_pos(src_iter, src_want);
+ bch2_btree_iter_set_pos(&src_iter, src_want);
- src_k = get_next_src(src_iter, src_end);
+ src_k = get_next_src(&src_iter, src_end);
ret = bkey_err(src_k);
if (ret)
continue;
- if (bkey_cmp(src_want, src_iter->pos) < 0) {
- ret = bch2_fpunch_at(&trans, dst_iter,
+ if (bkey_cmp(src_want, src_iter.pos) < 0) {
+ ret = bch2_fpunch_at(&trans, &dst_iter,
bpos_min(dst_end,
- POS(dst_iter->pos.inode, dst_iter->pos.offset +
- src_iter->pos.offset - src_want.offset)),
+ POS(dst_iter.pos.inode, dst_iter.pos.offset +
+ src_iter.pos.offset - src_want.offset)),
journal_seq, i_sectors_delta);
continue;
}
if (src_k.k->type != KEY_TYPE_reflink_p) {
- bch2_btree_iter_set_pos_to_extent_start(src_iter);
+ bch2_btree_iter_set_pos_to_extent_start(&src_iter);
bch2_bkey_buf_reassemble(&new_src, c, src_k);
src_k = bkey_i_to_s_c(new_src.k);
- ret = bch2_make_extent_indirect(&trans, src_iter,
+ ret = bch2_make_extent_indirect(&trans, &src_iter,
new_src.k);
if (ret)
continue;
@@ -286,43 +285,42 @@ s64 bch2_remap_range(struct bch_fs *c,
BUG();
}
- new_dst.k->k.p = dst_iter->pos;
+ new_dst.k->k.p = dst_iter.pos;
bch2_key_resize(&new_dst.k->k,
min(src_k.k->p.offset - src_want.offset,
- dst_end.offset - dst_iter->pos.offset));
- ret = bch2_extent_update(&trans, dst_iter, new_dst.k,
+ dst_end.offset - dst_iter.pos.offset));
+ ret = bch2_extent_update(&trans, &dst_iter, new_dst.k,
&disk_res, journal_seq,
new_i_size, i_sectors_delta,
true);
bch2_disk_reservation_put(c, &disk_res);
}
- bch2_trans_iter_put(&trans, dst_iter);
- bch2_trans_iter_put(&trans, src_iter);
+ bch2_trans_iter_exit(&trans, &dst_iter);
+ bch2_trans_iter_exit(&trans, &src_iter);
- BUG_ON(!ret && bkey_cmp(dst_iter->pos, dst_end));
- BUG_ON(bkey_cmp(dst_iter->pos, dst_end) > 0);
+ BUG_ON(!ret && bkey_cmp(dst_iter.pos, dst_end));
+ BUG_ON(bkey_cmp(dst_iter.pos, dst_end) > 0);
- dst_done = dst_iter->pos.offset - dst_start.offset;
- new_i_size = min(dst_iter->pos.offset << 9, new_i_size);
+ dst_done = dst_iter.pos.offset - dst_start.offset;
+ new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
do {
struct bch_inode_unpacked inode_u;
- struct btree_iter *inode_iter;
+ struct btree_iter inode_iter = { NULL };
bch2_trans_begin(&trans);
- inode_iter = bch2_inode_peek(&trans, &inode_u,
+ ret2 = bch2_inode_peek(&trans, &inode_iter, &inode_u,
dst_start.inode, BTREE_ITER_INTENT);
- ret2 = PTR_ERR_OR_ZERO(inode_iter);
if (!ret2 &&
inode_u.bi_size < new_i_size) {
inode_u.bi_size = new_i_size;
- ret2 = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
+ ret2 = bch2_inode_write(&trans, &inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL, journal_seq, 0);
}
- bch2_trans_iter_put(&trans, inode_iter);
+ bch2_trans_iter_exit(&trans, &inode_iter);
} while (ret2 == -EINTR);
ret = bch2_trans_exit(&trans) ?: ret;