Diffstat (limited to 'fs')
-rw-r--r--  fs/bcachefs/acl.c                    |  14
-rw-r--r--  fs/bcachefs/alloc_background.c       | 152
-rw-r--r--  fs/bcachefs/alloc_foreground.c       |  14
-rw-r--r--  fs/bcachefs/backpointers.c           |  39
-rw-r--r--  fs/bcachefs/bcachefs.h               |   7
-rw-r--r--  fs/bcachefs/btree_cache.c            |   2
-rw-r--r--  fs/bcachefs/btree_gc.c               |  40
-rw-r--r--  fs/bcachefs/btree_io.c               |  26
-rw-r--r--  fs/bcachefs/btree_iter.c             | 237
-rw-r--r--  fs/bcachefs/btree_iter.h             | 278
-rw-r--r--  fs/bcachefs/btree_key_cache.c        |  60
-rw-r--r--  fs/bcachefs/btree_node_scan.c        |  23
-rw-r--r--  fs/bcachefs/btree_trans_commit.c     |  29
-rw-r--r--  fs/bcachefs/btree_types.h            |  15
-rw-r--r--  fs/bcachefs/btree_update.c           | 113
-rw-r--r--  fs/bcachefs/btree_update.h           |   4
-rw-r--r--  fs/bcachefs/btree_update_interior.c  |  67
-rw-r--r--  fs/bcachefs/btree_write_buffer.c     |  23
-rw-r--r--  fs/bcachefs/buckets.c                |   8
-rw-r--r--  fs/bcachefs/data_update.c            |  14
-rw-r--r--  fs/bcachefs/dirent.c                 |  39
-rw-r--r--  fs/bcachefs/disk_accounting.c        |   5
-rw-r--r--  fs/bcachefs/ec.c                     |  25
-rw-r--r--  fs/bcachefs/errcode.h                |   8
-rw-r--r--  fs/bcachefs/extent_update.c          |  10
-rw-r--r--  fs/bcachefs/extents.c                |  18
-rw-r--r--  fs/bcachefs/extents.h                |   2
-rw-r--r--  fs/bcachefs/fast_list.h              |   2
-rw-r--r--  fs/bcachefs/fs-io-buffered.c         |  10
-rw-r--r--  fs/bcachefs/fs-io-direct.c           |   4
-rw-r--r--  fs/bcachefs/fs-io-pagecache.c        |   2
-rw-r--r--  fs/bcachefs/fs-io.c                  |  18
-rw-r--r--  fs/bcachefs/fs.c                     |  54
-rw-r--r--  fs/bcachefs/fsck.c                   | 185
-rw-r--r--  fs/bcachefs/inode.c                  |  53
-rw-r--r--  fs/bcachefs/io_misc.c                |  37
-rw-r--r--  fs/bcachefs/io_read.c                |  35
-rw-r--r--  fs/bcachefs/io_read.h                |   4
-rw-r--r--  fs/bcachefs/io_write.c               |  32
-rw-r--r--  fs/bcachefs/journal.c                |  12
-rw-r--r--  fs/bcachefs/journal_io.c             |  17
-rw-r--r--  fs/bcachefs/logged_ops.c             |   2
-rw-r--r--  fs/bcachefs/lru.c                    |  14
-rw-r--r--  fs/bcachefs/migrate.c                |  10
-rw-r--r--  fs/bcachefs/move.c                   | 159
-rw-r--r--  fs/bcachefs/move.h                   |  12
-rw-r--r--  fs/bcachefs/movinggc.c               |   2
-rw-r--r--  fs/bcachefs/namei.c                  | 112
-rw-r--r--  fs/bcachefs/progress.c               |   3
-rw-r--r--  fs/bcachefs/progress.h               |   3
-rw-r--r--  fs/bcachefs/quota.c                  |   4
-rw-r--r--  fs/bcachefs/rebalance.c              |  88
-rw-r--r--  fs/bcachefs/recovery.c               |  19
-rw-r--r--  fs/bcachefs/recovery_passes.c        |   2
-rw-r--r--  fs/bcachefs/recovery_passes.h        |   6
-rw-r--r--  fs/bcachefs/reflink.c                |  52
-rw-r--r--  fs/bcachefs/sb-members_format.h      |   2
-rw-r--r--  fs/bcachefs/snapshot.c               | 155
-rw-r--r--  fs/bcachefs/snapshot.h               |  30
-rw-r--r--  fs/bcachefs/str_hash.c               |  11
-rw-r--r--  fs/bcachefs/str_hash.h               |  24
-rw-r--r--  fs/bcachefs/subvolume.c              |  27
-rw-r--r--  fs/bcachefs/subvolume.h              |  20
-rw-r--r--  fs/bcachefs/super-io.c               |   4
-rw-r--r--  fs/bcachefs/super.c                  |  10
-rw-r--r--  fs/bcachefs/sysfs.c                  |   8
-rw-r--r--  fs/bcachefs/tests.c                  | 146
-rw-r--r--  fs/bcachefs/xattr.c                  |   6
68 files changed, 1356 insertions, 1312 deletions
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 307824d6eccb..3befa1f36e72 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -138,8 +138,8 @@ static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
acl = allocate_dropping_locks(trans, ret,
posix_acl_alloc(count, _gfp));
- if (!acl)
- return ERR_PTR(-ENOMEM);
+ if (!acl && !ret)
+ ret = bch_err_throw(trans->c, ENOMEM_acl);
if (ret) {
kfree(acl);
return ERR_PTR(ret);
@@ -273,7 +273,7 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
- struct btree_iter iter = {};
+ struct btree_iter iter = { NULL };
struct posix_acl *acl = NULL;
if (rcu)
@@ -303,7 +303,7 @@ err:
if (!IS_ERR_OR_NULL(acl))
set_cached_acl(&inode->v, type, acl);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return acl;
}
@@ -343,7 +343,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_iter inode_iter = {};
+ struct btree_iter inode_iter = { NULL };
struct bch_inode_unpacked inode_u;
struct posix_acl *acl;
umode_t mode;
@@ -379,7 +379,7 @@ retry:
ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
btree_err:
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
@@ -431,7 +431,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
*new_acl = acl;
acl = NULL;
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (!IS_ERR_OR_NULL(acl))
kfree(acl);
return ret;
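
The change that dominates this series is already visible in acl.c: bch2_trans_iter_exit(), bch2_btree_iter_peek_slot() and the other iterator helpers now take only the iterator and recover the transaction from iter->trans (set at init time and cleared again on exit, as the btree_iter.c hunks further down show). A minimal sketch of the old versus new calling convention; the caller and the btree/pos arguments here are illustrative, not taken from the tree:

    /* old convention: every helper needed the transaction passed explicitly */
    bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos, 0);
    k = bch2_btree_iter_peek_slot(trans, &iter);
    bch2_trans_iter_exit(trans, &iter);

    /* new convention: the iterator carries its transaction in iter->trans */
    bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos, 0);
    k = bch2_btree_iter_peek_slot(&iter);
    bch2_trans_iter_exit(&iter);        /* also clears iter->trans */
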
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 4c1604fd80f9..1c2cd841e8a0 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -20,6 +20,7 @@
#include "enumerated_ref.h"
#include "error.h"
#include "lru.h"
+#include "progress.h"
#include "recovery.h"
#include "varint.h"
@@ -337,9 +338,10 @@ void bch2_alloc_v4_swab(struct bkey_s k)
}
static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c,
- unsigned dev, const struct bch_alloc_v4 *a)
+ struct bkey_s_c k,
+ const struct bch_alloc_v4 *a)
{
- struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, dev) : NULL;
+ struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, k.k->p.inode) : NULL;
prt_newline(out);
printbuf_indent_add(out, 2);
@@ -348,11 +350,14 @@ static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *
bch2_prt_data_type(out, a->data_type);
prt_newline(out);
prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty);
- prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty);
+ if (bkey_val_bytes(k.k) > offsetof(struct bch_alloc_v4, journal_seq_empty))
+ prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty);
+
prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
- prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
+ if (bkey_val_bytes(k.k) > offsetof(struct bch_alloc_v4, stripe_sectors))
+ prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
prt_printf(out, "stripe %u\n", a->stripe);
prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
@@ -372,12 +377,12 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
struct bch_alloc_v4 _a;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
- __bch2_alloc_v4_to_text(out, c, k.k->p.inode, a);
+ __bch2_alloc_v4_to_text(out, c, k, a);
}
void bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
- __bch2_alloc_v4_to_text(out, c, k.k->p.inode, bkey_s_c_to_alloc_v4(k).v);
+ __bch2_alloc_v4_to_text(out, c, k, bkey_s_c_to_alloc_v4(k).v);
}
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
@@ -385,7 +390,7 @@ void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
if (k.k->type == KEY_TYPE_alloc_v4) {
void *src, *dst;
- *out = *bkey_s_c_to_alloc_v4(k).v;
+ bkey_val_copy(out, bkey_s_c_to_alloc_v4(k));
src = alloc_v4_backpointers(out);
SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
@@ -482,7 +487,7 @@ bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_i
goto err;
return a;
err:
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ERR_PTR(ret);
}
@@ -501,18 +506,18 @@ struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans,
if ((void *) k.v >= trans->mem &&
(void *) k.v < trans->mem + trans->mem_top) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return container_of(bkey_s_c_to_alloc_v4(k).v, struct bkey_i_alloc_v4, v);
}
struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
if (IS_ERR(a)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return a;
}
ret = bch2_trans_update_ip(trans, &iter, &a->k_i, flags, _RET_IP_);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return unlikely(ret) ? ERR_PTR(ret) : a;
}
@@ -635,7 +640,7 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -656,17 +661,17 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
continue;
}
if (k.k->p.offset < ca->mi.first_bucket) {
- bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket));
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
continue;
}
if (k.k->p.offset >= ca->mi.nbuckets) {
- bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -752,7 +757,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
ret = bch2_btree_bit_mod_iter(trans, &iter, set);
fsck_err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -788,7 +793,7 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
g->v.gens[offset] = gen;
ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1039,10 +1044,9 @@ invalid_bucket:
* This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
* extents style btrees, but works on non-extents btrees:
*/
-static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos end, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
- struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
if (bkey_err(k))
return k;
@@ -1053,9 +1057,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct bt
struct btree_iter iter2;
struct bpos next;
- bch2_trans_copy_iter(trans, &iter2, iter);
+ bch2_trans_copy_iter(&iter2, iter);
- struct btree_path *path = btree_iter_path(trans, iter);
+ struct btree_path *path = btree_iter_path(iter->trans, iter);
if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
@@ -1065,9 +1069,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct bt
* btree node min/max is a closed interval, upto takes a half
* open interval:
*/
- k = bch2_btree_iter_peek_max(trans, &iter2, end);
+ k = bch2_btree_iter_peek_max(&iter2, end);
next = iter2.pos;
- bch2_trans_iter_exit(trans, &iter2);
+ bch2_trans_iter_exit(&iter2);
BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
@@ -1107,14 +1111,13 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck
return *ca != NULL;
}
-static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bch_dev **ca, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
+ struct bch_dev **ca, struct bkey *hole)
{
- struct bch_fs *c = trans->c;
+ struct bch_fs *c = iter->trans->c;
struct bkey_s_c k;
again:
- k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole);
+ k = bch2_get_key_or_hole(iter, POS_MAX, hole);
if (bkey_err(k))
return k;
@@ -1127,7 +1130,7 @@ again:
if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(trans, iter, hole_start);
+ bch2_btree_iter_set_pos(iter, hole_start);
goto again;
}
@@ -1168,8 +1171,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
a = bch2_alloc_to_v4(alloc_k, &a_convert);
- bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p);
- k = bch2_btree_iter_peek_slot(trans, discard_iter);
+ bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
+ k = bch2_btree_iter_peek_slot(discard_iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1182,8 +1185,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
return ret;
}
- bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
- k = bch2_btree_iter_peek_slot(trans, freespace_iter);
+ bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
+ k = bch2_btree_iter_peek_slot(freespace_iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1196,8 +1199,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
return ret;
}
- bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
- k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
+ bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+ k = bch2_btree_iter_peek_slot(bucket_gens_iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1246,9 +1249,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
if (!ca->mi.freespace_initialized)
return 0;
- bch2_btree_iter_set_pos(trans, freespace_iter, start);
+ bch2_btree_iter_set_pos(freespace_iter, start);
- k = bch2_btree_iter_peek_slot(trans, freespace_iter);
+ k = bch2_btree_iter_peek_slot(freespace_iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1294,9 +1297,9 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
unsigned i, gens_offset, gens_end_offset;
int ret;
- bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+ bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
- k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
+ k = bch2_btree_iter_peek_slot(bucket_gens_iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1360,7 +1363,7 @@ static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct
ret = k.k->type != KEY_TYPE_set
? __bch2_check_discard_freespace_key(trans, &iter, &gen, FSCK_ERR_SILENT)
: 0;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1431,8 +1434,8 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i
*gen = a->gen;
out:
fsck_err:
- bch2_set_btree_iter_dontneed(trans, &alloc_iter);
- bch2_trans_iter_exit(trans, &alloc_iter);
+ bch2_set_btree_iter_dontneed(&alloc_iter);
+ bch2_trans_iter_exit(&alloc_iter);
return ret;
delete:
if (!async_repair) {
@@ -1549,6 +1552,9 @@ int bch2_check_alloc_info(struct bch_fs *c)
struct bkey_s_c k;
int ret = 0;
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc));
+
CLASS(btree_trans, trans)(c);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_prefetch);
@@ -1564,7 +1570,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
bch2_trans_begin(trans);
- k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole);
+ k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -1572,6 +1578,8 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (!k.k)
break;
+ progress_update_iter(trans, &progress, &iter);
+
if (k.k->type) {
next = bpos_nosnap_successor(k.k->p);
@@ -1602,17 +1610,17 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(trans, &iter, next);
+ bch2_btree_iter_set_pos(&iter, next);
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
}
- bch2_trans_iter_exit(trans, &bucket_gens_iter);
- bch2_trans_iter_exit(trans, &freespace_iter);
- bch2_trans_iter_exit(trans, &discard_iter);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&bucket_gens_iter);
+ bch2_trans_iter_exit(&freespace_iter);
+ bch2_trans_iter_exit(&discard_iter);
+ bch2_trans_iter_exit(&iter);
bch2_dev_put(ca);
ca = NULL;
@@ -1630,7 +1638,7 @@ bkey_err:
BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(trans, &iter);
+ k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
@@ -1647,9 +1655,9 @@ bkey_err:
break;
}
- bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos));
+ bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -1673,7 +1681,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
CLASS(printbuf, buf)();
int ret;
- alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
+ alloc_k = bch2_btree_iter_peek(alloc_iter);
if (!alloc_k.k)
return 0;
@@ -1732,12 +1740,16 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc));
+
CLASS(btree_trans, trans)(c);
int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)) ?:
- bch2_check_stripe_to_lru_refs(trans);
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
+ bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed);
+ }))?: bch2_check_stripe_to_lru_refs(trans);
bch2_bkey_buf_exit(&last_flushed, c);
return ret;
@@ -1785,7 +1797,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bpos pos = need_discard_iter->pos;
- struct btree_iter iter = {};
+ struct btree_iter iter = { NULL };
struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
CLASS(printbuf, buf)();
@@ -1878,7 +1890,7 @@ fsck_err:
discard_in_flight_remove(ca, iter.pos.offset);
if (!ret)
s->seen++;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1958,7 +1970,7 @@ static int bch2_do_discards_fast_one(struct btree_trans *trans,
ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true);
out:
fsck_err:
- bch2_trans_iter_exit(trans, &need_discard_iter);
+ bch2_trans_iter_exit(&need_discard_iter);
return ret;
}
@@ -2051,7 +2063,7 @@ static int invalidate_one_bp(struct btree_trans *trans,
bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx);
err:
- bch2_trans_iter_exit(trans, &extent_iter);
+ bch2_trans_iter_exit(&extent_iter);
return ret;
}
@@ -2152,7 +2164,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
--*nr_to_invalidate;
out:
fsck_err:
- bch2_trans_iter_exit(trans, &alloc_iter);
+ bch2_trans_iter_exit(&alloc_iter);
return ret;
}
@@ -2161,9 +2173,9 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter
{
struct bkey_s_c k;
again:
- k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+ k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
if (!k.k && !*wrapped) {
- bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0));
+ bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
*wrapped = true;
goto again;
}
@@ -2213,9 +2225,9 @@ restart_err:
if (ret)
break;
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
err:
bch2_bkey_buf_exit(&last_flushed, c);
enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
@@ -2281,7 +2293,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
break;
}
- k = bch2_get_key_or_hole(trans, &iter, end, &hole);
+ k = bch2_get_key_or_hole(&iter, end, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -2300,7 +2312,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
} else {
struct bkey_i *freespace;
@@ -2320,7 +2332,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(trans, &iter, k.k->p);
+ bch2_btree_iter_set_pos(&iter, k.k->p);
}
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -2329,7 +2341,7 @@ bkey_err:
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret < 0) {
bch_err_msg(ca, ret, "initializing free space");
@@ -2433,7 +2445,7 @@ static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index fd1415524e46..70895afc0d0d 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -321,7 +321,7 @@ again:
bucket = sector_to_bucket(ca,
round_up(bucket_to_sector(ca, bucket) + 1,
1ULL << ca->mi.btree_bitmap_shift));
- bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket));
+ bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
req->counters.buckets_seen++;
req->counters.skipped_mi_btree_bitmap++;
continue;
@@ -348,12 +348,12 @@ again:
? __try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl)
: NULL;
next:
- bch2_set_btree_iter_dontneed(trans, &citer);
- bch2_trans_iter_exit(trans, &citer);
+ bch2_set_btree_iter_dontneed(&citer);
+ bch2_trans_iter_exit(&citer);
if (ob)
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
alloc_cursor = iter.pos.offset;
@@ -409,7 +409,7 @@ again:
1ULL << ca->mi.btree_bitmap_shift));
alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));
- bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor));
+ bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
req->counters.skipped_mi_btree_bitmap++;
goto next;
}
@@ -418,7 +418,7 @@ again:
if (ob) {
if (!IS_ERR(ob))
*dev_alloc_cursor = iter.pos.offset;
- bch2_set_btree_iter_dontneed(trans, &iter);
+ bch2_set_btree_iter_dontneed(&iter);
break;
}
@@ -430,7 +430,7 @@ next:
break;
}
fail:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
BUG_ON(ob && ret);
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index bd26ab3e6812..42c321d42721 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -180,7 +180,7 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0);
err:
- bch2_trans_iter_exit(trans, &bp_iter);
+ bch2_trans_iter_exit(&bp_iter);
return ret;
}
@@ -282,7 +282,7 @@ static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans,
0,
bp.v->level - 1,
0);
- struct btree *b = bch2_btree_iter_peek_node(trans, iter);
+ struct btree *b = bch2_btree_iter_peek_node(iter);
if (IS_ERR_OR_NULL(b))
goto err;
@@ -300,7 +300,7 @@ static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans,
b = ret ? ERR_PTR(ret) : NULL;
}
err:
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return b;
}
@@ -322,9 +322,9 @@ static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans,
0,
bp.v->level,
iter_flags);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
if (bkey_err(k)) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return k;
}
@@ -344,7 +344,7 @@ static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans,
extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
return k;
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
if (!bp.v->level) {
int ret = backpointer_target_not_found(trans, bp, k, last_flushed, commit);
@@ -384,7 +384,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
return 0;
struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter = {};
+ struct btree_iter alloc_iter = { NULL };
struct bkey_s_c alloc_k;
CLASS(printbuf, buf)();
int ret = 0;
@@ -420,7 +420,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
}
out:
fsck_err:
- bch2_trans_iter_exit(trans, &alloc_iter);
+ bch2_trans_iter_exit(&alloc_iter);
return ret;
}
@@ -559,8 +559,8 @@ static int check_bp_exists(struct btree_trans *trans,
out:
err:
fsck_err:
- bch2_trans_iter_exit(trans, &other_extent_iter);
- bch2_trans_iter_exit(trans, &bp_iter);
+ bch2_trans_iter_exit(&other_extent_iter);
+ bch2_trans_iter_exit(&bp_iter);
return ret;
check_existing_bp:
/* Do we have a backpointer for a different extent? */
@@ -720,13 +720,13 @@ static int check_btree_root_to_backpointers(struct btree_trans *trans,
retry:
bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
- b = bch2_btree_iter_peek_node(trans, &iter);
+ b = bch2_btree_iter_peek_node(&iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
if (b != btree_node_root(c, b)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
goto retry;
}
@@ -735,7 +735,7 @@ retry:
k = bkey_i_to_s_c(&b->key);
ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -835,6 +835,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}));
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -924,7 +925,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
sectors[alloc_counter] += bp.v->bucket_len;
};
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -1016,7 +1017,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k,
{
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -1024,7 +1025,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k,
if (b)
bch2_node_pin(trans->c, b);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1060,6 +1061,7 @@ static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans,
bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, path->level - 1);
}));
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -1089,6 +1091,7 @@ static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans,
ret;
}));
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -1179,7 +1182,7 @@ static int check_bucket_backpointer_pos_mismatch(struct btree_trans *trans,
return ret;
ret = check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed);
- bch2_trans_iter_exit(trans, &alloc_iter);
+ bch2_trans_iter_exit(&alloc_iter);
return ret;
}
@@ -1239,7 +1242,7 @@ static int check_one_backpointer(struct btree_trans *trans,
if (ret)
return ret;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 8a6f886b5bf2..45c15bdaa6f4 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -1277,4 +1277,11 @@ static inline int bch2_fs_casefold_enabled(struct bch_fs *c)
return 0;
}
+static inline const char *strip_bch2(const char *msg)
+{
+ if (!strncmp("bch2_", msg, 5))
+ return msg + 5;
+ return msg;
+}
+
#endif /* _BCACHEFS_H */
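
The new strip_bch2() helper in bcachefs.h trims the "bch2_" prefix from a function name before it is printed, keeping log and error messages shorter. Its callers are outside the hunks shown here; the snippet below is a hypothetical call site using the existing bch_err() macro and bch2_err_str(), with __func__ as the typical argument:

    static void bch2_example_report(struct bch_fs *c, int ret)
    {
            /* __func__ == "bch2_example_report", printed as "example_report" */
            bch_err(c, "%s(): error %s", strip_bch2(__func__), bch2_err_str(ret));
    }
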
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 23ed7393f07f..25b01e750880 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -511,7 +511,7 @@ restart:
if (btree_node_accessed(b)) {
clear_btree_node_accessed(b);
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
- --touched;;
+ --touched;
} else if (!btree_node_reclaim(c, b)) {
__bch2_btree_node_hash_remove(bc, b);
__btree_node_data_free(b);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 34cb8a4324dc..ce3c7750a922 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -44,10 +44,6 @@
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
-#define DROP_THIS_NODE 10
-#define DROP_PREV_NODE 11
-#define DID_FILL_FROM_SCAN 12
-
/*
* Returns true if it's a btree we can easily reconstruct, or otherwise won't
* cause data loss if it's missing:
@@ -252,7 +248,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
return ret;
*pulled_from_scan = cur->data->min_key;
- ret = DID_FILL_FROM_SCAN;
+ ret = bch_err_throw(c, topology_repair_did_fill_from_scan);
} else {
if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
"btree node with incorrect min_key%s", buf.buf))
@@ -263,7 +259,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_next_node,
"btree node overwritten by next node%s", buf.buf))
- ret = DROP_PREV_NODE;
+ ret = bch_err_throw(c, topology_repair_drop_prev_node);
} else {
if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
"btree node with incorrect max_key%s", buf.buf))
@@ -274,7 +270,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_prev_node,
"btree node overwritten by prev node%s", buf.buf))
- ret = DROP_THIS_NODE;
+ ret = bch_err_throw(c, topology_repair_drop_this_node);
} else {
if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
"btree node with incorrect min_key%s", buf.buf))
@@ -314,7 +310,7 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
return ret;
*pulled_from_scan = b->key.k.p;
- ret = DID_FILL_FROM_SCAN;
+ ret = bch_err_throw(c, topology_repair_did_fill_from_scan);
} else {
ret = set_node_max(c, child, b->key.k.p);
}
@@ -391,15 +387,15 @@ again:
ret = lockrestart_do(trans,
btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan));
- if (ret < 0)
+ if (ret && !bch2_err_matches(ret, BCH_ERR_topology_repair))
goto err;
- if (ret == DID_FILL_FROM_SCAN) {
+ if (bch2_err_matches(ret, BCH_ERR_topology_repair_did_fill_from_scan)) {
new_pass = true;
ret = 0;
}
- if (ret == DROP_THIS_NODE) {
+ if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
six_unlock_read(&cur->c.lock);
bch2_btree_node_evict(trans, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
@@ -414,7 +410,7 @@ again:
six_unlock_read(&prev->c.lock);
prev = NULL;
- if (ret == DROP_PREV_NODE) {
+ if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_prev_node)) {
bch_info(c, "dropped prev node");
bch2_btree_node_evict(trans, prev_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
@@ -436,7 +432,7 @@ again:
BUG_ON(cur);
ret = lockrestart_do(trans,
btree_repair_node_end(trans, b, prev, pulled_from_scan));
- if (ret == DID_FILL_FROM_SCAN) {
+ if (bch2_err_matches(ret, BCH_ERR_topology_repair_did_fill_from_scan)) {
new_pass = true;
ret = 0;
}
@@ -477,7 +473,7 @@ again:
six_unlock_read(&cur->c.lock);
cur = NULL;
- if (ret == DROP_THIS_NODE) {
+ if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
bch2_btree_node_evict(trans, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
@@ -504,7 +500,7 @@ again:
if (mustfix_fsck_err_on(!have_child,
c, btree_node_topology_interior_node_empty,
"empty interior btree node at %s", buf.buf))
- ret = DROP_THIS_NODE;
+ ret = bch_err_throw(c, topology_repair_drop_this_node);
err:
fsck_err:
if (!IS_ERR_OR_NULL(prev))
@@ -521,7 +517,8 @@ fsck_err:
bch2_bkey_buf_exit(&prev_k, c);
bch2_bkey_buf_exit(&cur_k, c);
- bch_err_fn(c, ret);
+ if (!bch2_err_matches(ret, BCH_ERR_topology_repair))
+ bch_err_fn(c, ret);
return ret;
}
@@ -592,7 +589,7 @@ recover:
ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
six_unlock_read(&b->c.lock);
- if (ret == DROP_THIS_NODE) {
+ if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
scoped_guard(mutex, &c->btree_cache.lock)
bch2_btree_node_hash_remove(&c->btree_cache, b);
@@ -716,6 +713,7 @@ static int bch2_gc_btree(struct btree_trans *trans,
gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
}));
+ bch2_trans_iter_exit(&iter);
if (ret)
goto err;
}
@@ -728,13 +726,13 @@ retry_root:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
0, bch2_btree_id_root(c, btree)->b->c.level, 0);
- struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err_root;
if (b != btree_node_root(c, b)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
goto retry_root;
}
@@ -742,7 +740,7 @@ retry_root:
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
err_root:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
err:
bch_err_fn(c, ret);
@@ -1231,7 +1229,7 @@ int bch2_gc_gens(struct bch_fs *c)
BCH_TRANS_COMMIT_no_enospc, ({
ca = bch2_dev_iterate(c, ca, k.k->p.inode);
if (!ca) {
- bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
continue;
}
bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
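
btree_gc.c drops its file-local DROP_THIS_NODE / DROP_PREV_NODE / DID_FILL_FROM_SCAN return codes in favour of private error codes thrown with bch_err_throw() and tested with bch2_err_matches(); the errcode.h side of this appears in the diffstat but not in the hunks shown here. A sketch of how a caller distinguishes a repair verdict from a real error under the new scheme, following the pattern in bch2_btree_repair_topology_recurse() above:

    int ret = lockrestart_do(trans,
            btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan));

    /* anything outside the topology_repair class is a real error */
    if (ret && !bch2_err_matches(ret, BCH_ERR_topology_repair))
            goto err;

    if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {
            /* act on the verdict, then clear it so it isn't reported */
            ret = 0;
    }
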
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index bd86dd7151a1..8a03cd75a64f 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1405,10 +1405,8 @@ static void btree_node_read_work(struct work_struct *work)
ret = bch2_bkey_pick_read_device(c,
bkey_i_to_s_c(&b->key),
&failed, &rb->pick, -1);
- if (ret <= 0) {
- set_btree_node_read_error(b);
+ if (ret <= 0)
break;
- }
ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
rb->have_ioref = ca != NULL;
@@ -1442,27 +1440,21 @@ start:
bch2_maybe_corrupt_bio(bio, bch2_btree_read_corrupt_ratio);
ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf);
- if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
- ret == -BCH_ERR_btree_node_read_err_must_retry)
- continue;
-
- if (ret)
- set_btree_node_read_error(b);
-
- break;
+ if (ret != -BCH_ERR_btree_node_read_err_want_retry &&
+ ret != -BCH_ERR_btree_node_read_err_must_retry)
+ break;
}
bch2_io_failures_to_text(&buf, c, &failed);
- if (btree_node_read_error(b))
- bch2_btree_lost_data(c, &buf, b->c.btree_id);
-
/*
* only print retry success if we read from a replica with no errors
*/
- if (btree_node_read_error(b))
+ if (ret) {
+ set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, &buf, b->c.btree_id);
prt_printf(&buf, "ret %s", bch2_err_str(ret));
- else if (failed.nr) {
+ } else if (failed.nr) {
if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev))
prt_printf(&buf, "retry success");
else
@@ -2019,7 +2011,7 @@ static void btree_node_scrub_work(struct work_struct *work)
bch_err_fn_ratelimited(c, ret);
}
- bch2_bkey_buf_exit(&scrub->key, c);;
+ bch2_bkey_buf_exit(&scrub->key, c);
btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
kfree(scrub);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index cc771affa511..2220198d7e2d 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -240,8 +240,10 @@ void __bch2_trans_verify_paths(struct btree_trans *trans)
__bch2_btree_path_verify(trans, path);
}
-static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
+static void __bch2_btree_iter_verify(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
+
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
@@ -270,12 +272,9 @@ static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
bkey_gt(iter->pos, iter->k.p)));
}
-static int __bch2_btree_iter_verify_ret(struct btree_trans *trans,
- struct btree_iter *iter, struct bkey_s_c k)
+static int __bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
- struct btree_iter copy;
- struct bkey_s_c prev;
- int ret = 0;
+ struct btree_trans *trans = iter->trans;
if (!(iter->flags & BTREE_ITER_filter_snapshots))
return 0;
@@ -287,16 +286,16 @@ static int __bch2_btree_iter_verify_ret(struct btree_trans *trans,
iter->snapshot,
k.k->p.snapshot));
- bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
- BTREE_ITER_nopreserve|
- BTREE_ITER_all_snapshots);
- prev = bch2_btree_iter_prev(trans, &copy);
+ CLASS(btree_iter, copy)(trans, iter->btree_id, iter->pos,
+ BTREE_ITER_nopreserve|
+ BTREE_ITER_all_snapshots);
+ struct bkey_s_c prev = bch2_btree_iter_prev(&copy);
if (!prev.k)
- goto out;
+ return 0;
- ret = bkey_err(prev);
+ int ret = bkey_err(prev);
if (ret)
- goto out;
+ return ret;
if (bkey_eq(prev.k->p, k.k->p) &&
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
@@ -312,9 +311,8 @@ static int __bch2_btree_iter_verify_ret(struct btree_trans *trans,
iter->snapshot,
buf1.buf, buf2.buf);
}
-out:
- bch2_trans_iter_exit(trans, &copy);
- return ret;
+
+ return 0;
}
void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
@@ -364,11 +362,10 @@ static inline void bch2_btree_path_verify(struct btree_trans *trans,
__bch2_btree_path_verify(trans, path);
}
-static inline void bch2_btree_iter_verify(struct btree_trans *trans,
- struct btree_iter *iter)
+static inline void bch2_btree_iter_verify(struct btree_iter *iter)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
- __bch2_btree_iter_verify(trans, iter);
+ __bch2_btree_iter_verify(iter);
}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
@@ -377,11 +374,11 @@ static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
__bch2_btree_iter_verify_entry_exit(iter);
}
-static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
+static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter,
struct bkey_s_c k)
{
return static_branch_unlikely(&bch2_debug_check_iterators)
- ? __bch2_btree_iter_verify_ret(trans, iter, k)
+ ? __bch2_btree_iter_verify_ret(iter, k)
: 0;
}
@@ -891,7 +888,7 @@ static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
struct btree_path *path,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct btree_path_level *l = path_l(path);
@@ -943,7 +940,7 @@ static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans,
static __always_inline int btree_path_down(struct btree_trans *trans,
struct btree_path *path,
- unsigned flags,
+ enum btree_iter_update_trigger_flags flags,
unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
@@ -1151,7 +1148,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
*/
int bch2_btree_path_traverse_one(struct btree_trans *trans,
btree_path_idx_t path_idx,
- unsigned flags,
+ enum btree_iter_update_trigger_flags flags,
unsigned long trace_ip)
{
struct btree_path *path = &trans->paths[path_idx];
@@ -1732,7 +1729,8 @@ static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
btree_path_idx_t bch2_path_get(struct btree_trans *trans,
enum btree_id btree_id, struct bpos pos,
unsigned locks_want, unsigned level,
- unsigned flags, unsigned long ip)
+ enum btree_iter_update_trigger_flags flags,
+ unsigned long ip)
{
struct btree_path *path;
bool cached = flags & BTREE_ITER_cached;
@@ -1863,8 +1861,10 @@ hole:
return (struct bkey_s_c) { u, NULL };
}
-void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
+void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
+
if (!iter->path || trans->restarted)
return;
@@ -1876,14 +1876,17 @@ void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *
/* Btree iterators: */
int __must_check
-__bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
+__bch2_btree_iter_traverse(struct btree_iter *iter)
{
- return bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}
int __must_check
-bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
+bch2_btree_iter_traverse(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
+ int ret;
+
bch2_trans_verify_not_unlocked_or_in_restart(trans);
iter->path = bch2_btree_path_set_pos(trans, iter->path,
@@ -1891,7 +1894,7 @@ bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
if (ret)
return ret;
@@ -1903,14 +1906,14 @@ bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
/* Iterate across nodes (leaf and interior nodes) */
-struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
- struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -1932,7 +1935,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
return b;
err:
@@ -1941,26 +1944,26 @@ err:
}
/* Only kept for -tools */
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans,
- struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{
struct btree *b;
- while (b = bch2_btree_iter_peek_node(trans, iter),
+ while (b = bch2_btree_iter_peek_node(iter),
bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
- bch2_trans_begin(trans);
+ bch2_trans_begin(iter->trans);
return b;
}
-struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter)
+struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -2034,7 +2037,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
return b;
err:
@@ -2044,7 +2047,7 @@ err:
/* Iterate across keys (in leaf nodes only) */
-inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter)
+inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2053,11 +2056,11 @@ inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_successor(iter, pos);
- bch2_btree_iter_set_pos(trans, iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
return ret;
}
-inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter)
+inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2066,7 +2069,7 @@ inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_predecessor(iter, pos);
- bch2_btree_iter_set_pos(trans, iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
return ret;
}
@@ -2198,9 +2201,9 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans,
* bkey_s_c_null:
*/
static noinline
-struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos pos)
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
+ struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct bkey u;
struct bkey_s_c k;
@@ -2246,14 +2249,14 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btr
return k;
}
-static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
+ struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
int ret;
EBUG_ON(btree_iter_path(trans, iter)->cached);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2263,7 +2266,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(trans, iter, iter->pos);
+ bch2_btree_iter_set_pos(iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2273,7 +2276,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2285,10 +2288,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
!bkey_deleted(k.k) &&
- (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
k = k2;
if (bkey_err(k)) {
- bch2_btree_iter_set_pos(trans, iter, iter->pos);
+ bch2_btree_iter_set_pos(iter, iter->pos);
break;
}
}
@@ -2321,13 +2324,13 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct
search_key = bpos_successor(l->b->key.k.p);
} else {
/* End of btree: */
- bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
if (trace___btree_iter_peek_enabled()) {
CLASS(printbuf, buf)();
@@ -2348,15 +2351,14 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct
/**
* bch2_btree_iter_peek_max() - returns first key greater than or equal to
* iterator's current position
- * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys less than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
{
+ struct btree_trans *trans = iter->trans;
struct bpos search_key = btree_iter_search_key(iter);
struct bkey_s_c k;
struct bpos iter_pos = iter->pos;
@@ -2378,7 +2380,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree
}
while (1) {
- k = __bch2_btree_iter_peek(trans, iter, search_key);
+ k = __bch2_btree_iter_peek(iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2492,9 +2494,9 @@ out_no_locked:
if (!(iter->flags & BTREE_ITER_all_snapshots))
iter->pos.snapshot = iter->snapshot;
- ret = bch2_btree_iter_verify_ret(trans, iter, k);
+ ret = bch2_btree_iter_verify_ret(iter, k);
if (unlikely(ret)) {
- bch2_btree_iter_set_pos(trans, iter, iter->pos);
+ bch2_btree_iter_set_pos(iter, iter->pos);
k = bkey_s_c_err(ret);
}
@@ -2515,7 +2517,7 @@ out_no_locked:
return k;
end:
- bch2_btree_iter_set_pos(trans, iter, end);
+ bch2_btree_iter_set_pos(iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2523,25 +2525,24 @@ end:
/**
* bch2_btree_iter_next() - returns first key greater than iterator's current
* position
- * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(trans, iter))
+ if (!bch2_btree_iter_advance(iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek(trans, iter);
+ return bch2_btree_iter_peek(iter);
}
-static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
{
+ struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2551,7 +2552,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st
int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(trans, iter, iter->pos);
+ bch2_btree_iter_set_pos(iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2561,7 +2562,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2578,10 +2579,10 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
!bkey_deleted(k.k) &&
- (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
k = k2;
if (bkey_err(k2)) {
- bch2_btree_iter_set_pos(trans, iter, iter->pos);
+ bch2_btree_iter_set_pos(iter, iter->pos);
break;
}
}
@@ -2602,27 +2603,25 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st
search_key = bpos_predecessor(path->l[0].b->data->min_key);
} else {
/* Start of btree: */
- bch2_btree_iter_set_pos(trans, iter, POS_MIN);
+ bch2_btree_iter_set_pos(iter, POS_MIN);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
return k;
}
/**
* bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
* iterator's current position
- * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys greater than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
{
if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
!bkey_eq(iter->pos, POS_MAX) &&
@@ -2637,7 +2636,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
* real visible extents - easiest to just use peek_slot() (which
* internally uses peek() for extents)
*/
- struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
if (bkey_err(k))
return k;
@@ -2647,6 +2646,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
return k;
}
+ struct btree_trans *trans = iter->trans;
struct bpos search_key = iter->pos;
struct bkey_s_c k;
btree_path_idx_t saved_path = 0;
@@ -2662,7 +2662,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
}
while (1) {
- k = __bch2_btree_iter_peek_prev(trans, iter, search_key);
+ k = __bch2_btree_iter_peek_prev(iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2744,7 +2744,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
}
/* Extents can straddle iter->pos: */
- iter->pos = bpos_min(iter->pos, k.k->p);;
+ iter->pos = bpos_min(iter->pos, k.k->p);
if (iter->flags & BTREE_ITER_filter_snapshots)
iter->pos.snapshot = iter->snapshot;
@@ -2753,7 +2753,7 @@ out_no_locked:
bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_intent);
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
if (trace_btree_iter_peek_prev_min_enabled()) {
CLASS(printbuf, buf)();
@@ -2769,7 +2769,7 @@ out_no_locked:
}
return k;
end:
- bch2_btree_iter_set_pos(trans, iter, end);
+ bch2_btree_iter_set_pos(iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2777,27 +2777,27 @@ end:
/**
* bch2_btree_iter_prev() - returns first key less than iterator's current
* position
- * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(trans, iter))
+ if (!bch2_btree_iter_rewind(iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_prev(trans, iter);
+ return bch2_btree_iter_peek_prev(iter);
}
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
struct bpos search_key;
struct bkey_s_c k, k2;
int ret;
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(trans, iter);
+ bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
@@ -2815,7 +2815,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
goto out2;
}
- bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
}
search_key = btree_iter_search_key(iter);
@@ -2858,10 +2858,11 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
!bkey_deleted(k.k) &&
- (k2 = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) {
+ (k2 = btree_trans_peek_key_cache(iter, iter->pos)).k) {
k = k2;
- if (!bkey_err(k))
- iter->k = *k.k;
+ if (bkey_err(k))
+ goto out;
+ iter->k = *k.k;
}
if (unlikely(k.k->type == KEY_TYPE_whiteout &&
@@ -2880,21 +2881,21 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
if (iter->flags & BTREE_ITER_intent) {
struct btree_iter iter2;
- bch2_trans_copy_iter(trans, &iter2, iter);
- k = bch2_btree_iter_peek_max(trans, &iter2, end);
+ bch2_trans_copy_iter(&iter2, iter);
+ k = bch2_btree_iter_peek_max(&iter2, end);
if (k.k && !bkey_err(k)) {
swap(iter->key_cache_path, iter2.key_cache_path);
iter->k = iter2.k;
k.k = &iter->k;
}
- bch2_trans_iter_exit(trans, &iter2);
+ bch2_trans_iter_exit(&iter2);
} else {
struct bpos pos = iter->pos;
- k = bch2_btree_iter_peek_max(trans, iter, end);
+ k = bch2_btree_iter_peek_max(iter, end);
if (unlikely(bkey_err(k)))
- bch2_btree_iter_set_pos(trans, iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
else
iter->pos = pos;
}
@@ -2923,8 +2924,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
}
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(trans, iter);
- ret = bch2_btree_iter_verify_ret(trans, iter, k);
+ bch2_btree_iter_verify(iter);
+ ret = bch2_btree_iter_verify_ret(iter, k);
if (unlikely(ret))
k = bkey_s_c_err(ret);
out2:
@@ -2944,31 +2945,31 @@ out2:
return k;
}
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(trans, iter))
+ if (!bch2_btree_iter_advance(iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(trans, iter);
+ return bch2_btree_iter_peek_slot(iter);
}
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(trans, iter))
+ if (!bch2_btree_iter_rewind(iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(trans, iter);
+ return bch2_btree_iter_peek_slot(iter);
}
/* Obsolete, but still used by rust wrapper in -tools */
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
{
struct bkey_s_c k;
- while (btree_trans_too_many_iters(trans) ||
- (k = bch2_btree_iter_peek_type(trans, iter, iter->flags),
+ while (btree_trans_too_many_iters(iter->trans) ||
+ (k = bch2_btree_iter_peek_type(iter, iter->flags),
bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(trans);
+ bch2_trans_begin(iter->trans);
return k;
}
@@ -3100,8 +3101,10 @@ static inline void btree_path_list_add(struct btree_trans *trans,
btree_trans_verify_sorted_refs(trans);
}
-void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
+void bch2_trans_iter_exit(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
+
if (iter->update_path)
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_intent);
@@ -3114,12 +3117,13 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
+ iter->trans = NULL;
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree_id, struct bpos pos,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
bch2_btree_iter_flags(trans, btree_id, 0, flags),
@@ -3132,7 +3136,7 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
struct bpos pos,
unsigned locks_want,
unsigned depth,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
flags |= BTREE_ITER_not_extents;
flags |= BTREE_ITER_snapshot_field;
@@ -3153,9 +3157,10 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
BUG_ON(iter->min_depth != depth);
}
-void bch2_trans_copy_iter(struct btree_trans *trans,
- struct btree_iter *dst, struct btree_iter *src)
+void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
+ struct btree_trans *trans = src->trans;
+
*dst = *src;
#ifdef TRACK_PATH_ALLOCATED
dst->ip_allocated = _RET_IP_;
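
The btree_iter.c hunks above all make the same mechanical change: the iterator now carries its btree_trans (set in bch2_trans_iter_init_common()), so peek/advance/set_pos/exit lose their trans argument. A minimal caller-side sketch under the new signatures (example_lookup() is hypothetical, not part of the patch):

static int example_lookup(struct btree_trans *trans,
                          enum btree_id btree, struct bpos pos)
{
        struct btree_iter iter;

        bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);

        /* iter.trans was set by init; no trans argument needed from here on */
        struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
        int ret = bkey_err(k);

        bch2_trans_iter_exit(&iter);
        return ret;
}
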
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 53074ed62e09..689553c0849a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -235,12 +235,14 @@ bch2_btree_path_set_pos(struct btree_trans *trans,
int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
btree_path_idx_t,
- unsigned, unsigned long);
+ enum btree_iter_update_trigger_flags,
+ unsigned long);
static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *);
static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
- btree_path_idx_t path, unsigned flags)
+ btree_path_idx_t path,
+ enum btree_iter_update_trigger_flags flags)
{
bch2_trans_verify_not_unlocked_or_in_restart(trans);
@@ -251,7 +253,9 @@ static inline int __must_check bch2_btree_path_traverse(struct btree_trans *tran
}
btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
- unsigned, unsigned, unsigned, unsigned long);
+ unsigned, unsigned,
+ enum btree_iter_update_trigger_flags,
+ unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
unsigned, struct bpos);
@@ -404,37 +408,36 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
-int __must_check __bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
-int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
+int __must_check bch2_btree_iter_traverse(struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
+struct btree *bch2_btree_iter_next_node(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos);
-struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
-static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans,
- struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
- return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX);
+ return bch2_btree_iter_peek_max(iter, SPOS_MAX);
}
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos);
-static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
- return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN);
+ return bch2_btree_iter_peek_prev_min(iter, POS_MIN);
}
-struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
-bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *);
-bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *);
+bool bch2_btree_iter_advance(struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_iter *);
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
@@ -445,9 +448,10 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo
iter->k.size = 0;
}
-static inline void bch2_btree_iter_set_pos(struct btree_trans *trans,
- struct btree_iter *iter, struct bpos new_pos)
+static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
+ struct btree_trans *trans = iter->trans;
+
if (unlikely(iter->update_path))
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_intent);
@@ -465,22 +469,21 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it
iter->pos = bkey_start_pos(&iter->k);
}
-static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans,
- struct btree_iter *iter, u32 snapshot)
+static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
struct bpos pos = iter->pos;
iter->snapshot = snapshot;
pos.snapshot = snapshot;
- bch2_btree_iter_set_pos(trans, iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
}
-void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
+void bch2_trans_iter_exit(struct btree_iter *);
-static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
- unsigned btree_id,
- unsigned level,
- unsigned flags)
+static inline enum btree_iter_update_trigger_flags
+bch2_btree_iter_flags(struct btree_trans *trans,
+ unsigned btree_id, unsigned level,
+ enum btree_iter_update_trigger_flags flags)
{
if (level || !btree_id_cached(trans->c, btree_id)) {
flags &= ~BTREE_ITER_cached;
@@ -508,15 +511,16 @@ static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
+ enum btree_id btree, struct bpos pos,
unsigned locks_want,
unsigned depth,
- unsigned flags,
+ enum btree_iter_update_trigger_flags flags,
unsigned long ip)
{
+ iter->trans = trans;
iter->update_path = 0;
iter->key_cache_path = 0;
- iter->btree_id = btree_id;
+ iter->btree_id = btree;
iter->min_depth = 0;
iter->flags = flags;
iter->snapshot = pos.snapshot;
@@ -526,33 +530,51 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
#ifdef CONFIG_BCACHEFS_DEBUG
iter->ip_allocated = ip;
#endif
- iter->path = bch2_path_get(trans, btree_id, iter->pos,
- locks_want, depth, flags, ip);
+ iter->path = bch2_path_get(trans, btree, iter->pos, locks_want, depth, flags, ip);
}
void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
- enum btree_id, struct bpos, unsigned);
+ enum btree_id, struct bpos,
+ enum btree_iter_update_trigger_flags);
static inline void bch2_trans_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
+ enum btree_id btree, struct bpos pos,
+ enum btree_iter_update_trigger_flags flags)
{
- if (__builtin_constant_p(btree_id) &&
+ if (__builtin_constant_p(btree) &&
__builtin_constant_p(flags))
- bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, 0, flags),
+ bch2_trans_iter_init_common(trans, iter, btree, pos, 0, 0,
+ bch2_btree_iter_flags(trans, btree, 0, flags),
_THIS_IP_);
else
- bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
+ bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags);
+}
+
+static inline struct btree_iter bch2_trans_iter_class_init(struct btree_trans *trans,
+ enum btree_id btree, struct bpos pos,
+ enum btree_iter_update_trigger_flags flags)
+{
+ struct btree_iter iter;
+ bch2_trans_iter_init(trans, &iter, btree, pos, flags);
+ return iter;
}
+DEFINE_CLASS(btree_iter, struct btree_iter,
+ bch2_trans_iter_exit(&_T),
+ bch2_trans_iter_class_init(trans, btree, pos, flags),
+ struct btree_trans *trans,
+ enum btree_id btree, struct bpos pos,
+ enum btree_iter_update_trigger_flags flags);
+
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
- unsigned, unsigned, unsigned);
-void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *);
+ unsigned, unsigned,
+ enum btree_iter_update_trigger_flags);
+
+void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
-void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *);
+void bch2_set_btree_iter_dontneed(struct btree_iter *);
#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
void bch2_trans_kmalloc_trace_to_text(struct printbuf *,
@@ -623,27 +645,28 @@ static __always_inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *tr
static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned type)
+ enum btree_id btree, struct bpos pos,
+ enum btree_iter_update_trigger_flags flags,
+ enum bch_bkey_type type)
{
struct bkey_s_c k;
- bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
- k = bch2_btree_iter_peek_slot(trans, iter);
+ bch2_trans_iter_init(trans, iter, btree, pos, flags);
+ k = bch2_btree_iter_peek_slot(iter);
if (!bkey_err(k) && type && k.k->type != type)
k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
if (unlikely(bkey_err(k)))
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return k;
}
static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
struct btree_iter *iter,
- unsigned btree_id, struct bpos pos,
- unsigned flags)
+ enum btree_id btree, struct bpos pos,
+ enum btree_iter_update_trigger_flags flags)
{
- return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
+ return __bch2_bkey_get_iter(trans, iter, btree, pos, flags, 0);
}
#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
@@ -665,16 +688,17 @@ do { \
} while (0)
static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
- unsigned btree_id, struct bpos pos,
- unsigned flags, unsigned type,
+ enum btree_id btree, struct bpos pos,
+ enum btree_iter_update_trigger_flags flags,
+ enum bch_bkey_type type,
unsigned val_size, void *val)
{
struct btree_iter iter;
- struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
+ struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree, pos, flags, type);
int ret = bkey_err(k);
if (!ret) {
__bkey_val_copy(val, val_size, k);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
}
return ret;
@@ -699,17 +723,17 @@ u32 bch2_trans_begin(struct btree_trans *);
int _ret3 = 0; \
do { \
_ret3 = lockrestart_do((_trans), ({ \
- struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\
+ struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
if (!_b) \
break; \
\
PTR_ERR_OR_ZERO(_b) ?: (_do); \
})) ?: \
lockrestart_do((_trans), \
- PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\
+ PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
} while (!_ret3); \
\
- bch2_trans_iter_exit((_trans), &(_iter)); \
+ bch2_trans_iter_exit(&(_iter)); \
_ret3; \
})
@@ -718,34 +742,31 @@ u32 bch2_trans_begin(struct btree_trans *);
__for_each_btree_node(_trans, _iter, _btree_id, _start, \
0, 0, _flags, _b, _do)
-static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned flags)
+static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
+ enum btree_iter_update_trigger_flags flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
- bch2_btree_iter_peek_prev(trans, iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
+ bch2_btree_iter_peek_prev(iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned flags)
+static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
+ enum btree_iter_update_trigger_flags flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
- bch2_btree_iter_peek(trans, iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
+ bch2_btree_iter_peek(iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans,
- struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter,
struct bpos end,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
if (!(flags & BTREE_ITER_slots))
- return bch2_btree_iter_peek_max(trans, iter, end);
+ return bch2_btree_iter_peek_max(iter, end);
if (bkey_gt(iter->pos, end))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(trans, iter);
+ return bch2_btree_iter_peek_slot(iter);
}
int __bch2_btree_trans_too_many_iters(struct btree_trans *);
@@ -801,7 +822,7 @@ transaction_restart: \
if (!_ret2) \
bch2_trans_verify_not_restarted(_trans, _restart_count);\
\
- _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \
+ _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \
})
#define for_each_btree_key_max_continue(_trans, _iter, \
@@ -812,62 +833,52 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), \
+ (_k) = bch2_btree_iter_peek_max_type(&(_iter), \
_end, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
\
- bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
})
#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \
for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)
-#define for_each_btree_key_max(_trans, _iter, _btree_id, \
- _start, _end, _flags, _k, _do) \
-({ \
- bch2_trans_begin(trans); \
- \
- struct btree_iter _iter; \
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- \
- for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\
+#define for_each_btree_key_max(_trans, _iter, _btree_id, \
+ _start, _end, _flags, _k, _do) \
+({ \
+ bch2_trans_begin(trans); \
+ \
+ CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \
+ for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do); \
})
-#define for_each_btree_key(_trans, _iter, _btree_id, \
- _start, _flags, _k, _do) \
- for_each_btree_key_max(_trans, _iter, _btree_id, _start, \
- SPOS_MAX, _flags, _k, _do)
+#define for_each_btree_key(_trans, _iter, _btree_id, _start, _flags, _k, _do) \
+ for_each_btree_key_max(_trans, _iter, _btree_id, _start, SPOS_MAX, _flags, _k, _do)
-#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
- _start, _flags, _k, _do) \
-({ \
- struct btree_iter _iter; \
- struct bkey_s_c _k; \
- int _ret3 = 0; \
- \
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- \
- do { \
- _ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), \
- (_flags)); \
- if (!(_k).k) \
- break; \
- \
- bkey_err(_k) ?: (_do); \
- })); \
- } while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter))); \
- \
- bch2_trans_iter_exit((_trans), &(_iter)); \
- _ret3; \
+#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _do) \
+({ \
+ int _ret3 = 0; \
+ \
+ CLASS(btree_iter, iter)((_trans), (_btree_id), (_start), (_flags)); \
+ \
+ do { \
+ _ret3 = lockrestart_do(_trans, ({ \
+ struct bkey_s_c _k = \
+ bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
+ if (!(_k).k) \
+ break; \
+ \
+ bkey_err(_k) ?: (_do); \
+ })); \
+ } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \
+ \
+ _ret3; \
})
#define for_each_btree_key_commit(_trans, _iter, _btree_id, \
@@ -894,38 +905,37 @@ transaction_restart: \
(_do) ?: bch2_trans_commit(_trans, (_disk_res),\
(_journal_seq), (_commit_flags)))
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
- struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\
+ (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(_trans, &(_iter)))
+ bch2_btree_iter_advance(&(_iter)))
-#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\
+#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\
for (; \
- (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags), \
+ (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(_trans, &(_iter)))
+ bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
SPOS_MAX, _flags, _k, _ret)
-#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags), \
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_rewind(_trans, &(_iter)))
+#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_rewind(&(_iter)))
-#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret) \
- for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret)
+#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
+ for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
/*
* This should not be used in a fastpath, without first trying _do in
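
The new DEFINE_CLASS(btree_iter, ...) above registers bch2_trans_iter_exit() as the scope-exit destructor, which is what lets the CLASS(btree_iter, ...) call sites later in this diff drop their cleanup labels. A sketch of the intended usage (example_peek() is hypothetical; BTREE_ID_dirents stands in for any btree):

static int example_peek(struct btree_trans *trans, struct bpos pos)
{
        CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, pos, BTREE_ITER_intent);

        struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
        int ret = bkey_err(k);
        if (ret)
                return ret;             /* iter is exited automatically */

        /* ... inspect k ... */
        return 0;
}
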
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index ebba14da92b4..d69cf9435872 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -254,11 +254,13 @@ static int btree_key_cache_create(struct btree_trans *trans,
struct bkey_i *new_k = allocate_dropping_locks(trans, ret,
kmalloc(key_u64s * sizeof(u64), _gfp));
- if (unlikely(!new_k)) {
+ if (unlikely(!new_k && !ret)) {
bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
bch2_btree_id_str(ck->key.btree_id), key_u64s);
ret = bch_err_throw(c, ENOMEM_btree_key_cache_fill);
- } else if (ret) {
+ }
+
+ if (unlikely(ret)) {
kfree(new_k);
goto err;
}
@@ -321,19 +323,16 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
}
struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
- bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos,
- BTREE_ITER_intent|
- BTREE_ITER_key_cache_fill|
- BTREE_ITER_cached_nofill);
+ CLASS(btree_iter, iter)(trans, ck_path->btree_id, ck_path->pos,
+ BTREE_ITER_intent|
+ BTREE_ITER_key_cache_fill|
+ BTREE_ITER_cached_nofill);
iter.flags &= ~BTREE_ITER_with_journal;
- k = bch2_btree_iter_peek_slot(trans, &iter);
- ret = bkey_err(k);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
+ int ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
/* Recheck after btree lookup, before allocating: */
ck_path = trans->paths + ck_path_idx;
@@ -343,15 +342,13 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
ret = btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k);
if (ret)
- goto err;
+ return ret;
if (trace_key_cache_fill_enabled())
do_trace_key_cache_fill(trans, ck_path, k);
out:
/* We're not likely to need this iterator again: */
- bch2_set_btree_iter_dontneed(trans, &iter);
-err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_set_btree_iter_dontneed(&iter);
return ret;
}
@@ -407,7 +404,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans,
btree_node_unlock(trans, path, 0);
path->l[0].b = ERR_PTR(ret);
}
- } else {
+ } else if (!(flags & BTREE_ITER_cached_nofill)) {
BUG_ON(path->uptodate);
BUG_ON(!path->nodes_locked);
}
@@ -423,35 +420,34 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
- struct btree_iter c_iter, b_iter;
struct bkey_cached *ck = NULL;
int ret;
- bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
- BTREE_ITER_slots|
- BTREE_ITER_intent|
- BTREE_ITER_all_snapshots);
- bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
- BTREE_ITER_cached|
- BTREE_ITER_intent);
+ CLASS(btree_iter, b_iter)(trans, key.btree_id, key.pos,
+ BTREE_ITER_slots|
+ BTREE_ITER_intent|
+ BTREE_ITER_all_snapshots);
+ CLASS(btree_iter, c_iter)(trans, key.btree_id, key.pos,
+ BTREE_ITER_cached|
+ BTREE_ITER_intent);
b_iter.flags &= ~BTREE_ITER_with_key_cache;
- ret = bch2_btree_iter_traverse(trans, &c_iter);
+ ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
- goto out;
+ return ret;
ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
if (!ck)
- goto out;
+ return 0;
if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
if (evict)
goto evict;
- goto out;
+ return 0;
}
if (journal_seq && ck->journal.seq != journal_seq)
- goto out;
+ return 0;
trans->journal_res.seq = ck->journal.seq;
@@ -468,7 +464,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
!test_bit(JOURNAL_space_low, &c->journal.flags))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
- struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter);
+ struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter);
ret = bkey_err(btree_k);
if (ret)
goto err;
@@ -527,8 +523,6 @@ evict:
}
}
out:
- bch2_trans_iter_exit(trans, &b_iter);
- bch2_trans_iter_exit(trans, &c_iter);
return ret;
}
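
The btree_key_cache_create() fix above is worth spelling out: allocate_dropping_locks() may set ret (for example to a transaction-restart error) independently of whether the allocation produced a pointer, so a NULL result is only treated as ENOMEM when ret is still clear, and a single ret check then frees whatever was allocated. An illustrative sketch of that shape (example_alloc() is hypothetical; key_u64s and _gfp are written as in the patch):

static struct bkey_i *example_alloc(struct btree_trans *trans,
                                    unsigned key_u64s, int *ret)
{
        struct bkey_i *new_k = allocate_dropping_locks(trans, *ret,
                                kmalloc(key_u64s * sizeof(u64), _gfp));

        /* NULL with no error already set is a plain allocation failure */
        if (!new_k && !*ret)
                *ret = bch_err_throw(trans->c, ENOMEM_btree_key_cache_fill);

        /* any error (restart or ENOMEM) frees whatever was allocated */
        if (*ret) {
                kfree(new_k);
                return NULL;
        }
        return new_k;
}
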
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index d997e3818c30..4b7b5ca74ba1 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -158,14 +158,6 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX)
return;
- bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
- bio->bi_iter.bi_sector = offset;
- bch2_bio_map(bio, b->data, c->opts.btree_node_size);
-
- submit_time = local_clock();
- submit_bio_wait(bio);
- bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status);
-
rcu_read_lock();
struct found_btree_node n = {
.btree_id = BTREE_NODE_ID(bn),
@@ -182,6 +174,14 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
};
rcu_read_unlock();
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
+ bio->bi_iter.bi_sector = offset;
+ bch2_bio_map(bio, b->data, c->opts.btree_node_size);
+
+ submit_time = local_clock();
+ submit_bio_wait(bio);
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status);
+
found_btree_node_to_key(&b->key, &n);
CLASS(printbuf, buf)();
@@ -270,6 +270,9 @@ static int read_btree_nodes(struct find_btree_nodes *f)
int ret = 0;
closure_init_stack(&cl);
+ CLASS(printbuf, buf)();
+
+ prt_printf(&buf, "scanning for btree nodes on");
for_each_online_member(c, ca, BCH_DEV_READ_REF_btree_node_scan) {
if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree)))
@@ -295,10 +298,14 @@ static int read_btree_nodes(struct find_btree_nodes *f)
break;
}
+ prt_printf(&buf, " %s", ca->name);
+
closure_get(&cl);
enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan);
wake_up_process(t);
}
+
+ bch_notice(c, "%s", buf.buf);
err:
while (closure_sync_timeout(&cl, sysctl_hung_task_timeout_secs * HZ / 2))
;
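
Taken together, the btree_node_scan.c hunks above turn per-device scanning output into a single notice: device names are accumulated in a printbuf while the scan threads are started, then logged once. Roughly (sketch only; the per-device checks and thread start are elided):

        CLASS(printbuf, buf)();
        prt_printf(&buf, "scanning for btree nodes on");

        for_each_online_member(c, ca, BCH_DEV_READ_REF_btree_node_scan) {
                /* ... skip devices without btree data, start scan thread ... */
                prt_printf(&buf, " %s", ca->name);
        }

        bch_notice(c, "%s", buf.buf);
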
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 1f9965ae610c..8b94a8156fbf 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -772,12 +772,13 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
trans->journal_res.offset += trans->journal_entries.u64s;
trans->journal_res.u64s -= trans->journal_entries.u64s;
- memcpy_u64s_small(bch2_journal_add_entry(j, &trans->journal_res,
- BCH_JSET_ENTRY_write_buffer_keys,
- BTREE_ID_accounting, 0,
- trans->accounting.u64s)->_data,
- btree_trans_subbuf_base(trans, &trans->accounting),
- trans->accounting.u64s);
+ if (trans->accounting.u64s)
+ memcpy_u64s_small(bch2_journal_add_entry(j, &trans->journal_res,
+ BCH_JSET_ENTRY_write_buffer_keys,
+ BTREE_ID_accounting, 0,
+ trans->accounting.u64s)->_data,
+ btree_trans_subbuf_base(trans, &trans->accounting),
+ trans->accounting.u64s);
if (trans->journal_seq)
*trans->journal_seq = trans->journal_res.seq;
@@ -968,7 +969,7 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans,
BUG_ON(current != c->recovery_task);
struct bkey_i *accounting;
-
+retry:
percpu_down_read(&c->mark_lock);
for (accounting = btree_trans_subbuf_base(trans, &trans->accounting);
accounting != btree_trans_subbuf_top(trans, &trans->accounting);
@@ -1024,13 +1025,17 @@ fatal_err:
bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret));
percpu_down_read(&c->mark_lock);
revert_fs_usage:
- BUG();
- /* error path not handled by __bch2_trans_commit() */
for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting);
i != accounting;
i = bkey_next(i))
bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags);
percpu_up_read(&c->mark_lock);
+
+ if (bch2_err_matches(ret, BCH_ERR_btree_insert_need_mark_replicas)) {
+ ret = drop_locks_do(trans, bch2_accounting_update_sb(trans));
+ if (!ret)
+ goto retry;
+ }
return ret;
}
@@ -1065,11 +1070,15 @@ int __bch2_trans_commit(struct btree_trans *trans, enum bch_trans_commit_flags f
EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
- journal_u64s = jset_u64s(trans->accounting.u64s);
+ journal_u64s = 0;
+
trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
if (trans->journal_transaction_names)
journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);
+ if (trans->accounting.u64s)
+ journal_u64s += jset_u64s(trans->accounting.u64s);
+
trans_for_each_update(trans, i) {
struct btree_path *path = trans->paths + i->path;
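
The btree_trans_commit.c hunks above are related: a journal entry (and reservation) for accounting keys is only made when trans->accounting.u64s is non-zero, and the journal-replay commit path no longer BUGs when a replicas entry is missing — it reverts the accounting it applied, updates the superblock with locks dropped, and retries. The retry shape, condensed (sketch; the apply/revert loops are elided):

retry:
        /* ... apply accounting deltas under mark_lock ... */

        if (bch2_err_matches(ret, BCH_ERR_btree_insert_need_mark_replicas)) {
                ret = drop_locks_do(trans, bch2_accounting_update_sb(trans));
                if (!ret)
                        goto retry;
        }
        return ret;
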
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 76adf75617aa..ffa250008d91 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -364,6 +364,7 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
+ struct btree_trans *trans;
btree_path_idx_t path;
btree_path_idx_t update_path;
btree_path_idx_t key_cache_path;
@@ -485,7 +486,7 @@ typedef DARRAY(struct trans_kmalloc_trace) darray_trans_kmalloc_trace;
struct btree_trans_subbuf {
u16 base;
u16 u64s;
- u16 size;;
+ u16 size;
};
struct btree_trans {
@@ -854,15 +855,15 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type)
return type != BKEY_TYPE_btree && btree_id_is_extents(type - 1);
}
-static inline bool btree_type_has_snapshots(enum btree_id btree)
-{
- const u64 mask = 0
+static const u64 btree_has_snapshots_mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_IS_snapshots)) << nr)
- BCH_BTREE_IDS()
+BCH_BTREE_IDS()
#undef x
- ;
+;
- return BIT_ULL(btree) & mask;
+static inline bool btree_type_has_snapshots(enum btree_id btree)
+{
+ return BIT_ULL(btree) & btree_has_snapshots_mask;
}
static inline bool btree_type_has_snapshot_field(enum btree_id btree)
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index f514a8ad7a89..1b1b5bb9e915 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -117,7 +117,7 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -143,7 +143,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
ret = PTR_ERR_OR_ZERO(update);
if (ret) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
break;
}
@@ -154,7 +154,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, update,
BTREE_UPDATE_internal_snapshot_node);
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
break;
@@ -268,18 +268,16 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
struct bkey_i *insert,
enum btree_iter_update_trigger_flags flags)
{
- struct btree_iter iter;
- struct bkey_s_c k;
enum btree_id btree_id = orig_iter->btree_id;
- int ret = 0;
- bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
- BTREE_ITER_intent|
- BTREE_ITER_with_updates|
- BTREE_ITER_not_extents);
- k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
- if ((ret = bkey_err(k)))
- goto err;
+ CLASS(btree_iter, iter)(trans, btree_id, bkey_start_pos(&insert->k),
+ BTREE_ITER_intent|
+ BTREE_ITER_with_updates|
+ BTREE_ITER_not_extents);
+ struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ int ret = bkey_err(k);
+ if (ret)
+ return ret;
if (!k.k)
goto out;
@@ -287,7 +285,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
ret = extent_front_merge(trans, &iter, k, &insert, flags);
if (ret)
- goto err;
+ return ret;
}
goto next;
@@ -298,15 +296,15 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
if (ret)
- goto err;
+ return ret;
if (done)
goto out;
next:
- bch2_btree_iter_advance(trans, &iter);
- k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
+ bch2_btree_iter_advance(&iter);
+ k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
- goto err;
+ return ret;
if (!k.k)
goto out;
}
@@ -314,15 +312,12 @@ next:
if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
ret = extent_back_merge(trans, &iter, insert, k);
if (ret)
- goto err;
+ return ret;
}
out:
- if (!bkey_deleted(&insert->k))
- ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
-err:
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
+ return !bkey_deleted(&insert->k)
+ ? bch2_btree_insert_nonextent(trans, btree_id, insert, flags)
+ : 0;
}
static inline struct btree_insert_entry *
@@ -594,13 +589,13 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
enum btree_id btree, struct bpos end)
{
bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
int ret = bkey_err(k);
if (ret)
goto err;
- bch2_btree_iter_advance(trans, iter);
- k = bch2_btree_iter_peek_slot(trans, iter);
+ bch2_btree_iter_advance(iter);
+ k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -614,7 +609,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
return 0;
err:
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
@@ -629,29 +624,21 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
enum btree_id btree, struct bkey_i *k,
enum btree_iter_update_trigger_flags flags)
{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, btree, k->k.p,
- BTREE_ITER_cached|
- BTREE_ITER_not_extents|
- BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(trans, &iter) ?:
+ CLASS(btree_iter, iter)(trans, btree, k->k.p,
+ BTREE_ITER_cached|
+ BTREE_ITER_not_extents|
+ BTREE_ITER_intent);
+ return bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, flags);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
}
-int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
+int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id btree,
struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
- BTREE_ITER_intent|flags);
- int ret = bch2_btree_iter_traverse(trans, &iter) ?:
- bch2_trans_update(trans, &iter, k, flags);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ CLASS(btree_iter, iter)(trans, btree, bkey_start_pos(&k->k),
+ BTREE_ITER_intent|flags);
+ return bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, k, flags);
}
/**
@@ -693,31 +680,25 @@ int bch2_btree_delete(struct btree_trans *trans,
enum btree_id btree, struct bpos pos,
enum btree_iter_update_trigger_flags flags)
{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, btree, pos,
- BTREE_ITER_cached|
- BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(trans, &iter) ?:
+ CLASS(btree_iter, iter)(trans, btree, pos,
+ BTREE_ITER_cached|
+ BTREE_ITER_intent);
+ return bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter, flags);
- bch2_trans_iter_exit(trans, &iter);
-
- return ret;
}
-int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
+int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id btree,
struct bpos start, struct bpos end,
enum btree_iter_update_trigger_flags flags,
u64 *journal_seq)
{
u32 restart_count = trans->restart_count;
- struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
- bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent|flags);
- while ((k = bch2_btree_iter_peek_max(trans, &iter, end)).k) {
+ CLASS(btree_iter, iter)(trans, btree, start, BTREE_ITER_intent|flags);
+
+ while ((k = bch2_btree_iter_peek_max(&iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
struct bkey_i delete;
@@ -767,7 +748,6 @@ err:
if (ret)
break;
}
- bch2_trans_iter_exit(trans, &iter);
return ret ?: trans_was_restarted(trans, restart_count);
}
@@ -808,13 +788,10 @@ int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter,
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
struct bpos pos, bool set)
{
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
+ CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(trans, &iter) ?:
- bch2_btree_bit_mod_iter(trans, &iter, set);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_bit_mod_iter(trans, &iter, set);
}
int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 633de3b3ac28..6790e0254a63 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -382,7 +382,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
? ERR_CAST(k.k)
: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
if (IS_ERR(ret))
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
@@ -409,7 +409,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
ret = bch2_trans_update(trans, iter, mut, flags);
if (ret) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ERR_PTR(ret);
}
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 312ef203b27b..5f4f82967105 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -14,6 +14,7 @@
#include "btree_locking.h"
#include "buckets.h"
#include "clock.h"
+#include "disk_groups.h"
#include "enumerated_ref.h"
#include "error.h"
#include "extents.h"
@@ -277,6 +278,36 @@ static void bch2_btree_node_free_never_used(struct btree_update *as,
bch2_trans_node_drop(trans, b);
}
+static bool can_use_btree_node(struct bch_fs *c,
+ struct disk_reservation *res,
+ unsigned target,
+ struct bkey_s_c k)
+{
+ if (!bch2_bkey_devs_rw(c, k))
+ return false;
+
+ if (target && !bch2_bkey_in_target(c, k, target))
+ return false;
+
+ unsigned durability = bch2_bkey_durability(c, k);
+
+ if (durability >= res->nr_replicas)
+ return true;
+
+ struct bch_devs_mask devs = target_rw_devs(c, BCH_DATA_btree, target);
+
+ guard(rcu)();
+
+ unsigned durability_available = 0, i;
+ for_each_set_bit(i, devs.d, BCH_SB_MEMBERS_MAX) {
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, i);
+ if (ca)
+ durability_available += ca->mi.durability;
+ }
+
+ return durability >= durability_available;
+}
+
static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct disk_reservation *res,
struct closure *cl,
@@ -303,10 +334,14 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
mutex_lock(&c->btree_reserve_cache_lock);
if (c->btree_reserve_cache_nr > nr_reserve) {
for (struct btree_alloc *a = c->btree_reserve_cache;
- a < c->btree_reserve_cache + c->btree_reserve_cache_nr;
- a++) {
- if (target && !bch2_bkey_in_target(c, bkey_i_to_s_c(&a->k), target))
+ a < c->btree_reserve_cache + c->btree_reserve_cache_nr;) {
+ /* check if it has sufficient durability */
+
+ if (!can_use_btree_node(c, res, target, bkey_i_to_s_c(&a->k))) {
+ bch2_open_buckets_put(c, &a->ob);
+ *a = c->btree_reserve_cache[--c->btree_reserve_cache_nr];
continue;
+ }
bkey_copy(&b->key, &a->k);
b->ob = a->ob;
@@ -2031,7 +2066,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
sib_path = bch2_path_get(trans, btree, sib_pos,
U8_MAX, level, BTREE_ITER_intent, _THIS_IP_);
- ret = bch2_btree_path_traverse(trans, sib_path, false);
+ ret = bch2_btree_path_traverse(trans, sib_path, 0);
if (ret)
goto err;
@@ -2185,7 +2220,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(trans, iter);
+ int ret = bch2_btree_iter_traverse(iter);
if (ret)
goto err;
@@ -2200,7 +2235,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
BUG_ON(!btree_node_hashed(b));
return 0;
err:
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
@@ -2280,7 +2315,7 @@ int bch2_btree_node_rewrite_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter,
btree, k->k.p,
BTREE_MAX_DEPTH, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto out;
@@ -2290,7 +2325,7 @@ int bch2_btree_node_rewrite_key(struct btree_trans *trans,
? bch2_btree_node_rewrite(trans, &iter, b, 0, flags)
: -ENOENT;
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -2305,14 +2340,14 @@ int bch2_btree_node_rewrite_pos(struct btree_trans *trans,
/* Traverse one depth lower to get a pointer to the node itself: */
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0);
- struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
ret = bch2_btree_node_rewrite(trans, &iter, b, target, flags);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -2326,7 +2361,7 @@ int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *trans,
return ret == -BCH_ERR_btree_node_dying ? 0 : ret;
ret = bch2_btree_node_rewrite(trans, &iter, b, 0, flags);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -2449,7 +2484,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bool skip_triggers)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter2 = {};
+ struct btree_iter iter2 = { NULL };
struct btree *parent;
int ret;
@@ -2473,7 +2508,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
parent = btree_node_parent(btree_iter_path(trans, iter), b);
if (parent) {
- bch2_trans_copy_iter(trans, &iter2, iter);
+ bch2_trans_copy_iter(&iter2, iter);
iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
iter2.flags & BTREE_ITER_intent,
@@ -2487,7 +2522,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->paths_sorted = false;
- ret = bch2_btree_iter_traverse(trans, &iter2) ?:
+ ret = bch2_btree_iter_traverse(&iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
if (ret)
goto err;
@@ -2527,7 +2562,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
out:
- bch2_trans_iter_exit(trans, &iter2);
+ bch2_trans_iter_exit(&iter2);
return ret;
err:
if (new_hash) {
@@ -2598,7 +2633,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
commit_flags, skip_triggers);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
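
One subtlety in can_use_btree_node() above: a cached node whose durability falls short of the reservation is still usable when it already matches the combined durability of every rw device in the target, since re-allocating could not do better. The reserve-cache scan then evicts unusable entries in place with a swap-remove rather than skipping them; a sketch of that idiom (illustrative; in the patch the surviving entry's key and open-bucket refs are copied out at the marked point):

        for (struct btree_alloc *a = c->btree_reserve_cache;
             a < c->btree_reserve_cache + c->btree_reserve_cache_nr;) {
                if (!can_use_btree_node(c, res, target, bkey_i_to_s_c(&a->k))) {
                        bch2_open_buckets_put(c, &a->ob);
                        /* swap-remove: pull in the last entry and recheck it */
                        *a = c->btree_reserve_cache[--c->btree_reserve_cache_nr];
                        continue;
                }
                /* usable entry: take it (bkey_copy() of a->k and a->ob in the patch) */
                break;
        }
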
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 9cfc3edce39a..afad11831e1d 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -145,7 +145,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
- ret = bch2_btree_iter_traverse(trans, iter);
+ ret = bch2_btree_iter_traverse(iter);
if (ret)
return ret;
@@ -203,19 +203,14 @@ static int
btree_write_buffered_insert(struct btree_trans *trans,
struct btree_write_buffered_key *wb)
{
- struct btree_iter iter;
- int ret;
-
- bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
- BTREE_ITER_cached|BTREE_ITER_intent);
+ CLASS(btree_iter, iter)(trans, wb->btree, bkey_start_pos(&wb->k.k),
+ BTREE_ITER_cached|BTREE_ITER_intent);
trans->journal_res.seq = wb->journal_seq;
- ret = bch2_btree_iter_traverse(trans, &iter) ?:
+ return bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, &wb->k,
BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
}
static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
@@ -285,7 +280,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_iter iter = {};
+ struct btree_iter iter = { NULL };
size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
bool write_locked = false;
bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
@@ -366,7 +361,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
write_locked = false;
ret = lockrestart_do(trans,
- bch2_btree_iter_traverse(trans, &iter) ?:
+ bch2_btree_iter_traverse(&iter) ?:
bch2_foreground_maybe_merge(trans, iter.path, 0,
BCH_WATERMARK_reclaim|
BCH_TRANS_COMMIT_journal_reclaim|
@@ -378,12 +373,12 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
}
if (!iter.path || iter.btree_id != k->btree) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}
- bch2_btree_iter_set_pos(trans, &iter, k->k.k.p);
+ bch2_btree_iter_set_pos(&iter, k->k.k.p);
btree_iter_path(trans, &iter)->preserve = false;
bool accounting_accumulated = false;
@@ -412,7 +407,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
struct btree_path *path = btree_iter_path(trans, &iter);
bch2_btree_node_unlock_write(trans, path, path->l[0].b);
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
goto err;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 5aab527e3e7c..0a357005e9e8 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -372,11 +372,11 @@ found:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
- ret = bch2_btree_iter_traverse(trans, &iter) ?:
+ ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, new,
BTREE_UPDATE_internal_snapshot_node|
BTREE_TRIGGER_norun);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -694,7 +694,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
acc.replicas.data_type = data_type;
ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -995,7 +995,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
}
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index ccedc93fe0ef..01838a3a189d 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -258,11 +258,10 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
struct bch_write_op *op)
{
struct bch_fs *c = op->c;
- struct btree_iter iter;
struct data_update *m = container_of(op, struct data_update, op);
int ret = 0;
- bch2_trans_iter_init(trans, &iter, m->btree_id,
+ CLASS(btree_iter, iter)(trans, m->btree_id,
bkey_start_pos(&bch2_keylist_front(&op->insert_keys)->k),
BTREE_ITER_slots|BTREE_ITER_intent);
@@ -283,7 +282,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek_slot(trans, &iter);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -456,7 +455,7 @@ restart_drop_extra_replicas:
if (ret)
goto err;
- bch2_btree_iter_set_pos(trans, &iter, next_pos);
+ bch2_btree_iter_set_pos(&iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
if (trace_io_move_finish_enabled())
@@ -483,11 +482,10 @@ nowork:
count_event(c, io_move_fail);
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
goto next;
}
out:
- bch2_trans_iter_exit(trans, &iter);
BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
return ret;
}
@@ -553,10 +551,10 @@ int bch2_update_unwritten_extent(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
- k = bch2_btree_iter_peek_slot(trans, &iter);
+ k = bch2_btree_iter_peek_slot(&iter);
bkey_err(k);
}));
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
break;
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index dd60c47528da..2abeb1a7c9d3 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -214,11 +214,13 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
struct qstr d_name = bch2_dirent_get_name(d);
- prt_printf(out, "%.*s", d_name.len, d_name.name);
+ prt_bytes(out, d_name.name, d_name.len);
if (d.v->d_casefold) {
+ prt_str(out, " (casefold ");
struct qstr d_name = bch2_dirent_get_lookup_name(d);
- prt_printf(out, " (casefold %.*s)", d_name.len, d_name.name);
+ prt_bytes(out, d_name.name, d_name.len);
+ prt_char(out, ')');
}
prt_str(out, " ->");
@@ -404,8 +406,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct qstr src_name_lookup, dst_name_lookup;
- struct btree_iter src_iter = {};
- struct btree_iter dst_iter = {};
+ struct btree_iter src_iter = { NULL };
+ struct btree_iter dst_iter = { NULL };
struct bkey_s_c old_src, old_dst = bkey_s_c_null;
struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
struct bpos dst_pos =
@@ -565,16 +567,16 @@ out_set_src:
}
if (delete_src) {
- bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot);
- ret = bch2_btree_iter_traverse(trans, &src_iter) ?:
+ bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(&src_iter) ?:
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
if (delete_dst) {
- bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot);
- ret = bch2_btree_iter_traverse(trans, &dst_iter) ?:
+ bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(&dst_iter) ?:
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
@@ -584,8 +586,8 @@ out_set_src:
*src_offset = new_src->k.p.offset;
*dst_offset = new_dst->k.p.offset;
out:
- bch2_trans_iter_exit(trans, &src_iter);
- bch2_trans_iter_exit(trans, &dst_iter);
+ bch2_trans_iter_exit(&src_iter);
+ bch2_trans_iter_exit(&dst_iter);
return ret;
}
@@ -612,7 +614,7 @@ int bch2_dirent_lookup_trans(struct btree_trans *trans,
ret = -ENOENT;
err:
if (ret)
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
@@ -625,7 +627,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
int ret = lockrestart_do(trans,
bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -645,7 +647,7 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32
ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty);
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -735,31 +737,28 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
ret = bch_err_throw(trans->c, ENOENT_inode);
found:
bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
struct bch_inode_unpacked dir_inode;
struct bch_hash_info dir_hash_info;
- int ret;
- ret = lookup_first_inode(trans, pos.inode, &dir_inode);
+ int ret = lookup_first_inode(trans, pos.inode, &dir_inode);
if (ret)
goto err;
dir_hash_info = bch2_hash_info_init(c, &dir_inode);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, pos, BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(trans, &iter) ?:
+ ret = bch2_btree_iter_traverse(&iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &iter);
err:
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index 219e37738aee..f96530c70262 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -778,12 +778,13 @@ int bch2_accounting_read(struct bch_fs *c)
struct disk_accounting_pos next;
memset(&next, 0, sizeof(next));
next.type = acc_k.type + 1;
- bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
+ bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
continue;
}
accounting_read_key(trans, k);
}));
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -965,7 +966,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
struct disk_accounting_pos next;
memset(&next, 0, sizeof(next));
next.type = acc_k.type + 1;
- bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
+ bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
continue;
}
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 62dda821247e..e735b1e9b275 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -800,7 +800,7 @@ static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
}
bkey_reassemble(&stripe->key, k);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -967,7 +967,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1)
ret = bch2_btree_delete_at(trans, &iter, 0);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1063,7 +1063,7 @@ static int ec_stripe_key_update(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1087,7 +1087,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
if (bp.v->level) {
struct btree_iter node_iter;
struct btree *b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed);
- bch2_trans_iter_exit(trans, &node_iter);
+ bch2_trans_iter_exit(&node_iter);
if (!b)
return 0;
@@ -1149,7 +1149,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, n, 0);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1809,9 +1809,9 @@ static int __get_existing_stripe(struct btree_trans *trans,
ret = 1;
}
out:
- bch2_set_btree_iter_dontneed(trans, &iter);
+ bch2_set_btree_iter_dontneed(&iter);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1883,7 +1883,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
if (ret)
break;
}
- bch2_trans_iter_exit(trans, &lru_iter);
+ bch2_trans_iter_exit(&lru_iter);
if (!ret)
ret = bch_err_throw(c, stripe_alloc_blocked);
if (ret == 1)
@@ -1922,7 +1922,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
- bch2_btree_iter_set_pos(trans, &iter, start_pos);
+ bch2_btree_iter_set_pos(&iter, start_pos);
continue;
}
@@ -1948,7 +1948,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
s->new_stripe.key.k.p = iter.pos;
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
err:
bch2_disk_reservation_put(c, &s->res);
@@ -2060,6 +2060,9 @@ allocated:
BUG_ON(trans->restarted);
return h;
err:
+ if (waiting &&
+ !bch2_err_matches(ret, BCH_ERR_operation_blocked))
+ closure_wake_up(&c->freelist_wait);
bch2_ec_stripe_head_put(c, h);
return ERR_PTR(ret);
}
@@ -2152,7 +2155,7 @@ static int bch2_invalidate_stripe_to_dev_from_alloc(struct btree_trans *trans, s
return ret;
ret = bch2_invalidate_stripe_to_dev(trans, &iter, s.s_c, k_a.k->p.inode, flags);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 2de0dc91a69e..cec8b0f47d3d 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -90,6 +90,8 @@
x(ENOMEM, ENOMEM_disk_accounting) \
x(ENOMEM, ENOMEM_stripe_head_alloc) \
x(ENOMEM, ENOMEM_journal_read_bucket) \
+ x(ENOMEM, ENOMEM_acl) \
+ x(ENOMEM, ENOMEM_move_extent) \
x(ENOSPC, ENOSPC_disk_reservation) \
x(ENOSPC, ENOSPC_bucket_alloc) \
x(ENOSPC, ENOSPC_disk_label_add) \
@@ -216,9 +218,13 @@
x(EINVAL, varint_decode_error) \
x(EINVAL, erasure_coding_found_btree_node) \
x(EINVAL, option_negative) \
+ x(EINVAL, topology_repair) \
+ x(BCH_ERR_topology_repair, topology_repair_drop_this_node) \
+ x(BCH_ERR_topology_repair, topology_repair_drop_prev_node) \
+ x(BCH_ERR_topology_repair, topology_repair_did_fill_from_scan) \
x(EOPNOTSUPP, may_not_use_incompat_feature) \
x(EOPNOTSUPP, no_casefolding_without_utf8) \
- x(EOPNOTSUPP, casefolding_disabled) \
+ x(EOPNOTSUPP, casefolding_disabled) \
x(EOPNOTSUPP, casefold_opt_is_dir_only) \
x(EOPNOTSUPP, unsupported_fsx_flag) \
x(EOPNOTSUPP, unsupported_fa_flag) \
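
The new rows follow the existing x(parent, name) convention of this table: each entry becomes a private errcode, and the first column is the class it resolves to, so bch2_err_matches() can treat the three topology_repair_* codes as instances of BCH_ERR_topology_repair, which should in turn resolve to EINVAL for callers outside the filesystem. A rough sketch of how such an x-macro table is typically consumed (illustrative, not the verbatim header):

/* Each x(class, name) row becomes an enumerator; the class column is used
 * elsewhere to build the parent table that bch2_err_matches() walks. */
enum example_errcode {
	EXAMPLE_ERR_START = 2048,		/* above the normal errno range */
#define x(class, err)	EXAMPLE_ERR_##err,
	x(EINVAL,			topology_repair)
	x(EXAMPLE_ERR_topology_repair,	topology_repair_drop_this_node)
#undef x
	EXAMPLE_ERR_MAX
};
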
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index e76e58a568bf..0c1f6f2ec02c 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -92,7 +92,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
break;
}
@@ -108,14 +108,14 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
unsigned nr_iters = 0;
struct btree_iter copy;
- bch2_trans_copy_iter(trans, &copy, iter);
+ bch2_trans_copy_iter(&copy, iter);
- int ret = bch2_btree_iter_traverse(trans, &copy);
+ int ret = bch2_btree_iter_traverse(&copy);
if (ret)
goto err;
struct bkey_s_c k;
- for_each_btree_key_max_continue_norestart(trans, copy, *end, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(copy, *end, 0, k, ret) {
unsigned offset = 0;
if (bkey_gt(iter->pos, bkey_start_pos(k.k)))
@@ -126,7 +126,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
break;
}
err:
- bch2_trans_iter_exit(trans, &copy);
+ bch2_trans_iter_exit(&copy);
return ret < 0 ? ret : 0;
}
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index b36ecfc0ab9d..b879a586b7f6 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -282,9 +282,9 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
if (have_pick)
return 1;
- if (!have_dirty_ptrs)
+ if (!have_dirty_ptrs && !bkey_is_btree_ptr(k.k))
return 0;
- if (have_missing_devs)
+ if (have_missing_devs || !have_dirty_ptrs)
return bch_err_throw(c, no_device_to_read_from);
if (have_csum_errors)
return bch_err_throw(c, data_read_csum_err);
@@ -1006,6 +1006,20 @@ const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned
return NULL;
}
+bool bch2_bkey_devs_rw(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+ guard(rcu)();
+ bkey_for_each_ptr(ptrs, ptr) {
+ CLASS(bch2_dev_tryget, ca)(c, ptr->dev);
+ if (!ca || ca->mi.state != BCH_MEMBER_STATE_rw)
+ return false;
+ }
+
+ return true;
+}
+
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
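
bch2_bkey_devs_rw() answers whether every pointer in a key points at a device that is currently read-write, taking the RCU read lock and per-device refs internally. A hypothetical caller (not part of this patch) might use it to skip work that would stall on a read-only or missing device:

/* Hypothetical helper: leave extents alone unless all their devices are rw. */
static int maybe_rewrite_extent(struct bch_fs *c, struct bkey_s_c k)
{
	if (!bch2_bkey_devs_rw(c, k))
		return 0;	/* some pointer is on a non-rw device; skip */

	/* ... queue the extent for rewrite ... */
	return 0;
}
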
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index f212f91c278d..35ee03cd5065 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -614,6 +614,8 @@ static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsig
return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}
+bool bch2_bkey_devs_rw(struct bch_fs *, struct bkey_s_c);
+
bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
bool bch2_bkey_in_target(struct bch_fs *, struct bkey_s_c, unsigned);
diff --git a/fs/bcachefs/fast_list.h b/fs/bcachefs/fast_list.h
index 73c9bf591fd6..f67df3f72ee2 100644
--- a/fs/bcachefs/fast_list.h
+++ b/fs/bcachefs/fast_list.h
@@ -9,7 +9,7 @@ struct fast_list_pcpu;
struct fast_list {
GENRADIX(void *) items;
- struct ida slots_allocated;;
+ struct ida slots_allocated;
struct fast_list_pcpu __percpu
*buffer;
};
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index f2389054693a..0005569ecace 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -157,7 +157,6 @@ static void bchfs_read(struct btree_trans *trans,
struct readpages_iter *readpages_iter)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
struct bkey_buf sk;
int flags = BCH_READ_retry_if_stale|
BCH_READ_may_promote;
@@ -167,7 +166,7 @@ static void bchfs_read(struct btree_trans *trans,
bch2_bkey_buf_init(&sk);
bch2_trans_begin(trans);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents,
POS(inum.inum, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_slots);
while (1) {
@@ -183,12 +182,12 @@ static void bchfs_read(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
- bch2_btree_iter_set_pos(trans, &iter,
+ bch2_btree_iter_set_pos(&iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(trans, &iter);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -251,7 +250,6 @@ err:
!bch2_err_matches(ret, BCH_ERR_transaction_restart))
break;
}
- bch2_trans_iter_exit(trans, &iter);
if (ret) {
CLASS(printbuf, buf)();
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 73d44875faf2..8d5b2468f4cd 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -127,7 +127,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
* the dirtying of requests that are internal from the kernel (i.e. from
* loopback), because we'll deadlock on page_lock.
*/
- dio->should_dirty = iter_is_iovec(iter);
+ dio->should_dirty = user_backed_iter(iter);
blk_start_plug(&plug);
@@ -281,7 +281,7 @@ retry:
}
offset = iter.pos.offset;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
err:
if (bch2_err_matches(err, BCH_ERR_transaction_restart))
goto retry;
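
The iter_is_iovec() → user_backed_iter() switch widens the dirty-page check: assuming the usual iov_iter semantics, user_backed_iter() is true for both ITER_IOVEC and the newer single-segment ITER_UBUF iterators, so user memory reached through UBUF-style reads is now dirtied too, while kernel-internal iterators (the loopback case the comment above worries about) still return false. A tiny sketch of the predicate (the wrapper name is hypothetical):

/* Hypothetical wrapper; relies on user_backed_iter() covering IOVEC + UBUF. */
static inline bool dio_read_should_dirty(struct iov_iter *iter)
{
	return user_backed_iter(iter);	/* false for kernel-internal iters */
}
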
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
index 2a6705186c44..469492f6264a 100644
--- a/fs/bcachefs/fs-io-pagecache.c
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -635,6 +635,8 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
goto out;
}
+ inode->ei_last_dirtied = (unsigned long) current;
+
bch2_set_folio_dirty(c, inode, folio, &res, offset, len);
bch2_folio_reservation_put(c, inode, &res);
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 93ad33f0953a..de0d965f3fde 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -206,7 +206,7 @@ static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_in
ret = bch2_inode_write(trans, &iter, &u);
}
fsck_err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -626,15 +626,14 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
u64 start_sector, u64 end_sector)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
struct bpos end_pos = POS(inode->v.i_ino, end_sector);
struct bch_io_opts opts;
int ret = 0;
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ CLASS(btree_trans, trans)(c);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents,
POS(inode->v.i_ino, start_sector),
BTREE_ITER_slots|BTREE_ITER_intent);
@@ -657,9 +656,9 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
- k = bch2_btree_iter_peek_slot(trans, &iter);
+ k = bch2_btree_iter_peek_slot(&iter);
if ((ret = bkey_err(k)))
goto bkey_err;
@@ -670,13 +669,13 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
/* already reserved */
if (bkey_extent_is_reservation(k) &&
bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
continue;
}
@@ -697,7 +696,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
}
- bch2_btree_iter_set_pos(trans, &iter, POS(iter.pos.inode, hole_start));
+ bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
if (ret)
goto bkey_err;
@@ -747,7 +746,6 @@ bkey_err:
bch2_quota_reservation_put(c, inode, &quota_res);
}
- bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 2789b30add10..3b289f696612 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -141,7 +141,7 @@ retry:
if (!ret)
bch2_inode_update_after_write(trans, inode, &inode_u, fields);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
@@ -692,7 +692,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
if (ret)
goto err;
out:
- bch2_trans_iter_exit(trans, &dirent_iter);
+ bch2_trans_iter_exit(&dirent_iter);
return inode;
err:
inode = ERR_PTR(ret);
@@ -1131,7 +1131,7 @@ retry:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
btree_err:
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
@@ -1295,8 +1295,14 @@ static int bch2_fill_extent(struct bch_fs *c,
flags|
FIEMAP_EXTENT_DELALLOC|
FIEMAP_EXTENT_UNWRITTEN);
+ } else if (k.k->type == KEY_TYPE_error) {
+ return 0;
} else {
- BUG();
+ WARN_ONCE(1, "unhandled key type %s",
+ k.k->type < KEY_TYPE_MAX
+ ? bch2_bkey_types[k.k->type]
+ : "(unknown)");
+ return 0;
}
}
@@ -1391,21 +1397,20 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans,
if (ret)
return ret;
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(inode->ei_inum.inum, start, snapshot), 0);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents,
+ SPOS(inode->ei_inum.inum, start, snapshot), 0);
struct bkey_s_c k =
- bch2_btree_iter_peek_max(trans, &iter, POS(inode->ei_inum.inum, end));
+ bch2_btree_iter_peek_max(&iter, POS(inode->ei_inum.inum, end));
ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
u64 pagecache_end = k.k ? max(start, bkey_start_offset(k.k)) : end;
ret = bch2_next_fiemap_pagecache_extent(trans, inode, start, pagecache_end, cur);
if (ret)
- goto err;
+ return ret;
struct bpos pagecache_start = bkey_start_pos(&cur->kbuf.k->k);
@@ -1441,7 +1446,7 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans,
ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent,
&cur->kbuf);
if (ret)
- goto err;
+ return ret;
struct bkey_i *k = cur->kbuf.k;
sectors = min_t(unsigned, sectors, k->k.size - offset_into_extent);
@@ -1453,9 +1458,8 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans,
k->k.p = iter.pos;
k->k.p.offset += k->k.size;
}
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+
+ return 0;
}
static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
@@ -1942,8 +1946,6 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child
struct bch_inode_info *inode = to_bch_ei(child->d_inode);
struct bch_inode_info *dir = to_bch_ei(parent->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_iter iter1;
- struct btree_iter iter2;
struct bkey_s_c k;
struct bkey_s_c_dirent d;
struct bch_inode_unpacked inode_u;
@@ -1957,10 +1959,10 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child
return -EINVAL;
CLASS(btree_trans, trans)(c);
- bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents,
- POS(dir->ei_inode.bi_inum, 0), 0);
- bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents,
- POS(dir->ei_inode.bi_inum, 0), 0);
+ CLASS(btree_iter, iter1)(trans, BTREE_ID_dirents,
+ POS(dir->ei_inode.bi_inum, 0), 0);
+ CLASS(btree_iter, iter2)(trans, BTREE_ID_dirents,
+ POS(dir->ei_inode.bi_inum, 0), 0);
retry:
bch2_trans_begin(trans);
@@ -1968,17 +1970,17 @@ retry:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(trans, &iter1, snapshot);
- bch2_btree_iter_set_snapshot(trans, &iter2, snapshot);
+ bch2_btree_iter_set_snapshot(&iter1, snapshot);
+ bch2_btree_iter_set_snapshot(&iter2, snapshot);
ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
if (ret)
goto err;
if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
- bch2_btree_iter_set_pos(trans, &iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
+ bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
- k = bch2_btree_iter_peek_slot(trans, &iter1);
+ k = bch2_btree_iter_peek_slot(&iter1);
ret = bkey_err(k);
if (ret)
goto err;
@@ -2002,7 +2004,7 @@ retry:
* File with multiple hardlinks and our backref is to the wrong
* directory - linear search:
*/
- for_each_btree_key_continue_norestart(trans, iter2, 0, k, ret) {
+ for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
if (k.k->p.inode > dir->ei_inode.bi_inum)
break;
@@ -2033,8 +2035,6 @@ err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
- bch2_trans_iter_exit(trans, &iter1);
- bch2_trans_iter_exit(trans, &iter2);
return ret;
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index df0aa2522b18..f971e6993f2b 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -15,6 +15,7 @@
#include "io_misc.h"
#include "keylist.h"
#include "namei.h"
+#include "progress.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "super.h"
@@ -125,7 +126,7 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans,
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
*target = le64_to_cpu(d.v->d_inum);
*type = d.v->d_type;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return 0;
}
@@ -155,7 +156,7 @@ static int find_snapshot_tree_subvol(struct btree_trans *trans,
}
ret = bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol);
found:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -166,7 +167,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
{
struct bch_fs *c = trans->c;
struct qstr lostfound_str = QSTR("lost+found");
- struct btree_iter lostfound_iter = {};
+ struct btree_iter lostfound_iter = { NULL };
u64 inum = 0;
unsigned d_type = 0;
int ret;
@@ -201,7 +202,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
return ret;
subvol->v.inode = cpu_to_le64(reattaching_inum);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
}
subvol_inum root_inum = {
@@ -274,8 +275,8 @@ create_lostfound:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot);
- ret = bch2_btree_iter_traverse(trans, &lostfound_iter);
+ bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
+ ret = bch2_btree_iter_traverse(&lostfound_iter);
if (ret)
goto err;
@@ -291,7 +292,7 @@ create_lostfound:
BTREE_UPDATE_internal_snapshot_node);
err:
bch_err_msg(c, ret, "creating lost+found");
- bch2_trans_iter_exit(trans, &lostfound_iter);
+ bch2_trans_iter_exit(&lostfound_iter);
return ret;
}
@@ -357,7 +358,7 @@ static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32
ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node);
}
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -382,7 +383,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *
return ret;
subvol->v.fs_path_parent = BCACHEFS_ROOT_SUBVOL;
- bch2_trans_iter_exit(trans, &subvol_iter);
+ bch2_trans_iter_exit(&subvol_iter);
u64 root_inum;
ret = subvol_lookup(trans, inode->bi_parent_subvol,
@@ -497,7 +498,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
}
return ret;
@@ -531,7 +532,7 @@ static int remove_backpointer(struct btree_trans *trans,
int ret = bkey_err(d) ?:
dirent_points_to_inode(c, d, inode) ?:
bch2_fsck_remove_dirent(trans, d.k->p);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -582,9 +583,9 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
new_inode.bi_subvol = subvolid;
int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
- bch2_btree_iter_traverse(trans, &inode_iter) ?:
+ bch2_btree_iter_traverse(&inode_iter) ?:
bch2_inode_write(trans, &inode_iter, &new_inode);
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
if (ret)
return ret;
@@ -619,7 +620,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
s->v.subvol = cpu_to_le32(subvolid);
SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
@@ -632,7 +633,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
if (!st->v.master_subvol)
st->v.master_subvol = cpu_to_le32(subvolid);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return 0;
}
@@ -644,11 +645,8 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32
switch (btree) {
case BTREE_ID_extents: {
- struct btree_iter iter = {};
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
- struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0));
- bch2_trans_iter_exit(trans, &iter);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0));
int ret = bkey_err(k);
if (ret)
return ret;
@@ -866,7 +864,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
if (ret)
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -906,7 +904,7 @@ static int get_visible_inodes(struct btree_trans *trans,
if (ret)
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1048,7 +1046,7 @@ static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
struct btree_iter iter;
struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1106,7 +1104,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
out:
ret = 0;
fsck_err:
- bch2_trans_iter_exit(trans, &dirent_iter);
+ bch2_trans_iter_exit(&dirent_iter);
bch_err_fn(c, ret);
return ret;
}
@@ -1331,11 +1329,16 @@ int bch2_check_inodes(struct bch_fs *c)
CLASS(btree_trans, trans)(c);
CLASS(snapshots_seen, s)();
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_inodes));
+
return for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS_MIN,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_inode(trans, &iter, k, &snapshot_root, &s));
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
+ check_inode(trans, &iter, k, &snapshot_root, &s);
+ }));
}
static int find_oldest_inode_needs_reattach(struct btree_trans *trans,
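
All of the fsck passes touched below get the same treatment as bch2_check_inodes() above: a progress_indicator_state initialised with the btree being walked, plus a progress_update_iter() call at the top of the per-key body. Because for_each_btree_key_commit() takes that body as a single expression, the extra statement is added by wrapping the body in a GNU C statement expression, whose value is its last expression, so the macro still sees the original check function's return value. A self-contained userspace illustration of the idiom:

#include <stdio.h>

static int check_one(int key)
{
	return key % 7 ? 0 : -1;	/* stand-in for check_inode() etc. */
}

int main(void)
{
	int ret = 0;

	for (int key = 1; key <= 20 && !ret; key++)
		ret = ({
			printf("progress: key %d\n", key);	/* side effect only */
			check_one(key);				/* value of the ({ ... }) */
		});

	return ret ? 1 : 0;
}
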
@@ -1374,7 +1377,7 @@ static int find_oldest_inode_needs_reattach(struct btree_trans *trans,
*inode = parent_inode;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1422,12 +1425,17 @@ fsck_err:
*/
int bch2_check_unreachable_inodes(struct bch_fs *c)
{
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_inodes));
+
CLASS(btree_trans, trans)(c);
return for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS_MIN,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_unreachable_inode(trans, &iter, k));
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
+ check_unreachable_inode(trans, &iter, k);
+ }));
}
static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode)
@@ -1546,7 +1554,7 @@ static int check_key_has_inode(struct btree_trans *trans,
out:
err:
fsck_err:
- bch2_trans_iter_exit(trans, &iter2);
+ bch2_trans_iter_exit(&iter2);
bch_err_fn(c, ret);
return ret;
delete:
@@ -1583,7 +1591,7 @@ static int maybe_reconstruct_inum_btree(struct btree_trans *trans,
ret = 1;
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret <= 0)
return ret;
@@ -1729,16 +1737,16 @@ static int overlapping_extents_found(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
- struct btree_iter iter1, iter2 = {};
+ struct btree_iter iter2 = {};
struct bkey_s_c k1, k2;
int ret;
BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
- bch2_trans_iter_init(trans, &iter1, btree, pos1,
- BTREE_ITER_all_snapshots|
- BTREE_ITER_not_extents);
- k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX));
+ CLASS(btree_iter, iter1)(trans, btree, pos1,
+ BTREE_ITER_all_snapshots|
+ BTREE_ITER_not_extents);
+ k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX));
ret = bkey_err(k1);
if (ret)
goto err;
@@ -1758,12 +1766,12 @@ static int overlapping_extents_found(struct btree_trans *trans,
goto err;
}
- bch2_trans_copy_iter(trans, &iter2, &iter1);
+ bch2_trans_copy_iter(&iter2, &iter1);
while (1) {
- bch2_btree_iter_advance(trans, &iter2);
+ bch2_btree_iter_advance(&iter2);
- k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX));
+ k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX));
ret = bkey_err(k2);
if (ret)
goto err;
@@ -1832,8 +1840,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
}
fsck_err:
err:
- bch2_trans_iter_exit(trans, &iter2);
- bch2_trans_iter_exit(trans, &iter1);
+ bch2_trans_iter_exit(&iter2);
return ret;
}
@@ -1961,11 +1968,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
"extent type past end of inode %llu:%u, i_size %llu\n%s",
i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = bch2_fpunch_snapshot(trans,
- SPOS(i->inode.bi_inum,
- last_block,
- i->inode.bi_snapshot),
- POS(i->inode.bi_inum, U64_MAX));
+ ret = snapshots_seen_add_inorder(c, s, i->inode.bi_snapshot) ?:
+ bch2_fpunch_snapshot(trans,
+ SPOS(i->inode.bi_inum,
+ last_block,
+ i->inode.bi_snapshot),
+ POS(i->inode.bi_inum, U64_MAX));
if (ret)
goto err;
@@ -1975,6 +1983,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
}
}
+ ret = check_extent_overbig(trans, iter, k);
+ if (ret)
+ goto err;
+
ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
if (ret)
goto err;
@@ -2017,12 +2029,15 @@ int bch2_check_extents(struct bch_fs *c)
CLASS(inode_walker, w)();
CLASS(extent_ends, extent_ends)();
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_extents));
+
int ret = for_each_btree_key(trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
+ progress_update_iter(trans, &progress, &iter);
bch2_disk_reservation_put(c, &res);
- check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
- check_extent_overbig(trans, &iter, k);
+ check_extent(trans, &iter, k, &w, &s, &extent_ends, &res);
})) ?:
check_i_sectors_notnested(trans, &w);
@@ -2035,11 +2050,15 @@ int bch2_check_indirect_extents(struct bch_fs *c)
CLASS(btree_trans, trans)(c);
struct disk_reservation res = { 0 };
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_reflink));
+
int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
POS_MIN,
BTREE_ITER_prefetch, k,
&res, NULL,
BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
bch2_disk_reservation_put(c, &res);
check_extent_overbig(trans, &iter, k);
}));
@@ -2113,7 +2132,7 @@ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *su
struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
*subvolid = k.k->p.offset;
goto found;
}
@@ -2121,7 +2140,7 @@ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *su
if (!ret)
ret = -ENOENT;
found:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -2260,7 +2279,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
out:
err:
fsck_err:
- bch2_trans_iter_exit(trans, &subvol_iter);
+ bch2_trans_iter_exit(&subvol_iter);
return ret;
}
@@ -2401,17 +2420,15 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
- struct btree_iter delete_iter;
- bch2_trans_iter_init(trans, &delete_iter,
+ CLASS(btree_iter, delete_iter)(trans,
BTREE_ID_dirents,
SPOS(k.k->p.inode, k.k->p.offset, *i),
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(trans, &delete_iter) ?:
+ ret = bch2_btree_iter_traverse(&delete_iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
hash_info,
&delete_iter,
BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &delete_iter);
if (ret)
return ret;
@@ -2448,15 +2465,20 @@ int bch2_check_dirents(struct bch_fs *c)
CLASS(snapshots_seen, s)();
CLASS(inode_walker, dir)();
CLASS(inode_walker, target)();
+ struct progress_indicator_state progress;
bool need_second_pass = false, did_second_pass = false;
int ret;
again:
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_dirents));
+
ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s,
- &need_second_pass)) ?:
+ &need_second_pass);
+ })) ?:
check_subdir_count_notnested(trans, &dir);
if (!ret && need_second_pass && !did_second_pass) {
@@ -2516,13 +2538,18 @@ int bch2_check_xattrs(struct bch_fs *c)
CLASS(btree_trans, trans)(c);
CLASS(inode_walker, inode)();
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_xattrs));
+
int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- check_xattr(trans, &iter, k, &hash_info, &inode));
+ BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
+ check_xattr(trans, &iter, k, &hash_info, &inode);
+ }));
return ret;
}
@@ -2595,7 +2622,6 @@ int bch2_check_root(struct bch_fs *c)
static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
- struct btree_iter parent_iter = {};
CLASS(darray_u32, subvol_path)();
CLASS(printbuf, buf)();
int ret = 0;
@@ -2603,6 +2629,8 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
if (k.k->type != KEY_TYPE_subvolume)
return 0;
+ CLASS(btree_iter, parent_iter)(trans, BTREE_ID_subvolumes, POS_MIN, 0);
+
subvol_inum start = {
.subvol = k.k->p.offset,
.inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode),
@@ -2611,7 +2639,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) {
ret = darray_push(&subvol_path, k.k->p.offset);
if (ret)
- goto err;
+ return ret;
struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
@@ -2630,20 +2658,18 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
ret = bch2_inum_to_path(trans, start, &buf);
if (ret)
- goto err;
+ return ret;
if (fsck_err(trans, subvol_loop, "%s", buf.buf))
ret = reattach_subvol(trans, s);
break;
}
- bch2_trans_iter_exit(trans, &parent_iter);
- bch2_trans_iter_init(trans, &parent_iter,
- BTREE_ID_subvolumes, POS(0, parent), 0);
- k = bch2_btree_iter_peek_slot(trans, &parent_iter);
+ bch2_btree_iter_set_pos(&parent_iter, POS(0, parent));
+ k = bch2_btree_iter_peek_slot(&parent_iter);
ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
if (fsck_err_on(k.k->type != KEY_TYPE_subvolume,
trans, subvol_unreachable,
@@ -2651,23 +2677,26 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, s.s_c),
buf.buf))) {
- ret = reattach_subvol(trans, s);
- break;
+ return reattach_subvol(trans, s);
}
}
fsck_err:
-err:
- bch2_trans_iter_exit(trans, &parent_iter);
return ret;
}
int bch2_check_subvolume_structure(struct bch_fs *c)
{
CLASS(btree_trans, trans)(c);
+
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_subvolumes));
+
return for_each_btree_key_commit(trans, iter,
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- check_subvol_path(trans, &iter, k));
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
+ check_subvol_path(trans, &iter, k);
+ }));
}
static int bch2_bi_depth_renumber_one(struct btree_trans *trans,
@@ -2691,7 +2720,7 @@ static int bch2_bi_depth_renumber_one(struct btree_trans *trans,
bch2_trans_commit(trans, NULL, NULL, 0);
}
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -2749,7 +2778,7 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k)
goto out;
if (!ret && (ret = dirent_points_to_inode(c, d, &inode)))
- bch2_trans_iter_exit(trans, &dirent_iter);
+ bch2_trans_iter_exit(&dirent_iter);
if (bch2_err_matches(ret, ENOENT)) {
printbuf_reset(&buf);
@@ -2759,13 +2788,13 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k)
goto out;
}
- bch2_trans_iter_exit(trans, &dirent_iter);
+ bch2_trans_iter_exit(&dirent_iter);
ret = darray_push(&path, inode.bi_inum);
if (ret)
return ret;
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
SPOS(0, inode.bi_dir, snapshot), 0);
@@ -2824,7 +2853,7 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k)
ret = bch2_bi_depth_renumber(trans, &path, snapshot, min_bi_depth);
out:
fsck_err:
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
bch_err_fn(c, ret);
return ret;
}
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 4a9725f30c4f..838da956b4e1 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -364,7 +364,7 @@ int __bch2_inode_peek(struct btree_trans *trans,
err:
if (warn)
bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum);
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
@@ -384,7 +384,7 @@ int bch2_inode_find_by_inum_snapshot(struct btree_trans *trans,
? bch2_inode_unpack(k, inode)
: -BCH_ERR_ENOENT_inode;
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -397,7 +397,7 @@ int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans,
ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0);
if (!ret)
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -410,7 +410,7 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *trans,
ret = bch2_inode_peek(trans, &iter, inode, inum, 0);
if (!ret)
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -441,7 +441,7 @@ int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum,
/* We're only called when we know we have an inode for @inum */
BUG_ON(!ret);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -703,7 +703,7 @@ bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter
if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot))
return k;
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
@@ -719,7 +719,7 @@ again:
bkey_is_inode(k.k))
return k;
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
pos = k.k->p;
goto again;
}
@@ -740,7 +740,7 @@ int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos)
ret = 1;
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -792,7 +792,7 @@ static int update_parent_inode_has_children(struct btree_trans *trans, struct bp
bkey_inode_flags_set(bkey_i_to_s(update), f ^ BCH_INODE_has_child_snapshot);
}
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -998,7 +998,7 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m
le32_add_cpu(&cursor->v.gen, 1);
}
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret ? ERR_PTR(ret) : cursor;
}
@@ -1026,7 +1026,7 @@ int bch2_inode_create(struct btree_trans *trans,
BTREE_ITER_intent);
struct bkey_s_c k;
again:
- while ((k = bch2_btree_iter_peek(trans, iter)).k &&
+ while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
bkey_lt(k.k->p, POS(0, max))) {
if (pos < iter->pos.offset)
@@ -1043,7 +1043,7 @@ again:
* we've found just one:
*/
pos = iter->pos.offset + 1;
- bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
+ bch2_btree_iter_set_pos(iter, POS(0, pos));
}
if (!ret && pos < max)
@@ -1053,21 +1053,21 @@ again:
ret = bch_err_throw(trans->c, ENOSPC_inode_create);
if (ret) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
/* Retry from start */
pos = start = min;
- bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
+ bch2_btree_iter_set_pos(iter, POS(0, pos));
le32_add_cpu(&cursor->v.gen, 1);
goto again;
found_slot:
- bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, snapshot));
- k = bch2_btree_iter_peek_slot(trans, iter);
+ bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
+ k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret;
}
@@ -1080,7 +1080,6 @@ found_slot:
static int bch2_inode_delete_keys(struct btree_trans *trans,
subvol_inum inum, enum btree_id id)
{
- struct btree_iter iter;
struct bkey_s_c k;
struct bkey_i delete;
struct bpos end = POS(inum.inum, U64_MAX);
@@ -1091,8 +1090,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
* We're never going to be deleting partial extents, no need to use an
* extent iterator:
*/
- bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
- BTREE_ITER_intent);
+ CLASS(btree_iter, iter)(trans, id, POS(inum.inum, 0), BTREE_ITER_intent);
while (1) {
bch2_trans_begin(trans);
@@ -1101,9 +1099,9 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
- k = bch2_btree_iter_peek_max(trans, &iter, end);
+ k = bch2_btree_iter_peek_max(&iter, end);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1127,7 +1125,6 @@ err:
break;
}
- bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -1184,7 +1181,7 @@ retry:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
@@ -1305,7 +1302,7 @@ int bch2_inode_set_casefold(struct btree_trans *trans, subvol_inum inum,
static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = {};
+ struct btree_iter iter = { NULL };
struct bkey_i_inode_generation delete;
struct bch_inode_unpacked inode_u;
struct bkey_s_c k;
@@ -1358,7 +1355,7 @@ retry:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
@@ -1383,7 +1380,7 @@ next_parent:
bool unlinked = bkey_is_unlinked_inode(k);
pos = k.k->p;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (!unlinked)
return 0;
@@ -1503,7 +1500,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos,
}
out:
fsck_err:
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
return ret;
delete:
ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false);
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 5d6681c070ba..3f9defd144a4 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -43,7 +43,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
bch2_bkey_buf_init(&new);
closure_init_stack(&cl);
- k = bch2_btree_iter_peek_slot(trans, iter);
+ k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -190,12 +190,12 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(trans, iter, snapshot);
+ bch2_btree_iter_set_snapshot(iter, snapshot);
/*
* peek_max() doesn't have ideal semantics for extents:
*/
- k = bch2_btree_iter_peek_max(trans, iter, end_pos);
+ k = bch2_btree_iter_peek_max(iter, end_pos);
if (!k.k)
break;
@@ -222,16 +222,11 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
s64 *i_sectors_delta)
{
CLASS(btree_trans, trans)(c);
-
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inum.inum, start),
- BTREE_ITER_intent);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inum.inum, start),
+ BTREE_ITER_intent);
int ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);
- bch2_trans_iter_exit(trans, &iter);
-
return bch2_err_matches(ret, BCH_ERR_transaction_restart) ? 0 : ret;
}
@@ -251,7 +246,7 @@ static int truncate_set_isize(struct btree_trans *trans,
u64 new_i_size,
bool warn)
{
- struct btree_iter iter = {};
+ struct btree_iter iter = { NULL };
struct bch_inode_unpacked inode_u;
int ret;
@@ -259,7 +254,7 @@ static int truncate_set_isize(struct btree_trans *trans,
(inode_u.bi_size = new_i_size, 0) ?:
bch2_inode_write(trans, &iter, &inode_u);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -268,7 +263,6 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
u64 *i_sectors_delta)
{
struct bch_fs *c = trans->c;
- struct btree_iter fpunch_iter;
struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
u64 new_i_size = le64_to_cpu(op->v.new_i_size);
@@ -280,11 +274,10 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
if (ret)
goto err;
- bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
+ CLASS(btree_iter, fpunch_iter)(trans, BTREE_ID_extents,
POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
BTREE_ITER_intent);
ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
- bch2_trans_iter_exit(trans, &fpunch_iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
ret = 0;
@@ -366,7 +359,7 @@ static int adjust_i_size(struct btree_trans *trans, subvol_inum inum,
ret = bch2_inode_write(trans, &iter, &inode_u);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -416,7 +409,7 @@ case LOGGED_OP_FINSERT_start:
if (ret)
goto err;
} else {
- bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, src_offset));
+ bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -442,12 +435,12 @@ case LOGGED_OP_FINSERT_shift_extents:
if (ret)
goto btree_err;
- bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(trans, &iter, SPOS(inum.inum, pos, snapshot));
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
k = insert
- ? bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum.inum, 0))
- : bch2_btree_iter_peek_max(trans, &iter, POS(inum.inum, U64_MAX));
+ ? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0))
+ : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX));
if ((ret = bkey_err(k)))
goto btree_err;
@@ -515,7 +508,7 @@ case LOGGED_OP_FINSERT_finish:
break;
}
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (warn_errors)
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index b8ccd8c930e1..587124046ca8 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -534,7 +534,7 @@ static void get_rbio_extent(struct btree_trans *trans,
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
}
static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
@@ -576,7 +576,7 @@ static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_re
if (u && !ret)
bch2_bkey_buf_copy(&u->k, c, new);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -611,7 +611,7 @@ retry:
bkey_i_to_s_c(u->k.k),
0, failed, flags, -1);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_data_read_retry))
@@ -794,7 +794,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, new,
BTREE_UPDATE_internal_snapshot_node);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1021,13 +1021,10 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct bch_extent_ptr ptr)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
CLASS(printbuf, buf)();
- int ret;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- PTR_BUCKET_POS(ca, &ptr),
- BTREE_ITER_cached);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_alloc,
+ PTR_BUCKET_POS(ca, &ptr),
+ BTREE_ITER_cached);
int gen = bucket_gen_get(ca, iter.pos.offset);
if (gen >= 0) {
@@ -1039,7 +1036,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
prt_printf(&buf, "memory gen: %u", gen);
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter)));
+ int ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (!ret) {
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
@@ -1057,8 +1054,6 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
}
bch2_fs_inconsistent(c, "%s", buf.buf);
-
- bch2_trans_iter_exit(trans, &iter);
}
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
@@ -1406,7 +1401,6 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
struct bkey_buf sk;
struct bkey_s_c k;
enum btree_id data_btree;
@@ -1415,9 +1409,9 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
EBUG_ON(rbio->data_update);
bch2_bkey_buf_init(&sk);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- POS(inum.inum, bvec_iter.bi_sector),
- BTREE_ITER_slots);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents,
+ POS(inum.inum, bvec_iter.bi_sector),
+ BTREE_ITER_slots);
while (1) {
data_btree = BTREE_ID_extents;
@@ -1429,12 +1423,12 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
- bch2_btree_iter_set_pos(trans, &iter,
+ bch2_btree_iter_set_pos(&iter,
POS(inum.inum, bvec_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(trans, &iter);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1509,7 +1503,6 @@ err:
bch2_rbio_done(rbio);
}
- bch2_trans_iter_exit(trans, &iter);
bch2_bkey_buf_exit(&sk, c);
return ret;
}
diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h
index 9d63d5914b20..1e1c0476bd03 100644
--- a/fs/bcachefs/io_read.h
+++ b/fs/bcachefs/io_read.h
@@ -108,12 +108,12 @@ static inline int bch2_read_indirect_extent(struct btree_trans *trans,
return ret;
if (bkey_deleted(k.k)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return bch_err_throw(c, missing_indirect_extent);
}
bch2_bkey_buf_reassemble(extent, c, k);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return 0;
}
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d7620138e038..f71ff3193548 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -89,7 +89,12 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
new = ewma_add(old, io_latency, 5);
} while (!atomic64_try_cmpxchg(latency, &old, new));
- bch2_congested_acct(ca, io_latency, now, rw);
+ /*
+ * Only track read latency for congestion accounting: writes are subject
+ * to heavy queuing delays from page cache writeback:
+ */
+ if (rw == READ)
+ bch2_congested_acct(ca, io_latency, now, rw);
__bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}
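
For context on the update loop above: assuming ewma_add() is the usual power-of-two exponentially weighted moving average (an assumption about the helper, not something this patch states), a weight of 5 means each sample pulls the tracked latency roughly 1/32 of the way toward itself, so one slow I/O barely moves the congestion signal. A sketch of that assumed behaviour:

/* Sketch only -- assumed semantics of ewma_add(old, sample, weight): */
static inline u64 ewma_add_sketch(u64 old, u64 sample, unsigned weight)
{
	s64 delta = (s64) sample - (s64) old;

	return old + delta / (s64) (1 << weight);	/* weight == 5  =>  1/32 per sample */
}
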
@@ -166,9 +171,9 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
*i_sectors_delta = 0;
*disk_sectors_delta = 0;
- bch2_trans_copy_iter(trans, &iter, extent_iter);
+ bch2_trans_copy_iter(&iter, extent_iter);
- for_each_btree_key_max_continue_norestart(trans, iter,
+ for_each_btree_key_max_continue_norestart(iter,
new->k.p, BTREE_ITER_slots, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
@@ -193,7 +198,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -290,7 +295,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
BTREE_UPDATE_internal_snapshot_node|
inode_update_flags);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -314,7 +319,7 @@ int bch2_extent_update(struct btree_trans *trans,
* path already traversed at iter->pos because
* bch2_trans_extent_update() will use it to attempt extent merging
*/
- ret = __bch2_btree_iter_traverse(trans, iter);
+ ret = __bch2_btree_iter_traverse(iter);
if (ret)
return ret;
@@ -359,7 +364,7 @@ int bch2_extent_update(struct btree_trans *trans,
if (i_sectors_delta_total)
*i_sectors_delta_total += i_sectors_delta;
- bch2_btree_iter_set_pos(trans, iter, next_pos);
+ bch2_btree_iter_set_pos(iter, next_pos);
return 0;
}
@@ -394,15 +399,14 @@ static int bch2_write_index_default(struct bch_write_op *op)
if (ret)
break;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- bkey_start_pos(&sk.k->k),
- BTREE_ITER_slots|BTREE_ITER_intent);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents,
+ bkey_start_pos(&sk.k->k),
+ BTREE_ITER_slots|BTREE_ITER_intent);
ret = bch2_extent_update(trans, inum, &iter, sk.k,
&op->res,
op->new_i_size, &op->i_sectors_delta,
op->flags & BCH_WRITE_check_enospc);
- bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -1340,7 +1344,7 @@ retry:
if (ret)
break;
- k = bch2_btree_iter_peek_slot(trans, &iter);
+ k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
break;
@@ -1425,10 +1429,10 @@ retry:
bch2_keylist_push(&op->insert_keys);
if (op->flags & BCH_WRITE_submitted)
break;
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
}
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 3ba1f9fd3402..07869436a964 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -182,6 +182,8 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
void bch2_journal_do_writes(struct journal *j)
{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
for (u64 seq = journal_last_unwritten_seq(j);
seq <= journal_cur_seq(j);
seq++) {
@@ -196,6 +198,7 @@ void bch2_journal_do_writes(struct journal *j)
if (!journal_state_seq_count(j, j->reservations, seq)) {
j->seq_write_started = seq;
w->write_started = true;
+ closure_get(&c->cl);
closure_call(&w->io, bch2_journal_write, j->wq, NULL);
}
@@ -1057,6 +1060,7 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou
if (open && !*blocked) {
__bch2_journal_block(j);
+ s.v = atomic64_read_acquire(&j->reservations.counter);
*blocked = true;
}
@@ -1292,7 +1296,7 @@ int bch2_dev_journal_bucket_delete(struct bch_dev *ca, u64 b)
return -EINVAL;
}
- u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);;
+ u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!new_buckets)
return bch_err_throw(c, ENOMEM_set_nr_journal_buckets);
@@ -1467,6 +1471,10 @@ int bch2_fs_journal_start(struct journal *j, u64 last_seq, u64 cur_seq)
last_seq = cur_seq;
u64 nr = cur_seq - last_seq;
+ if (nr * sizeof(struct journal_entry_pin_list) > 1U << 30) {
+ bch_err(c, "too many ntjournal fifo (%llu open entries)", nr);
+ return bch_err_throw(c, ENOMEM_journal_pin_fifo);
+ }
/*
* Extra fudge factor, in case we crashed when the journal pin fifo was
@@ -1479,7 +1487,7 @@ int bch2_fs_journal_start(struct journal *j, u64 last_seq, u64 cur_seq)
nr = max(nr, JOURNAL_PIN);
init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
if (!j->pin.data) {
- bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
+ bch_err(c, "error allocating journal fifo (%llu open entries)", nr);
return bch_err_throw(c, ENOMEM_journal_pin_fifo);
}
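
The new size check caps the pin fifo allocated just below at 1 GiB before init_fifo() rounds nr up to a power of two. As a rough worked bound (64 bytes is an illustrative guess at sizeof(struct journal_entry_pin_list), not a figure from this patch), that still allows on the order of sixteen million open journal entries, so tripping the check indicates a bogus last_seq/cur_seq pair rather than a legitimate workload:

/* 1 GiB cap / 64 B per entry (illustrative size) == 16777216 open entries */
_Static_assert((1ULL << 30) / 64 == 16777216, "worked example only");
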
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 2835250a14c4..093e4acad085 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -428,15 +428,22 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
bool first = true;
jset_entry_for_each_key(entry, k) {
- /* We may be called on entries that haven't been validated: */
- if (!k->k.u64s)
- break;
-
if (!first) {
prt_newline(out);
bch2_prt_jset_entry_type(out, entry->type);
prt_str(out, ": ");
}
+ /* We may be called on entries that haven't been validated: */
+ if (!k->k.u64s) {
+ prt_str(out, "(invalid, k->u64s 0)");
+ break;
+ }
+
+ if (bkey_next(k) > vstruct_last(entry)) {
+ prt_str(out, "(invalid, bkey overruns jset_entry)");
+ break;
+ }
+
bch2_btree_id_level_to_text(out, entry->btree_id, entry->level);
prt_char(out, ' ');
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
@@ -1820,6 +1827,8 @@ static CLOSURE_CALLBACK(journal_write_done)
if (do_discards)
bch2_do_discards(c);
+
+ closure_put(&c->cl);
}
static void journal_write_endio(struct bio *bio)
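
The closure_put() added to journal_write_done() above pairs with the closure_get() added in bch2_journal_do_writes() in journal.c: a reference on the filesystem's closure is taken when a journal write is kicked off and dropped only once that write has fully completed, so the fs cannot be torn down with a write still in flight. Condensed from the two hunks:

	/* submit side, bch2_journal_do_writes() (journal.c) */
	closure_get(&c->cl);			/* hold the fs across the async write */
	closure_call(&w->io, bch2_journal_write, j->wq, NULL);

	/* completion side, journal_write_done() (journal_io.c) */
	closure_put(&c->cl);			/* release once the write is done */
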
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
index 0367ea37e857..38cdacc6b067 100644
--- a/fs/bcachefs/logged_ops.c
+++ b/fs/bcachefs/logged_ops.c
@@ -81,7 +81,7 @@ static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k)
k->k.p = iter.pos;
ret = bch2_trans_update(trans, &iter, k, 0);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index ee14656c3fdd..39ae70e5c81b 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -9,6 +9,7 @@
#include "ec.h"
#include "error.h"
#include "lru.h"
+#include "progress.h"
#include "recovery.h"
/* KEY_TYPE_lru is obsolete: */
@@ -111,7 +112,7 @@ int bch2_lru_check_set(struct btree_trans *trans,
}
err:
fsck_err:
- bch2_trans_iter_exit(trans, &lru_iter);
+ bch2_trans_iter_exit(&lru_iter);
return ret;
}
@@ -196,7 +197,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
}
err:
fsck_err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -207,11 +208,16 @@ int bch2_check_lrus(struct bch_fs *c)
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_lru));
+
CLASS(btree_trans, trans)(c);
int ret = for_each_btree_key_commit(trans, iter,
BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_lru_key(trans, &iter, k, &last_flushed));
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ progress_update_iter(trans, &progress, &iter);
+ bch2_check_lru_key(trans, &iter, k, &last_flushed);
+ }));
bch2_bkey_buf_exit(&last_flushed, c);
return ret;
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index bd1e54e0efd5..a66d01d04e57 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -111,7 +111,7 @@ static int bch2_dev_btree_drop_key(struct btree_trans *trans,
ret = drop_btree_ptrs(trans, &iter, b, dev_idx, flags);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -163,7 +163,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(trans, &iter)) &&
+ (b = bch2_btree_iter_peek_node(&iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
bch2_progress_update_iter(trans, progress, &iter, "dropping metadata");
@@ -179,12 +179,12 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(trans, &iter);
+ bch2_btree_iter_next_node(&iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
goto err;
@@ -228,7 +228,7 @@ static int data_drop_bp(struct btree_trans *trans, unsigned dev_idx,
else
ret = bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 3f44bb54f91a..76cc13f62884 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -330,7 +330,7 @@ int bch2_move_extent(struct moving_context *ctxt,
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
- int ret = -ENOMEM;
+ int ret = 0;
if (trace_io_move_enabled())
trace_io_move2(c, k, &io_opts, &data_opts);
@@ -351,11 +351,10 @@ int bch2_move_extent(struct moving_context *ctxt,
struct moving_io *io = allocate_dropping_locks(trans, ret,
kzalloc(sizeof(struct moving_io), _gfp));
- if (!io)
- goto err;
-
+ if (!io && !ret)
+ ret = bch_err_throw(c, ENOMEM_move_extent);
if (ret)
- goto err_free;
+ goto err;
INIT_LIST_HEAD(&io->io_list);
io->write.ctxt = ctxt;
@@ -366,7 +365,7 @@ int bch2_move_extent(struct moving_context *ctxt,
ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
&io_opts, data_opts, iter->btree_id, k);
if (ret)
- goto err_free;
+ goto err;
io->write.op.end_io = move_write_done;
} else {
@@ -380,7 +379,7 @@ int bch2_move_extent(struct moving_context *ctxt,
ret = bch2_data_update_bios_init(&io->write, c, &io_opts);
if (ret)
- goto err_free;
+ goto err;
}
io->write.rbio.bio.bi_end_io = move_read_endio;
@@ -423,9 +422,8 @@ int bch2_move_extent(struct moving_context *ctxt,
BCH_READ_last_fragment,
data_opts.scrub ? data_opts.read_dev : -1);
return 0;
-err_free:
- kfree(io);
err:
+ kfree(io);
if (bch2_err_matches(ret, EROFS) ||
bch2_err_matches(ret, BCH_ERR_transaction_restart))
return ret;
@@ -529,7 +527,7 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,
bch2_inode_unpack(inode_k, &inode);
bch2_inode_opts_get(io_opts, c, &inode);
}
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
/* seem to be spinning here? */
out:
return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k);
@@ -596,14 +594,14 @@ static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans *
BTREE_ID_reflink, reflink_pos,
BTREE_ITER_not_extents);
- struct bkey_s_c k = bch2_btree_iter_peek(trans, iter);
+ struct bkey_s_c k = bch2_btree_iter_peek(iter);
if (!k.k || bkey_err(k)) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return k;
}
if (bkey_lt(reflink_pos, bkey_start_pos(k.k))) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return bkey_s_c_null;
}
@@ -648,13 +646,13 @@ retry_root:
BTREE_ITER_prefetch|
BTREE_ITER_not_extents|
BTREE_ITER_all_snapshots);
- struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto root_err;
if (b != btree_node_root(c, b)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
goto retry_root;
}
@@ -678,7 +676,7 @@ retry_root:
root_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
goto retry_root;
}
@@ -698,7 +696,7 @@ root_err:
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(trans, &iter);
+ k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
@@ -719,7 +717,7 @@ root_err:
REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) {
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- bch2_trans_iter_exit(trans, &reflink_iter);
+ bch2_trans_iter_exit(&reflink_iter);
k = bch2_lookup_indirect_extent_for_move(trans, &reflink_iter, p);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -783,62 +781,62 @@ next:
if (ctxt->stats)
atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
- if (!bch2_btree_iter_advance(trans, &iter))
+ if (!bch2_btree_iter_advance(&iter))
break;
}
out:
- bch2_trans_iter_exit(trans, &reflink_iter);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&reflink_iter);
+ bch2_trans_iter_exit(&iter);
bch2_bkey_buf_exit(&sk, c);
per_snapshot_io_opts_exit(&snapshot_io_opts);
return ret;
}
-int __bch2_move_data(struct moving_context *ctxt,
- struct bbpos start,
- struct bbpos end,
- move_pred_fn pred, void *arg)
+static int bch2_move_data(struct bch_fs *c,
+ struct bbpos start,
+ struct bbpos end,
+ unsigned min_depth,
+ struct bch_ratelimit *rate,
+ struct bch_move_stats *stats,
+ struct write_point_specifier wp,
+ bool wait_on_copygc,
+ move_pred_fn pred, void *arg)
{
- struct bch_fs *c = ctxt->trans->c;
- enum btree_id id;
int ret = 0;
- for (id = start.btree;
+ struct moving_context ctxt;
+ bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+
+ for (enum btree_id id = start.btree;
id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
id++) {
- ctxt->stats->pos = BBPOS(id, POS_MIN);
+ ctxt.stats->pos = BBPOS(id, POS_MIN);
- if (!btree_type_has_ptrs(id) ||
- !bch2_btree_id_root(c, id)->b)
+ if (!bch2_btree_id_root(c, id)->b)
continue;
- ret = bch2_move_data_btree(ctxt,
- id == start.btree ? start.pos : POS_MIN,
- id == end.btree ? end.pos : POS_MAX,
- pred, arg, id, 0);
+ unsigned min_depth_this_btree = min_depth;
+
+ if (!btree_type_has_ptrs(id))
+ min_depth_this_btree = max(min_depth_this_btree, 1);
+
+ for (unsigned level = min_depth_this_btree;
+ level < BTREE_MAX_DEPTH;
+ level++) {
+ ret = bch2_move_data_btree(&ctxt,
+ id == start.btree ? start.pos : POS_MIN,
+ id == end.btree ? end.pos : POS_MAX,
+ pred, arg, id, level);
+ if (ret)
+ break;
+ }
+
if (ret)
break;
}
- return ret;
-}
-
-int bch2_move_data(struct bch_fs *c,
- struct bbpos start,
- struct bbpos end,
- struct bch_ratelimit *rate,
- struct bch_move_stats *stats,
- struct write_point_specifier wp,
- bool wait_on_copygc,
- move_pred_fn pred, void *arg)
-{
- struct moving_context ctxt;
-
- bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
- int ret = __bch2_move_data(&ctxt, start, end, pred, arg);
bch2_moving_ctxt_exit(&ctxt);
-
return ret;
}
@@ -855,7 +853,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
struct bch_fs *c = trans->c;
bool is_kthread = current->flags & PF_KTHREAD;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- struct btree_iter iter = {}, bp_iter = {};
+ struct btree_iter iter = {};
struct bkey_buf sk;
struct bkey_s_c k;
struct bkey_buf last_flushed;
@@ -880,7 +878,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
*/
bch2_trans_begin(trans);
- bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0);
+ CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp_start, 0);
ret = bch2_btree_write_buffer_tryflush(trans);
if (!bch2_err_matches(ret, EROFS))
@@ -894,7 +892,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(trans, &bp_iter);
+ k = bch2_btree_iter_peek(&bp_iter);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -938,7 +936,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (!bp.v->level) {
ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k);
if (ret) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
continue;
}
}
@@ -951,13 +949,13 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
pred, arg, p);
if (!p) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
goto next;
}
if (data_opts.scrub &&
!bch2_dev_idx_is_online(c, data_opts.read_dev)) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
ret = bch_err_throw(c, device_offline);
break;
}
@@ -976,7 +974,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
else
ret = bch2_btree_node_scrub(trans, bp.v->btree_id, bp.v->level, k, data_opts.read_dev);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -991,14 +989,13 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (ctxt->stats)
atomic64_add(sectors, &ctxt->stats->sectors_seen);
next:
- bch2_btree_iter_advance(trans, &bp_iter);
+ bch2_btree_iter_advance(&bp_iter);
}
while (check_mismatch_done < bucket_end)
bch2_check_bucket_backpointer_mismatch(trans, ca, check_mismatch_done++,
copygc, &last_flushed);
err:
- bch2_trans_iter_exit(trans, &bp_iter);
bch2_bkey_buf_exit(&sk, c);
bch2_bkey_buf_exit(&last_flushed, c);
return ret;
@@ -1114,7 +1111,7 @@ static int bch2_move_btree(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(trans, &iter)) &&
+ (b = bch2_btree_iter_peek_node(&iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
if (kthread && kthread_should_stop())
break;
@@ -1134,12 +1131,12 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(trans, &iter);
+ bch2_btree_iter_next_node(&iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (kthread && kthread_should_stop())
break;
@@ -1206,14 +1203,6 @@ static bool migrate_pred(struct bch_fs *c, void *arg,
return data_opts->rewrite_ptrs != 0;
}
-static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- return rereplicate_pred(c, arg, b->c.btree_id, bkey_i_to_s_c(&b->key), io_opts, data_opts);
-}
-
/*
* Ancient versions of bcachefs produced packed formats which could represent
* keys that the in memory format cannot represent; this checks for those
@@ -1293,15 +1282,6 @@ static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
return data_opts->kill_ptrs != 0;
}
-static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- return drop_extra_replicas_pred(c, arg, b->c.btree_id, bkey_i_to_s_c(&b->key),
- io_opts, data_opts);
-}
-
static bool scrub_pred(struct bch_fs *c, void *_arg,
enum btree_id btree, struct bkey_s_c k,
struct bch_io_opts *io_opts,
@@ -1359,14 +1339,11 @@ int bch2_data_job(struct bch_fs *c,
case BCH_DATA_OP_rereplicate:
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, -1);
- ret = bch2_move_btree(c, start, end,
- rereplicate_btree_pred, c, stats) ?: ret;
- ret = bch2_move_data(c, start, end,
- NULL,
- stats,
+ ret = bch2_move_data(c, start, end, 0, NULL, stats,
writepoint_hashed((unsigned long) current),
true,
rereplicate_pred, c) ?: ret;
+ bch2_btree_interior_updates_flush(c);
ret = bch2_replicas_gc2(c) ?: ret;
break;
case BCH_DATA_OP_migrate:
@@ -1389,12 +1366,10 @@ int bch2_data_job(struct bch_fs *c,
ret = bch2_scan_old_btree_nodes(c, stats);
break;
case BCH_DATA_OP_drop_extra_replicas:
- ret = bch2_move_btree(c, start, end,
- drop_extra_replicas_btree_pred, c, stats) ?: ret;
- ret = bch2_move_data(c, start, end, NULL, stats,
- writepoint_hashed((unsigned long) current),
- true,
- drop_extra_replicas_pred, c) ?: ret;
+ ret = bch2_move_data(c, start, end, 0, NULL, stats,
+ writepoint_hashed((unsigned long) current),
+ true,
+ drop_extra_replicas_pred, c) ?: ret;
ret = bch2_replicas_gc2(c) ?: ret;
break;
default:
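Note: the move.c hunks above fold the old dedicated btree-node pass (bch2_move_btree() with the *_btree_pred wrappers) into bch2_move_data() itself: each btree is walked level by level starting at min_depth, and btrees whose keys carry no data pointers are clamped to a minimum depth of 1 so only their nodes are rewritten. A minimal standalone sketch of that iteration order follows; the btree names, the has_ptrs table and MAX_DEPTH are made-up stand-ins, not the bcachefs definitions.

    #include <stdio.h>
    #include <stdbool.h>

    #define NR_BTREES   3
    #define MAX_DEPTH   4

    static const char *btree_name[NR_BTREES] = { "extents", "inodes", "alloc" };
    /* pretend only "extents" carries data pointers in its keys */
    static const bool has_ptrs[NR_BTREES] = { true, false, false };

    int main(void)
    {
        unsigned min_depth = 0;     /* 0 == leaf keys are moved too */

        for (unsigned id = 0; id < NR_BTREES; id++) {
            unsigned min_depth_this_btree = min_depth;

            /* no data pointers: never visit level 0, only rewrite nodes */
            if (!has_ptrs[id] && min_depth_this_btree < 1)
                min_depth_this_btree = 1;

            for (unsigned level = min_depth_this_btree; level < MAX_DEPTH; level++)
                printf("move %s level %u\n", btree_name[id], level);
        }
        return 0;
    }

With min_depth == 0 this visits "extents" at levels 0-3 but "inodes"/"alloc" only at levels 1-3, which is roughly the coverage the removed rereplicate_btree_pred/drop_extra_replicas_btree_pred paths used to provide separately.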
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index fe92ca6d418d..481026ff99ab 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -128,18 +128,6 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *,
int bch2_move_data_btree(struct moving_context *, struct bpos, struct bpos,
move_pred_fn, void *, enum btree_id, unsigned);
-int __bch2_move_data(struct moving_context *,
- struct bbpos,
- struct bbpos,
- move_pred_fn, void *);
-int bch2_move_data(struct bch_fs *,
- struct bbpos start,
- struct bbpos end,
- struct bch_ratelimit *,
- struct bch_move_stats *,
- struct write_point_specifier,
- bool,
- move_pred_fn, void *);
int bch2_move_data_phys(struct bch_fs *, unsigned, u64, u64, unsigned,
struct bch_ratelimit *, struct bch_move_stats *,
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 9192b1fc3594..f391eceef4f4 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -90,7 +90,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
ret = lru_idx && lru_idx <= time;
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c
index 8fa108880f58..cfed2041c2c3 100644
--- a/fs/bcachefs/namei.c
+++ b/fs/bcachefs/namei.c
@@ -36,8 +36,8 @@ int bch2_create_trans(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = {};
- struct btree_iter inode_iter = {};
+ struct btree_iter dir_iter = { NULL };
+ struct btree_iter inode_iter = { NULL };
subvol_inum new_inum = dir;
u64 now = bch2_current_time(c);
u64 cpu = raw_smp_processor_id();
@@ -133,8 +133,8 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(trans, &dir_iter, dir_snapshot);
- ret = bch2_btree_iter_traverse(trans, &dir_iter);
+ bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot);
+ ret = bch2_btree_iter_traverse(&dir_iter);
if (ret)
goto err;
}
@@ -192,13 +192,13 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_depth = dir_u->bi_depth + 1;
inode_iter.flags &= ~BTREE_ITER_all_snapshots;
- bch2_btree_iter_set_snapshot(trans, &inode_iter, snapshot);
+ bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
- ret = bch2_btree_iter_traverse(trans, &inode_iter) ?:
+ ret = bch2_btree_iter_traverse(&inode_iter) ?:
bch2_inode_write(trans, &inode_iter, new_inode);
err:
- bch2_trans_iter_exit(trans, &inode_iter);
- bch2_trans_iter_exit(trans, &dir_iter);
+ bch2_trans_iter_exit(&inode_iter);
+ bch2_trans_iter_exit(&dir_iter);
return ret;
}
@@ -208,8 +208,8 @@ int bch2_link_trans(struct btree_trans *trans,
const struct qstr *name)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = {};
- struct btree_iter inode_iter = {};
+ struct btree_iter dir_iter = { NULL };
+ struct btree_iter inode_iter = { NULL };
struct bch_hash_info dir_hash;
u64 now = bch2_current_time(c);
u64 dir_offset = 0;
@@ -254,8 +254,8 @@ int bch2_link_trans(struct btree_trans *trans,
ret = bch2_inode_write(trans, &dir_iter, dir_u) ?:
bch2_inode_write(trans, &inode_iter, inode_u);
err:
- bch2_trans_iter_exit(trans, &dir_iter);
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&dir_iter);
+ bch2_trans_iter_exit(&inode_iter);
return ret;
}
@@ -267,9 +267,9 @@ int bch2_unlink_trans(struct btree_trans *trans,
bool deleting_subvol)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = {};
- struct btree_iter dirent_iter = {};
- struct btree_iter inode_iter = {};
+ struct btree_iter dir_iter = { NULL };
+ struct btree_iter dirent_iter = { NULL };
+ struct btree_iter inode_iter = { NULL };
struct bch_hash_info dir_hash;
subvol_inum inum;
u64 now = bch2_current_time(c);
@@ -315,7 +315,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
if (ret)
goto err;
- k = bch2_btree_iter_peek_slot(trans, &dirent_iter);
+ k = bch2_btree_iter_peek_slot(&dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -324,8 +324,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
* If we're deleting a subvolume, we need to really delete the
* dirent, not just emit a whiteout in the current snapshot:
*/
- bch2_btree_iter_set_snapshot(trans, &dirent_iter, k.k->p.snapshot);
- ret = bch2_btree_iter_traverse(trans, &dirent_iter);
+ bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(&dirent_iter);
if (ret)
goto err;
} else {
@@ -347,9 +347,9 @@ int bch2_unlink_trans(struct btree_trans *trans,
bch2_inode_write(trans, &dir_iter, dir_u) ?:
bch2_inode_write(trans, &inode_iter, inode_u);
err:
- bch2_trans_iter_exit(trans, &inode_iter);
- bch2_trans_iter_exit(trans, &dirent_iter);
- bch2_trans_iter_exit(trans, &dir_iter);
+ bch2_trans_iter_exit(&inode_iter);
+ bch2_trans_iter_exit(&dirent_iter);
+ bch2_trans_iter_exit(&dir_iter);
return ret;
}
@@ -393,7 +393,7 @@ static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_p
return ret;
s->v.fs_path_parent = cpu_to_le32(new_parent);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return 0;
}
@@ -407,10 +407,10 @@ int bch2_rename_trans(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct bch_fs *c = trans->c;
- struct btree_iter src_dir_iter = {};
- struct btree_iter dst_dir_iter = {};
- struct btree_iter src_inode_iter = {};
- struct btree_iter dst_inode_iter = {};
+ struct btree_iter src_dir_iter = { NULL };
+ struct btree_iter dst_dir_iter = { NULL };
+ struct btree_iter src_inode_iter = { NULL };
+ struct btree_iter dst_inode_iter = { NULL };
struct bch_hash_info src_hash, dst_hash;
subvol_inum src_inum, dst_inum;
u64 src_offset, dst_offset;
@@ -582,15 +582,31 @@ int bch2_rename_trans(struct btree_trans *trans,
? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u)
: 0);
err:
- bch2_trans_iter_exit(trans, &dst_inode_iter);
- bch2_trans_iter_exit(trans, &src_inode_iter);
- bch2_trans_iter_exit(trans, &dst_dir_iter);
- bch2_trans_iter_exit(trans, &src_dir_iter);
+ bch2_trans_iter_exit(&dst_inode_iter);
+ bch2_trans_iter_exit(&src_inode_iter);
+ bch2_trans_iter_exit(&dst_dir_iter);
+ bch2_trans_iter_exit(&src_dir_iter);
return ret;
}
/* inum_to_path */
+static inline void reverse_bytes(void *b, size_t n)
+{
+ char *e = b + n, *s = b;
+
+ while (s < e) {
+ --e;
+ swap(*s, *e);
+ s++;
+ }
+}
+
+static inline void printbuf_reverse_from(struct printbuf *out, unsigned pos)
+{
+ reverse_bytes(out->buf + pos, out->pos - pos);
+}
+
static inline void prt_bytes_reversed(struct printbuf *out, const void *b, unsigned n)
{
bch2_printbuf_make_room(out, n);
@@ -610,15 +626,17 @@ static inline void prt_str_reversed(struct printbuf *out, const char *s)
prt_bytes_reversed(out, s, strlen(s));
}
-static inline void reverse_bytes(void *b, size_t n)
+__printf(2, 3)
+static inline void prt_printf_reversed(struct printbuf *out, const char *fmt, ...)
{
- char *e = b + n, *s = b;
+ unsigned orig_pos = out->pos;
- while (s < e) {
- --e;
- swap(*s, *e);
- s++;
- }
+ va_list args;
+ va_start(args, fmt);
+ prt_vprintf(out, fmt, args);
+ va_end(args);
+
+ printbuf_reverse_from(out, orig_pos);
}
static int __bch2_inum_to_path(struct btree_trans *trans,
@@ -639,7 +657,7 @@ static int __bch2_inum_to_path(struct btree_trans *trans,
subvol_inum n = (subvol_inum) { subvol ?: snapshot, inum };
if (darray_find_p(inums, i, i->subvol == n.subvol && i->inum == n.inum)) {
- prt_str_reversed(path, "(loop)");
+ prt_printf_reversed(path, "(loop at %llu:%u)", inum, snapshot);
break;
}
@@ -683,27 +701,27 @@ static int __bch2_inum_to_path(struct btree_trans *trans,
prt_char(path, '/');
- bch2_trans_iter_exit(trans, &d_iter);
+ bch2_trans_iter_exit(&d_iter);
}
if (orig_pos == path->pos)
prt_char(path, '/');
out:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto err;
+
ret = path->allocation_failure ? -ENOMEM : 0;
if (ret)
goto err;
- reverse_bytes(path->buf + orig_pos, path->pos - orig_pos);
+ printbuf_reverse_from(path, orig_pos);
darray_exit(&inums);
return 0;
err:
darray_exit(&inums);
return ret;
disconnected:
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto err;
-
- prt_str_reversed(path, "(disconnected)");
+ prt_printf_reversed(path, "(disconnected at %llu.%u)", inum, snapshot);
goto out;
}
@@ -836,7 +854,7 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans,
out:
err:
fsck_err:
- bch2_trans_iter_exit(trans, &bp_iter);
+ bch2_trans_iter_exit(&bp_iter);
bch_err_fn(c, ret);
return ret;
}
@@ -913,14 +931,14 @@ static int bch2_propagate_has_case_insensitive(struct btree_trans *trans, subvol
if (ret)
break;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (subvol_inum_eq(inum, BCACHEFS_ROOT_SUBVOL_INUM))
break;
inum = parent_inum(inum, &inode);
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
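Note: __bch2_inum_to_path() builds the path while walking from the inode up toward the root, so each component is emitted in reversed byte order and the whole buffer is reversed once at the end; the printbuf_reverse_from()/prt_printf_reversed() helpers added above let formatted strings such as "(loop at %llu:%u)" take part in the same scheme. A standalone sketch of the trick; append_reversed() and the fixed-size buffer are illustrative stand-ins, not the printbuf API.

    #include <stdio.h>
    #include <string.h>

    /* reverse n bytes in place, same logic as reverse_bytes() in the hunk above */
    static void reverse_bytes(char *b, size_t n)
    {
        char *s = b, *e = b + n;

        while (s < e) {
            --e;
            char tmp = *s; *s = *e; *e = tmp;
            s++;
        }
    }

    /* append a component back-to-front while walking child -> parent */
    static size_t append_reversed(char *buf, size_t pos, const char *s)
    {
        size_t len = strlen(s);

        for (size_t i = 0; i < len; i++)
            buf[pos + i] = s[len - 1 - i];
        return pos + len;
    }

    int main(void)
    {
        char path[64];
        size_t pos = 0;

        pos = append_reversed(path, pos, "file");  /* leaf component first */
        pos = append_reversed(path, pos, "/");
        pos = append_reversed(path, pos, "dir");
        pos = append_reversed(path, pos, "/");

        reverse_bytes(path, pos);                  /* one final flip fixes the order */
        path[pos] = '\0';
        printf("%s\n", path);                      /* prints /dir/file */
        return 0;
    }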
diff --git a/fs/bcachefs/progress.c b/fs/bcachefs/progress.c
index 42353067ba28..792fc6fef270 100644
--- a/fs/bcachefs/progress.c
+++ b/fs/bcachefs/progress.c
@@ -52,7 +52,8 @@ void bch2_progress_update_iter(struct btree_trans *trans,
: 0;
prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
- msg, percent, s->nodes_seen, s->nodes_total);
+ strip_bch2(msg),
+ percent, s->nodes_seen, s->nodes_total);
bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
bch_info(c, "%s", buf.buf);
diff --git a/fs/bcachefs/progress.h b/fs/bcachefs/progress.h
index 23fb1811f943..972a73087ffe 100644
--- a/fs/bcachefs/progress.h
+++ b/fs/bcachefs/progress.h
@@ -26,4 +26,7 @@ void bch2_progress_update_iter(struct btree_trans *,
struct btree_iter *,
const char *);
+#define progress_update_iter(trans, p, iter) \
+ bch2_progress_update_iter(trans, p, iter, __func__)
+
#endif /* _BCACHEFS_PROGRESS_H */
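Note: progress_update_iter() forwards __func__ as the message, and bch2_progress_update_iter() (see the progress.c hunk above) runs it through strip_bch2() before logging, so a caller such as bch2_check_lrus() gets "check_lrus: N%, done ..." lines without passing a string. A standalone sketch of the pattern; strip_prefix() and report_progress() are made-up stand-ins for the bcachefs helpers.

    #include <stdio.h>
    #include <string.h>

    /* stand-in for strip_bch2(): drop a "bch2_" prefix if present */
    static const char *strip_prefix(const char *s)
    {
        return !strncmp(s, "bch2_", 5) ? s + 5 : s;
    }

    static void report_progress(const char *msg, unsigned percent)
    {
        printf("%s: %u%%\n", strip_prefix(msg), percent);
    }

    /* forwarding __func__ labels every caller automatically */
    #define progress_update(percent) report_progress(__func__, (percent))

    static void bch2_check_something(void)
    {
        progress_update(42);        /* prints "check_something: 42%" */
    }

    int main(void)
    {
        bch2_check_something();
        return 0;
    }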
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index 5f1eff591b29..64a7f5eeeb5c 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -512,7 +512,7 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
KEY_TYPE_QUOTA_NOCHECK);
advance:
- bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
return 0;
}
@@ -820,7 +820,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 32fa7cf90b63..c0c5fe961a83 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -15,6 +15,7 @@
#include "inode.h"
#include "io_write.h"
#include "move.h"
+#include "progress.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"
@@ -234,14 +235,13 @@ static const char * const bch2_rebalance_state_strs[] = {
int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
{
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
- SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
- BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_rebalance_work,
+ SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
+ BTREE_ITER_intent);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
int ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
u64 v = k.k->type == KEY_TYPE_cookie
? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
@@ -250,16 +250,13 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
struct bkey_i_cookie *cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
ret = PTR_ERR_OR_ZERO(cookie);
if (ret)
- goto err;
+ return ret;
bkey_cookie_init(&cookie->k_i);
cookie->k.p = iter.pos;
cookie->v.cookie = cpu_to_le64(v + 1);
- ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return bch2_trans_update(trans, &iter, &cookie->k_i, 0);
}
int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
@@ -278,31 +275,28 @@ int bch2_set_fs_needs_rebalance(struct bch_fs *c)
static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
{
- struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
- SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
- BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_rebalance_work,
+ SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
+ BTREE_ITER_intent);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
int ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
u64 v = k.k->type == KEY_TYPE_cookie
? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
: 0;
- if (v == cookie)
- ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return v == cookie
+ ? bch2_btree_delete_at(trans, &iter, 0)
+ : 0;
}
static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
struct btree_iter *work_iter)
{
return !kthread_should_stop()
- ? bch2_btree_iter_peek(trans, work_iter)
+ ? bch2_btree_iter_peek(work_iter)
: bkey_s_c_null;
}
@@ -331,12 +325,12 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
- bch2_trans_iter_exit(trans, extent_iter);
+ bch2_trans_iter_exit(extent_iter);
bch2_trans_iter_init(trans, extent_iter,
work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
work_pos,
BTREE_ITER_all_snapshots);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, extent_iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
if (bkey_err(k))
return k;
@@ -530,7 +524,7 @@ static int do_rebalance(struct moving_context *ctxt)
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bch_fs_rebalance *r = &c->rebalance;
- struct btree_iter rebalance_work_iter, extent_iter = {};
+ struct btree_iter extent_iter = { NULL };
struct bkey_s_c k;
u32 kick = r->kick;
int ret = 0;
@@ -540,9 +534,9 @@ static int do_rebalance(struct moving_context *ctxt)
bch2_move_stats_init(&r->work_stats, "rebalance_work");
bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
- bch2_trans_iter_init(trans, &rebalance_work_iter,
- BTREE_ID_rebalance_work, POS_MIN,
- BTREE_ITER_all_snapshots);
+ CLASS(btree_iter, rebalance_work_iter)(trans,
+ BTREE_ID_rebalance_work, POS_MIN,
+ BTREE_ITER_all_snapshots);
while (!bch2_move_ratelimit(ctxt)) {
if (!bch2_rebalance_enabled(c)) {
@@ -572,11 +566,10 @@ static int do_rebalance(struct moving_context *ctxt)
if (ret)
break;
- bch2_btree_iter_advance(trans, &rebalance_work_iter);
+ bch2_btree_iter_advance(&rebalance_work_iter);
}
- bch2_trans_iter_exit(trans, &extent_iter);
- bch2_trans_iter_exit(trans, &rebalance_work_iter);
+ bch2_trans_iter_exit(&extent_iter);
bch2_move_stats_exit(&r->scan_stats, c);
if (!ret &&
@@ -769,8 +762,8 @@ static int check_rebalance_work_one(struct btree_trans *trans,
struct bkey_s_c extent_k, rebalance_k;
CLASS(printbuf, buf)();
- int ret = bkey_err(extent_k = bch2_btree_iter_peek(trans, extent_iter)) ?:
- bkey_err(rebalance_k = bch2_btree_iter_peek(trans, rebalance_iter));
+ int ret = bkey_err(extent_k = bch2_btree_iter_peek(extent_iter)) ?:
+ bkey_err(rebalance_k = bch2_btree_iter_peek(rebalance_iter));
if (ret)
return ret;
@@ -778,7 +771,7 @@ static int check_rebalance_work_one(struct btree_trans *trans,
extent_iter->btree_id == BTREE_ID_reflink &&
(!rebalance_k.k ||
rebalance_k.k->p.inode >= BCACHEFS_ROOT_INO)) {
- bch2_trans_iter_exit(trans, extent_iter);
+ bch2_trans_iter_exit(extent_iter);
bch2_trans_iter_init(trans, extent_iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_prefetch|
@@ -834,9 +827,9 @@ static int check_rebalance_work_one(struct btree_trans *trans,
}
if (cmp <= 0)
- bch2_btree_iter_advance(trans, extent_iter);
+ bch2_btree_iter_advance(extent_iter);
if (cmp >= 0)
- bch2_btree_iter_advance(trans, rebalance_iter);
+ bch2_btree_iter_advance(rebalance_iter);
fsck_err:
return ret;
}
@@ -844,21 +837,22 @@ fsck_err:
int bch2_check_rebalance_work(struct bch_fs *c)
{
CLASS(btree_trans, trans)(c);
- struct btree_iter rebalance_iter, extent_iter;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &extent_iter,
- BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_prefetch);
- bch2_trans_iter_init(trans, &rebalance_iter,
- BTREE_ID_rebalance_work, POS_MIN,
- BTREE_ITER_prefetch);
+ CLASS(btree_iter, extent_iter)(trans, BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_prefetch);
+ CLASS(btree_iter, rebalance_iter)(trans, BTREE_ID_rebalance_work, POS_MIN,
+ BTREE_ITER_prefetch);
struct bkey_buf last_flushed;
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_rebalance_work));
+
+ int ret = 0;
while (!ret) {
+ progress_update_iter(trans, &progress, &rebalance_iter);
+
bch2_trans_begin(trans);
ret = check_rebalance_work_one(trans, &extent_iter, &rebalance_iter, &last_flushed);
@@ -868,7 +862,5 @@ int bch2_check_rebalance_work(struct bch_fs *c)
}
bch2_bkey_buf_exit(&last_flushed, c);
- bch2_trans_iter_exit(trans, &extent_iter);
- bch2_trans_iter_exit(trans, &rebalance_iter);
return ret < 0 ? ret : 0;
}
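Note: the CLASS(btree_iter, ...) conversions in this file (and throughout the series) rely on scope-based cleanup: the iterator is exited automatically when it leaves scope, which is why error paths can return directly instead of jumping to a label that calls bch2_trans_iter_exit(). Below is a standalone sketch of the underlying mechanism, the compiler cleanup attribute that linux/cleanup.h's DEFINE_CLASS()/CLASS() are built on; SCOPED_ITER, iter_init() and iter_exit() are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    struct iter { int id; };

    static struct iter *iter_init(int n)
    {
        struct iter *i = malloc(sizeof(*i));

        if (!i)
            abort();
        i->id = n;
        return i;
    }

    static void iter_exit(struct iter **ip)
    {
        printf("exit iter %d\n", (*ip)->id);
        free(*ip);
    }

    /* gcc/clang cleanup attribute: runs iter_exit() when the variable leaves scope */
    #define SCOPED_ITER(name, n) \
        struct iter *name __attribute__((cleanup(iter_exit))) = iter_init(n)

    static int lookup(int key)
    {
        SCOPED_ITER(iter, key);

        if (key < 0)
            return -1;      /* cleanup still runs; no goto/exit label needed */
        return iter->id;
    }

    int main(void)
    {
        printf("%d\n", lookup(7));
        printf("%d\n", lookup(-1));
        return 0;
    }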
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 58c159e5f10d..c57ff235a97a 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -67,13 +67,16 @@ int bch2_btree_lost_data(struct bch_fs *c,
ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_backpointers_to_extents, 0, &write_sb) ?: ret;
#endif
+ write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent);
+ write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_backpointer_to_missing_ptr, ext->errors_silent);
+ write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
+ write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
+
switch (btree) {
case BTREE_ID_alloc:
ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0, &write_sb) ?: ret;
- write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
- write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
@@ -203,7 +206,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(trans, &iter);
+ int ret = bch2_btree_iter_traverse(&iter);
if (ret)
goto out;
@@ -231,7 +234,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, new, BTREE_TRIGGER_norun);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -266,7 +269,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
iter_flags);
- ret = bch2_btree_iter_traverse(trans, &iter);
+ ret = bch2_btree_iter_traverse(&iter);
if (ret)
goto out;
@@ -294,10 +297,10 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
goto out;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, 0, iter_flags);
- ret = bch2_btree_iter_traverse(trans, &iter) ?:
+ ret = bch2_btree_iter_traverse(&iter) ?:
bch2_btree_increase_depth(trans, iter.path, 0) ?:
-BCH_ERR_transaction_restart_nested;
goto out;
@@ -319,7 +322,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index b2cdd111fd0e..bd442652d0f5 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -639,6 +639,8 @@ void bch2_recovery_pass_status_to_text(struct printbuf *out, struct bch_fs *c)
prt_printf(out, "Current pass:\t%s\n", bch2_recovery_passes[r->curr_pass]);
prt_passes(out, "Current passes", r->passes_to_run);
}
+
+ prt_printf(out, "Pass done:\t%s\n", bch2_recovery_passes[r->pass_done]);
}
void bch2_fs_recovery_passes_init(struct bch_fs *c)
diff --git a/fs/bcachefs/recovery_passes.h b/fs/bcachefs/recovery_passes.h
index 4f2c2f811d5e..95e3612bb96c 100644
--- a/fs/bcachefs/recovery_passes.h
+++ b/fs/bcachefs/recovery_passes.h
@@ -26,6 +26,12 @@ static inline bool go_rw_in_recovery(struct bch_fs *c)
(c->opts.fsck && !(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))));
}
+static inline bool recovery_pass_will_run(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+ return unlikely(test_bit(BCH_FS_in_recovery, &c->flags) &&
+ c->recovery.passes_to_run & BIT_ULL(pass));
+}
+
int bch2_run_print_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
int __bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *,
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 60abd89d7c9f..c083deb83ff7 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -277,13 +277,13 @@ struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *trans,
int ret = bch2_indirect_extent_missing_error(trans, p, reflink_offset,
missing_end, should_commit);
if (ret) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return bkey_s_c_err(ret);
}
} else if (unlikely(REFLINK_P_ERROR(p.v))) {
int ret = bch2_indirect_extent_not_missing(trans, p, should_commit);
if (ret) {
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return bkey_s_c_err(ret);
}
}
@@ -357,7 +357,7 @@ next:
*idx = k.k->p.offset;
err:
fsck_err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -497,13 +497,12 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
if (orig->k.type == KEY_TYPE_inline_data)
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
- struct btree_iter reflink_iter;
- bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
- BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, &reflink_iter);
+ CLASS(btree_iter, reflink_iter)(trans, BTREE_ID_reflink, POS_MAX,
+ BTREE_ITER_intent);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(&reflink_iter);
int ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
/*
* XXX: we're assuming that 56 bits will be enough for the life of the
@@ -516,7 +515,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
struct bkey_i *r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
ret = PTR_ERR_OR_ZERO(r_v);
if (ret)
- goto err;
+ return ret;
bkey_init(&r_v->k);
r_v->k.type = bkey_type_to_indirect(&orig->k);
@@ -532,7 +531,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
if (ret)
- goto err;
+ return ret;
/*
* orig is in a bkey_buf which statically allocates 5 64s for the val,
@@ -555,21 +554,16 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
if (reflink_p_may_update_opts_field)
SET_REFLINK_P_MAY_UPDATE_OPTIONS(&r_p->v, true);
- ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
- BTREE_UPDATE_internal_snapshot_node);
-err:
- bch2_trans_iter_exit(trans, &reflink_iter);
-
- return ret;
+ return bch2_trans_update(trans, extent_iter, &r_p->k_i,
+ BTREE_UPDATE_internal_snapshot_node);
}
-static struct bkey_s_c get_next_src(struct btree_trans *trans,
- struct btree_iter *iter, struct bpos end)
+static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
{
struct bkey_s_c k;
int ret;
- for_each_btree_key_max_continue_norestart(trans, *iter, end, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) {
if (bkey_extent_is_unwritten(k))
continue;
@@ -578,7 +572,7 @@ static struct bkey_s_c get_next_src(struct btree_trans *trans,
}
if (bkey_ge(iter->pos, end))
- bch2_btree_iter_set_pos(trans, iter, end);
+ bch2_btree_iter_set_pos(iter, end);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
@@ -641,27 +635,27 @@ s64 bch2_remap_range(struct bch_fs *c,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(trans, &src_iter, src_snapshot);
+ bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);
ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
&dst_snapshot);
if (ret)
continue;
- bch2_btree_iter_set_snapshot(trans, &dst_iter, dst_snapshot);
+ bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
if (dst_inum.inum < src_inum.inum) {
/* Avoid some lock cycle transaction restarts */
- ret = bch2_btree_iter_traverse(trans, &dst_iter);
+ ret = bch2_btree_iter_traverse(&dst_iter);
if (ret)
continue;
}
dst_done = dst_iter.pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
- bch2_btree_iter_set_pos(trans, &src_iter, src_want);
+ bch2_btree_iter_set_pos(&src_iter, src_want);
- src_k = get_next_src(trans, &src_iter, src_end);
+ src_k = get_next_src(&src_iter, src_end);
ret = bkey_err(src_k);
if (ret)
continue;
@@ -722,8 +716,8 @@ s64 bch2_remap_range(struct bch_fs *c,
true);
bch2_disk_reservation_put(c, &disk_res);
}
- bch2_trans_iter_exit(trans, &dst_iter);
- bch2_trans_iter_exit(trans, &src_iter);
+ bch2_trans_iter_exit(&dst_iter);
+ bch2_trans_iter_exit(&src_iter);
BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
BUG_ON(bkey_gt(dst_iter.pos, dst_end));
@@ -733,7 +727,7 @@ s64 bch2_remap_range(struct bch_fs *c,
do {
struct bch_inode_unpacked inode_u;
- struct btree_iter inode_iter = {};
+ struct btree_iter inode_iter = { NULL };
bch2_trans_begin(trans);
@@ -748,7 +742,7 @@ s64 bch2_remap_range(struct bch_fs *c,
BCH_TRANS_COMMIT_no_enospc);
}
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
} while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
err:
bch2_bkey_buf_exit(&new_src, c);
diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h
index fb72ad730518..b2b892687cdd 100644
--- a/fs/bcachefs/sb-members_format.h
+++ b/fs/bcachefs/sb-members_format.h
@@ -17,7 +17,7 @@
UUID_INIT(0xffffffff, 0xffff, 0xffff, \
0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
-#define BCH_MIN_NR_NBUCKETS (1 << 6)
+#define BCH_MIN_NR_NBUCKETS (1 << 9)
#define BCH_IOPS_MEASUREMENTS() \
x(seqread, 0) \
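Note: BCH_MIN_NR_NBUCKETS rising from 1 << 6 to 1 << 9 means, going by the name, that a member device must now have at least 512 buckets rather than 64, an 8x higher floor. With a hypothetical 256 KiB bucket size that moves the smallest admissible device from 16 MiB to 128 MiB; a trivial check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long bucket_size = 256 * 1024;   /* example bucket size: 256 KiB */
        unsigned long old_min = 1UL << 6;         /* 64 buckets  */
        unsigned long new_min = 1UL << 9;         /* 512 buckets */

        printf("old floor: %lu MiB\n", old_min * bucket_size >> 20);  /* 16  */
        printf("new floor: %lu MiB\n", new_min * bucket_size >> 20);  /* 128 */
        return 0;
    }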
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 7a801513b134..1b7b21494479 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -11,6 +11,7 @@
#include "errcode.h"
#include "error.h"
#include "fs.h"
+#include "progress.h"
#include "recovery_passes.h"
#include "snapshot.h"
@@ -73,7 +74,7 @@ __bch2_snapshot_tree_create(struct btree_trans *trans)
s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
ret = PTR_ERR_OR_ZERO(s_t);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret ? ERR_PTR(ret) : s_t;
}
@@ -142,7 +143,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
guard(rcu)();
struct snapshot_table *t = rcu_dereference(c->snapshots);
- if (unlikely(c->recovery.pass_done < BCH_RECOVERY_PASS_check_snapshots))
+ if (unlikely(recovery_pass_will_run(c, BCH_RECOVERY_PASS_check_snapshots)))
return __bch2_snapshot_is_ancestor_early(t, id, ancestor);
if (likely(ancestor >= IS_ANCESTOR_BITMAP))
@@ -364,31 +365,32 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
/* fsck: */
-static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
+static u32 bch2_snapshot_child(struct snapshot_table *t,
+ u32 id, unsigned child)
{
- return snapshot_t(c, id)->children[child];
+ return __snapshot_t(t, id)->children[child];
}
-static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
+static u32 bch2_snapshot_left_child(struct snapshot_table *t, u32 id)
{
- return bch2_snapshot_child(c, id, 0);
+ return bch2_snapshot_child(t, id, 0);
}
-static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
+static u32 bch2_snapshot_right_child(struct snapshot_table *t, u32 id)
{
- return bch2_snapshot_child(c, id, 1);
+ return bch2_snapshot_child(t, id, 1);
}
-static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
+static u32 bch2_snapshot_tree_next(struct snapshot_table *t, u32 id)
{
u32 n, parent;
- n = bch2_snapshot_left_child(c, id);
+ n = bch2_snapshot_left_child(t, id);
if (n)
return n;
- while ((parent = bch2_snapshot_parent(c, id))) {
- n = bch2_snapshot_right_child(c, parent);
+ while ((parent = __bch2_snapshot_parent(t, id))) {
+ n = bch2_snapshot_right_child(t, parent);
if (n && n != id)
return n;
id = parent;
@@ -401,17 +403,18 @@ u32 bch2_snapshot_oldest_subvol(struct bch_fs *c, u32 snapshot_root,
snapshot_id_list *skip)
{
guard(rcu)();
+ struct snapshot_table *t = rcu_dereference(c->snapshots);
u32 id, subvol = 0, s;
retry:
id = snapshot_root;
- while (id && bch2_snapshot_exists(c, id)) {
+ while (id && __bch2_snapshot_exists(t, id)) {
if (!(skip && snapshot_list_has_id(skip, id))) {
- s = snapshot_t(c, id)->subvol;
+ s = __snapshot_t(t, id)->subvol;
if (s && (!subvol || s < subvol))
subvol = s;
}
- id = bch2_snapshot_tree_next(c, id);
+ id = bch2_snapshot_tree_next(t, id);
if (id == snapshot_root)
break;
}
@@ -447,7 +450,7 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (!ret && !found) {
struct bkey_i_subvolume *u;
@@ -560,7 +563,7 @@ static int check_snapshot_tree(struct btree_trans *trans,
out:
err:
fsck_err:
- bch2_trans_iter_exit(trans, &snapshot_iter);
+ bch2_trans_iter_exit(&snapshot_iter);
return ret;
}
@@ -682,7 +685,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
*s = u->v;
}
err:
- bch2_trans_iter_exit(trans, &root_iter);
+ bch2_trans_iter_exit(&root_iter);
return ret;
}
@@ -865,7 +868,7 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
return ret;
@@ -895,7 +898,7 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return bch2_snapshot_table_make_room(c, id) ?:
bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0);
@@ -973,12 +976,16 @@ int bch2_reconstruct_snapshots(struct bch_fs *c)
struct snapshot_tree_reconstruct r = {};
int ret = 0;
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, btree_has_snapshots_mask);
+
for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
if (btree_type_has_snapshots(btree)) {
r.btree = btree;
ret = for_each_btree_key(trans, iter, btree, POS_MIN,
BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
+ progress_update_iter(trans, &progress, &iter);
get_snapshot_trees(c, &r, k.k->p);
}));
if (ret)
@@ -1093,7 +1100,7 @@ int __bch2_get_snapshot_overwrites(struct btree_trans *trans,
if (ret)
break;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
darray_exit(s);
@@ -1125,7 +1132,7 @@ int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
s->v.subvol = 0;
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1250,10 +1257,10 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
set_bkey_val_u64s(&s->k, 0);
}
err:
- bch2_trans_iter_exit(trans, &tree_iter);
- bch2_trans_iter_exit(trans, &p_iter);
- bch2_trans_iter_exit(trans, &c_iter);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&tree_iter);
+ bch2_trans_iter_exit(&p_iter);
+ bch2_trans_iter_exit(&c_iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1263,35 +1270,30 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
unsigned nr_snapids)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
struct bkey_i_snapshot *n;
- struct bkey_s_c k;
- unsigned i, j;
u32 depth = bch2_snapshot_depth(c, parent);
- int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
- POS_MIN, BTREE_ITER_intent);
- k = bch2_btree_iter_peek(trans, &iter);
- ret = bkey_err(k);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_snapshots,
+ POS_MIN, BTREE_ITER_intent);
+ struct bkey_s_c k = bch2_btree_iter_peek(&iter);
+ int ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
- for (i = 0; i < nr_snapids; i++) {
- k = bch2_btree_iter_prev_slot(trans, &iter);
+ for (unsigned i = 0; i < nr_snapids; i++) {
+ k = bch2_btree_iter_prev_slot(&iter);
ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
if (!k.k || !k.k->p.offset) {
- ret = bch_err_throw(c, ENOSPC_snapshot_create);
- goto err;
+ return bch_err_throw(c, ENOSPC_snapshot_create);
}
n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
- goto err;
+ return ret;
n->v.flags = 0;
n->v.parent = cpu_to_le32(parent);
@@ -1301,7 +1303,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
n->v.btime.lo = cpu_to_le64(bch2_current_time(c));
n->v.btime.hi = 0;
- for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
+ for (unsigned j = 0; j < ARRAY_SIZE(n->v.skip); j++)
n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
@@ -1310,13 +1312,12 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
if (ret)
- goto err;
+ return ret;
new_snapids[i] = iter.pos.offset;
}
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+
+ return 0;
}
/*
@@ -1357,7 +1358,7 @@ static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 par
n_parent->v.subvol = 0;
SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -1424,38 +1425,22 @@ static inline u32 interior_delete_has_id(interior_delete_list *l, u32 id)
return i ? i->live_child : 0;
}
-static unsigned __live_child(struct snapshot_table *t, u32 id,
- snapshot_id_list *delete_leaves,
- interior_delete_list *delete_interior)
-{
- struct snapshot_t *s = __snapshot_t(t, id);
- if (!s)
- return 0;
-
- for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++)
- if (s->children[i] &&
- !snapshot_list_has_id(delete_leaves, s->children[i]) &&
- !interior_delete_has_id(delete_interior, s->children[i]))
- return s->children[i];
-
- for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) {
- u32 live_child = s->children[i]
- ? __live_child(t, s->children[i], delete_leaves, delete_interior)
- : 0;
- if (live_child)
- return live_child;
- }
-
- return 0;
-}
-
-static unsigned live_child(struct bch_fs *c, u32 id)
+static unsigned live_child(struct bch_fs *c, u32 start)
{
struct snapshot_delete *d = &c->snapshot_delete;
guard(rcu)();
- return __live_child(rcu_dereference(c->snapshots), id,
- &d->delete_leaves, &d->delete_interior);
+ struct snapshot_table *t = rcu_dereference(c->snapshots);
+
+ for (u32 id = bch2_snapshot_tree_next(t, start);
+ id && id != start;
+ id = bch2_snapshot_tree_next(t, id))
+ if (bch2_snapshot_is_leaf(c, id) &&
+ !snapshot_list_has_id(&d->delete_leaves, id) &&
+ !interior_delete_has_id(&d->delete_interior, id))
+ return id;
+
+ return 0;
}
static bool snapshot_id_dying(struct snapshot_delete *d, unsigned id)
@@ -1498,7 +1483,7 @@ static int delete_dead_snapshots_process_key(struct btree_trans *trans,
: 0) ?:
bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &dst_iter);
+ bch2_trans_iter_exit(&dst_iter);
return ret;
}
@@ -1526,7 +1511,7 @@ static bool skip_unrelated_snapshot_tree(struct btree_trans *trans, struct btree
pos.snapshot = 0;
if (iter->btree_id != BTREE_ID_inodes)
pos.offset = U64_MAX;
- bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(pos));
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(pos));
}
return ret;
@@ -1604,7 +1589,7 @@ static int delete_dead_snapshot_keys_v2(struct btree_trans *trans)
while (1) {
struct bkey_s_c k;
ret = lockrestart_do(trans,
- bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
+ bkey_err(k = bch2_btree_iter_peek(&iter)));
if (ret)
break;
@@ -1627,12 +1612,12 @@ static int delete_dead_snapshot_keys_v2(struct btree_trans *trans)
if (ret)
break;
- bch2_btree_iter_set_pos(trans, &iter, POS(0, k.k->p.offset + 1));
+ bch2_btree_iter_set_pos(&iter, POS(0, k.k->p.offset + 1));
} else {
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
if (ret)
goto err;
@@ -1712,12 +1697,14 @@ static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
interior_delete_list *skip)
{
guard(rcu)();
+ struct snapshot_table *t = rcu_dereference(c->snapshots);
+
while (interior_delete_has_id(skip, id))
- id = __bch2_snapshot_parent(c, id);
+ id = __bch2_snapshot_parent(t, id);
while (n--) {
do {
- id = __bch2_snapshot_parent(c, id);
+ id = __bch2_snapshot_parent(t, id);
} while (interior_delete_has_id(skip, id));
}
@@ -1960,7 +1947,7 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
break;
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
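Note: the rewritten live_child() above no longer recurses; it reuses bch2_snapshot_tree_next(), an iterative pre-order successor (descend to the left child if there is one, otherwise climb until some parent offers an unvisited right child), and returns the first surviving leaf it meets. A standalone demo of that walk over a small hand-built tree; the parent/children arrays are made up for illustration.

    #include <stdio.h>

    #define NR 8

    /* a tiny snapshot-style tree rooted at node 1 */
    static const unsigned parent[NR]      = { 0, 0, 1, 1, 2, 2, 3, 3 };
    static const unsigned children[NR][2] = {
        {0,0}, {2,3}, {4,5}, {6,7}, {0,0}, {0,0}, {0,0}, {0,0},
    };

    /* pre-order successor, same shape as bch2_snapshot_tree_next() */
    static unsigned tree_next(unsigned id)
    {
        unsigned n = children[id][0];

        if (n)
            return n;

        unsigned p;
        while ((p = parent[id])) {
            n = children[p][1];
            if (n && n != id)
                return n;
            id = p;
        }
        return 0;
    }

    int main(void)
    {
        for (unsigned id = 1; id; id = tree_next(id))
            printf("%u ", id);      /* 1 2 4 5 3 6 7 */
        printf("\n");
        return 0;
    }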
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index 6dcb118b0fbd..fef32a0118c4 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -63,19 +63,19 @@ static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
return __bch2_snapshot_parent_early(c, id);
}
-static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
+static inline u32 __bch2_snapshot_parent(struct snapshot_table *t, u32 id)
{
- const struct snapshot_t *s = snapshot_t(c, id);
+ const struct snapshot_t *s = __snapshot_t(t, id);
if (!s)
return 0;
u32 parent = s->parent;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
parent &&
- s->depth != snapshot_t(c, parent)->depth + 1)
+ s->depth != __snapshot_t(t, parent)->depth + 1)
panic("id %u depth=%u parent %u depth=%u\n",
- id, snapshot_t(c, id)->depth,
- parent, snapshot_t(c, parent)->depth);
+ id, __snapshot_t(t, id)->depth,
+ parent, __snapshot_t(t, parent)->depth);
return parent;
}
@@ -83,14 +83,16 @@ static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
guard(rcu)();
- return __bch2_snapshot_parent(c, id);
+ return __bch2_snapshot_parent(rcu_dereference(c->snapshots), id);
}
static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
{
guard(rcu)();
+ struct snapshot_table *t = rcu_dereference(c->snapshots);
+
while (n--)
- id = __bch2_snapshot_parent(c, id);
+ id = __bch2_snapshot_parent(t, id);
return id;
}
@@ -100,23 +102,29 @@ u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);
static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
{
guard(rcu)();
+ struct snapshot_table *t = rcu_dereference(c->snapshots);
u32 parent;
- while ((parent = __bch2_snapshot_parent(c, id)))
+ while ((parent = __bch2_snapshot_parent(t, id)))
id = parent;
return id;
}
-static inline enum snapshot_id_state __bch2_snapshot_id_state(struct bch_fs *c, u32 id)
+static inline enum snapshot_id_state __bch2_snapshot_id_state(struct snapshot_table *t, u32 id)
{
- const struct snapshot_t *s = snapshot_t(c, id);
+ const struct snapshot_t *s = __snapshot_t(t, id);
return s ? s->state : SNAPSHOT_ID_empty;
}
static inline enum snapshot_id_state bch2_snapshot_id_state(struct bch_fs *c, u32 id)
{
guard(rcu)();
- return __bch2_snapshot_id_state(c, id);
+ return __bch2_snapshot_id_state(rcu_dereference(c->snapshots), id);
+}
+
+static inline bool __bch2_snapshot_exists(struct snapshot_table *t, u32 id)
+{
+ return __bch2_snapshot_id_state(t, id) == SNAPSHOT_ID_live;
}
static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id)
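Note: several snapshot helpers now take a struct snapshot_table * instead of struct bch_fs *, so callers like bch2_snapshot_nth_parent() and bch2_snapshot_root() dereference c->snapshots once per rcu guard and then walk the table directly. A standalone sketch of that table-first shape, with a plain array standing in for the rcu-protected table; struct snap and the sample entries are invented.

    #include <stdio.h>

    struct snap { unsigned parent; unsigned depth; };

    /* the table pointer stands in for one rcu_dereference(c->snapshots) */
    static unsigned snap_root(const struct snap *t, unsigned id)
    {
        unsigned p;

        while ((p = t[id].parent))
            id = p;
        return id;
    }

    static unsigned snap_nth_parent(const struct snap *t, unsigned id, unsigned n)
    {
        while (n--)
            id = t[id].parent;
        return id;
    }

    int main(void)
    {
        static const struct snap table[] = {
            [1] = { .parent = 0, .depth = 0 },
            [2] = { .parent = 1, .depth = 1 },
            [3] = { .parent = 2, .depth = 2 },
        };

        printf("root(3) = %u\n", snap_root(table, 3));                  /* 1 */
        printf("2nd parent of 3 = %u\n", snap_nth_parent(table, 3, 2)); /* 1 */
        return 0;
    }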
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index dfe4b6ae0733..a6503ec58acc 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -26,7 +26,7 @@ static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dir
return ret;
ret = bkey_is_inode(k.k);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
}
@@ -206,7 +206,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans,
bch_err_throw(c, transaction_restart_nested);
err:
fsck_err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -328,8 +328,7 @@ duplicate_entries:
}
out:
fsck_err:
- bch2_trans_iter_exit(trans, dup_iter);
- printbuf_exit(&buf);
+ bch2_trans_iter_exit(dup_iter);
if (free_snapshots_seen)
darray_exit(&s->ids);
return ret;
@@ -372,11 +371,11 @@ int __bch2_str_hash_check_key(struct btree_trans *trans,
if (bkey_deleted(k.k))
goto bad_hash;
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
fsck_err:
return ret;
bad_hash:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
/*
* Before doing any repair, check hash_info itself:
*/
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 353a927857f1..7b4e7e9eb993 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -173,7 +173,7 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
break;
}
}
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return bkey_s_c_err(ret ?: bch_err_throw(trans->c, ENOENT_str_hash_lookup));
}
@@ -215,7 +215,7 @@ bch2_hash_hole(struct btree_trans *trans,
BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
if (!is_visible_key(desc, inum, k))
return 0;
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(iter);
return ret ?: bch_err_throw(trans->c, ENOSPC_str_hash_create);
}
@@ -230,11 +230,11 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_trans_copy_iter(trans, &iter, start);
+ bch2_trans_copy_iter(&iter, start);
- bch2_btree_iter_advance(trans, &iter);
+ bch2_btree_iter_advance(&iter);
- for_each_btree_key_continue_norestart(trans, iter, BTREE_ITER_slots, k, ret) {
+ for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_hash_whiteout)
break;
@@ -246,7 +246,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
}
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -280,7 +280,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
}
if (!slot.path && !(flags & STR_HASH_must_replace))
- bch2_trans_copy_iter(trans, &slot, iter);
+ bch2_trans_copy_iter(&slot, iter);
if (k.k->type != KEY_TYPE_hash_whiteout)
goto not_found;
@@ -289,14 +289,14 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
if (!ret)
ret = bch_err_throw(c, ENOSPC_str_hash_create);
out:
- bch2_trans_iter_exit(trans, &slot);
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(&slot);
+ bch2_trans_iter_exit(iter);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
found:
found = true;
not_found:
if (found && (flags & STR_HASH_must_create)) {
- bch2_trans_iter_exit(trans, &slot);
+ bch2_trans_iter_exit(&slot);
return k;
} else if (!found && (flags & STR_HASH_must_replace)) {
ret = bch_err_throw(c, ENOENT_str_hash_set_must_replace);
@@ -326,7 +326,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
if (ret)
return ret;
if (k.k) {
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return bch_err_throw(trans->c, EEXIST_str_hash_set);
}
@@ -389,7 +389,7 @@ int bch2_hash_delete(struct btree_trans *trans,
return ret;
ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 2d2d6b22df88..c3066dc56601 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -176,7 +176,7 @@ static int check_subvol(struct btree_trans *trans,
}
err:
fsck_err:
- bch2_trans_iter_exit(trans, &subvol_children_iter);
+ bch2_trans_iter_exit(&subvol_children_iter);
return ret;
}
@@ -297,11 +297,8 @@ int bch2_subvolume_trigger(struct btree_trans *trans,
int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol)
{
- struct btree_iter iter;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
- struct bkey_s_c k = bch2_btree_iter_peek(trans, &iter);
- bch2_trans_iter_exit(trans, &iter);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
+ struct bkey_s_c k = bch2_btree_iter_peek(&iter);
return bkey_err(k) ?: k.k && k.k->p.inode == subvol
? bch_err_throw(trans->c, ENOTEMPTY_subvol_not_empty)
@@ -373,7 +370,7 @@ int __bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
if (likely(!ret))
*snapid = le32_to_cpu(subvol.v->snapshot);
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -486,9 +483,9 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
ret = bch2_btree_delete_at(trans, &subvol_iter, 0) ?:
bch2_snapshot_node_set_deleted(trans, snapid);
err:
- bch2_trans_iter_exit(trans, &snapshot_tree_iter);
- bch2_trans_iter_exit(trans, &snapshot_iter);
- bch2_trans_iter_exit(trans, &subvol_iter);
+ bch2_trans_iter_exit(&snapshot_tree_iter);
+ bch2_trans_iter_exit(&snapshot_iter);
+ bch2_trans_iter_exit(&subvol_iter);
return ret;
}
@@ -590,7 +587,7 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
n->v.fs_path_parent = 0;
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -602,7 +599,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
bool ro)
{
struct bch_fs *c = trans->c;
- struct btree_iter dst_iter, src_iter = {};
+ struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
struct bkey_i_subvolume *new_subvol = NULL;
struct bkey_i_subvolume *src_subvol = NULL;
u32 parent = 0, new_nodes[2], snapshot_subvols[2];
@@ -665,8 +662,8 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
*new_subvolid = new_subvol->k.p.offset;
*new_snapshotid = new_nodes[0];
err:
- bch2_trans_iter_exit(trans, &src_iter);
- bch2_trans_iter_exit(trans, &dst_iter);
+ bch2_trans_iter_exit(&src_iter);
+ bch2_trans_iter_exit(&dst_iter);
return ret;
}
@@ -727,7 +724,7 @@ static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
ret = bch2_inode_write(trans, &iter, &inode);
err:
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 075f55e25c70..b6d7c1f4a256 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -33,45 +33,41 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);
static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_max_type(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos end, u32 subvolid, unsigned flags)
+bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end,
+ u32 subvolid, unsigned flags)
{
u32 snapshot;
- int ret = bch2_subvolume_get_snapshot(trans, subvolid, &snapshot);
+ int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
if (ret)
return bkey_s_c_err(ret);
- bch2_btree_iter_set_snapshot(trans, iter, snapshot);
- return bch2_btree_iter_peek_max_type(trans, iter, end, flags);
+ bch2_btree_iter_set_snapshot(iter, snapshot);
+ return bch2_btree_iter_peek_max_type(iter, end, flags);
}
#define for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \
_end, _subvolid, _flags, _k, _do) \
({ \
- struct bkey_s_c _k; \
int _ret3 = 0; \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_in_subvolume_max_type(trans, &(_iter),\
+ struct bkey_s_c _k = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter),\
_end, _subvolid, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
\
- bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
})
#define for_each_btree_key_in_subvolume_max(_trans, _iter, _btree_id, \
_start, _end, _subvolid, _flags, _k, _do) \
({ \
- struct btree_iter _iter; \
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
+ CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \
\
for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \
_end, _subvolid, _flags, _k, _do); \
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 40fa87ce1d09..c88759964575 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -79,7 +79,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
} else {
darray_for_each(c->incompat_versions_requested, i)
if (version == *i)
- return -BCH_ERR_may_not_use_incompat_feature;
+ return bch_err_throw(c, may_not_use_incompat_feature);
darray_push(&c->incompat_versions_requested, version);
CLASS(printbuf, buf)();
@@ -90,7 +90,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
prt_printf(&buf, "\n set version_upgrade=incompat to enable");
bch_notice(c, "%s", buf.buf);
- return -BCH_ERR_may_not_use_incompat_feature;
+ return bch_err_throw(c, may_not_use_incompat_feature);
}
}
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 4e038f655f83..b3b2d8353a36 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -514,6 +514,10 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
return ret;
+ ret = bch2_fs_mark_dirty(c);
+ if (ret)
+ return ret;
+
clear_bit(BCH_FS_clean_shutdown, &c->flags);
scoped_guard(rcu)
@@ -537,10 +541,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch2_journal_space_available(&c->journal);
}
- ret = bch2_fs_mark_dirty(c);
- if (ret)
- return ret;
-
/*
* Don't jump to our error path, and call bch2_fs_read_only(), unless we
* successfully marked the filesystem dirty
@@ -729,6 +729,8 @@ void __bch2_fs_stop(struct bch_fs *c)
cancel_work_sync(&ca->io_error_work);
cancel_work_sync(&c->read_only_work);
+
+ flush_work(&c->btree_interior_update_work);
}
void bch2_fs_free(struct bch_fs *c)
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 158f526e3dcc..bd3fa9c3372d 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -18,6 +18,7 @@
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
+#include "btree_write_buffer.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
@@ -150,6 +151,7 @@ write_attribute(trigger_journal_flush);
write_attribute(trigger_journal_writes);
write_attribute(trigger_btree_cache_shrink);
write_attribute(trigger_btree_key_cache_shrink);
+write_attribute(trigger_btree_write_buffer_flush);
write_attribute(trigger_btree_updates);
write_attribute(trigger_freelist_wakeup);
write_attribute(trigger_recalc_capacity);
@@ -539,6 +541,11 @@ STORE(bch2_fs)
c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
}
+ if (attr == &sysfs_trigger_btree_write_buffer_flush)
+ bch2_trans_do(c,
+ (bch2_btree_write_buffer_flush_sync(trans),
+ bch2_trans_begin(trans)));
+
if (attr == &sysfs_trigger_gc)
bch2_gc_gens(c);
@@ -709,6 +716,7 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_trigger_journal_writes,
&sysfs_trigger_btree_cache_shrink,
&sysfs_trigger_btree_key_cache_shrink,
+ &sysfs_trigger_btree_write_buffer_flush,
&sysfs_trigger_btree_updates,
&sysfs_trigger_freelist_wakeup,
&sysfs_trigger_recalc_capacity,
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index ea27df30cfcb..baaaedf68422 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -31,76 +31,66 @@ static void delete_test_keys(struct bch_fs *c)
static int test_delete(struct bch_fs *c, u64 nr)
{
- CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
struct bkey_i_cookie k;
- int ret;
-
bkey_cookie_init(&k.k_i);
k.k.p.snapshot = U32_MAX;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
- BTREE_ITER_intent);
+ CLASS(btree_trans, trans)(c);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent);
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(trans, &iter) ?:
+ int ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
- goto err;
+ return ret;
pr_info("deleting once");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(trans, &iter) ?:
+ bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (first)");
if (ret)
- goto err;
+ return ret;
pr_info("deleting twice");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(trans, &iter) ?:
+ bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (second)");
if (ret)
- goto err;
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return ret;
+
+ return 0;
}
static int test_delete_written(struct bch_fs *c, u64 nr)
{
- CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
struct bkey_i_cookie k;
- int ret;
-
bkey_cookie_init(&k.k_i);
k.k.p.snapshot = U32_MAX;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
- BTREE_ITER_intent);
+ CLASS(btree_trans, trans)(c);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent);
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(trans, &iter) ?:
+ int ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
- goto err;
+ return ret;
bch2_trans_unlock(trans);
bch2_journal_flush_all_pins(&c->journal);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(trans, &iter) ?:
+ bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error");
if (ret)
- goto err;
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return ret;
+
+ return 0;
}
static int test_iterate(struct bch_fs *c, u64 nr)
@@ -343,19 +333,15 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
delete_test_keys(c);
CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
- struct bkey_s_c k;
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0);
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+ struct bkey_s_c k;
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- bch2_trans_iter_exit(trans, &iter);
return 0;
}
@@ -364,19 +350,15 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
delete_test_keys(c);
CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
- struct bkey_s_c k;
+ CLASS(btree_iter, iter)(trans, BTREE_ID_extents, SPOS(0, 0, U32_MAX), 0);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), 0);
-
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+ struct bkey_s_c k;
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- bch2_trans_iter_exit(trans, &iter);
return 0;
}
@@ -470,25 +452,21 @@ static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
/* Test skipping over keys in unrelated snapshots: */
static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
{
- struct btree_iter iter;
- struct bkey_s_c k;
struct bkey_i_cookie cookie;
- int ret;
-
bkey_cookie_init(&cookie.k_i);
cookie.k.p.snapshot = snapid_hi;
- ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
+ int ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
if (ret)
return ret;
CLASS(btree_trans, trans)(c);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, snapid_lo), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, snapid_lo), 0);
+
+ struct bkey_s_c k;
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
BUG_ON(k.k->p.snapshot != U32_MAX);
- bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -583,24 +561,18 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr)
static int rand_lookup(struct bch_fs *c, u64 nr)
{
CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0);
for (u64 i = 0; i < nr; i++) {
- bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX));
+ bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
- ret = bkey_err(k);
+ struct bkey_s_c k;
+ int ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
if (ret)
- break;
+ return ret;
}
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return 0;
}
static int rand_mixed_trans(struct btree_trans *trans,
@@ -611,9 +583,9 @@ static int rand_mixed_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, U32_MAX));
+ bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
- k = bch2_btree_iter_peek(trans, iter);
+ k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
bch_err_msg(trans->c, ret, "lookup error");
if (ret)
@@ -631,45 +603,33 @@ static int rand_mixed_trans(struct btree_trans *trans,
static int rand_mixed(struct bch_fs *c, u64 nr)
{
CLASS(btree_trans, trans)(c);
- struct btree_iter iter;
- struct bkey_i_cookie cookie;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0);
for (u64 i = 0; i < nr; i++) {
u64 rand = test_rand();
- ret = commit_do(trans, NULL, NULL, 0,
+ struct bkey_i_cookie cookie;
+ int ret = commit_do(trans, NULL, NULL, 0,
rand_mixed_trans(trans, &iter, &cookie, i, rand));
if (ret)
- break;
+ return ret;
}
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return 0;
}
static int __do_delete(struct btree_trans *trans, struct bpos pos)
{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret = 0;
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
- BTREE_ITER_intent);
- k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX));
- ret = bkey_err(k);
+ CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, pos,
+ BTREE_ITER_intent);
+ struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX));
+ int ret = bkey_err(k);
if (ret)
- goto err;
+ return ret;
if (!k.k)
- goto err;
+ return 0;
- ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return bch2_btree_delete_at(trans, &iter, 0);
}
static int rand_delete(struct bch_fs *c, u64 nr)
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 903e20cd34fa..6094b568dd33 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -157,7 +157,7 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
else
memcpy(buffer, xattr_val(xattr.v), ret);
}
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_exit(&iter);
return ret;
}
@@ -168,7 +168,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
int type, int flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter inode_iter = {};
+ struct btree_iter inode_iter = { NULL };
int ret;
ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
@@ -184,7 +184,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
inode_u->bi_ctime = bch2_current_time(c);
ret = bch2_inode_write(trans, &inode_iter, inode_u);
- bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(&inode_iter);
if (ret)
return ret;