Diffstat (limited to 'fs/bcachefs')
65 files changed, 1377 insertions, 1543 deletions
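Most of the churn below is one mechanical API change: struct btree_iter now carries a pointer to the btree_trans it belongs to, so helpers such as bch2_trans_iter_exit(), bch2_btree_iter_peek_slot(), bch2_btree_iter_set_pos() and friends drop their explicit trans argument, and many call sites switch to CLASS(btree_iter, ...) scoped iterators that exit automatically at end of scope. The toy C program below is a minimal sketch of that refactoring pattern using made-up names (toy_trans, toy_iter, toy_iter_init, toy_iter_exit, SCOPED_ITER); it illustrates the shape of the change, it is not bcachefs code.

```c
#include <stdio.h>

struct toy_trans { int nr_live_iters; };

/* After the refactor each iterator carries a back-pointer to its
 * transaction, so helpers only need the iterator itself. */
struct toy_iter {
	struct toy_trans *trans;
	long pos;
};

static void toy_iter_init(struct toy_trans *trans, struct toy_iter *iter, long pos)
{
	iter->trans = trans;
	iter->pos = pos;
	trans->nr_live_iters++;
}

/* Old style: toy_iter_exit(trans, iter).  New style: the trans argument is
 * redundant because the iterator already knows its owner. */
static void toy_iter_exit(struct toy_iter *iter)
{
	if (!iter->trans)
		return;
	iter->trans->nr_live_iters--;
	iter->trans = NULL;	/* mirrors iter->trans = NULL in the patch */
}

static void toy_iter_exit_cleanup(struct toy_iter *iter)
{
	toy_iter_exit(iter);
}

/* Rough stand-in for the CLASS(btree_iter, name)(...) pattern: a GCC/Clang
 * cleanup attribute runs the exit helper when the variable goes out of scope,
 * so the explicit exit call before each early return disappears. */
#define SCOPED_ITER(name, trans, pos)						\
	struct toy_iter name __attribute__((cleanup(toy_iter_exit_cleanup)));	\
	toy_iter_init(trans, &name, pos)

int main(void)
{
	struct toy_trans trans = { 0 };

	{
		SCOPED_ITER(iter, &trans, 42);
		printf("iter at pos %ld, live iters %d\n", iter.pos, trans.nr_live_iters);
	}	/* iter exits here automatically */

	printf("live iters after scope: %d\n", trans.nr_live_iters);
	return 0;
}
```

The same idea explains the many hunks that delete "goto out" / "bch2_trans_iter_exit(trans, &iter)" pairs in favour of plain returns: once the iterator cleans itself up (or is scoped), the manual unwind paths are no longer needed.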
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c index 8f970dc19dea..3befa1f36e72 100644 --- a/fs/bcachefs/acl.c +++ b/fs/bcachefs/acl.c @@ -273,7 +273,7 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu) struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0); - struct btree_iter iter = {}; + struct btree_iter iter = { NULL }; struct posix_acl *acl = NULL; if (rcu) @@ -303,7 +303,7 @@ err: if (!IS_ERR_OR_NULL(acl)) set_cached_acl(&inode->v, type, acl); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return acl; } @@ -343,7 +343,7 @@ int bch2_set_acl(struct mnt_idmap *idmap, { struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter inode_iter = {}; + struct btree_iter inode_iter = { NULL }; struct bch_inode_unpacked inode_u; struct posix_acl *acl; umode_t mode; @@ -379,7 +379,7 @@ retry: ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?: bch2_trans_commit(trans, NULL, NULL, 0); btree_err: - bch2_trans_iter_exit(trans, &inode_iter); + bch2_trans_iter_exit(&inode_iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; @@ -431,7 +431,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum, *new_acl = acl; acl = NULL; err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (!IS_ERR_OR_NULL(acl)) kfree(acl); return ret; diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index f1d35b7f3fc5..e988e8f75146 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -20,6 +20,7 @@ #include "enumerated_ref.h" #include "error.h" #include "lru.h" +#include "progress.h" #include "recovery.h" #include "varint.h" @@ -389,7 +390,7 @@ void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out) if (k.k->type == KEY_TYPE_alloc_v4) { void *src, *dst; - *out = *bkey_s_c_to_alloc_v4(k).v; + bkey_val_copy(out, bkey_s_c_to_alloc_v4(k)); src = alloc_v4_backpointers(out); SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s); @@ -486,7 +487,7 @@ bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_i goto err; return a; err: - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ERR_PTR(ret); } @@ -505,18 +506,18 @@ struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, if ((void *) k.v >= trans->mem && (void *) k.v < trans->mem + trans->mem_top) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return container_of(bkey_s_c_to_alloc_v4(k).v, struct bkey_i_alloc_v4, v); } struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k); if (IS_ERR(a)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return a; } ret = bch2_trans_update_ip(trans, &iter, &a->k_i, flags, _RET_IP_); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return unlikely(ret) ? 
ERR_PTR(ret) : a; } @@ -639,7 +640,7 @@ int bch2_alloc_read(struct bch_fs *c) * bch2_check_alloc_key() which runs later: */ if (!ca) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } @@ -660,17 +661,17 @@ int bch2_alloc_read(struct bch_fs *c) * bch2_check_alloc_key() which runs later: */ if (!ca) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } if (k.k->p.offset < ca->mi.first_bucket) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket)); continue; } if (k.k->p.offset >= ca->mi.nbuckets) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } @@ -743,8 +744,8 @@ static int bch2_bucket_do_index(struct btree_trans *trans, return 0; } - struct btree_iter iter; - struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_intent); + struct bkey_s_c old = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(old); if (ret) return ret; @@ -754,30 +755,25 @@ static int bch2_bucket_do_index(struct btree_trans *trans, trans, alloc_k, set, btree == BTREE_ID_need_discard, false); - ret = bch2_btree_bit_mod_iter(trans, &iter, set); + return bch2_btree_bit_mod_iter(trans, &iter, set); fsck_err: - bch2_trans_iter_exit(trans, &iter); return ret; } static noinline int bch2_bucket_gen_update(struct btree_trans *trans, struct bpos bucket, u8 gen) { - struct btree_iter iter; - unsigned offset; - struct bpos pos = alloc_gens_pos(bucket, &offset); - struct bkey_i_bucket_gens *g; - struct bkey_s_c k; - int ret; - - g = bch2_trans_kmalloc(trans, sizeof(*g)); - ret = PTR_ERR_OR_ZERO(g); + struct bkey_i_bucket_gens *g = bch2_trans_kmalloc(trans, sizeof(*g)); + int ret = PTR_ERR_OR_ZERO(g); if (ret) return ret; - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos, - BTREE_ITER_intent| - BTREE_ITER_with_updates); + unsigned offset; + struct bpos pos = alloc_gens_pos(bucket, &offset); + + CLASS(btree_iter, iter)(trans, BTREE_ID_bucket_gens, pos, + BTREE_ITER_intent|BTREE_ITER_with_updates); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) return ret; @@ -792,7 +788,7 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans, g->v.gens[offset] = gen; ret = bch2_trans_update(trans, &iter, &g->k_i, 0); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1043,10 +1039,9 @@ invalid_bucket: * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for * extents style btrees, but works on non-extents btrees: */ -static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter, - struct bpos end, struct bkey *hole) +static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole) { - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); if (bkey_err(k)) return k; @@ -1057,9 +1052,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct bt struct btree_iter iter2; struct bpos next; - bch2_trans_copy_iter(trans, &iter2, iter); + bch2_trans_copy_iter(&iter2, iter); - struct btree_path *path = btree_iter_path(trans, 
iter); + struct btree_path *path = btree_iter_path(iter->trans, iter); if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX)) end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p)); @@ -1069,9 +1064,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct bt * btree node min/max is a closed interval, upto takes a half * open interval: */ - k = bch2_btree_iter_peek_max(trans, &iter2, end); + k = bch2_btree_iter_peek_max(&iter2, end); next = iter2.pos; - bch2_trans_iter_exit(trans, &iter2); + bch2_trans_iter_exit(&iter2); BUG_ON(next.offset >= iter->pos.offset + U32_MAX); @@ -1111,14 +1106,13 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck return *ca != NULL; } -static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans, - struct btree_iter *iter, - struct bch_dev **ca, struct bkey *hole) +static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, + struct bch_dev **ca, struct bkey *hole) { - struct bch_fs *c = trans->c; + struct bch_fs *c = iter->trans->c; struct bkey_s_c k; again: - k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole); + k = bch2_get_key_or_hole(iter, POS_MAX, hole); if (bkey_err(k)) return k; @@ -1131,7 +1125,7 @@ again: if (!next_bucket(c, ca, &hole_start)) return bkey_s_c_null; - bch2_btree_iter_set_pos(trans, iter, hole_start); + bch2_btree_iter_set_pos(iter, hole_start); goto again; } @@ -1172,8 +1166,8 @@ int bch2_check_alloc_key(struct btree_trans *trans, a = bch2_alloc_to_v4(alloc_k, &a_convert); - bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p); - k = bch2_btree_iter_peek_slot(trans, discard_iter); + bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p); + k = bch2_btree_iter_peek_slot(discard_iter); ret = bkey_err(k); if (ret) return ret; @@ -1186,8 +1180,8 @@ int bch2_check_alloc_key(struct btree_trans *trans, return ret; } - bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); - k = bch2_btree_iter_peek_slot(trans, freespace_iter); + bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); + k = bch2_btree_iter_peek_slot(freespace_iter); ret = bkey_err(k); if (ret) return ret; @@ -1200,8 +1194,8 @@ int bch2_check_alloc_key(struct btree_trans *trans, return ret; } - bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset)); - k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter); + bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset)); + k = bch2_btree_iter_peek_slot(bucket_gens_iter); ret = bkey_err(k); if (ret) return ret; @@ -1250,9 +1244,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans, if (!ca->mi.freespace_initialized) return 0; - bch2_btree_iter_set_pos(trans, freespace_iter, start); + bch2_btree_iter_set_pos(freespace_iter, start); - k = bch2_btree_iter_peek_slot(trans, freespace_iter); + k = bch2_btree_iter_peek_slot(freespace_iter); ret = bkey_err(k); if (ret) return ret; @@ -1298,9 +1292,9 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans, unsigned i, gens_offset, gens_end_offset; int ret; - bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset)); + bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset)); - k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter); + k = bch2_btree_iter_peek_slot(bucket_gens_iter); ret = bkey_err(k); if (ret) return ret; @@ -1354,8 +1348,8 @@ struct check_discard_freespace_key_async { static int 
bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0); + CLASS(btree_iter, iter)(trans, pos.btree, pos.pos, 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; @@ -1364,7 +1358,7 @@ static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct ret = k.k->type != KEY_TYPE_set ? __bch2_check_discard_freespace_key(trans, &iter, &gen, FSCK_ERR_SILENT) : 0; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1435,8 +1429,8 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i *gen = a->gen; out: fsck_err: - bch2_set_btree_iter_dontneed(trans, &alloc_iter); - bch2_trans_iter_exit(trans, &alloc_iter); + bch2_set_btree_iter_dontneed(&alloc_iter); + bch2_trans_iter_exit(&alloc_iter); return ret; delete: if (!async_repair) { @@ -1553,6 +1547,9 @@ int bch2_check_alloc_info(struct bch_fs *c) struct bkey_s_c k; int ret = 0; + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc)); + CLASS(btree_trans, trans)(c); bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch); @@ -1568,7 +1565,7 @@ int bch2_check_alloc_info(struct bch_fs *c) bch2_trans_begin(trans); - k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole); + k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole); ret = bkey_err(k); if (ret) goto bkey_err; @@ -1576,6 +1573,8 @@ int bch2_check_alloc_info(struct bch_fs *c) if (!k.k) break; + progress_update_iter(trans, &progress, &iter); + if (k.k->type) { next = bpos_nosnap_successor(k.k->p); @@ -1606,17 +1605,17 @@ int bch2_check_alloc_info(struct bch_fs *c) if (ret) goto bkey_err; - bch2_btree_iter_set_pos(trans, &iter, next); + bch2_btree_iter_set_pos(&iter, next); bkey_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) break; } - bch2_trans_iter_exit(trans, &bucket_gens_iter); - bch2_trans_iter_exit(trans, &freespace_iter); - bch2_trans_iter_exit(trans, &discard_iter); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&bucket_gens_iter); + bch2_trans_iter_exit(&freespace_iter); + bch2_trans_iter_exit(&discard_iter); + bch2_trans_iter_exit(&iter); bch2_dev_put(ca); ca = NULL; @@ -1634,7 +1633,7 @@ bkey_err: BTREE_ITER_prefetch); while (1) { bch2_trans_begin(trans); - k = bch2_btree_iter_peek(trans, &iter); + k = bch2_btree_iter_peek(&iter); if (!k.k) break; @@ -1651,9 +1650,9 @@ bkey_err: break; } - bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos)); + bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos)); } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -1677,7 +1676,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans, CLASS(printbuf, buf)(); int ret; - alloc_k = bch2_btree_iter_peek(trans, alloc_iter); + alloc_k = bch2_btree_iter_peek(alloc_iter); if (!alloc_k.k) return 0; @@ -1736,12 +1735,16 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c) bch2_bkey_buf_init(&last_flushed); bkey_init(&last_flushed.k->k); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc)); + CLASS(btree_trans, trans)(c); int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_alloc_to_lru_ref(trans, 
&iter, &last_flushed)) ?: - bch2_check_stripe_to_lru_refs(trans); + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed); + }))?: bch2_check_stripe_to_lru_refs(trans); bch2_bkey_buf_exit(&last_flushed, c); return ret; @@ -1789,16 +1792,12 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct bpos pos = need_discard_iter->pos; - struct btree_iter iter = {}; - struct bkey_s_c k; - struct bkey_i_alloc_v4 *a; - CLASS(printbuf, buf)(); bool discard_locked = false; int ret = 0; if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) { s->open++; - goto out; + return 0; } u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal, @@ -1806,30 +1805,29 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, if (seq_ready > c->journal.flushed_seq_ondisk) { if (seq_ready > c->journal.flushing_seq) s->need_journal_commit++; - goto out; + return 0; } - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, - need_discard_iter->pos, - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, need_discard_iter->pos, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) - goto out; + return ret; - a = bch2_alloc_to_v4_mut(trans, k); + struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k); ret = PTR_ERR_OR_ZERO(a); if (ret) - goto out; + return ret; if (a->v.data_type != BCH_DATA_need_discard) { if (need_discard_or_freespace_err(trans, k, true, true, true)) { ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false); if (ret) - goto out; + return ret; goto commit; } - goto out; + return 0; } if (!fastpath) { @@ -1882,7 +1880,6 @@ fsck_err: discard_in_flight_remove(ca, iter.pos.offset); if (!ret) s->seen++; - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -1946,9 +1943,8 @@ static int bch2_do_discards_fast_one(struct btree_trans *trans, struct bpos *discard_pos_done, struct discard_buckets_state *s) { - struct btree_iter need_discard_iter; - struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter, - BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); + CLASS(btree_iter, need_discard_iter)(trans, BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); + struct bkey_s_c discard_k = bch2_btree_iter_peek_slot(&need_discard_iter); int ret = bkey_err(discard_k); if (ret) return ret; @@ -1957,12 +1953,10 @@ static int bch2_do_discards_fast_one(struct btree_trans *trans, trans, discarding_bucket_not_in_need_discard_btree, "attempting to discard bucket %u:%llu not in need_discard btree", ca->dev_idx, bucket)) - goto out; + return 0; - ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); -out: + return bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); fsck_err: - bch2_trans_iter_exit(trans, &need_discard_iter); return ret; } @@ -2055,7 +2049,7 @@ static int invalidate_one_bp(struct btree_trans *trans, bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx); err: - bch2_trans_iter_exit(trans, &extent_iter); + bch2_trans_iter_exit(&extent_iter); return ret; } @@ -2098,7 +2092,6 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); struct bpos bucket = u64_to_bucket(lru_k.k->p.offset); - struct btree_iter alloc_iter = {}; int ret = 0; if (*nr_to_invalidate <= 0) @@ -2109,15 +2102,14 @@ static int invalidate_one_bucket(struct 
btree_trans *trans, "lru key points to nonexistent device:bucket %llu:%llu", bucket.inode, bucket.offset)) return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); - goto out; + return 0; } if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset)) return 0; - struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, - BTREE_ID_alloc, bucket, - BTREE_ITER_cached); + CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, BTREE_ITER_cached); + struct bkey_s_c alloc_k = bch2_btree_iter_peek_slot(&alloc_iter); ret = bkey_err(alloc_k); if (ret) return ret; @@ -2127,7 +2119,7 @@ static int invalidate_one_bucket(struct btree_trans *trans, /* We expect harmless races here due to the btree write buffer: */ if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a)) - goto out; + return 0; /* * Impossible since alloc_lru_idx_read() only returns nonzero if the @@ -2142,7 +2134,7 @@ static int invalidate_one_bucket(struct btree_trans *trans, if (!a->cached_sectors) { bch2_check_bucket_backpointer_mismatch(trans, ca, bucket.offset, true, last_flushed); - goto out; + return 0; } unsigned cached_sectors = a->cached_sectors; @@ -2150,13 +2142,11 @@ static int invalidate_one_bucket(struct btree_trans *trans, ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed); if (ret) - goto out; + return ret; trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors); --*nr_to_invalidate; -out: fsck_err: - bch2_trans_iter_exit(trans, &alloc_iter); return ret; } @@ -2165,9 +2155,9 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter { struct bkey_s_c k; again: - k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); + k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); if (!k.k && !*wrapped) { - bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0)); + bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0)); *wrapped = true; goto again; } @@ -2217,9 +2207,9 @@ restart_err: if (ret) break; - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); err: bch2_bkey_buf_exit(&last_flushed, c); enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates); @@ -2285,7 +2275,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, break; } - k = bch2_get_key_or_hole(trans, &iter, end, &hole); + k = bch2_get_key_or_hole(&iter, end, &hole); ret = bkey_err(k); if (ret) goto bkey_err; @@ -2304,7 +2294,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, if (ret) goto bkey_err; - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); } else { struct bkey_i *freespace; @@ -2324,7 +2314,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, if (ret) goto bkey_err; - bch2_btree_iter_set_pos(trans, &iter, k.k->p); + bch2_btree_iter_set_pos(&iter, k.k->p); } bkey_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -2333,7 +2323,7 @@ bkey_err: break; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret < 0) { bch_err_msg(ca, ret, "initializing free space"); @@ -2437,7 +2427,7 @@ static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev, ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: bch2_trans_commit(trans, NULL, NULL, 0); out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git 
a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index fd1415524e46..75f49211eba6 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -321,7 +321,7 @@ again: bucket = sector_to_bucket(ca, round_up(bucket_to_sector(ca, bucket) + 1, 1ULL << ca->mi.btree_bitmap_shift)); - bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket)); + bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket)); req->counters.buckets_seen++; req->counters.skipped_mi_btree_bitmap++; continue; @@ -348,12 +348,12 @@ again: ? __try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl) : NULL; next: - bch2_set_btree_iter_dontneed(trans, &citer); - bch2_trans_iter_exit(trans, &citer); + bch2_set_btree_iter_dontneed(&citer); + bch2_trans_iter_exit(&citer); if (ob) break; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); alloc_cursor = iter.pos.offset; @@ -375,7 +375,6 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, struct closure *cl) { struct bch_dev *ca = req->ca; - struct btree_iter iter; struct bkey_s_c k; struct open_bucket *ob = NULL; u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; @@ -409,7 +408,7 @@ again: 1ULL << ca->mi.btree_bitmap_shift)); alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56)); - bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor)); + bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor)); req->counters.skipped_mi_btree_bitmap++; goto next; } @@ -418,7 +417,7 @@ again: if (ob) { if (!IS_ERR(ob)) *dev_alloc_cursor = iter.pos.offset; - bch2_set_btree_iter_dontneed(trans, &iter); + bch2_set_btree_iter_dontneed(&iter); break; } @@ -430,7 +429,6 @@ next: break; } fail: - bch2_trans_iter_exit(trans, &iter); BUG_ON(ob && ret); diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index bd26ab3e6812..1aab9a63d0cb 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -154,12 +154,10 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, struct bkey_i_backpointer *bp, bool insert) { - struct btree_iter bp_iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, - bp->k.p, - BTREE_ITER_intent| - BTREE_ITER_slots| - BTREE_ITER_with_updates); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp->k.p, + BTREE_ITER_intent| + BTREE_ITER_with_updates); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&bp_iter); int ret = bkey_err(k); if (ret) return ret; @@ -170,7 +168,7 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, memcmp(bkey_s_c_to_backpointer(k).v, &bp->v, sizeof(bp->v)))) { ret = backpointer_mod_err(trans, orig_k, bp, k, insert); if (ret) - goto err; + return ret; } if (!insert) { @@ -178,10 +176,7 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, set_bkey_val_u64s(&bp->k, 0); } - ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0); -err: - bch2_trans_iter_exit(trans, &bp_iter); - return ret; + return bch2_trans_update(trans, &bp_iter, &bp->k_i, 0); } static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos) @@ -282,7 +277,7 @@ static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans, 0, bp.v->level - 1, 0); - struct btree *b = bch2_btree_iter_peek_node(trans, iter); + struct btree *b = bch2_btree_iter_peek_node(iter); if (IS_ERR_OR_NULL(b)) goto err; @@ -300,7 +295,7 @@ static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans, b = ret ? 
ERR_PTR(ret) : NULL; } err: - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return b; } @@ -322,9 +317,9 @@ static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans, 0, bp.v->level, iter_flags); - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); if (bkey_err(k)) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return k; } @@ -344,7 +339,7 @@ static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans, extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp)) return k; - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); if (!bp.v->level) { int ret = backpointer_target_not_found(trans, bp, k, last_flushed, commit); @@ -384,8 +379,6 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st return 0; struct bch_fs *c = trans->c; - struct btree_iter alloc_iter = {}; - struct bkey_s_c alloc_k; CLASS(printbuf, buf)(); int ret = 0; @@ -393,24 +386,25 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) { ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); if (ret) - goto out; + return ret; if (fsck_err(trans, backpointer_to_missing_device, "backpointer for missing device:\n%s", (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) ret = bch2_backpointer_del(trans, k.k->p); - goto out; + return ret; } - alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, bucket, 0); + CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, 0); + struct bkey_s_c alloc_k = bch2_btree_iter_peek_slot(&alloc_iter); ret = bkey_err(alloc_k); if (ret) - goto out; + return ret; if (alloc_k.k->type != KEY_TYPE_alloc_v4) { ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); if (ret) - goto out; + return ret; if (fsck_err(trans, backpointer_to_missing_alloc, "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", @@ -418,9 +412,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) ret = bch2_backpointer_del(trans, k.k->p); } -out: fsck_err: - bch2_trans_iter_exit(trans, &alloc_iter); return ret; } @@ -542,25 +534,24 @@ static int check_bp_exists(struct btree_trans *trans, bpos_gt(bp->k.p, s->bp_end)) return 0; - struct btree_iter bp_iter; - struct bkey_s_c bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, bp->k.p, 0); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp->k.p, 0); + struct bkey_s_c bp_k = bch2_btree_iter_peek_slot(&bp_iter); int ret = bkey_err(bp_k); if (ret) - goto err; + return ret; if (bp_k.k->type != KEY_TYPE_backpointer || memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp->v, sizeof(bp->v))) { ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed); if (ret) - goto err; + return ret; goto check_existing_bp; } out: err: fsck_err: - bch2_trans_iter_exit(trans, &other_extent_iter); - bch2_trans_iter_exit(trans, &bp_iter); + bch2_trans_iter_exit(&other_extent_iter); return ret; check_existing_bp: /* Do we have a backpointer for a different extent? 
*/ @@ -720,13 +711,13 @@ static int check_btree_root_to_backpointers(struct btree_trans *trans, retry: bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, bch2_btree_id_root(c, btree_id)->b->c.level, 0); - b = bch2_btree_iter_peek_node(trans, &iter); + b = bch2_btree_iter_peek_node(&iter); ret = PTR_ERR_OR_ZERO(b); if (ret) goto err; if (b != btree_node_root(c, b)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto retry; } @@ -735,7 +726,7 @@ retry: k = bkey_i_to_s_c(&b->key); ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -835,6 +826,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans, check_extent_to_backpointers(trans, s, btree_id, level, k) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); })); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -893,7 +885,6 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b if (!ca) return 0; - struct btree_iter iter; struct bkey_s_c bp_k; int ret = 0; for_each_btree_key_max_norestart(trans, iter, BTREE_ID_backpointers, @@ -909,7 +900,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b bp.v->pad)) { ret = bch2_backpointer_del(trans, bp_k.k->p); if (ret) - break; + return ret; need_commit = true; continue; @@ -924,7 +915,6 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b sectors[alloc_counter] += bp.v->bucket_len; }; - bch2_trans_iter_exit(trans, &iter); if (ret) return ret; @@ -1016,7 +1006,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k, { struct btree_iter iter; bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0); - struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + struct btree *b = bch2_btree_iter_peek_node(&iter); int ret = PTR_ERR_OR_ZERO(b); if (ret) goto err; @@ -1024,7 +1014,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k, if (b) bch2_node_pin(trans->c, b); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1060,6 +1050,7 @@ static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans, bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, path->level - 1); })); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -1089,6 +1080,7 @@ static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans, ret; })); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -1170,17 +1162,13 @@ static int check_bucket_backpointer_pos_mismatch(struct btree_trans *trans, bool *had_mismatch, struct bkey_buf *last_flushed) { - struct btree_iter alloc_iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &alloc_iter, - BTREE_ID_alloc, bucket, - BTREE_ITER_cached); + CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&alloc_iter); int ret = bkey_err(k); if (ret) return ret; - ret = check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed); - bch2_trans_iter_exit(trans, &alloc_iter); - return ret; + return check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed); } int bch2_check_bucket_backpointer_mismatch(struct btree_trans *trans, @@ -1239,7 +1227,7 @@ static int check_one_backpointer(struct btree_trans *trans, if (ret) return ret; - bch2_trans_iter_exit(trans, &iter); + 
bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 8a6f886b5bf2..45c15bdaa6f4 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -1277,4 +1277,11 @@ static inline int bch2_fs_casefold_enabled(struct bch_fs *c) return 0; } +static inline const char *strip_bch2(const char *msg) +{ + if (!strncmp("bch2_", msg, 5)) + return msg + 5; + return msg; +} + #endif /* _BCACHEFS_H */ diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 23ed7393f07f..25b01e750880 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -511,7 +511,7 @@ restart: if (btree_node_accessed(b)) { clear_btree_node_accessed(b); bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; - --touched;; + --touched; } else if (!btree_node_reclaim(c, b)) { __bch2_btree_node_hash_remove(bc, b); __btree_node_data_free(b); diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index e95bb6849aef..ce3c7750a922 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -713,6 +713,7 @@ static int bch2_gc_btree(struct btree_trans *trans, gc_pos_set(c, gc_pos_btree(btree, level, k.k->p)); bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial); })); + bch2_trans_iter_exit(&iter); if (ret) goto err; } @@ -725,13 +726,13 @@ retry_root: struct btree_iter iter; bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, bch2_btree_id_root(c, btree)->b->c.level, 0); - struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + struct btree *b = bch2_btree_iter_peek_node(&iter); ret = PTR_ERR_OR_ZERO(b); if (ret) goto err_root; if (b != btree_node_root(c, b)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto retry_root; } @@ -739,7 +740,7 @@ retry_root: struct bkey_s_c k = bkey_i_to_s_c(&b->key); ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial); err_root: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); } while (bch2_err_matches(ret, BCH_ERR_transaction_restart)); err: bch_err_fn(c, ret); @@ -1228,7 +1229,7 @@ int bch2_gc_gens(struct bch_fs *c) BCH_TRANS_COMMIT_no_enospc, ({ ca = bch2_dev_iterate(c, ca, k.k->p.inode); if (!ca) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } bch2_alloc_write_oldest_gen(trans, ca, &iter, k); diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index bd86dd7151a1..8a03cd75a64f 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1405,10 +1405,8 @@ static void btree_node_read_work(struct work_struct *work) ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), &failed, &rb->pick, -1); - if (ret <= 0) { - set_btree_node_read_error(b); + if (ret <= 0) break; - } ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read); rb->have_ioref = ca != NULL; @@ -1442,27 +1440,21 @@ start: bch2_maybe_corrupt_bio(bio, bch2_btree_read_corrupt_ratio); ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf); - if (ret == -BCH_ERR_btree_node_read_err_want_retry || - ret == -BCH_ERR_btree_node_read_err_must_retry) - continue; - - if (ret) - set_btree_node_read_error(b); - - break; + if (ret != -BCH_ERR_btree_node_read_err_want_retry && + ret != -BCH_ERR_btree_node_read_err_must_retry) + break; } bch2_io_failures_to_text(&buf, c, &failed); - if (btree_node_read_error(b)) - bch2_btree_lost_data(c, &buf, b->c.btree_id); - /* * only print retry success if we read from a replica with no 
errors */ - if (btree_node_read_error(b)) + if (ret) { + set_btree_node_read_error(b); + bch2_btree_lost_data(c, &buf, b->c.btree_id); prt_printf(&buf, "ret %s", bch2_err_str(ret)); - else if (failed.nr) { + } else if (failed.nr) { if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev)) prt_printf(&buf, "retry success"); else @@ -2019,7 +2011,7 @@ static void btree_node_scrub_work(struct work_struct *work) bch_err_fn_ratelimited(c, ret); } - bch2_bkey_buf_exit(&scrub->key, c);; + bch2_bkey_buf_exit(&scrub->key, c); btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf); enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub); kfree(scrub); diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index a282c3886168..2220198d7e2d 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -240,8 +240,10 @@ void __bch2_trans_verify_paths(struct btree_trans *trans) __bch2_btree_path_verify(trans, path); } -static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter) +static void __bch2_btree_iter_verify(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached); BUG_ON((iter->flags & BTREE_ITER_is_extents) && @@ -270,12 +272,9 @@ static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) bkey_gt(iter->pos, iter->k.p))); } -static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, - struct btree_iter *iter, struct bkey_s_c k) +static int __bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { - struct btree_iter copy; - struct bkey_s_c prev; - int ret = 0; + struct btree_trans *trans = iter->trans; if (!(iter->flags & BTREE_ITER_filter_snapshots)) return 0; @@ -287,16 +286,16 @@ static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, iter->snapshot, k.k->p.snapshot)); - bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos, - BTREE_ITER_nopreserve| - BTREE_ITER_all_snapshots); - prev = bch2_btree_iter_prev(trans, ©); + CLASS(btree_iter, copy)(trans, iter->btree_id, iter->pos, + BTREE_ITER_nopreserve| + BTREE_ITER_all_snapshots); + struct bkey_s_c prev = bch2_btree_iter_prev(©); if (!prev.k) - goto out; + return 0; - ret = bkey_err(prev); + int ret = bkey_err(prev); if (ret) - goto out; + return ret; if (bkey_eq(prev.k->p, k.k->p) && bch2_snapshot_is_ancestor(trans->c, iter->snapshot, @@ -312,9 +311,8 @@ static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, iter->snapshot, buf1.buf, buf2.buf); } -out: - bch2_trans_iter_exit(trans, ©); - return ret; + + return 0; } void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, @@ -364,11 +362,10 @@ static inline void bch2_btree_path_verify(struct btree_trans *trans, __bch2_btree_path_verify(trans, path); } -static inline void bch2_btree_iter_verify(struct btree_trans *trans, - struct btree_iter *iter) +static inline void bch2_btree_iter_verify(struct btree_iter *iter) { if (static_branch_unlikely(&bch2_debug_check_iterators)) - __bch2_btree_iter_verify(trans, iter); + __bch2_btree_iter_verify(iter); } static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) @@ -377,11 +374,11 @@ static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) __bch2_btree_iter_verify_entry_exit(iter); } -static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter, +static inline int bch2_btree_iter_verify_ret(struct btree_iter 
*iter, struct bkey_s_c k) { return static_branch_unlikely(&bch2_debug_check_iterators) - ? __bch2_btree_iter_verify_ret(trans, iter, k) + ? __bch2_btree_iter_verify_ret(iter, k) : 0; } @@ -891,7 +888,7 @@ static noinline void btree_node_mem_ptr_set(struct btree_trans *trans, static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, struct btree_path *path, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; struct btree_path_level *l = path_l(path); @@ -943,7 +940,7 @@ static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans, static __always_inline int btree_path_down(struct btree_trans *trans, struct btree_path *path, - unsigned flags, + enum btree_iter_update_trigger_flags flags, unsigned long trace_ip) { struct bch_fs *c = trans->c; @@ -1151,7 +1148,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans, */ int bch2_btree_path_traverse_one(struct btree_trans *trans, btree_path_idx_t path_idx, - unsigned flags, + enum btree_iter_update_trigger_flags flags, unsigned long trace_ip) { struct btree_path *path = &trans->paths[path_idx]; @@ -1732,7 +1729,8 @@ static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans, btree_path_idx_t bch2_path_get(struct btree_trans *trans, enum btree_id btree_id, struct bpos pos, unsigned locks_want, unsigned level, - unsigned flags, unsigned long ip) + enum btree_iter_update_trigger_flags flags, + unsigned long ip) { struct btree_path *path; bool cached = flags & BTREE_ITER_cached; @@ -1863,8 +1861,10 @@ hole: return (struct bkey_s_c) { u, NULL }; } -void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter) +void bch2_set_btree_iter_dontneed(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + if (!iter->path || trans->restarted) return; @@ -1876,14 +1876,17 @@ void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter * /* Btree iterators: */ int __must_check -__bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) +__bch2_btree_iter_traverse(struct btree_iter *iter) { - return bch2_btree_path_traverse(trans, iter->path, iter->flags); + return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); } int __must_check -bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) +bch2_btree_iter_traverse(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + int ret; + bch2_trans_verify_not_unlocked_or_in_restart(trans); iter->path = bch2_btree_path_set_pos(trans, iter->path, @@ -1891,7 +1894,7 @@ bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) iter->flags & BTREE_ITER_intent, btree_iter_ip_allocated(iter)); - int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); if (ret) return ret; @@ -1903,14 +1906,14 @@ bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) /* Iterate across nodes (leaf and interior nodes) */ -struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans, - struct btree_iter *iter) +struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; struct btree *b = NULL; int ret; EBUG_ON(trans->paths[iter->path].cached); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (ret) @@ -1932,7 +1935,7 @@ struct btree 
*bch2_btree_iter_peek_node(struct btree_trans *trans, btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter)); out: bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); return b; err: @@ -1941,26 +1944,26 @@ err: } /* Only kept for -tools */ -struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans, - struct btree_iter *iter) +struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter) { struct btree *b; - while (b = bch2_btree_iter_peek_node(trans, iter), + while (b = bch2_btree_iter_peek_node(iter), bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart)) - bch2_trans_begin(trans); + bch2_trans_begin(iter->trans); return b; } -struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter) +struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; struct btree *b = NULL; int ret; EBUG_ON(trans->paths[iter->path].cached); bch2_trans_verify_not_unlocked_or_in_restart(trans); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (ret) @@ -2034,7 +2037,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_ EBUG_ON(btree_iter_path(trans, iter)->uptodate); out: bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); return b; err: @@ -2044,7 +2047,7 @@ err: /* Iterate across keys (in leaf nodes only) */ -inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter) +inline bool bch2_btree_iter_advance(struct btree_iter *iter) { struct bpos pos = iter->k.p; bool ret = !(iter->flags & BTREE_ITER_all_snapshots @@ -2053,11 +2056,11 @@ inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter if (ret && !(iter->flags & BTREE_ITER_is_extents)) pos = bkey_successor(iter, pos); - bch2_btree_iter_set_pos(trans, iter, pos); + bch2_btree_iter_set_pos(iter, pos); return ret; } -inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter) +inline bool bch2_btree_iter_rewind(struct btree_iter *iter) { struct bpos pos = bkey_start_pos(&iter->k); bool ret = !(iter->flags & BTREE_ITER_all_snapshots @@ -2066,7 +2069,7 @@ inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter if (ret && !(iter->flags & BTREE_ITER_is_extents)) pos = bkey_predecessor(iter, pos); - bch2_btree_iter_set_pos(trans, iter, pos); + bch2_btree_iter_set_pos(iter, pos); return ret; } @@ -2198,9 +2201,9 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans, * bkey_s_c_null: */ static noinline -struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter, - struct bpos pos) +struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos) { + struct btree_trans *trans = iter->trans; struct bch_fs *c = trans->c; struct bkey u; struct bkey_s_c k; @@ -2246,14 +2249,14 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btr return k; } -static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter, - struct bpos search_key) +static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key) { + struct btree_trans *trans = iter->trans; struct bkey_s_c k, k2; int ret; EBUG_ON(btree_iter_path(trans, iter)->cached); - 
bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); while (1) { iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, @@ -2263,7 +2266,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { /* ensure that iter->k is consistent with iter->pos: */ - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); break; } @@ -2273,7 +2276,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct if (unlikely(!l->b)) { /* No btree nodes at requested level: */ - bch2_btree_iter_set_pos(trans, iter, SPOS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; break; } @@ -2285,10 +2288,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && k.k && !bkey_deleted(k.k) && - (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) { + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { k = k2; if (bkey_err(k)) { - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); break; } } @@ -2321,13 +2324,13 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct search_key = bpos_successor(l->b->key.k.p); } else { /* End of btree: */ - bch2_btree_iter_set_pos(trans, iter, SPOS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; break; } } - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); if (trace___btree_iter_peek_enabled()) { CLASS(printbuf, buf)(); @@ -2348,15 +2351,14 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct /** * bch2_btree_iter_peek_max() - returns first key greater than or equal to * iterator's current position - * @trans: btree transaction object * @iter: iterator to peek from * @end: search limit: returns keys less than or equal to @end * * Returns: key if found, or an error extractable with bkey_err(). */ -struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter, - struct bpos end) +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end) { + struct btree_trans *trans = iter->trans; struct bpos search_key = btree_iter_search_key(iter); struct bkey_s_c k; struct bpos iter_pos = iter->pos; @@ -2378,7 +2380,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree } while (1) { - k = __bch2_btree_iter_peek(trans, iter, search_key); + k = __bch2_btree_iter_peek(iter, search_key); if (unlikely(!k.k)) goto end; if (unlikely(bkey_err(k))) @@ -2492,9 +2494,9 @@ out_no_locked: if (!(iter->flags & BTREE_ITER_all_snapshots)) iter->pos.snapshot = iter->snapshot; - ret = bch2_btree_iter_verify_ret(trans, iter, k); + ret = bch2_btree_iter_verify_ret(iter, k); if (unlikely(ret)) { - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); } @@ -2515,7 +2517,7 @@ out_no_locked: return k; end: - bch2_btree_iter_set_pos(trans, iter, end); + bch2_btree_iter_set_pos(iter, end); k = bkey_s_c_null; goto out_no_locked; } @@ -2523,25 +2525,24 @@ end: /** * bch2_btree_iter_next() - returns first key greater than iterator's current * position - * @trans: btree transaction object * @iter: iterator to peek from * * Returns: key if found, or an error extractable with bkey_err(). 
*/ -struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) { - if (!bch2_btree_iter_advance(trans, iter)) + if (!bch2_btree_iter_advance(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek(trans, iter); + return bch2_btree_iter_peek(iter); } -static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter, - struct bpos search_key) +static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key) { + struct btree_trans *trans = iter->trans; struct bkey_s_c k, k2; - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); while (1) { iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, @@ -2551,7 +2552,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { /* ensure that iter->k is consistent with iter->pos: */ - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); break; } @@ -2561,7 +2562,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st if (unlikely(!l->b)) { /* No btree nodes at requested level: */ - bch2_btree_iter_set_pos(trans, iter, SPOS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; break; } @@ -2578,10 +2579,10 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && k.k && !bkey_deleted(k.k) && - (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) { + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { k = k2; if (bkey_err(k2)) { - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); break; } } @@ -2602,27 +2603,25 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st search_key = bpos_predecessor(path->l[0].b->data->min_key); } else { /* Start of btree: */ - bch2_btree_iter_set_pos(trans, iter, POS_MIN); + bch2_btree_iter_set_pos(iter, POS_MIN); k = bkey_s_c_null; break; } } - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); return k; } /** * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to * iterator's current position - * @trans: btree transaction object * @iter: iterator to peek from * @end: search limit: returns keys greater than or equal to @end * * Returns: key if found, or an error extractable with bkey_err(). 
*/ -struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter, - struct bpos end) +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end) { if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) && !bkey_eq(iter->pos, POS_MAX) && @@ -2637,7 +2636,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct * real visible extents - easiest to just use peek_slot() (which * internally uses peek() for extents) */ - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); if (bkey_err(k)) return k; @@ -2647,6 +2646,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct return k; } + struct btree_trans *trans = iter->trans; struct bpos search_key = iter->pos; struct bkey_s_c k; btree_path_idx_t saved_path = 0; @@ -2662,7 +2662,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct } while (1) { - k = __bch2_btree_iter_peek_prev(trans, iter, search_key); + k = __bch2_btree_iter_peek_prev(iter, search_key); if (unlikely(!k.k)) goto end; if (unlikely(bkey_err(k))) @@ -2744,7 +2744,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct } /* Extents can straddle iter->pos: */ - iter->pos = bpos_min(iter->pos, k.k->p);; + iter->pos = bpos_min(iter->pos, k.k->p); if (iter->flags & BTREE_ITER_filter_snapshots) iter->pos.snapshot = iter->snapshot; @@ -2753,7 +2753,7 @@ out_no_locked: bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_intent); bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); if (trace_btree_iter_peek_prev_min_enabled()) { CLASS(printbuf, buf)(); @@ -2769,7 +2769,7 @@ out_no_locked: } return k; end: - bch2_btree_iter_set_pos(trans, iter, end); + bch2_btree_iter_set_pos(iter, end); k = bkey_s_c_null; goto out_no_locked; } @@ -2777,27 +2777,27 @@ end: /** * bch2_btree_iter_prev() - returns first key less than iterator's current * position - * @trans: btree transaction object * @iter: iterator to peek from * * Returns: key if found, or an error extractable with bkey_err(). 
*/ -struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) { - if (!bch2_btree_iter_rewind(trans, iter)) + if (!bch2_btree_iter_rewind(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek_prev(trans, iter); + return bch2_btree_iter_peek_prev(iter); } -struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; struct bpos search_key; struct bkey_s_c k, k2; int ret; bch2_trans_verify_not_unlocked_or_in_restart(trans); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); bch2_btree_iter_verify_entry_exit(iter); EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache)); @@ -2815,7 +2815,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre goto out2; } - bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos)); + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); } search_key = btree_iter_search_key(iter); @@ -2858,7 +2858,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && !bkey_deleted(k.k) && - (k2 = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) { + (k2 = btree_trans_peek_key_cache(iter, iter->pos)).k) { k = k2; if (bkey_err(k)) goto out; @@ -2881,21 +2881,21 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre if (iter->flags & BTREE_ITER_intent) { struct btree_iter iter2; - bch2_trans_copy_iter(trans, &iter2, iter); - k = bch2_btree_iter_peek_max(trans, &iter2, end); + bch2_trans_copy_iter(&iter2, iter); + k = bch2_btree_iter_peek_max(&iter2, end); if (k.k && !bkey_err(k)) { swap(iter->key_cache_path, iter2.key_cache_path); iter->k = iter2.k; k.k = &iter->k; } - bch2_trans_iter_exit(trans, &iter2); + bch2_trans_iter_exit(&iter2); } else { struct bpos pos = iter->pos; - k = bch2_btree_iter_peek_max(trans, iter, end); + k = bch2_btree_iter_peek_max(iter, end); if (unlikely(bkey_err(k))) - bch2_btree_iter_set_pos(trans, iter, pos); + bch2_btree_iter_set_pos(iter, pos); else iter->pos = pos; } @@ -2924,8 +2924,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre } out: bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); - ret = bch2_btree_iter_verify_ret(trans, iter, k); + bch2_btree_iter_verify(iter); + ret = bch2_btree_iter_verify_ret(iter, k); if (unlikely(ret)) k = bkey_s_c_err(ret); out2: @@ -2945,31 +2945,31 @@ out2: return k; } -struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) { - if (!bch2_btree_iter_advance(trans, iter)) + if (!bch2_btree_iter_advance(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek_slot(trans, iter); + return bch2_btree_iter_peek_slot(iter); } -struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) { - if (!bch2_btree_iter_rewind(trans, iter)) + if (!bch2_btree_iter_rewind(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek_slot(trans, iter); + return bch2_btree_iter_peek_slot(iter); } /* Obsolete, but still used by rust wrapper in -tools */ -struct bkey_s_c 
bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter) { struct bkey_s_c k; - while (btree_trans_too_many_iters(trans) || - (k = bch2_btree_iter_peek_type(trans, iter, iter->flags), + while (btree_trans_too_many_iters(iter->trans) || + (k = bch2_btree_iter_peek_type(iter, iter->flags), bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart))) - bch2_trans_begin(trans); + bch2_trans_begin(iter->trans); return k; } @@ -3101,8 +3101,10 @@ static inline void btree_path_list_add(struct btree_trans *trans, btree_trans_verify_sorted_refs(trans); } -void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) +void bch2_trans_iter_exit(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + if (iter->update_path) bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent); @@ -3115,12 +3117,13 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) iter->path = 0; iter->update_path = 0; iter->key_cache_path = 0; + iter->trans = NULL; } void bch2_trans_iter_init_outlined(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree_id, struct bpos pos, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, bch2_btree_iter_flags(trans, btree_id, 0, flags), @@ -3133,7 +3136,7 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, struct bpos pos, unsigned locks_want, unsigned depth, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { flags |= BTREE_ITER_not_extents; flags |= BTREE_ITER_snapshot_field; @@ -3154,9 +3157,10 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, BUG_ON(iter->min_depth != depth); } -void bch2_trans_copy_iter(struct btree_trans *trans, - struct btree_iter *dst, struct btree_iter *src) +void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src) { + struct btree_trans *trans = src->trans; + *dst = *src; #ifdef TRACK_PATH_ALLOCATED dst->ip_allocated = _RET_IP_; diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 53074ed62e09..4da36a998dff 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -235,12 +235,14 @@ bch2_btree_path_set_pos(struct btree_trans *trans, int __must_check bch2_btree_path_traverse_one(struct btree_trans *, btree_path_idx_t, - unsigned, unsigned long); + enum btree_iter_update_trigger_flags, + unsigned long); static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *); static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans, - btree_path_idx_t path, unsigned flags) + btree_path_idx_t path, + enum btree_iter_update_trigger_flags flags) { bch2_trans_verify_not_unlocked_or_in_restart(trans); @@ -251,7 +253,9 @@ static inline int __must_check bch2_btree_path_traverse(struct btree_trans *tran } btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos, - unsigned, unsigned, unsigned, unsigned long); + unsigned, unsigned, + enum btree_iter_update_trigger_flags, + unsigned long); btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id, unsigned, struct bpos); @@ -404,37 +408,36 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct void bch2_trans_node_drop(struct btree_trans *trans, struct btree *); void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *); -int __must_check 
__bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *); -int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *); +int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter); +int __must_check bch2_btree_iter_traverse(struct btree_iter *); -struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *); -struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *); -struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *); +struct btree *bch2_btree_iter_peek_node(struct btree_iter *); +struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *); +struct btree *bch2_btree_iter_next_node(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos); -struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *); +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos); +struct bkey_s_c bch2_btree_iter_next(struct btree_iter *); -static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans, - struct btree_iter *iter) +static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) { - return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX); + return bch2_btree_iter_peek_max(iter, SPOS_MAX); } -struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos); +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos); -static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter) +static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) { - return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN); + return bch2_btree_iter_peek_prev_min(iter, POS_MIN); } -struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *); +struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *); -struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *); -struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *); +struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *); +struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *); +struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *); -bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *); -bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *); +bool bch2_btree_iter_advance(struct btree_iter *); +bool bch2_btree_iter_rewind(struct btree_iter *); static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) { @@ -445,9 +448,10 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo iter->k.size = 0; } -static inline void bch2_btree_iter_set_pos(struct btree_trans *trans, - struct btree_iter *iter, struct bpos new_pos) +static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) { + struct btree_trans *trans = iter->trans; + if (unlikely(iter->update_path)) bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent); @@ -465,22 +469,21 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it iter->pos = bkey_start_pos(&iter->k); } -static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans, - struct btree_iter *iter, u32 
snapshot) +static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot) { struct bpos pos = iter->pos; iter->snapshot = snapshot; pos.snapshot = snapshot; - bch2_btree_iter_set_pos(trans, iter, pos); + bch2_btree_iter_set_pos(iter, pos); } -void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *); +void bch2_trans_iter_exit(struct btree_iter *); -static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans, - unsigned btree_id, - unsigned level, - unsigned flags) +static inline enum btree_iter_update_trigger_flags +bch2_btree_iter_flags(struct btree_trans *trans, + unsigned btree_id, unsigned level, + enum btree_iter_update_trigger_flags flags) { if (level || !btree_id_cached(trans->c, btree_id)) { flags &= ~BTREE_ITER_cached; @@ -508,15 +511,16 @@ static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans, static inline void bch2_trans_iter_init_common(struct btree_trans *trans, struct btree_iter *iter, - unsigned btree_id, struct bpos pos, + enum btree_id btree, struct bpos pos, unsigned locks_want, unsigned depth, - unsigned flags, + enum btree_iter_update_trigger_flags flags, unsigned long ip) { + iter->trans = trans; iter->update_path = 0; iter->key_cache_path = 0; - iter->btree_id = btree_id; + iter->btree_id = btree; iter->min_depth = 0; iter->flags = flags; iter->snapshot = pos.snapshot; @@ -526,33 +530,51 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans, #ifdef CONFIG_BCACHEFS_DEBUG iter->ip_allocated = ip; #endif - iter->path = bch2_path_get(trans, btree_id, iter->pos, - locks_want, depth, flags, ip); + iter->path = bch2_path_get(trans, btree, iter->pos, locks_want, depth, flags, ip); } void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *, - enum btree_id, struct bpos, unsigned); + enum btree_id, struct bpos, + enum btree_iter_update_trigger_flags); static inline void bch2_trans_iter_init(struct btree_trans *trans, struct btree_iter *iter, - unsigned btree_id, struct bpos pos, - unsigned flags) + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags) { - if (__builtin_constant_p(btree_id) && + if (__builtin_constant_p(btree) && __builtin_constant_p(flags)) - bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, - bch2_btree_iter_flags(trans, btree_id, 0, flags), + bch2_trans_iter_init_common(trans, iter, btree, pos, 0, 0, + bch2_btree_iter_flags(trans, btree, 0, flags), _THIS_IP_); else - bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags); + bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags); +} + +static inline struct btree_iter bch2_trans_iter_class_init(struct btree_trans *trans, + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags) +{ + struct btree_iter iter; + bch2_trans_iter_init(trans, &iter, btree, pos, flags); + return iter; } +DEFINE_CLASS(btree_iter, struct btree_iter, + bch2_trans_iter_exit(&_T), + bch2_trans_iter_class_init(trans, btree, pos, flags), + struct btree_trans *trans, + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags); + void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *, enum btree_id, struct bpos, - unsigned, unsigned, unsigned); -void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *); + unsigned, unsigned, + enum btree_iter_update_trigger_flags); + +void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *); -void bch2_set_btree_iter_dontneed(struct 
btree_trans *, struct btree_iter *); +void bch2_set_btree_iter_dontneed(struct btree_iter *); #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE void bch2_trans_kmalloc_trace_to_text(struct printbuf *, @@ -623,27 +645,28 @@ static __always_inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *tr static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans, struct btree_iter *iter, - unsigned btree_id, struct bpos pos, - unsigned flags, unsigned type) + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags, + enum bch_bkey_type type) { struct bkey_s_c k; - bch2_trans_iter_init(trans, iter, btree_id, pos, flags); - k = bch2_btree_iter_peek_slot(trans, iter); + bch2_trans_iter_init(trans, iter, btree, pos, flags); + k = bch2_btree_iter_peek_slot(iter); if (!bkey_err(k) && type && k.k->type != type) k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch); if (unlikely(bkey_err(k))) - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return k; } static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans, struct btree_iter *iter, - unsigned btree_id, struct bpos pos, - unsigned flags) + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags) { - return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0); + return __bch2_bkey_get_iter(trans, iter, btree, pos, flags, 0); } #define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\ @@ -665,16 +688,17 @@ do { \ } while (0) static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans, - unsigned btree_id, struct bpos pos, - unsigned flags, unsigned type, + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags, + enum bch_bkey_type type, unsigned val_size, void *val) { struct btree_iter iter; - struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type); + struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree, pos, flags, type); int ret = bkey_err(k); if (!ret) { __bkey_val_copy(val, val_size, k); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); } return ret; @@ -699,17 +723,17 @@ u32 bch2_trans_begin(struct btree_trans *); int _ret3 = 0; \ do { \ _ret3 = lockrestart_do((_trans), ({ \ - struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\ + struct btree *_b = bch2_btree_iter_peek_node(&_iter); \ if (!_b) \ break; \ \ PTR_ERR_OR_ZERO(_b) ?: (_do); \ })) ?: \ lockrestart_do((_trans), \ - PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\ + PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \ } while (!_ret3); \ \ - bch2_trans_iter_exit((_trans), &(_iter)); \ + bch2_trans_iter_exit(&(_iter)); \ _ret3; \ }) @@ -718,34 +742,31 @@ u32 bch2_trans_begin(struct btree_trans *); __for_each_btree_node(_trans, _iter, _btree_id, _start, \ 0, 0, _flags, _b, _do) -static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans, - struct btree_iter *iter, - unsigned flags) +static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter, + enum btree_iter_update_trigger_flags flags) { - return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) : - bch2_btree_iter_peek_prev(trans, iter); + return flags & BTREE_ITER_slots ? 
bch2_btree_iter_peek_slot(iter) : + bch2_btree_iter_peek_prev(iter); } -static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans, - struct btree_iter *iter, - unsigned flags) +static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter, + enum btree_iter_update_trigger_flags flags) { - return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) : - bch2_btree_iter_peek(trans, iter); + return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) : + bch2_btree_iter_peek(iter); } -static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans, - struct btree_iter *iter, +static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter, struct bpos end, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { if (!(flags & BTREE_ITER_slots)) - return bch2_btree_iter_peek_max(trans, iter, end); + return bch2_btree_iter_peek_max(iter, end); if (bkey_gt(iter->pos, end)) return bkey_s_c_null; - return bch2_btree_iter_peek_slot(trans, iter); + return bch2_btree_iter_peek_slot(iter); } int __bch2_btree_trans_too_many_iters(struct btree_trans *); @@ -801,7 +822,7 @@ transaction_restart: \ if (!_ret2) \ bch2_trans_verify_not_restarted(_trans, _restart_count);\ \ - _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \ + _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \ }) #define for_each_btree_key_max_continue(_trans, _iter, \ @@ -812,62 +833,52 @@ transaction_restart: \ \ do { \ _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), \ _end, (_flags)); \ if (!(_k).k) \ break; \ \ bkey_err(_k) ?: (_do); \ })); \ - } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \ + } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \ \ - bch2_trans_iter_exit((_trans), &(_iter)); \ _ret3; \ }) #define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \ for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do) -#define for_each_btree_key_max(_trans, _iter, _btree_id, \ - _start, _end, _flags, _k, _do) \ -({ \ - bch2_trans_begin(trans); \ - \ - struct btree_iter _iter; \ - bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - \ - for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\ +#define for_each_btree_key_max(_trans, _iter, _btree_id, \ + _start, _end, _flags, _k, _do) \ +({ \ + bch2_trans_begin(trans); \ + \ + CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \ + for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do); \ }) -#define for_each_btree_key(_trans, _iter, _btree_id, \ - _start, _flags, _k, _do) \ - for_each_btree_key_max(_trans, _iter, _btree_id, _start, \ - SPOS_MAX, _flags, _k, _do) +#define for_each_btree_key(_trans, _iter, _btree_id, _start, _flags, _k, _do) \ + for_each_btree_key_max(_trans, _iter, _btree_id, _start, SPOS_MAX, _flags, _k, _do) -#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \ - _start, _flags, _k, _do) \ -({ \ - struct btree_iter _iter; \ - struct bkey_s_c _k; \ - int _ret3 = 0; \ - \ - bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - \ - do { \ - _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), \ - (_flags)); \ - if (!(_k).k) \ - break; \ - \ - bkey_err(_k) ?: (_do); \ - })); \ - } while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter))); \ - \ - 
bch2_trans_iter_exit((_trans), &(_iter)); \ - _ret3; \ +#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \ + _start, _flags, _k, _do) \ +({ \ + int _ret3 = 0; \ + \ + CLASS(btree_iter, iter)((_trans), (_btree_id), (_start), (_flags)); \ + \ + do { \ + _ret3 = lockrestart_do(_trans, ({ \ + struct bkey_s_c _k = \ + bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\ + if (!(_k).k) \ + break; \ + \ + bkey_err(_k) ?: (_do); \ + })); \ + } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \ + \ + _ret3; \ }) #define for_each_btree_key_commit(_trans, _iter, _btree_id, \ @@ -894,38 +905,36 @@ transaction_restart: \ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\ (_journal_seq), (_commit_flags))) -struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *, - struct btree_iter *); - -#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \ - _start, _end, _flags, _k, _ret) \ - for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\ - !((_ret) = bkey_err(_k)) && (_k).k; \ - bch2_btree_iter_advance(_trans, &(_iter))) - -#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\ - for (; \ - (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags), \ - !((_ret) = bkey_err(_k)) && (_k).k; \ - bch2_btree_iter_advance(_trans, &(_iter))) - -#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \ - _start, _flags, _k, _ret) \ - for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\ - SPOS_MAX, _flags, _k, _ret) +struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); -#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \ - _start, _flags, _k, _ret) \ - for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags), \ - !((_ret) = bkey_err(_k)) && (_k).k; \ - bch2_btree_iter_rewind(_trans, &(_iter))) +#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \ + _start, _end, _flags, _k, _ret) \ + for (CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ + !((_ret) = bkey_err(_k)) && (_k).k; \ + bch2_btree_iter_advance(&(_iter))) + +#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \ + _start, _flags, _k, _ret) \ + for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start, \ + SPOS_MAX, _flags, _k, _ret) -#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret) \ - for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret) +#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret) \ + for (; \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ + !((_ret) = bkey_err(_k)) && (_k).k; \ + bch2_btree_iter_advance(&(_iter))) + +#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \ + for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret) + +#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \ + _start, _flags, _k, _ret) \ + for (CLASS(btree_iter, _iter)((_trans), (_btree_id), \ + (_start), (_flags)); \ + (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \ + !((_ret) = bkey_err(_k)) && (_k).k; \ + bch2_btree_iter_rewind(&(_iter))) /* * This should not be used in a fastpath, without first trying _do in diff 
--git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index d61b782087ce..d69cf9435872 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -323,19 +323,16 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, } struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k; - int ret; - bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos, - BTREE_ITER_intent| - BTREE_ITER_key_cache_fill| - BTREE_ITER_cached_nofill); + CLASS(btree_iter, iter)(trans, ck_path->btree_id, ck_path->pos, + BTREE_ITER_intent| + BTREE_ITER_key_cache_fill| + BTREE_ITER_cached_nofill); iter.flags &= ~BTREE_ITER_with_journal; - k = bch2_btree_iter_peek_slot(trans, &iter); - ret = bkey_err(k); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); + int ret = bkey_err(k); if (ret) - goto err; + return ret; /* Recheck after btree lookup, before allocating: */ ck_path = trans->paths + ck_path_idx; @@ -345,15 +342,13 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, ret = btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k); if (ret) - goto err; + return ret; if (trace_key_cache_fill_enabled()) do_trace_key_cache_fill(trans, ck_path, k); out: /* We're not likely to need this iterator again: */ - bch2_set_btree_iter_dontneed(trans, &iter); -err: - bch2_trans_iter_exit(trans, &iter); + bch2_set_btree_iter_dontneed(&iter); return ret; } @@ -425,35 +420,34 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct journal *j = &c->journal; - struct btree_iter c_iter, b_iter; struct bkey_cached *ck = NULL; int ret; - bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos, - BTREE_ITER_slots| - BTREE_ITER_intent| - BTREE_ITER_all_snapshots); - bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos, - BTREE_ITER_cached| - BTREE_ITER_intent); + CLASS(btree_iter, b_iter)(trans, key.btree_id, key.pos, + BTREE_ITER_slots| + BTREE_ITER_intent| + BTREE_ITER_all_snapshots); + CLASS(btree_iter, c_iter)(trans, key.btree_id, key.pos, + BTREE_ITER_cached| + BTREE_ITER_intent); b_iter.flags &= ~BTREE_ITER_with_key_cache; - ret = bch2_btree_iter_traverse(trans, &c_iter); + ret = bch2_btree_iter_traverse(&c_iter); if (ret) - goto out; + return ret; ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b; if (!ck) - goto out; + return 0; if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { if (evict) goto evict; - goto out; + return 0; } if (journal_seq && ck->journal.seq != journal_seq) - goto out; + return 0; trans->journal_res.seq = ck->journal.seq; @@ -470,7 +464,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, !test_bit(JOURNAL_space_low, &c->journal.flags)) commit_flags |= BCH_TRANS_COMMIT_no_journal_res; - struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter); + struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter); ret = bkey_err(btree_k); if (ret) goto err; @@ -529,8 +523,6 @@ evict: } } out: - bch2_trans_iter_exit(trans, &b_iter); - bch2_trans_iter_exit(trans, &c_iter); return ret; } diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index f4f958f4615d..4b7b5ca74ba1 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -158,14 +158,6 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX) return; - bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ); - bio->bi_iter.bi_sector = offset; - bch2_bio_map(bio, 
b->data, c->opts.btree_node_size); - - submit_time = local_clock(); - submit_bio_wait(bio); - bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status); - rcu_read_lock(); struct found_btree_node n = { .btree_id = BTREE_NODE_ID(bn), @@ -182,6 +174,14 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, }; rcu_read_unlock(); + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ); + bio->bi_iter.bi_sector = offset; + bch2_bio_map(bio, b->data, c->opts.btree_node_size); + + submit_time = local_clock(); + submit_bio_wait(bio); + bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status); + found_btree_node_to_key(&b->key, &n); CLASS(printbuf, buf)(); diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c index 58590ccc26bd..8b94a8156fbf 100644 --- a/fs/bcachefs/btree_trans_commit.c +++ b/fs/bcachefs/btree_trans_commit.c @@ -969,7 +969,7 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans, BUG_ON(current != c->recovery_task); struct bkey_i *accounting; - +retry: percpu_down_read(&c->mark_lock); for (accounting = btree_trans_subbuf_base(trans, &trans->accounting); accounting != btree_trans_subbuf_top(trans, &trans->accounting); @@ -1025,13 +1025,17 @@ fatal_err: bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret)); percpu_down_read(&c->mark_lock); revert_fs_usage: - BUG(); - /* error path not handled by __bch2_trans_commit() */ for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting); i != accounting; i = bkey_next(i)) bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags); percpu_up_read(&c->mark_lock); + + if (bch2_err_matches(ret, BCH_ERR_btree_insert_need_mark_replicas)) { + ret = drop_locks_do(trans, bch2_accounting_update_sb(trans)); + if (!ret) + goto retry; + } return ret; } diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 76adf75617aa..ffa250008d91 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -364,6 +364,7 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path) * @nodes_intent_locked - bitmask indicating which locks are intent locks */ struct btree_iter { + struct btree_trans *trans; btree_path_idx_t path; btree_path_idx_t update_path; btree_path_idx_t key_cache_path; @@ -485,7 +486,7 @@ typedef DARRAY(struct trans_kmalloc_trace) darray_trans_kmalloc_trace; struct btree_trans_subbuf { u16 base; u16 u64s; - u16 size;; + u16 size; }; struct btree_trans { @@ -854,15 +855,15 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type) return type != BKEY_TYPE_btree && btree_id_is_extents(type - 1); } -static inline bool btree_type_has_snapshots(enum btree_id btree) -{ - const u64 mask = 0 +static const u64 btree_has_snapshots_mask = 0 #define x(name, nr, flags, ...) 
|((!!((flags) & BTREE_IS_snapshots)) << nr) - BCH_BTREE_IDS() +BCH_BTREE_IDS() #undef x - ; +; - return BIT_ULL(btree) & mask; +static inline bool btree_type_has_snapshots(enum btree_id btree) +{ + return BIT_ULL(btree) & btree_has_snapshots_mask; } static inline bool btree_type_has_snapshot_field(enum btree_id btree) diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c index f514a8ad7a89..09c75ac2d5a1 100644 --- a/fs/bcachefs/btree_update.c +++ b/fs/bcachefs/btree_update.c @@ -117,7 +117,7 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans, break; } } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -131,10 +131,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, darray_for_each(*s, id) { pos.snapshot = *id; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, pos, - BTREE_ITER_not_extents| - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_not_extents|BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) break; @@ -143,7 +141,6 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i)); ret = PTR_ERR_OR_ZERO(update); if (ret) { - bch2_trans_iter_exit(trans, &iter); break; } @@ -154,7 +151,6 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, update, BTREE_UPDATE_internal_snapshot_node); } - bch2_trans_iter_exit(trans, &iter); if (ret) break; @@ -268,18 +264,16 @@ static int bch2_trans_update_extent(struct btree_trans *trans, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - struct bkey_s_c k; enum btree_id btree_id = orig_iter->btree_id; - int ret = 0; - bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k), - BTREE_ITER_intent| - BTREE_ITER_with_updates| - BTREE_ITER_not_extents); - k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX)); - if ((ret = bkey_err(k))) - goto err; + CLASS(btree_iter, iter)(trans, btree_id, bkey_start_pos(&insert->k), + BTREE_ITER_intent| + BTREE_ITER_with_updates| + BTREE_ITER_not_extents); + struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); + int ret = bkey_err(k); + if (ret) + return ret; if (!k.k) goto out; @@ -287,7 +281,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, if (bch2_bkey_maybe_mergable(k.k, &insert->k)) { ret = extent_front_merge(trans, &iter, k, &insert, flags); if (ret) - goto err; + return ret; } goto next; @@ -298,15 +292,15 @@ static int bch2_trans_update_extent(struct btree_trans *trans, ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert)); if (ret) - goto err; + return ret; if (done) goto out; next: - bch2_btree_iter_advance(trans, &iter); - k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX)); + bch2_btree_iter_advance(&iter); + k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); if ((ret = bkey_err(k))) - goto err; + return ret; if (!k.k) goto out; } @@ -314,15 +308,12 @@ next: if (bch2_bkey_maybe_mergable(&insert->k, k.k)) { ret = extent_back_merge(trans, &iter, insert, k); if (ret) - goto err; + return ret; } out: - if (!bkey_deleted(&insert->k)) - ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags); -err: - bch2_trans_iter_exit(trans, &iter); - - return ret; + return 
!bkey_deleted(&insert->k) + ? bch2_btree_insert_nonextent(trans, btree_id, insert, flags) + : 0; } static inline struct btree_insert_entry * @@ -594,13 +585,13 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree, struct bpos end) { bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent); - struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek_prev(iter); int ret = bkey_err(k); if (ret) goto err; - bch2_btree_iter_advance(trans, iter); - k = bch2_btree_iter_peek_slot(trans, iter); + bch2_btree_iter_advance(iter); + k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); if (ret) goto err; @@ -614,7 +605,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter, return 0; err: - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } @@ -629,29 +620,21 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans, enum btree_id btree, struct bkey_i *k, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - int ret; - - bch2_trans_iter_init(trans, &iter, btree, k->k.p, - BTREE_ITER_cached| - BTREE_ITER_not_extents| - BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(trans, &iter) ?: + CLASS(btree_iter, iter)(trans, btree, k->k.p, + BTREE_ITER_cached| + BTREE_ITER_not_extents| + BTREE_ITER_intent); + return bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, k, flags); - bch2_trans_iter_exit(trans, &iter); - return ret; } -int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id, +int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id btree, struct bkey_i *k, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k), - BTREE_ITER_intent|flags); - int ret = bch2_btree_iter_traverse(trans, &iter) ?: - bch2_trans_update(trans, &iter, k, flags); - bch2_trans_iter_exit(trans, &iter); - return ret; + CLASS(btree_iter, iter)(trans, btree, bkey_start_pos(&k->k), + BTREE_ITER_intent|flags); + return bch2_btree_iter_traverse(&iter) ?: + bch2_trans_update(trans, &iter, k, flags); } /** @@ -693,31 +676,25 @@ int bch2_btree_delete(struct btree_trans *trans, enum btree_id btree, struct bpos pos, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - int ret; - - bch2_trans_iter_init(trans, &iter, btree, pos, - BTREE_ITER_cached| - BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(trans, &iter) ?: + CLASS(btree_iter, iter)(trans, btree, pos, + BTREE_ITER_cached| + BTREE_ITER_intent); + return bch2_btree_iter_traverse(&iter) ?: bch2_btree_delete_at(trans, &iter, flags); - bch2_trans_iter_exit(trans, &iter); - - return ret; } -int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id, +int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id btree, struct bpos start, struct bpos end, enum btree_iter_update_trigger_flags flags, u64 *journal_seq) { u32 restart_count = trans->restart_count; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; - bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent|flags); - while ((k = bch2_btree_iter_peek_max(trans, &iter, end)).k) { + CLASS(btree_iter, iter)(trans, btree, start, BTREE_ITER_intent|flags); + + while ((k = bch2_btree_iter_peek_max(&iter, end)).k) { struct disk_reservation disk_res = bch2_disk_reservation_init(trans->c, 0); struct bkey_i delete; @@ -767,7 +744,6 @@ err: if (ret) break; } - 
bch2_trans_iter_exit(trans, &iter); return ret ?: trans_was_restarted(trans, restart_count); } @@ -808,13 +784,10 @@ int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter, int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, struct bpos pos, bool set) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_intent); - int ret = bch2_btree_iter_traverse(trans, &iter) ?: - bch2_btree_bit_mod_iter(trans, &iter, set); - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_btree_iter_traverse(&iter) ?: + bch2_btree_bit_mod_iter(trans, &iter, set); } int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree, diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 633de3b3ac28..6790e0254a63 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -382,7 +382,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr ? ERR_CAST(k.k) : __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes); if (IS_ERR(ret)) - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } @@ -409,7 +409,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans, ret = bch2_trans_update(trans, iter, mut, flags); if (ret) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ERR_PTR(ret); } diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index e4aa4fa749bc..5f4f82967105 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -2066,7 +2066,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, sib_path = bch2_path_get(trans, btree, sib_pos, U8_MAX, level, BTREE_ITER_intent, _THIS_IP_); - ret = bch2_btree_path_traverse(trans, sib_path, false); + ret = bch2_btree_path_traverse(trans, sib_path, 0); if (ret) goto err; @@ -2220,7 +2220,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter, bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p, BTREE_MAX_DEPTH, b->c.level, BTREE_ITER_intent); - int ret = bch2_btree_iter_traverse(trans, iter); + int ret = bch2_btree_iter_traverse(iter); if (ret) goto err; @@ -2235,7 +2235,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter, BUG_ON(!btree_node_hashed(b)); return 0; err: - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } @@ -2315,7 +2315,7 @@ int bch2_btree_node_rewrite_key(struct btree_trans *trans, bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, BTREE_MAX_DEPTH, level, 0); - struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + struct btree *b = bch2_btree_iter_peek_node(&iter); int ret = PTR_ERR_OR_ZERO(b); if (ret) goto out; @@ -2325,7 +2325,7 @@ int bch2_btree_node_rewrite_key(struct btree_trans *trans, ? 
bch2_btree_node_rewrite(trans, &iter, b, 0, flags) : -ENOENT; out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -2340,14 +2340,14 @@ int bch2_btree_node_rewrite_pos(struct btree_trans *trans, /* Traverse one depth lower to get a pointer to the node itself: */ struct btree_iter iter; bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0); - struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + struct btree *b = bch2_btree_iter_peek_node(&iter); int ret = PTR_ERR_OR_ZERO(b); if (ret) goto err; ret = bch2_btree_node_rewrite(trans, &iter, b, target, flags); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -2361,7 +2361,7 @@ int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *trans, return ret == -BCH_ERR_btree_node_dying ? 0 : ret; ret = bch2_btree_node_rewrite(trans, &iter, b, 0, flags); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -2484,7 +2484,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, bool skip_triggers) { struct bch_fs *c = trans->c; - struct btree_iter iter2 = {}; + struct btree_iter iter2 = { NULL }; struct btree *parent; int ret; @@ -2508,7 +2508,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, parent = btree_node_parent(btree_iter_path(trans, iter), b); if (parent) { - bch2_trans_copy_iter(trans, &iter2, iter); + bch2_trans_copy_iter(&iter2, iter); iter2.path = bch2_btree_path_make_mut(trans, iter2.path, iter2.flags & BTREE_ITER_intent, @@ -2522,7 +2522,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, trans->paths_sorted = false; - ret = bch2_btree_iter_traverse(trans, &iter2) ?: + ret = bch2_btree_iter_traverse(&iter2) ?: bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun); if (ret) goto err; @@ -2562,7 +2562,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b); out: - bch2_trans_iter_exit(trans, &iter2); + bch2_trans_iter_exit(&iter2); return ret; err: if (new_hash) { @@ -2633,7 +2633,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, ret = bch2_btree_node_update_key(trans, &iter, b, new_key, commit_flags, skip_triggers); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 9cfc3edce39a..afad11831e1d 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -145,7 +145,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq); EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq); - ret = bch2_btree_iter_traverse(trans, iter); + ret = bch2_btree_iter_traverse(iter); if (ret) return ret; @@ -203,19 +203,14 @@ static int btree_write_buffered_insert(struct btree_trans *trans, struct btree_write_buffered_key *wb) { - struct btree_iter iter; - int ret; - - bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k), - BTREE_ITER_cached|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, wb->btree, bkey_start_pos(&wb->k.k), + BTREE_ITER_cached|BTREE_ITER_intent); trans->journal_res.seq = wb->journal_seq; - ret = bch2_btree_iter_traverse(trans, &iter) ?: + return bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, &wb->k, BTREE_UPDATE_internal_snapshot_node); - 
bch2_trans_iter_exit(trans, &iter); - return ret; } static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb) @@ -285,7 +280,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) struct bch_fs *c = trans->c; struct journal *j = &c->journal; struct btree_write_buffer *wb = &c->btree_write_buffer; - struct btree_iter iter = {}; + struct btree_iter iter = { NULL }; size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0; bool write_locked = false; bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags); @@ -366,7 +361,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) write_locked = false; ret = lockrestart_do(trans, - bch2_btree_iter_traverse(trans, &iter) ?: + bch2_btree_iter_traverse(&iter) ?: bch2_foreground_maybe_merge(trans, iter.path, 0, BCH_WATERMARK_reclaim| BCH_TRANS_COMMIT_journal_reclaim| @@ -378,12 +373,12 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) } if (!iter.path || iter.btree_id != k->btree) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p, BTREE_ITER_intent|BTREE_ITER_all_snapshots); } - bch2_btree_iter_set_pos(trans, &iter, k->k.k.p); + bch2_btree_iter_set_pos(&iter, k->k.k.p); btree_iter_path(trans, &iter)->preserve = false; bool accounting_accumulated = false; @@ -412,7 +407,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) struct btree_path *path = btree_iter_path(trans, &iter); bch2_btree_node_unlock_write(trans, path, path->l[0].b); } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret) goto err; diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 5aab527e3e7c..0a357005e9e8 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -372,11 +372,11 @@ found: struct btree_iter iter; bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level, BTREE_ITER_intent|BTREE_ITER_all_snapshots); - ret = bch2_btree_iter_traverse(trans, &iter) ?: + ret = bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node| BTREE_TRIGGER_norun); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -694,7 +694,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans, acc.replicas.data_type = data_type; ret = bch2_disk_accounting_mod(trans, &acc, §ors, 1, false); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -995,7 +995,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, &a->k_i, 0); } err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index ccedc93fe0ef..01838a3a189d 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -258,11 +258,10 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, struct bch_write_op *op) { struct bch_fs *c = op->c; - struct btree_iter iter; struct data_update *m = container_of(op, struct data_update, op); int ret = 0; - bch2_trans_iter_init(trans, &iter, m->btree_id, + CLASS(btree_iter, iter)(trans, m->btree_id, bkey_start_pos(&bch2_keylist_front(&op->insert_keys)->k), BTREE_ITER_slots|BTREE_ITER_intent); @@ -283,7 +282,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, bch2_trans_begin(trans); - k = 
bch2_btree_iter_peek_slot(trans, &iter); + k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) goto err; @@ -456,7 +455,7 @@ restart_drop_extra_replicas: if (ret) goto err; - bch2_btree_iter_set_pos(trans, &iter, next_pos); + bch2_btree_iter_set_pos(&iter, next_pos); this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size); if (trace_io_move_finish_enabled()) @@ -483,11 +482,10 @@ nowork: count_event(c, io_move_fail); - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); goto next; } out: - bch2_trans_iter_exit(trans, &iter); BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart)); return ret; } @@ -553,10 +551,10 @@ int bch2_update_unwritten_extent(struct btree_trans *trans, bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos, BTREE_ITER_slots); ret = lockrestart_do(trans, ({ - k = bch2_btree_iter_peek_slot(trans, &iter); + k = bch2_btree_iter_peek_slot(&iter); bkey_err(k); })); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k))) break; diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index dd60c47528da..ccd24aa78d71 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -214,11 +214,13 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); struct qstr d_name = bch2_dirent_get_name(d); - prt_printf(out, "%.*s", d_name.len, d_name.name); + prt_bytes(out, d_name.name, d_name.len); if (d.v->d_casefold) { + prt_str(out, " (casefold "); struct qstr d_name = bch2_dirent_get_lookup_name(d); - prt_printf(out, " (casefold %.*s)", d_name.len, d_name.name); + prt_bytes(out, d_name.name, d_name.len); + prt_char(out, ')'); } prt_str(out, " ->"); @@ -404,8 +406,8 @@ int bch2_dirent_rename(struct btree_trans *trans, enum bch_rename_mode mode) { struct qstr src_name_lookup, dst_name_lookup; - struct btree_iter src_iter = {}; - struct btree_iter dst_iter = {}; + struct btree_iter src_iter = { NULL }; + struct btree_iter dst_iter = { NULL }; struct bkey_s_c old_src, old_dst = bkey_s_c_null; struct bkey_i_dirent *new_src = NULL, *new_dst = NULL; struct bpos dst_pos = @@ -565,16 +567,16 @@ out_set_src: } if (delete_src) { - bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot); - ret = bch2_btree_iter_traverse(trans, &src_iter) ?: + bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot); + ret = bch2_btree_iter_traverse(&src_iter) ?: bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node); if (ret) goto out; } if (delete_dst) { - bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot); - ret = bch2_btree_iter_traverse(trans, &dst_iter) ?: + bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot); + ret = bch2_btree_iter_traverse(&dst_iter) ?: bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node); if (ret) goto out; @@ -584,8 +586,8 @@ out_set_src: *src_offset = new_src->k.p.offset; *dst_offset = new_dst->k.p.offset; out: - bch2_trans_iter_exit(trans, &src_iter); - bch2_trans_iter_exit(trans, &dst_iter); + bch2_trans_iter_exit(&src_iter); + bch2_trans_iter_exit(&dst_iter); return ret; } @@ -612,7 +614,7 @@ int bch2_dirent_lookup_trans(struct btree_trans *trans, ret = -ENOENT; err: if (ret) - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } @@ -625,13 +627,12 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir, int ret = lockrestart_do(trans, 
bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0)); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 snapshot) { - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -645,7 +646,6 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty); break; } - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -735,31 +735,28 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr, ret = bch_err_throw(trans->c, ENOENT_inode); found: bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bch_inode_unpacked dir_inode; struct bch_hash_info dir_hash_info; - int ret; - ret = lookup_first_inode(trans, pos.inode, &dir_inode); + int ret = lookup_first_inode(trans, pos.inode, &dir_inode); if (ret) goto err; dir_hash_info = bch2_hash_info_init(c, &dir_inode); - bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, pos, BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(trans, &iter) ?: + ret = bch2_btree_iter_traverse(&iter) ?: bch2_hash_delete_at(trans, bch2_dirent_hash_desc, &dir_hash_info, &iter, BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(trans, &iter); err: bch_err_fn(c, ret); return ret; diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index 219e37738aee..f96530c70262 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -778,12 +778,13 @@ int bch2_accounting_read(struct bch_fs *c) struct disk_accounting_pos next; memset(&next, 0, sizeof(next)); next.type = acc_k.type + 1; - bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next)); + bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next)); continue; } accounting_read_key(trans, k); })); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -965,7 +966,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c) struct disk_accounting_pos next; memset(&next, 0, sizeof(next)); next.type = acc_k.type + 1; - bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next)); + bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next)); continue; } diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index bea14f02114f..c2be7883d941 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -785,23 +785,15 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, static int get_stripe_key_trans(struct btree_trans *trans, u64 idx, struct ec_stripe_buf *stripe) { - struct btree_iter iter; - struct bkey_s_c k; - int ret; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, - POS(0, idx), BTREE_ITER_slots); - ret = bkey_err(k); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_slots); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); + int ret = bkey_err(k); if (ret) - goto err; - if (k.k->type != KEY_TYPE_stripe) { - ret = -ENOENT; - goto err; - } + return ret; + if (k.k->type != KEY_TYPE_stripe) + return -ENOENT; bkey_reassemble(&stripe->key, k); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + return 0; } /* recovery read path: */ @@ -950,13 +942,11 @@ static void 
bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s) static int ec_stripe_delete(struct btree_trans *trans, u64 idx) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_stripes, POS(0, idx), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; /* * We expect write buffer races here @@ -965,10 +955,9 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx) if (k.k->type == KEY_TYPE_stripe && !bch2_stripe_is_open(trans->c, idx) && stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1) - ret = bch2_btree_delete_at(trans, &iter, 0); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_btree_delete_at(trans, &iter, 0); + + return 0; } /* @@ -1009,20 +998,17 @@ static int ec_stripe_key_update(struct btree_trans *trans, struct bch_fs *c = trans->c; bool create = !old; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, - new->k.p, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, new->k.p, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe), c, "error %s stripe: got existing key type %s", create ? "creating" : "updating", - bch2_bkey_types[k.k->type])) { - ret = -EINVAL; - goto err; - } + bch2_bkey_types[k.k->type])) + return -EINVAL; if (k.k->type == KEY_TYPE_stripe) { const struct bch_stripe *v = bkey_s_c_to_stripe(k).v; @@ -1042,8 +1028,7 @@ static int ec_stripe_key_update(struct btree_trans *trans, prt_str(&buf, "\nnew: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i)); bch2_fs_inconsistent(c, "%s", buf.buf); - ret = -EINVAL; - goto err; + return -EINVAL; } /* @@ -1061,10 +1046,7 @@ static int ec_stripe_key_update(struct btree_trans *trans, } } - ret = bch2_trans_update(trans, &iter, &new->k_i, 0); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_trans_update(trans, &iter, &new->k_i, 0); } static int ec_stripe_update_extent(struct btree_trans *trans, @@ -1087,7 +1069,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans, if (bp.v->level) { struct btree_iter node_iter; struct btree *b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed); - bch2_trans_iter_exit(trans, &node_iter); + bch2_trans_iter_exit(&node_iter); if (!b) return 0; @@ -1149,7 +1131,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, n, 0); out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1785,20 +1767,21 @@ static int __get_existing_stripe(struct btree_trans *trans, { struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_stripes, POS(0, idx), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, idx), 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; /* We expect write buffer races here */ if (k.k->type != KEY_TYPE_stripe) - goto out; + return 0; struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k); if (stripe_lru_pos(s.v) <= 1) - goto out; + return 0; + + bch2_set_btree_iter_dontneed(&iter); if (s.v->disk_label == head->disk_label && s.v->algorithm == head->algo && @@ 
-1806,13 +1789,10 @@ static int __get_existing_stripe(struct btree_trans *trans, le16_to_cpu(s.v->sectors) == head->blocksize && bch2_try_open_stripe(c, head->s, idx)) { bkey_reassemble(&stripe->key, k); - ret = 1; + return 1; } -out: - bch2_set_btree_iter_dontneed(trans, &iter); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + + return 0; } static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new *s) @@ -1871,7 +1851,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri if (may_create_new_stripe(c)) return -1; - struct btree_iter lru_iter; struct bkey_s_c lru_k; int ret = 0; @@ -1883,7 +1862,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri if (ret) break; } - bch2_trans_iter_exit(trans, &lru_iter); if (!ret) ret = bch_err_throw(c, stripe_alloc_blocked); if (ret == 1) @@ -1922,7 +1900,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st if (bkey_gt(k.k->p, POS(0, U32_MAX))) { if (start_pos.offset) { start_pos = min_pos; - bch2_btree_iter_set_pos(trans, &iter, start_pos); + bch2_btree_iter_set_pos(&iter, start_pos); continue; } @@ -1948,7 +1926,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st s->new_stripe.key.k.p = iter.pos; out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; err: bch2_disk_reservation_put(c, &s->res); @@ -2155,7 +2133,7 @@ static int bch2_invalidate_stripe_to_dev_from_alloc(struct btree_trans *trans, s return ret; ret = bch2_invalidate_stripe_to_dev(trans, &iter, s.s_c, k_a.k->p.inode, flags); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c index e76e58a568bf..0c1f6f2ec02c 100644 --- a/fs/bcachefs/extent_update.c +++ b/fs/bcachefs/extent_update.c @@ -92,7 +92,7 @@ static int count_iters_for_insert(struct btree_trans *trans, break; } } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); break; } @@ -108,14 +108,14 @@ int bch2_extent_atomic_end(struct btree_trans *trans, unsigned nr_iters = 0; struct btree_iter copy; - bch2_trans_copy_iter(trans, ©, iter); + bch2_trans_copy_iter(©, iter); - int ret = bch2_btree_iter_traverse(trans, ©); + int ret = bch2_btree_iter_traverse(©); if (ret) goto err; struct bkey_s_c k; - for_each_btree_key_max_continue_norestart(trans, copy, *end, 0, k, ret) { + for_each_btree_key_max_continue_norestart(copy, *end, 0, k, ret) { unsigned offset = 0; if (bkey_gt(iter->pos, bkey_start_pos(k.k))) @@ -126,7 +126,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans, break; } err: - bch2_trans_iter_exit(trans, ©); + bch2_trans_iter_exit(©); return ret < 0 ? 
ret : 0; } diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 8152ef1cbbcd..b879a586b7f6 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -282,9 +282,9 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, if (have_pick) return 1; - if (!have_dirty_ptrs) + if (!have_dirty_ptrs && !bkey_is_btree_ptr(k.k)) return 0; - if (have_missing_devs) + if (have_missing_devs || !have_dirty_ptrs) return bch_err_throw(c, no_device_to_read_from); if (have_csum_errors) return bch_err_throw(c, data_read_csum_err); diff --git a/fs/bcachefs/fast_list.h b/fs/bcachefs/fast_list.h index 73c9bf591fd6..f67df3f72ee2 100644 --- a/fs/bcachefs/fast_list.h +++ b/fs/bcachefs/fast_list.h @@ -9,7 +9,7 @@ struct fast_list_pcpu; struct fast_list { GENRADIX(void *) items; - struct ida slots_allocated;; + struct ida slots_allocated; struct fast_list_pcpu __percpu *buffer; }; diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c index f2389054693a..0005569ecace 100644 --- a/fs/bcachefs/fs-io-buffered.c +++ b/fs/bcachefs/fs-io-buffered.c @@ -157,7 +157,6 @@ static void bchfs_read(struct btree_trans *trans, struct readpages_iter *readpages_iter) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_buf sk; int flags = BCH_READ_retry_if_stale| BCH_READ_may_promote; @@ -167,7 +166,7 @@ static void bchfs_read(struct btree_trans *trans, bch2_bkey_buf_init(&sk); bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inum.inum, rbio->bio.bi_iter.bi_sector), BTREE_ITER_slots); while (1) { @@ -183,12 +182,12 @@ static void bchfs_read(struct btree_trans *trans, if (ret) goto err; - bch2_btree_iter_set_snapshot(trans, &iter, snapshot); + bch2_btree_iter_set_snapshot(&iter, snapshot); - bch2_btree_iter_set_pos(trans, &iter, + bch2_btree_iter_set_pos(&iter, POS(inum.inum, rbio->bio.bi_iter.bi_sector)); - k = bch2_btree_iter_peek_slot(trans, &iter); + k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) goto err; @@ -251,7 +250,6 @@ err: !bch2_err_matches(ret, BCH_ERR_transaction_restart)) break; } - bch2_trans_iter_exit(trans, &iter); if (ret) { CLASS(printbuf, buf)(); diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c index e53fee0513fd..8d5b2468f4cd 100644 --- a/fs/bcachefs/fs-io-direct.c +++ b/fs/bcachefs/fs-io-direct.c @@ -281,7 +281,7 @@ retry: } offset = iter.pos.offset; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); err: if (bch2_err_matches(err, BCH_ERR_transaction_restart)) goto retry; diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c index 2a6705186c44..469492f6264a 100644 --- a/fs/bcachefs/fs-io-pagecache.c +++ b/fs/bcachefs/fs-io-pagecache.c @@ -635,6 +635,8 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf) goto out; } + inode->ei_last_dirtied = (unsigned long) current; + bch2_set_folio_dirty(c, inode, folio, &res, offset, len); bch2_folio_reservation_put(c, inode, &res); diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 93ad33f0953a..de0d965f3fde 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -206,7 +206,7 @@ static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_in ret = bch2_inode_write(trans, &iter, &u); } fsck_err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -626,15 +626,14 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, u64 start_sector, u64 end_sector) { struct 
bch_fs *c = inode->v.i_sb->s_fs_info; - CLASS(btree_trans, trans)(c); - struct btree_iter iter; struct bpos end_pos = POS(inode->v.i_ino, end_sector); struct bch_io_opts opts; int ret = 0; bch2_inode_opts_get(&opts, c, &inode->ei_inode); - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, + CLASS(btree_trans, trans)(c); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inode->v.i_ino, start_sector), BTREE_ITER_slots|BTREE_ITER_intent); @@ -657,9 +656,9 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, if (ret) goto bkey_err; - bch2_btree_iter_set_snapshot(trans, &iter, snapshot); + bch2_btree_iter_set_snapshot(&iter, snapshot); - k = bch2_btree_iter_peek_slot(trans, &iter); + k = bch2_btree_iter_peek_slot(&iter); if ((ret = bkey_err(k))) goto bkey_err; @@ -670,13 +669,13 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, /* already reserved */ if (bkey_extent_is_reservation(k) && bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) { - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); continue; } if (bkey_extent_is_data(k.k) && !(mode & FALLOC_FL_ZERO_RANGE)) { - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); continue; } @@ -697,7 +696,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, if (ret) goto bkey_err; } - bch2_btree_iter_set_pos(trans, &iter, POS(iter.pos.inode, hole_start)); + bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start)); if (ret) goto bkey_err; @@ -747,7 +746,6 @@ bkey_err: bch2_quota_reservation_put(c, inode, &quota_res); } - bch2_trans_iter_exit(trans, &iter); return ret; } diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 56b7126bc31d..3b289f696612 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -141,7 +141,7 @@ retry: if (!ret) bch2_inode_update_after_write(trans, inode, &inode_u, fields); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; @@ -692,7 +692,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans, if (ret) goto err; out: - bch2_trans_iter_exit(trans, &dirent_iter); + bch2_trans_iter_exit(&dirent_iter); return inode; err: inode = ERR_PTR(ret); @@ -1131,7 +1131,7 @@ retry: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); btree_err: - bch2_trans_iter_exit(trans, &inode_iter); + bch2_trans_iter_exit(&inode_iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; @@ -1397,21 +1397,20 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, if (ret) return ret; - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - SPOS(inode->ei_inum.inum, start, snapshot), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, + SPOS(inode->ei_inum.inum, start, snapshot), 0); struct bkey_s_c k = - bch2_btree_iter_peek_max(trans, &iter, POS(inode->ei_inum.inum, end)); + bch2_btree_iter_peek_max(&iter, POS(inode->ei_inum.inum, end)); ret = bkey_err(k); if (ret) - goto err; + return ret; u64 pagecache_end = k.k ?
max(start, bkey_start_offset(k.k)) : end; ret = bch2_next_fiemap_pagecache_extent(trans, inode, start, pagecache_end, cur); if (ret) - goto err; + return ret; struct bpos pagecache_start = bkey_start_pos(&cur->kbuf.k->k); @@ -1447,7 +1446,7 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, &cur->kbuf); if (ret) - goto err; + return ret; struct bkey_i *k = cur->kbuf.k; sectors = min_t(unsigned, sectors, k->k.size - offset_into_extent); @@ -1459,9 +1458,8 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, k->k.p = iter.pos; k->k.p.offset += k->k.size; } -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + + return 0; } static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, @@ -1948,8 +1946,6 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child struct bch_inode_info *inode = to_bch_ei(child->d_inode); struct bch_inode_info *dir = to_bch_ei(parent->d_inode); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter1; - struct btree_iter iter2; struct bkey_s_c k; struct bkey_s_c_dirent d; struct bch_inode_unpacked inode_u; @@ -1963,10 +1959,10 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child return -EINVAL; CLASS(btree_trans, trans)(c); - bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents, - POS(dir->ei_inode.bi_inum, 0), 0); - bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents, - POS(dir->ei_inode.bi_inum, 0), 0); + CLASS(btree_iter, iter1)(trans, BTREE_ID_dirents, + POS(dir->ei_inode.bi_inum, 0), 0); + CLASS(btree_iter, iter2)(trans, BTREE_ID_dirents, + POS(dir->ei_inode.bi_inum, 0), 0); retry: bch2_trans_begin(trans); @@ -1974,17 +1970,17 @@ retry: if (ret) goto err; - bch2_btree_iter_set_snapshot(trans, &iter1, snapshot); - bch2_btree_iter_set_snapshot(trans, &iter2, snapshot); + bch2_btree_iter_set_snapshot(&iter1, snapshot); + bch2_btree_iter_set_snapshot(&iter2, snapshot); ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u); if (ret) goto err; if (inode_u.bi_dir == dir->ei_inode.bi_inum) { - bch2_btree_iter_set_pos(trans, &iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset)); + bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset)); - k = bch2_btree_iter_peek_slot(trans, &iter1); + k = bch2_btree_iter_peek_slot(&iter1); ret = bkey_err(k); if (ret) goto err; @@ -2008,7 +2004,7 @@ retry: * File with multiple hardlinks and our backref is to the wrong * directory - linear search: */ - for_each_btree_key_continue_norestart(trans, iter2, 0, k, ret) { + for_each_btree_key_continue_norestart(iter2, 0, k, ret) { if (k.k->p.inode > dir->ei_inode.bi_inum) break; @@ -2039,8 +2035,6 @@ err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_iter_exit(trans, &iter1); - bch2_trans_iter_exit(trans, &iter2); return ret; } diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 183b88bbd402..589c2b915ff2 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -15,6 +15,7 @@ #include "io_misc.h" #include "keylist.h" #include "namei.h" +#include "progress.h" #include "recovery_passes.h" #include "snapshot.h" #include "super.h" @@ -125,7 +126,7 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans, struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); *target = le64_to_cpu(d.v->d_inum); *type = d.v->d_type; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return 0; } @@ -155,7 +156,7 @@ static 
int find_snapshot_tree_subvol(struct btree_trans *trans, } ret = bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol); found: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -166,7 +167,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, { struct bch_fs *c = trans->c; struct qstr lostfound_str = QSTR("lost+found"); - struct btree_iter lostfound_iter = {}; + struct btree_iter lostfound_iter = { NULL }; u64 inum = 0; unsigned d_type = 0; int ret; @@ -201,7 +202,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, return ret; subvol->v.inode = cpu_to_le64(reattaching_inum); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); } subvol_inum root_inum = { @@ -274,8 +275,8 @@ create_lostfound: if (ret) goto err; - bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot); - ret = bch2_btree_iter_traverse(trans, &lostfound_iter); + bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot); + ret = bch2_btree_iter_traverse(&lostfound_iter); if (ret) goto err; @@ -291,7 +292,7 @@ create_lostfound: BTREE_UPDATE_internal_snapshot_node); err: bch_err_msg(c, ret, "creating lost+found"); - bch2_trans_iter_exit(trans, &lostfound_iter); + bch2_trans_iter_exit(&lostfound_iter); return ret; } @@ -332,11 +333,11 @@ static inline bool inode_should_reattach(struct bch_inode_unpacked *inode) static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents, - SPOS(d_pos.inode, d_pos.offset, snapshot), - BTREE_ITER_intent| - BTREE_ITER_with_updates); + CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, + SPOS(d_pos.inode, d_pos.offset, snapshot), + BTREE_ITER_intent| + BTREE_ITER_with_updates); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; @@ -349,16 +350,15 @@ static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); ret = PTR_ERR_OR_ZERO(k); if (ret) - goto err; + return ret; bkey_init(&k->k); k->k.type = KEY_TYPE_whiteout; k->k.p = iter.pos; - ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node); + return bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node); } -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + + return 0; } static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode) @@ -382,7 +382,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * return ret; subvol->v.fs_path_parent = BCACHEFS_ROOT_SUBVOL; - bch2_trans_iter_exit(trans, &subvol_iter); + bch2_trans_iter_exit(&subvol_iter); u64 root_inum; ret = subvol_lookup(trans, inode->bi_parent_subvol, @@ -454,7 +454,6 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * */ if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) { CLASS(snapshot_id_list, whiteouts_done)(); - struct btree_iter iter; struct bkey_s_c k; darray_init(&whiteouts_done); @@ -473,19 +472,16 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * struct bch_inode_unpacked child_inode; ret = bch2_inode_unpack(k, &child_inode); if (ret) - break; + return ret; if (!inode_should_reattach(&child_inode)) { - ret = maybe_delete_dirent(trans, - SPOS(lostfound.bi_inum, inode->bi_dir_offset, - dirent_snapshot), - k.k->p.snapshot); - if (ret) - 
break; - - ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); + ret = maybe_delete_dirent(trans, + SPOS(lostfound.bi_inum, inode->bi_dir_offset, + dirent_snapshot), + k.k->p.snapshot) ?: + snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); if (ret) - break; + return ret; } else { iter.snapshot = k.k->p.snapshot; child_inode.bi_dir = inode->bi_dir; @@ -494,10 +490,9 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * ret = bch2_inode_write_flags(trans, &iter, &child_inode, BTREE_UPDATE_internal_snapshot_node); if (ret) - break; + return ret; } } - bch2_trans_iter_exit(trans, &iter); } return ret; @@ -531,7 +526,7 @@ static int remove_backpointer(struct btree_trans *trans, int ret = bkey_err(d) ?: dirent_points_to_inode(c, d, inode) ?: bch2_fsck_remove_dirent(trans, d.k->p); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -582,9 +577,9 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub new_inode.bi_subvol = subvolid; int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?: - bch2_btree_iter_traverse(trans, &inode_iter) ?: + bch2_btree_iter_traverse(&inode_iter) ?: bch2_inode_write(trans, &inode_iter, &new_inode); - bch2_trans_iter_exit(trans, &inode_iter); + bch2_trans_iter_exit(&inode_iter); if (ret) return ret; @@ -619,7 +614,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub s->v.subvol = cpu_to_le32(subvolid); SET_BCH_SNAPSHOT_SUBVOL(&s->v, true); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_snapshot_trees, POS(0, snapshot_tree), @@ -632,7 +627,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub if (!st->v.master_subvol) st->v.master_subvol = cpu_to_le32(subvolid); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return 0; } @@ -644,11 +639,8 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 switch (btree) { case BTREE_ID_extents: { - struct btree_iter iter = {}; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); - struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0)); - bch2_trans_iter_exit(trans, &iter); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); + struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0)); int ret = bkey_err(k); if (ret) return ret; @@ -846,7 +838,6 @@ static int get_inodes_all_snapshots(struct btree_trans *trans, struct inode_walker *w, u64 inum) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -866,7 +857,6 @@ static int get_inodes_all_snapshots(struct btree_trans *trans, if (ret) break; } - bch2_trans_iter_exit(trans, &iter); if (ret) return ret; @@ -882,7 +872,6 @@ static int get_visible_inodes(struct btree_trans *trans, u64 inum) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -906,7 +895,6 @@ static int get_visible_inodes(struct btree_trans *trans, if (ret) break; } - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -1045,11 +1033,9 @@ static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans, static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0); - 
int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set; - bch2_trans_iter_exit(trans, &iter); - return ret; + CLASS(btree_iter, iter)(trans, BTREE_ID_deleted_inodes, p, 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); + return bkey_err(k) ?: k.k->type == KEY_TYPE_set; } static int check_inode_dirent_inode(struct btree_trans *trans, @@ -1106,7 +1092,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans, out: ret = 0; fsck_err: - bch2_trans_iter_exit(trans, &dirent_iter); + bch2_trans_iter_exit(&dirent_iter); bch_err_fn(c, ret); return ret; } @@ -1331,11 +1317,16 @@ int bch2_check_inodes(struct bch_fs *c) CLASS(btree_trans, trans)(c); CLASS(snapshots_seen, s)(); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_inodes)); + return for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_inode(trans, &iter, k, &snapshot_root, &s)); + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + check_inode(trans, &iter, k, &snapshot_root, &s); + })); } static int find_oldest_inode_needs_reattach(struct btree_trans *trans, @@ -1374,7 +1365,7 @@ static int find_oldest_inode_needs_reattach(struct btree_trans *trans, *inode = parent_inode; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1422,12 +1413,17 @@ fsck_err: */ int bch2_check_unreachable_inodes(struct bch_fs *c) { + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_inodes)); + CLASS(btree_trans, trans)(c); return for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_unreachable_inode(trans, &iter, k)); + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + check_unreachable_inode(trans, &iter, k); + })); } static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode) @@ -1452,13 +1448,12 @@ static int check_key_has_inode(struct btree_trans *trans, { struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); - struct btree_iter iter2 = {}; int ret = PTR_ERR_OR_ZERO(i); if (ret) return ret; if (k.k->type == KEY_TYPE_whiteout) - goto out; + return 0; bool have_inode = i && !i->whiteout; @@ -1466,7 +1461,7 @@ static int check_key_has_inode(struct btree_trans *trans, goto reconstruct; if (have_inode && btree_matches_i_mode(iter->btree_id, i->inode.bi_mode)) - goto out; + return 0; prt_printf(&buf, ", "); @@ -1546,7 +1541,6 @@ static int check_key_has_inode(struct btree_trans *trans, out: err: fsck_err: - bch2_trans_iter_exit(trans, &iter2); bch_err_fn(c, ret); return ret; delete: @@ -1572,7 +1566,6 @@ static int maybe_reconstruct_inum_btree(struct btree_trans *trans, u64 inum, u32 snapshot, enum btree_id btree) { - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -1583,7 +1576,6 @@ static int maybe_reconstruct_inum_btree(struct btree_trans *trans, ret = 1; break; } - bch2_trans_iter_exit(trans, &iter); if (ret <= 0) return ret; @@ -1729,16 +1721,16 @@ static int overlapping_extents_found(struct btree_trans *trans, { struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); - struct btree_iter iter1, iter2 = {}; + struct btree_iter iter2 = {}; struct bkey_s_c k1, k2; int ret; BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2))); - bch2_trans_iter_init(trans, &iter1, btree, pos1, - BTREE_ITER_all_snapshots| - 
BTREE_ITER_not_extents); - k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX)); + CLASS(btree_iter, iter1)(trans, btree, pos1, + BTREE_ITER_all_snapshots| + BTREE_ITER_not_extents); + k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX)); ret = bkey_err(k1); if (ret) goto err; @@ -1758,12 +1750,12 @@ static int overlapping_extents_found(struct btree_trans *trans, goto err; } - bch2_trans_copy_iter(trans, &iter2, &iter1); + bch2_trans_copy_iter(&iter2, &iter1); while (1) { - bch2_btree_iter_advance(trans, &iter2); + bch2_btree_iter_advance(&iter2); - k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX)); + k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX)); ret = bkey_err(k2); if (ret) goto err; @@ -1832,8 +1824,7 @@ static int overlapping_extents_found(struct btree_trans *trans, } fsck_err: err: - bch2_trans_iter_exit(trans, &iter2); - bch2_trans_iter_exit(trans, &iter1); + bch2_trans_iter_exit(&iter2); return ret; } @@ -1899,6 +1890,7 @@ static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *it return 0; } +noinline_for_stack static int check_extent(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct inode_walker *inode, @@ -1961,11 +1953,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter, "extent type past end of inode %llu:%u, i_size %llu\n%s", i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - ret = bch2_fpunch_snapshot(trans, - SPOS(i->inode.bi_inum, - last_block, - i->inode.bi_snapshot), - POS(i->inode.bi_inum, U64_MAX)); + ret = snapshots_seen_add_inorder(c, s, i->inode.bi_snapshot) ?: + bch2_fpunch_snapshot(trans, + SPOS(i->inode.bi_inum, + last_block, + i->inode.bi_snapshot), + POS(i->inode.bi_inum, U64_MAX)); if (ret) goto err; @@ -2021,9 +2014,13 @@ int bch2_check_extents(struct bch_fs *c) CLASS(inode_walker, w)(); CLASS(extent_ends, extent_ends)(); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_extents)); + int ret = for_each_btree_key(trans, iter, BTREE_ID_extents, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({ + progress_update_iter(trans, &progress, &iter); bch2_disk_reservation_put(c, &res); check_extent(trans, &iter, k, &w, &s, &extent_ends, &res); })) ?: @@ -2038,11 +2035,15 @@ int bch2_check_indirect_extents(struct bch_fs *c) CLASS(btree_trans, trans)(c); struct disk_reservation res = { 0 }; + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_reflink)); + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink, POS_MIN, BTREE_ITER_prefetch, k, &res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); bch2_disk_reservation_put(c, &res); check_extent_overbig(trans, &iter, k); })); @@ -2116,7 +2117,7 @@ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *su struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); *subvolid = k.k->p.offset; goto found; } @@ -2124,7 +2125,7 @@ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *su if (!ret) ret = -ENOENT; found: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -2263,7 +2264,7 @@ static int check_dirent_to_subvol(struct btree_trans 
*trans, struct btree_iter * out: err: fsck_err: - bch2_trans_iter_exit(trans, &subvol_iter); + bch2_trans_iter_exit(&subvol_iter); return ret; } @@ -2404,17 +2405,15 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - struct btree_iter delete_iter; - bch2_trans_iter_init(trans, &delete_iter, + CLASS(btree_iter, delete_iter)(trans, BTREE_ID_dirents, SPOS(k.k->p.inode, k.k->p.offset, *i), BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(trans, &delete_iter) ?: + ret = bch2_btree_iter_traverse(&delete_iter) ?: bch2_hash_delete_at(trans, bch2_dirent_hash_desc, hash_info, &delete_iter, BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(trans, &delete_iter); if (ret) return ret; @@ -2451,15 +2450,20 @@ int bch2_check_dirents(struct bch_fs *c) CLASS(snapshots_seen, s)(); CLASS(inode_walker, dir)(); CLASS(inode_walker, target)(); + struct progress_indicator_state progress; bool need_second_pass = false, did_second_pass = false; int ret; again: + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_dirents)); + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s, - &need_second_pass)) ?: + &need_second_pass); + })) ?: check_subdir_count_notnested(trans, &dir); if (!ret && need_second_pass && !did_second_pass) { @@ -2519,13 +2523,18 @@ int bch2_check_xattrs(struct bch_fs *c) CLASS(btree_trans, trans)(c); CLASS(inode_walker, inode)(); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_xattrs)); + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, - BCH_TRANS_COMMIT_no_enospc, - check_xattr(trans, &iter, k, &hash_info, &inode)); + BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + check_xattr(trans, &iter, k, &hash_info, &inode); + })); return ret; } @@ -2598,7 +2607,6 @@ int bch2_check_root(struct bch_fs *c) static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct btree_iter parent_iter = {}; CLASS(darray_u32, subvol_path)(); CLASS(printbuf, buf)(); int ret = 0; @@ -2606,6 +2614,8 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, if (k.k->type != KEY_TYPE_subvolume) return 0; + CLASS(btree_iter, parent_iter)(trans, BTREE_ID_subvolumes, POS_MIN, 0); + subvol_inum start = { .subvol = k.k->p.offset, .inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode), @@ -2614,7 +2624,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) { ret = darray_push(&subvol_path, k.k->p.offset); if (ret) - goto err; + return ret; struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); @@ -2633,20 +2643,18 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, ret = bch2_inum_to_path(trans, start, &buf); if (ret) - goto err; + return ret; if (fsck_err(trans, subvol_loop, "%s", buf.buf)) ret = reattach_subvol(trans, s); break; } - bch2_trans_iter_exit(trans, &parent_iter); - bch2_trans_iter_init(trans, &parent_iter, - BTREE_ID_subvolumes, POS(0, 
parent), 0); - k = bch2_btree_iter_peek_slot(trans, &parent_iter); + bch2_btree_iter_set_pos(&parent_iter, POS(0, parent)); + k = bch2_btree_iter_peek_slot(&parent_iter); ret = bkey_err(k); if (ret) - goto err; + return ret; if (fsck_err_on(k.k->type != KEY_TYPE_subvolume, trans, subvol_unreachable, @@ -2654,48 +2662,49 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = reattach_subvol(trans, s); - break; + return reattach_subvol(trans, s); } } fsck_err: -err: - bch2_trans_iter_exit(trans, &parent_iter); return ret; } int bch2_check_subvolume_structure(struct bch_fs *c) { CLASS(btree_trans, trans)(c); + + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_subvolumes)); + return for_each_btree_key_commit(trans, iter, BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_subvol_path(trans, &iter, k)); + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + check_subvol_path(trans, &iter, k); + })); } static int bch2_bi_depth_renumber_one(struct btree_trans *trans, u64 inum, u32 snapshot, u32 new_depth) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inum, snapshot), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, inum, snapshot), 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); struct bch_inode_unpacked inode; int ret = bkey_err(k) ?: !bkey_is_inode(k.k) ? -BCH_ERR_ENOENT_inode : bch2_inode_unpack(k, &inode); if (ret) - goto err; + return ret; if (inode.bi_depth != new_depth) { inode.bi_depth = new_depth; - ret = __bch2_fsck_write_inode(trans, &inode) ?: - bch2_trans_commit(trans, NULL, NULL, 0); + return __bch2_fsck_write_inode(trans, &inode) ?: + bch2_trans_commit(trans, NULL, NULL, 0); } -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + + return 0; } static int bch2_bi_depth_renumber(struct btree_trans *trans, darray_u64 *path, @@ -2720,7 +2729,6 @@ static int bch2_bi_depth_renumber(struct btree_trans *trans, darray_u64 *path, static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) { struct bch_fs *c = trans->c; - struct btree_iter inode_iter = {}; CLASS(darray_u64, path)(); CLASS(printbuf, buf)(); u32 snapshot = inode_k.k->p.snapshot; @@ -2735,6 +2743,8 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) if (ret) return ret; + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, POS_MIN, 0); + /* * If we're running full fsck, check_dirents() will have already ran, * and we shouldn't see any missing backpointers here - otherwise that's @@ -2752,7 +2762,7 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) goto out; if (!ret && (ret = dirent_points_to_inode(c, d, &inode))) - bch2_trans_iter_exit(trans, &dirent_iter); + bch2_trans_iter_exit(&dirent_iter); if (bch2_err_matches(ret, ENOENT)) { printbuf_reset(&buf); @@ -2762,15 +2772,14 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) goto out; } - bch2_trans_iter_exit(trans, &dirent_iter); + bch2_trans_iter_exit(&dirent_iter); ret = darray_push(&path, inode.bi_inum); if (ret) return ret; - bch2_trans_iter_exit(trans, &inode_iter); - inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, - SPOS(0, inode.bi_dir, snapshot), 0); + bch2_btree_iter_set_pos(&inode_iter, SPOS(0, 
inode.bi_dir, snapshot)); + inode_k = bch2_btree_iter_peek_slot(&inode_iter); struct bch_inode_unpacked parent_inode; ret = bkey_err(inode_k) ?: @@ -2827,7 +2836,6 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) ret = bch2_bi_depth_renumber(trans, &path, snapshot, min_bi_depth); out: fsck_err: - bch2_trans_iter_exit(trans, &inode_iter); bch_err_fn(c, ret); return ret; } diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 4a9725f30c4f..40c4f1c0ba91 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -364,7 +364,7 @@ int __bch2_inode_peek(struct btree_trans *trans, err: if (warn) bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum); - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } @@ -373,19 +373,15 @@ int bch2_inode_find_by_inum_snapshot(struct btree_trans *trans, struct bch_inode_unpacked *inode, unsigned flags) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inode_nr, snapshot), flags); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, inode_nr, snapshot), flags); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; - ret = bkey_is_inode(k.k) + return bkey_is_inode(k.k) ? bch2_inode_unpack(k, inode) : -BCH_ERR_ENOENT_inode; -err: - bch2_trans_iter_exit(trans, &iter); - return ret; } int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans, @@ -397,7 +393,7 @@ int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans, ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0); if (!ret) - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -410,7 +406,7 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *trans, ret = bch2_inode_peek(trans, &iter, inode, inum, 0); if (!ret) - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -424,7 +420,6 @@ int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum, int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, struct bch_inode_unpacked *root) { - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -433,15 +428,11 @@ int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, BTREE_ITER_all_snapshots, k, ret) { if (k.k->p.offset != inum) break; - if (bkey_is_inode(k.k)) { - ret = bch2_inode_unpack(k, root); - goto out; - } + if (bkey_is_inode(k.k)) + return bch2_inode_unpack(k, root); } /* We're only called when we know we have an inode for @inum */ BUG_ON(!ret); -out: - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -696,14 +687,15 @@ bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter struct bkey_s_c k; int ret = 0; - for_each_btree_key_max_norestart(trans, *iter, btree, - bpos_successor(pos), - SPOS(pos.inode, pos.offset, U32_MAX), - flags|BTREE_ITER_all_snapshots, k, ret) + bch2_trans_iter_init(trans, iter, btree, bpos_successor(pos), + flags|BTREE_ITER_all_snapshots); + + for_each_btree_key_max_continue_norestart(*iter, SPOS(pos.inode, pos.offset, U32_MAX), + flags|BTREE_ITER_all_snapshots, k, ret) if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot)) return k; - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret ? 
bkey_s_c_err(ret) : bkey_s_c_null; } @@ -719,7 +711,7 @@ again: bkey_is_inode(k.k)) return k; - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); pos = k.k->p; goto again; } @@ -727,7 +719,6 @@ again: int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -740,7 +731,6 @@ int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) ret = 1; break; } - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -792,7 +782,7 @@ static int update_parent_inode_has_children(struct btree_trans *trans, struct bp bkey_inode_flags_set(bkey_i_to_s(update), f ^ BCH_INODE_has_child_snapshot); } err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -961,11 +951,10 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m cursor_idx &= ~(~0ULL << c->opts.shard_inode_numbers_bits); - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_logged_ops, - POS(LOGGED_OPS_INUM_inode_cursors, cursor_idx), - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_logged_ops, + POS(LOGGED_OPS_INUM_inode_cursors, cursor_idx), + BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ERR_PTR(ret); @@ -974,9 +963,8 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m k.k->type == KEY_TYPE_inode_alloc_cursor ? bch2_bkey_make_mut_typed(trans, &iter, &k, 0, inode_alloc_cursor) : bch2_bkey_alloc(trans, &iter, 0, inode_alloc_cursor); - ret = PTR_ERR_OR_ZERO(cursor); - if (ret) - goto err; + if (IS_ERR(cursor)) + return cursor; if (c->opts.inodes_32bit) { *min = BLOCKDEV_INODE_MAX; @@ -997,9 +985,8 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m cursor->v.idx = cpu_to_le64(*min); le32_add_cpu(&cursor->v.gen, 1); } -err: - bch2_trans_iter_exit(trans, &iter); - return ret ? 
ERR_PTR(ret) : cursor; + + return cursor; } /* @@ -1026,7 +1013,7 @@ int bch2_inode_create(struct btree_trans *trans, BTREE_ITER_intent); struct bkey_s_c k; again: - while ((k = bch2_btree_iter_peek(trans, iter)).k && + while ((k = bch2_btree_iter_peek(iter)).k && !(ret = bkey_err(k)) && bkey_lt(k.k->p, POS(0, max))) { if (pos < iter->pos.offset) @@ -1043,7 +1030,7 @@ again: * we've found just one: */ pos = iter->pos.offset + 1; - bch2_btree_iter_set_pos(trans, iter, POS(0, pos)); + bch2_btree_iter_set_pos(iter, POS(0, pos)); } if (!ret && pos < max) @@ -1053,21 +1040,21 @@ again: ret = bch_err_throw(trans->c, ENOSPC_inode_create); if (ret) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } /* Retry from start */ pos = start = min; - bch2_btree_iter_set_pos(trans, iter, POS(0, pos)); + bch2_btree_iter_set_pos(iter, POS(0, pos)); le32_add_cpu(&cursor->v.gen, 1); goto again; found_slot: - bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, snapshot)); - k = bch2_btree_iter_peek_slot(trans, iter); + bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot)); + k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); if (ret) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ret; } @@ -1080,7 +1067,6 @@ found_slot: static int bch2_inode_delete_keys(struct btree_trans *trans, subvol_inum inum, enum btree_id id) { - struct btree_iter iter; struct bkey_s_c k; struct bkey_i delete; struct bpos end = POS(inum.inum, U64_MAX); @@ -1091,8 +1077,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans, * We're never going to be deleting partial extents, no need to use an * extent iterator: */ - bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, id, POS(inum.inum, 0), BTREE_ITER_intent); while (1) { bch2_trans_begin(trans); @@ -1101,9 +1086,9 @@ static int bch2_inode_delete_keys(struct btree_trans *trans, if (ret) goto err; - bch2_btree_iter_set_snapshot(trans, &iter, snapshot); + bch2_btree_iter_set_snapshot(&iter, snapshot); - k = bch2_btree_iter_peek_max(trans, &iter, end); + k = bch2_btree_iter_peek_max(&iter, end); ret = bkey_err(k); if (ret) goto err; @@ -1127,7 +1112,6 @@ err: break; } - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -1184,7 +1168,7 @@ retry: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; @@ -1305,10 +1289,7 @@ int bch2_inode_set_casefold(struct btree_trans *trans, subvol_inum inum, static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot) { struct bch_fs *c = trans->c; - struct btree_iter iter = {}; - struct bkey_i_inode_generation delete; - struct bch_inode_unpacked inode_u; - struct bkey_s_c k; + struct btree_iter iter = { NULL }; int ret; do { @@ -1330,8 +1311,8 @@ static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum retry: bch2_trans_begin(trans); - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inum, snapshot), BTREE_ITER_intent); + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + SPOS(0, inum, snapshot), BTREE_ITER_intent); ret = bkey_err(k); if (ret) goto err; @@ -1344,12 +1325,14 @@ retry: goto err; } + struct bch_inode_unpacked inode_u; bch2_inode_unpack(k, &inode_u); /* Subvolume root? 
*/ if (inode_u.bi_subvol) bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum); + struct bkey_i_inode_generation delete; bkey_inode_generation_init(&delete.k_i); delete.k.p = iter.pos; delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); @@ -1358,7 +1341,7 @@ retry: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; @@ -1383,7 +1366,7 @@ next_parent: bool unlinked = bkey_is_unlinked_inode(k); pos = k.k->p; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (!unlinked) return 0; @@ -1409,12 +1392,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, bool from_deleted_inodes) { struct bch_fs *c = trans->c; - struct btree_iter inode_iter; - struct bkey_s_c k; CLASS(printbuf, buf)(); int ret; - k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached); + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, pos, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&inode_iter); ret = bkey_err(k); if (ret) return ret; @@ -1426,11 +1408,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; ret = bch2_inode_unpack(k, inode); if (ret) - goto out; + return ret; if (S_ISDIR(inode->bi_mode)) { ret = bch2_empty_dir_snapshot(trans, pos.offset, 0, pos.snapshot); @@ -1441,7 +1423,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; } ret = inode->bi_flags & BCH_INODE_unlinked ? 0 : bch_err_throw(c, inode_not_unlinked); @@ -1451,7 +1433,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; ret = !(inode->bi_flags & BCH_INODE_has_child_snapshot) ? 
0 : bch_err_throw(c, inode_has_child_snapshot); @@ -1462,11 +1444,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; ret = bch2_inode_has_child_snapshots(trans, k.k->p); if (ret < 0) - goto out; + return ret; if (ret) { if (fsck_err(trans, inode_has_child_snapshots_wrong, @@ -1477,13 +1459,12 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, inode->bi_flags |= BCH_INODE_has_child_snapshot; ret = __bch2_fsck_write_inode(trans, inode); if (ret) - goto out; + return ret; } if (!from_deleted_inodes) { - ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: bch_err_throw(c, inode_has_child_snapshot); - goto out; } goto delete; @@ -1494,20 +1475,15 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, if (test_bit(BCH_FS_clean_recovery, &c->flags) && !fsck_err(trans, deleted_inode_but_clean, "filesystem marked as clean but have deleted inode %llu:%u", - pos.offset, pos.snapshot)) { - ret = 0; - goto out; - } + pos.offset, pos.snapshot)) + return 0; ret = 1; } -out: fsck_err: - bch2_trans_iter_exit(trans, &inode_iter); return ret; delete: - ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false); - goto out; + return bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false); } static int may_delete_deleted_inum(struct btree_trans *trans, subvol_inum inum, diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index 5d6681c070ba..3f9defd144a4 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -43,7 +43,7 @@ int bch2_extent_fallocate(struct btree_trans *trans, bch2_bkey_buf_init(&new); closure_init_stack(&cl); - k = bch2_btree_iter_peek_slot(trans, iter); + k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); if (ret) return ret; @@ -190,12 +190,12 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, if (ret) continue; - bch2_btree_iter_set_snapshot(trans, iter, snapshot); + bch2_btree_iter_set_snapshot(iter, snapshot); /* * peek_max() doesn't have ideal semantics for extents: */ - k = bch2_btree_iter_peek_max(trans, iter, end_pos); + k = bch2_btree_iter_peek_max(iter, end_pos); if (!k.k) break; @@ -222,16 +222,11 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end, s64 *i_sectors_delta) { CLASS(btree_trans, trans)(c); - - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - POS(inum.inum, start), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inum.inum, start), + BTREE_ITER_intent); int ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta); - bch2_trans_iter_exit(trans, &iter); - return bch2_err_matches(ret, BCH_ERR_transaction_restart) ? 
0 : ret; } @@ -251,7 +246,7 @@ static int truncate_set_isize(struct btree_trans *trans, u64 new_i_size, bool warn) { - struct btree_iter iter = {}; + struct btree_iter iter = { NULL }; struct bch_inode_unpacked inode_u; int ret; @@ -259,7 +254,7 @@ static int truncate_set_isize(struct btree_trans *trans, (inode_u.bi_size = new_i_size, 0) ?: bch2_inode_write(trans, &iter, &inode_u); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -268,7 +263,6 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans, u64 *i_sectors_delta) { struct bch_fs *c = trans->c; - struct btree_iter fpunch_iter; struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k); subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) }; u64 new_i_size = le64_to_cpu(op->v.new_i_size); @@ -280,11 +274,10 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans, if (ret) goto err; - bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents, + CLASS(btree_iter, fpunch_iter)(trans, BTREE_ID_extents, POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9), BTREE_ITER_intent); ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta); - bch2_trans_iter_exit(trans, &fpunch_iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) ret = 0; @@ -366,7 +359,7 @@ static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, ret = bch2_inode_write(trans, &iter, &inode_u); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -416,7 +409,7 @@ case LOGGED_OP_FINSERT_start: if (ret) goto err; } else { - bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, src_offset)); + bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset)); ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta); if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -442,12 +435,12 @@ case LOGGED_OP_FINSERT_shift_extents: if (ret) goto btree_err; - bch2_btree_iter_set_snapshot(trans, &iter, snapshot); - bch2_btree_iter_set_pos(trans, &iter, SPOS(inum.inum, pos, snapshot)); + bch2_btree_iter_set_snapshot(&iter, snapshot); + bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot)); k = insert - ? bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum.inum, 0)) - : bch2_btree_iter_peek_max(trans, &iter, POS(inum.inum, U64_MAX)); + ? 
bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0)) + : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX)); if ((ret = bkey_err(k))) goto btree_err; @@ -515,7 +508,7 @@ case LOGGED_OP_FINSERT_finish: break; } err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (warn_errors) bch_err_fn(c, ret); return ret; diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index b8ccd8c930e1..e7d53ab1cf55 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -534,7 +534,7 @@ static void get_rbio_extent(struct btree_trans *trans, break; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); } static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_read_bio *rbio, @@ -550,15 +550,14 @@ static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_re if (flags & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) return 0; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, bkey_start_pos(read_k.k), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, bkey_start_pos(read_k.k), BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; if (!bkey_and_val_eq(k, read_k)) - goto out; + return 0; struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_extent_flags)); @@ -567,17 +566,17 @@ static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_re bch2_bkey_extent_flags_set(c, new, flags|BIT_ULL(BCH_EXTENT_FLAG_poisoned)) ?: bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node) ?: bch2_trans_commit(trans, NULL, NULL, 0); + if (ret) + return ret; /* * Propagate key change back to data update path, in particular so it * knows the extent has been poisoned and it's safe to change the * checksum */ - if (u && !ret) + if (u) bch2_bkey_buf_copy(&u->k, c, new); -out: - bch2_trans_iter_exit(trans, &iter); - return ret; + return 0; } static noinline int bch2_read_retry_nodecode(struct btree_trans *trans, @@ -611,7 +610,7 @@ retry: bkey_i_to_s_c(u->k.k), 0, failed, flags, -1); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || bch2_err_matches(ret, BCH_ERR_data_read_retry)) @@ -746,56 +745,48 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, { struct bch_fs *c = rbio->c; u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset; - struct bch_extent_crc_unpacked new_crc; - struct btree_iter iter; - struct bkey_i *new; - struct bkey_s_c k; int ret = 0; if (crc_is_compressed(rbio->pick.crc)) return 0; - k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos, - BTREE_ITER_slots|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, rbio->data_btree, rbio->data_pos, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); if ((ret = bkey_err(k))) - goto out; + return ret; if (bversion_cmp(k.k->bversion, rbio->version) || !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset)) - goto out; + return 0; /* Extent was merged? 
*/ if (bkey_start_offset(k.k) < data_offset || k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size) - goto out; + return 0; + struct bch_extent_crc_unpacked new_crc; if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version, rbio->pick.crc, NULL, &new_crc, bkey_start_offset(k.k) - data_offset, k.k->size, rbio->pick.crc.csum_type)) { bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)"); - ret = 0; - goto out; + return 0; } /* * going to be temporarily appending another checksum entry: */ - new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + - sizeof(struct bch_extent_crc128)); + struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + + sizeof(struct bch_extent_crc128)); if ((ret = PTR_ERR_OR_ZERO(new))) - goto out; + return ret; bkey_reassemble(new, k); if (!bch2_bkey_narrow_crcs(new, new_crc)) - goto out; + return 0; - ret = bch2_trans_update(trans, &iter, new, - BTREE_UPDATE_internal_snapshot_node); -out: - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node); } static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) @@ -1021,13 +1012,10 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, struct bch_extent_ptr ptr) { struct bch_fs *c = trans->c; - struct btree_iter iter; CLASS(printbuf, buf)(); - int ret; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, - PTR_BUCKET_POS(ca, &ptr), - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, + PTR_BUCKET_POS(ca, &ptr), + BTREE_ITER_cached); int gen = bucket_gen_get(ca, iter.pos.offset); if (gen >= 0) { @@ -1039,7 +1027,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, prt_printf(&buf, "memory gen: %u", gen); - ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter))); + int ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); if (!ret) { prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, k); @@ -1057,8 +1045,6 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, } bch2_fs_inconsistent(c, "%s", buf.buf); - - bch2_trans_iter_exit(trans, &iter); } int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, @@ -1406,7 +1392,6 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, unsigned flags) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_buf sk; struct bkey_s_c k; enum btree_id data_btree; @@ -1415,9 +1400,9 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, EBUG_ON(rbio->data_update); bch2_bkey_buf_init(&sk); - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - POS(inum.inum, bvec_iter.bi_sector), - BTREE_ITER_slots); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, + POS(inum.inum, bvec_iter.bi_sector), + BTREE_ITER_slots); while (1) { data_btree = BTREE_ID_extents; @@ -1429,12 +1414,12 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, if (ret) goto err; - bch2_btree_iter_set_snapshot(trans, &iter, snapshot); + bch2_btree_iter_set_snapshot(&iter, snapshot); - bch2_btree_iter_set_pos(trans, &iter, + bch2_btree_iter_set_pos(&iter, POS(inum.inum, bvec_iter.bi_sector)); - k = bch2_btree_iter_peek_slot(trans, &iter); + k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) goto err; @@ -1509,7 +1494,6 @@ err: bch2_rbio_done(rbio); } - bch2_trans_iter_exit(trans, &iter); bch2_bkey_buf_exit(&sk, c); return ret; } diff --git 
a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h index 9d63d5914b20..1e1c0476bd03 100644 --- a/fs/bcachefs/io_read.h +++ b/fs/bcachefs/io_read.h @@ -108,12 +108,12 @@ static inline int bch2_read_indirect_extent(struct btree_trans *trans, return ret; if (bkey_deleted(k.k)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return bch_err_throw(c, missing_indirect_extent); } bch2_bkey_buf_reassemble(extent, c, k); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return 0; } diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index d7620138e038..1d83dcc9731e 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -89,7 +89,12 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) new = ewma_add(old, io_latency, 5); } while (!atomic64_try_cmpxchg(latency, &old, new)); - bch2_congested_acct(ca, io_latency, now, rw); + /* + * Only track read latency for congestion accounting: writes are subject + * to heavy queuing delays from page cache writeback: + */ + if (rw == READ) + bch2_congested_acct(ca, io_latency, now, rw); __bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now); } @@ -166,9 +171,9 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, *i_sectors_delta = 0; *disk_sectors_delta = 0; - bch2_trans_copy_iter(trans, &iter, extent_iter); + bch2_trans_copy_iter(&iter, extent_iter); - for_each_btree_key_max_continue_norestart(trans, iter, + for_each_btree_key_max_continue_norestart(iter, new->k.p, BTREE_ITER_slots, old, ret) { s64 sectors = min(new->k.p.offset, old.k->p.offset) - max(bkey_start_offset(&new->k), @@ -193,7 +198,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, break; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -215,13 +220,13 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, */ unsigned inode_update_flags = BTREE_UPDATE_nojournal; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, - extent_iter->pos.inode, - extent_iter->snapshot), - BTREE_ITER_intent| - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, + SPOS(0, + extent_iter->pos.inode, + extent_iter->snapshot), + BTREE_ITER_intent| + BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (unlikely(ret)) return ret; @@ -233,7 +238,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8); ret = PTR_ERR_OR_ZERO(k_mut); if (unlikely(ret)) - goto err; + return ret; bkey_reassemble(k_mut, k); @@ -241,7 +246,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, k_mut = bch2_inode_to_v3(trans, k_mut); ret = PTR_ERR_OR_ZERO(k_mut); if (unlikely(ret)) - goto err; + return ret; } struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut); @@ -286,12 +291,9 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, inode_update_flags = 0; } - ret = bch2_trans_update(trans, &iter, &inode->k_i, - BTREE_UPDATE_internal_snapshot_node| - inode_update_flags); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_trans_update(trans, &iter, &inode->k_i, + BTREE_UPDATE_internal_snapshot_node| + inode_update_flags); } int bch2_extent_update(struct btree_trans *trans, @@ -314,7 +316,7 @@ int bch2_extent_update(struct btree_trans *trans, * path already traversed at 
iter->pos because * bch2_trans_extent_update() will use it to attempt extent merging */ - ret = __bch2_btree_iter_traverse(trans, iter); + ret = __bch2_btree_iter_traverse(iter); if (ret) return ret; @@ -359,7 +361,7 @@ int bch2_extent_update(struct btree_trans *trans, if (i_sectors_delta_total) *i_sectors_delta_total += i_sectors_delta; - bch2_btree_iter_set_pos(trans, iter, next_pos); + bch2_btree_iter_set_pos(iter, next_pos); return 0; } @@ -369,7 +371,6 @@ static int bch2_write_index_default(struct bch_write_op *op) struct bkey_buf sk; struct keylist *keys = &op->insert_keys; struct bkey_i *k = bch2_keylist_front(keys); - struct btree_iter iter; subvol_inum inum = { .subvol = op->subvol, .inum = k->k.p.inode, @@ -394,15 +395,14 @@ static int bch2_write_index_default(struct bch_write_op *op) if (ret) break; - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - bkey_start_pos(&sk.k->k), - BTREE_ITER_slots|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, + bkey_start_pos(&sk.k->k), + BTREE_ITER_slots|BTREE_ITER_intent); ret = bch2_extent_update(trans, inum, &iter, sk.k, &op->res, op->new_i_size, &op->i_sectors_delta, op->flags & BCH_WRITE_check_enospc); - bch2_trans_iter_exit(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -1340,7 +1340,7 @@ retry: if (ret) break; - k = bch2_btree_iter_peek_slot(trans, &iter); + k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) break; @@ -1425,10 +1425,10 @@ retry: bch2_keylist_push(&op->insert_keys); if (op->flags & BCH_WRITE_submitted) break; - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); } out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 97760e89e5a3..07869436a964 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -1060,14 +1060,13 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou if (open && !*blocked) { __bch2_journal_block(j); + s.v = atomic64_read_acquire(&j->reservations.counter); *blocked = true; } ret = journal_state_count(s, idx & JOURNAL_STATE_BUF_MASK) > open ? 
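The bch2_latency_acct() hunk above keeps an exponentially weighted moving average of completion latency for both directions, but now feeds only read completions into the congestion tracker, since writes are queued behind page cache writeback and their latency says little about device congestion. A standalone sketch of that shape; the struct, the 4x threshold and the congested_until policy are illustrative assumptions, not bcachefs code:

#include <stdint.h>

enum { RD, WR };

struct dev_lat {
	uint64_t ewma[2];		/* smoothed completion latency per direction */
	uint64_t congested_until;	/* made-up congestion state for the example */
};

/* simple EWMA: new = old - old/2^weight + sample/2^weight */
static uint64_t ewma_add_sketch(uint64_t old, uint64_t sample, unsigned weight)
{
	return old - (old >> weight) + (sample >> weight);
}

static void latency_acct_sketch(struct dev_lat *d, uint64_t submit_ns,
				uint64_t now_ns, int rw)
{
	uint64_t latency = now_ns - submit_ns;

	d->ewma[rw] = ewma_add_sketch(d->ewma[rw], latency, 5);

	/* writes see heavy writeback queuing; only reads drive congestion */
	if (rw == RD && latency > 4 * d->ewma[RD])
		d->congested_until = now_ns + latency;
}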
ERR_PTR(-EAGAIN) : buf; - if (!ret) - smp_mb(); break; } } @@ -1297,7 +1296,7 @@ int bch2_dev_journal_bucket_delete(struct bch_dev *ca, u64 b) return -EINVAL; } - u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);; + u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL); if (!new_buckets) return bch_err_throw(c, ENOMEM_set_nr_journal_buckets); @@ -1472,6 +1471,10 @@ int bch2_fs_journal_start(struct journal *j, u64 last_seq, u64 cur_seq) last_seq = cur_seq; u64 nr = cur_seq - last_seq; + if (nr * sizeof(struct journal_entry_pin_list) > 1U << 30) { + bch_err(c, "too many ntjournal fifo (%llu open entries)", nr); + return bch_err_throw(c, ENOMEM_journal_pin_fifo); + } /* * Extra fudge factor, in case we crashed when the journal pin fifo was @@ -1484,7 +1487,7 @@ int bch2_fs_journal_start(struct journal *j, u64 last_seq, u64 cur_seq) nr = max(nr, JOURNAL_PIN); init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL); if (!j->pin.data) { - bch_err(c, "error reallocating journal fifo (%llu open entries)", nr); + bch_err(c, "error allocating journal fifo (%llu open entries)", nr); return bch_err_throw(c, ENOMEM_journal_pin_fifo); } diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h index c05aa94237f8..b46b9718d841 100644 --- a/fs/bcachefs/journal.h +++ b/fs/bcachefs/journal.h @@ -267,7 +267,7 @@ static inline union journal_res_state journal_state_buf_put(struct journal *j, u { union journal_res_state s; - s.v = atomic64_sub_return_release(((union journal_res_state) { + s.v = atomic64_sub_return(((union journal_res_state) { .buf0_count = idx == 0, .buf1_count = idx == 1, .buf2_count = idx == 2, diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 47224666d07e..093e4acad085 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -428,15 +428,22 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs bool first = true; jset_entry_for_each_key(entry, k) { - /* We may be called on entries that haven't been validated: */ - if (!k->k.u64s) - break; - if (!first) { prt_newline(out); bch2_prt_jset_entry_type(out, entry->type); prt_str(out, ": "); } + /* We may be called on entries that haven't been validated: */ + if (!k->k.u64s) { + prt_str(out, "(invalid, k->u64s 0)"); + break; + } + + if (bkey_next(k) > vstruct_last(entry)) { + prt_str(out, "(invalid, bkey overruns jset_entry)"); + break; + } + bch2_btree_id_level_to_text(out, entry->btree_id, entry->level); prt_char(out, ' '); bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c index 0367ea37e857..38cdacc6b067 100644 --- a/fs/bcachefs/logged_ops.c +++ b/fs/bcachefs/logged_ops.c @@ -81,7 +81,7 @@ static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k) k->k.p = iter.pos; ret = bch2_trans_update(trans, &iter, k, 0); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c index ee14656c3fdd..b9c0834498dd 100644 --- a/fs/bcachefs/lru.c +++ b/fs/bcachefs/lru.c @@ -9,6 +9,7 @@ #include "ec.h" #include "error.h" #include "lru.h" +#include "progress.h" #include "recovery.h" /* KEY_TYPE_lru is obsolete: */ @@ -87,10 +88,8 @@ int bch2_lru_check_set(struct btree_trans *trans, { struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); - struct btree_iter lru_iter; - struct bkey_s_c lru_k = - bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru, - lru_pos(lru_id, dev_bucket, time), 0); + CLASS(btree_iter, lru_iter)(trans, 
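The journal_io.c hunk above hardens journal_entry_btree_keys_to_text() for entries that have not been validated yet: it now prints an explicit marker and stops both on a zero-length key and on a key whose claimed size would run past the end of the entry. The same defensive walk in a self-contained form; the key layout here is a toy, only the bounds logic mirrors the change:

#include <stdint.h>
#include <stdbool.h>

struct toy_bkey {
	uint8_t u64s;	/* total key size in u64s, header included */
};

/* walk keys packed back to back in an entry without trusting their sizes */
static bool toy_entry_keys_valid(const uint64_t *entry, unsigned entry_u64s)
{
	const uint64_t *end = entry + entry_u64s;

	for (const uint64_t *k = entry; k < end; ) {
		const struct toy_bkey *key = (const struct toy_bkey *)k;

		if (!key->u64s)
			return false;		/* zero-length key: would loop forever */
		if (k + key->u64s > end)
			return false;		/* key overruns the containing entry */
		k += key->u64s;
	}
	return true;
}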
BTREE_ID_lru, lru_pos(lru_id, dev_bucket, time), 0); + struct bkey_s_c lru_k = bch2_btree_iter_peek_slot(&lru_iter); int ret = bkey_err(lru_k); if (ret) return ret; @@ -98,7 +97,7 @@ int bch2_lru_check_set(struct btree_trans *trans, if (lru_k.k->type != KEY_TYPE_set) { ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed); if (ret) - goto err; + return ret; if (fsck_err(trans, alloc_key_to_missing_lru_entry, "missing %s lru entry\n%s", @@ -106,12 +105,10 @@ int bch2_lru_check_set(struct btree_trans *trans, (bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) { ret = bch2_lru_set(trans, lru_id, dev_bucket, time); if (ret) - goto err; + return ret; } } -err: fsck_err: - bch2_trans_iter_exit(trans, &lru_iter); return ret; } @@ -170,11 +167,11 @@ static int bch2_check_lru_key(struct btree_trans *trans, struct bbpos bp = lru_pos_to_bp(lru_k); - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, bp.btree, bp.pos, 0); + CLASS(btree_iter, iter)(trans, bp.btree, bp.pos, 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; enum bch_lru_type type = lru_type(lru_k); u64 idx = bkey_lru_type_idx(c, type, k); @@ -182,7 +179,7 @@ static int bch2_check_lru_key(struct btree_trans *trans, if (lru_pos_time(lru_k.k->p) != idx) { ret = bch2_btree_write_buffer_maybe_flush(trans, lru_k, last_flushed); if (ret) - goto err; + return ret; if (fsck_err(trans, lru_entry_bad, "incorrect lru entry: lru %s time %llu\n" @@ -192,11 +189,9 @@ static int bch2_check_lru_key(struct btree_trans *trans, lru_pos_time(lru_k.k->p), (bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf), (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) - ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); + return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); } -err: fsck_err: - bch2_trans_iter_exit(trans, &iter); return ret; } @@ -207,11 +202,16 @@ int bch2_check_lrus(struct bch_fs *c) bch2_bkey_buf_init(&last_flushed); bkey_init(&last_flushed.k->k); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_lru)); + CLASS(btree_trans, trans)(c); int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_lru_key(trans, &iter, k, &last_flushed)); + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + bch2_check_lru_key(trans, &iter, k, &last_flushed); + })); bch2_bkey_buf_exit(&last_flushed, c); return ret; diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c index bd1e54e0efd5..a66d01d04e57 100644 --- a/fs/bcachefs/migrate.c +++ b/fs/bcachefs/migrate.c @@ -111,7 +111,7 @@ static int bch2_dev_btree_drop_key(struct btree_trans *trans, ret = drop_btree_ptrs(trans, &iter, b, dev_idx, flags); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -163,7 +163,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, retry: ret = 0; while (bch2_trans_begin(trans), - (b = bch2_btree_iter_peek_node(trans, &iter)) && + (b = bch2_btree_iter_peek_node(&iter)) && !(ret = PTR_ERR_OR_ZERO(b))) { bch2_progress_update_iter(trans, progress, &iter, "dropping metadata"); @@ -179,12 +179,12 @@ retry: if (ret) break; next: - bch2_btree_iter_next_node(trans, &iter); + bch2_btree_iter_next_node(&iter); } if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_iter_exit(trans, 
&iter); + bch2_trans_iter_exit(&iter); if (ret) goto err; @@ -228,7 +228,7 @@ static int data_drop_bp(struct btree_trans *trans, unsigned dev_idx, else ret = bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags); out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 84a228c42f06..8f6e1bfe3bb8 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -514,10 +514,10 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans, if (!extent_k.k->p.inode) goto out; - struct btree_iter inode_iter; - struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), BTREE_ITER_cached); + struct bkey_s_c inode_k = bch2_btree_iter_peek_slot(&inode_iter); int ret = bkey_err(inode_k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) return ret; @@ -527,8 +527,6 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans, bch2_inode_unpack(inode_k, &inode); bch2_inode_opts_get(io_opts, c, &inode); } - bch2_trans_iter_exit(trans, &inode_iter); - /* seem to be spinning here? */ out: return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k); } @@ -594,14 +592,14 @@ static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans * BTREE_ID_reflink, reflink_pos, BTREE_ITER_not_extents); - struct bkey_s_c k = bch2_btree_iter_peek(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek(iter); if (!k.k || bkey_err(k)) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return k; } if (bkey_lt(reflink_pos, bkey_start_pos(k.k))) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return bkey_s_c_null; } @@ -646,13 +644,13 @@ retry_root: BTREE_ITER_prefetch| BTREE_ITER_not_extents| BTREE_ITER_all_snapshots); - struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + struct btree *b = bch2_btree_iter_peek_node(&iter); ret = PTR_ERR_OR_ZERO(b); if (ret) goto root_err; if (b != btree_node_root(c, b)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto retry_root; } @@ -676,7 +674,7 @@ retry_root: root_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto retry_root; } @@ -696,7 +694,7 @@ root_err: bch2_trans_begin(trans); - k = bch2_btree_iter_peek(trans, &iter); + k = bch2_btree_iter_peek(&iter); if (!k.k) break; @@ -717,7 +715,7 @@ root_err: REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) { struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); - bch2_trans_iter_exit(trans, &reflink_iter); + bch2_trans_iter_exit(&reflink_iter); k = bch2_lookup_indirect_extent_for_move(trans, &reflink_iter, p); ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -781,12 +779,12 @@ next: if (ctxt->stats) atomic64_add(k.k->size, &ctxt->stats->sectors_seen); next_nondata: - if (!bch2_btree_iter_advance(trans, &iter)) + if (!bch2_btree_iter_advance(&iter)) break; } out: - bch2_trans_iter_exit(trans, &reflink_iter); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&reflink_iter); + bch2_trans_iter_exit(&iter); bch2_bkey_buf_exit(&sk, c); per_snapshot_io_opts_exit(&snapshot_io_opts); @@ -853,7 +851,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, struct bch_fs *c = trans->c; bool is_kthread = current->flags & PF_KTHREAD; struct bch_io_opts io_opts = 
bch2_opts_to_inode_opts(c->opts); - struct btree_iter iter = {}, bp_iter = {}; + struct btree_iter iter = {}; struct bkey_buf sk; struct bkey_s_c k; struct bkey_buf last_flushed; @@ -878,7 +876,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, */ bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp_start, 0); ret = bch2_btree_write_buffer_tryflush(trans); if (!bch2_err_matches(ret, EROFS)) @@ -892,7 +890,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, bch2_trans_begin(trans); - k = bch2_btree_iter_peek(trans, &bp_iter); + k = bch2_btree_iter_peek(&bp_iter); ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -936,7 +934,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, if (!bp.v->level) { ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k); if (ret) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); continue; } } @@ -949,13 +947,13 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, pred, arg, p); if (!p) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto next; } if (data_opts.scrub && !bch2_dev_idx_is_online(c, data_opts.read_dev)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); ret = bch_err_throw(c, device_offline); break; } @@ -974,7 +972,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, else ret = bch2_btree_node_scrub(trans, bp.v->btree_id, bp.v->level, k, data_opts.read_dev); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -989,14 +987,13 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, if (ctxt->stats) atomic64_add(sectors, &ctxt->stats->sectors_seen); next: - bch2_btree_iter_advance(trans, &bp_iter); + bch2_btree_iter_advance(&bp_iter); } while (check_mismatch_done < bucket_end) bch2_check_bucket_backpointer_mismatch(trans, ca, check_mismatch_done++, copygc, &last_flushed); err: - bch2_trans_iter_exit(trans, &bp_iter); bch2_bkey_buf_exit(&sk, c); bch2_bkey_buf_exit(&last_flushed, c); return ret; @@ -1112,7 +1109,7 @@ static int bch2_move_btree(struct bch_fs *c, retry: ret = 0; while (bch2_trans_begin(trans), - (b = bch2_btree_iter_peek_node(trans, &iter)) && + (b = bch2_btree_iter_peek_node(&iter)) && !(ret = PTR_ERR_OR_ZERO(b))) { if (kthread && kthread_should_stop()) break; @@ -1132,12 +1129,12 @@ retry: if (ret) break; next: - bch2_btree_iter_next_node(trans, &iter); + bch2_btree_iter_next_node(&iter); } if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (kthread && kthread_should_stop()) break; diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index 9192b1fc3594..b0cbe3c1aab6 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -64,23 +64,22 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset)) return 0; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, - b->k.bucket, BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, b->k.bucket, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; CLASS(bch2_dev_bucket_tryget, ca)(c, k.k->p); if (!ca) - goto out; + 
return 0; if (bch2_bucket_bitmap_test(&ca->bucket_backpointer_mismatch, b->k.bucket.offset)) - goto out; + return 0; if (ca->mi.state != BCH_MEMBER_STATE_rw || !bch2_dev_is_online(ca)) - goto out; + return 0; struct bch_alloc_v4 _a; const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a); @@ -88,10 +87,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, b->sectors = bch2_bucket_sectors_dirty(*a); u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); - ret = lru_idx && lru_idx <= time; -out: - bch2_trans_iter_exit(trans, &iter); - return ret; + return lru_idx && lru_idx <= time; } static void move_bucket_free(struct buckets_in_flight *list, diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c index 8fa108880f58..cfed2041c2c3 100644 --- a/fs/bcachefs/namei.c +++ b/fs/bcachefs/namei.c @@ -36,8 +36,8 @@ int bch2_create_trans(struct btree_trans *trans, unsigned flags) { struct bch_fs *c = trans->c; - struct btree_iter dir_iter = {}; - struct btree_iter inode_iter = {}; + struct btree_iter dir_iter = { NULL }; + struct btree_iter inode_iter = { NULL }; subvol_inum new_inum = dir; u64 now = bch2_current_time(c); u64 cpu = raw_smp_processor_id(); @@ -133,8 +133,8 @@ int bch2_create_trans(struct btree_trans *trans, if (ret) goto err; - bch2_btree_iter_set_snapshot(trans, &dir_iter, dir_snapshot); - ret = bch2_btree_iter_traverse(trans, &dir_iter); + bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot); + ret = bch2_btree_iter_traverse(&dir_iter); if (ret) goto err; } @@ -192,13 +192,13 @@ int bch2_create_trans(struct btree_trans *trans, new_inode->bi_depth = dir_u->bi_depth + 1; inode_iter.flags &= ~BTREE_ITER_all_snapshots; - bch2_btree_iter_set_snapshot(trans, &inode_iter, snapshot); + bch2_btree_iter_set_snapshot(&inode_iter, snapshot); - ret = bch2_btree_iter_traverse(trans, &inode_iter) ?: + ret = bch2_btree_iter_traverse(&inode_iter) ?: bch2_inode_write(trans, &inode_iter, new_inode); err: - bch2_trans_iter_exit(trans, &inode_iter); - bch2_trans_iter_exit(trans, &dir_iter); + bch2_trans_iter_exit(&inode_iter); + bch2_trans_iter_exit(&dir_iter); return ret; } @@ -208,8 +208,8 @@ int bch2_link_trans(struct btree_trans *trans, const struct qstr *name) { struct bch_fs *c = trans->c; - struct btree_iter dir_iter = {}; - struct btree_iter inode_iter = {}; + struct btree_iter dir_iter = { NULL }; + struct btree_iter inode_iter = { NULL }; struct bch_hash_info dir_hash; u64 now = bch2_current_time(c); u64 dir_offset = 0; @@ -254,8 +254,8 @@ int bch2_link_trans(struct btree_trans *trans, ret = bch2_inode_write(trans, &dir_iter, dir_u) ?: bch2_inode_write(trans, &inode_iter, inode_u); err: - bch2_trans_iter_exit(trans, &dir_iter); - bch2_trans_iter_exit(trans, &inode_iter); + bch2_trans_iter_exit(&dir_iter); + bch2_trans_iter_exit(&inode_iter); return ret; } @@ -267,9 +267,9 @@ int bch2_unlink_trans(struct btree_trans *trans, bool deleting_subvol) { struct bch_fs *c = trans->c; - struct btree_iter dir_iter = {}; - struct btree_iter dirent_iter = {}; - struct btree_iter inode_iter = {}; + struct btree_iter dir_iter = { NULL }; + struct btree_iter dirent_iter = { NULL }; + struct btree_iter inode_iter = { NULL }; struct bch_hash_info dir_hash; subvol_inum inum; u64 now = bch2_current_time(c); @@ -315,7 +315,7 @@ int bch2_unlink_trans(struct btree_trans *trans, if (ret) goto err; - k = bch2_btree_iter_peek_slot(trans, &dirent_iter); + k = bch2_btree_iter_peek_slot(&dirent_iter); ret = bkey_err(k); if (ret) goto err; @@ -324,8 +324,8 @@ int bch2_unlink_trans(struct btree_trans *trans, * If 
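Several functions in this range, among them bch2_lru_check_set(), bch2_check_lru_key(), bch2_move_get_io_opts_one() and bch2_bucket_is_movable(), trade goto-based cleanup for CLASS(btree_iter, ...) declarations, so the iterator is torn down when it goes out of scope and every early return drops its explicit bch2_trans_iter_exit(). A minimal standalone illustration of scope-based cleanup using the compiler's cleanup attribute; the kernel's CLASS()/DEFINE_CLASS() machinery is more elaborate, this only shows the core idea:

#include <stdio.h>

struct toy_iter { int id; };

static void toy_iter_exit(struct toy_iter *it)
{
	printf("exit iter %d\n", it->id);
}

/* declare an iterator that is released automatically at end of scope */
#define SCOPED_ITER(name, _id) \
	struct toy_iter name __attribute__((cleanup(toy_iter_exit))) = { .id = (_id) }

static int lookup(int key)
{
	SCOPED_ITER(iter, key);

	if (key < 0)
		return -1;		/* toy_iter_exit() still runs here */

	return iter.id * 2;		/* ...and here, with no goto/out label needed */
}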
we're deleting a subvolume, we need to really delete the * dirent, not just emit a whiteout in the current snapshot: */ - bch2_btree_iter_set_snapshot(trans, &dirent_iter, k.k->p.snapshot); - ret = bch2_btree_iter_traverse(trans, &dirent_iter); + bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot); + ret = bch2_btree_iter_traverse(&dirent_iter); if (ret) goto err; } else { @@ -347,9 +347,9 @@ int bch2_unlink_trans(struct btree_trans *trans, bch2_inode_write(trans, &dir_iter, dir_u) ?: bch2_inode_write(trans, &inode_iter, inode_u); err: - bch2_trans_iter_exit(trans, &inode_iter); - bch2_trans_iter_exit(trans, &dirent_iter); - bch2_trans_iter_exit(trans, &dir_iter); + bch2_trans_iter_exit(&inode_iter); + bch2_trans_iter_exit(&dirent_iter); + bch2_trans_iter_exit(&dir_iter); return ret; } @@ -393,7 +393,7 @@ static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_p return ret; s->v.fs_path_parent = cpu_to_le32(new_parent); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return 0; } @@ -407,10 +407,10 @@ int bch2_rename_trans(struct btree_trans *trans, enum bch_rename_mode mode) { struct bch_fs *c = trans->c; - struct btree_iter src_dir_iter = {}; - struct btree_iter dst_dir_iter = {}; - struct btree_iter src_inode_iter = {}; - struct btree_iter dst_inode_iter = {}; + struct btree_iter src_dir_iter = { NULL }; + struct btree_iter dst_dir_iter = { NULL }; + struct btree_iter src_inode_iter = { NULL }; + struct btree_iter dst_inode_iter = { NULL }; struct bch_hash_info src_hash, dst_hash; subvol_inum src_inum, dst_inum; u64 src_offset, dst_offset; @@ -582,15 +582,31 @@ int bch2_rename_trans(struct btree_trans *trans, ? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u) : 0); err: - bch2_trans_iter_exit(trans, &dst_inode_iter); - bch2_trans_iter_exit(trans, &src_inode_iter); - bch2_trans_iter_exit(trans, &dst_dir_iter); - bch2_trans_iter_exit(trans, &src_dir_iter); + bch2_trans_iter_exit(&dst_inode_iter); + bch2_trans_iter_exit(&src_inode_iter); + bch2_trans_iter_exit(&dst_dir_iter); + bch2_trans_iter_exit(&src_dir_iter); return ret; } /* inum_to_path */ +static inline void reverse_bytes(void *b, size_t n) +{ + char *e = b + n, *s = b; + + while (s < e) { + --e; + swap(*s, *e); + s++; + } +} + +static inline void printbuf_reverse_from(struct printbuf *out, unsigned pos) +{ + reverse_bytes(out->buf + pos, out->pos - pos); +} + static inline void prt_bytes_reversed(struct printbuf *out, const void *b, unsigned n) { bch2_printbuf_make_room(out, n); @@ -610,15 +626,17 @@ static inline void prt_str_reversed(struct printbuf *out, const char *s) prt_bytes_reversed(out, s, strlen(s)); } -static inline void reverse_bytes(void *b, size_t n) +__printf(2, 3) +static inline void prt_printf_reversed(struct printbuf *out, const char *fmt, ...) 
{ - char *e = b + n, *s = b; + unsigned orig_pos = out->pos; - while (s < e) { - --e; - swap(*s, *e); - s++; - } + va_list args; + va_start(args, fmt); + prt_vprintf(out, fmt, args); + va_end(args); + + printbuf_reverse_from(out, orig_pos); } static int __bch2_inum_to_path(struct btree_trans *trans, @@ -639,7 +657,7 @@ static int __bch2_inum_to_path(struct btree_trans *trans, subvol_inum n = (subvol_inum) { subvol ?: snapshot, inum }; if (darray_find_p(inums, i, i->subvol == n.subvol && i->inum == n.inum)) { - prt_str_reversed(path, "(loop)"); + prt_printf_reversed(path, "(loop at %llu:%u)", inum, snapshot); break; } @@ -683,27 +701,27 @@ static int __bch2_inum_to_path(struct btree_trans *trans, prt_char(path, '/'); - bch2_trans_iter_exit(trans, &d_iter); + bch2_trans_iter_exit(&d_iter); } if (orig_pos == path->pos) prt_char(path, '/'); out: + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + goto err; + ret = path->allocation_failure ? -ENOMEM : 0; if (ret) goto err; - reverse_bytes(path->buf + orig_pos, path->pos - orig_pos); + printbuf_reverse_from(path, orig_pos); darray_exit(&inums); return 0; err: darray_exit(&inums); return ret; disconnected: - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - goto err; - - prt_str_reversed(path, "(disconnected)"); + prt_printf_reversed(path, "(disconnected at %llu.%u)", inum, snapshot); goto out; } @@ -836,7 +854,7 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans, out: err: fsck_err: - bch2_trans_iter_exit(trans, &bp_iter); + bch2_trans_iter_exit(&bp_iter); bch_err_fn(c, ret); return ret; } @@ -913,14 +931,14 @@ static int bch2_propagate_has_case_insensitive(struct btree_trans *trans, subvol if (ret) break; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (subvol_inum_eq(inum, BCACHEFS_ROOT_SUBVOL_INUM)) break; inum = parent_inum(inum, &inode); } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/progress.c b/fs/bcachefs/progress.c index 42353067ba28..792fc6fef270 100644 --- a/fs/bcachefs/progress.c +++ b/fs/bcachefs/progress.c @@ -52,7 +52,8 @@ void bch2_progress_update_iter(struct btree_trans *trans, : 0; prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ", - msg, percent, s->nodes_seen, s->nodes_total); + strip_bch2(msg), + percent, s->nodes_seen, s->nodes_total); bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos)); bch_info(c, "%s", buf.buf); diff --git a/fs/bcachefs/progress.h b/fs/bcachefs/progress.h index 23fb1811f943..972a73087ffe 100644 --- a/fs/bcachefs/progress.h +++ b/fs/bcachefs/progress.h @@ -26,4 +26,7 @@ void bch2_progress_update_iter(struct btree_trans *, struct btree_iter *, const char *); +#define progress_update_iter(trans, p, iter) \ + bch2_progress_update_iter(trans, p, iter, __func__) + #endif /* _BCACHEFS_PROGRESS_H */ diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c index 5f1eff591b29..eaa43ad9baa6 100644 --- a/fs/bcachefs/quota.c +++ b/fs/bcachefs/quota.c @@ -512,7 +512,7 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans, bch2_quota_acct(c, bch_qid(&u), Q_INO, 1, KEY_TYPE_QUOTA_NOCHECK); advance: - bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos)); + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); return 0; } @@ -798,10 +798,9 @@ static int bch2_set_quota_trans(struct btree_trans *trans, struct bkey_i_quota *new_quota, struct qc_dqblk *qdq) { - struct btree_iter iter; - struct bkey_s_c k = - bch2_bkey_get_iter(trans, &iter, 
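reverse_bytes() moves up here and gains two companions, printbuf_reverse_from() and prt_printf_reversed(), because __bch2_inum_to_path() discovers path components child-to-root: each component, and now each formatted marker such as "(loop at %llu:%u)", is appended reversed and the whole range is flipped once at the end. The trick in isolation, on a plain char buffer instead of a struct printbuf; the names and fixed-size buffer handling are assumptions of the sketch:

#include <string.h>

static void reverse_range(char *b, size_t n)
{
	for (char *s = b, *e = b + n; s < e; s++) {
		char tmp = *s;
		*s = *--e;
		*e = tmp;
	}
}

/* components arrive child -> root; emit each one reversed, flip once at the end
 * (assumes outsz > 0) */
static size_t build_path(char *out, size_t outsz,
			 const char *const *comp, unsigned nr)
{
	size_t pos = 0;

	for (unsigned i = 0; i < nr; i++) {
		size_t len = strlen(comp[i]);

		for (size_t j = 0; j < len && pos + 1 < outsz; j++)
			out[pos++] = comp[i][len - 1 - j];
		if (pos + 1 < outsz)
			out[pos++] = '/';
	}

	reverse_range(out, pos);
	out[pos] = '\0';
	return pos;
}

/* build_path(buf, sizeof(buf), (const char *[]){ "file", "dir" }, 2) yields "/dir/file" */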
BTREE_ID_quotas, new_quota->k.p, - BTREE_ITER_slots|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_quotas, new_quota->k.p, + BTREE_ITER_slots|BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (unlikely(ret)) return ret; @@ -819,9 +818,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans, if (qdq->d_fieldmask & QC_INO_HARD) new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit); - ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0); - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_trans_update(trans, &iter, &new_quota->k_i, 0); } static int bch2_set_quota(struct super_block *sb, struct kqid qid, diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index 32fa7cf90b63..c0c5fe961a83 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -15,6 +15,7 @@ #include "inode.h" #include "io_write.h" #include "move.h" +#include "progress.h" #include "rebalance.h" #include "subvolume.h" #include "super-io.h" @@ -234,14 +235,13 @@ static const char * const bch2_rebalance_state_strs[] = { int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work, - SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), - BTREE_ITER_intent); - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter); + CLASS(btree_iter, iter)(trans, BTREE_ID_rebalance_work, + SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), + BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; u64 v = k.k->type == KEY_TYPE_cookie ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie) @@ -250,16 +250,13 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) struct bkey_i_cookie *cookie = bch2_trans_kmalloc(trans, sizeof(*cookie)); ret = PTR_ERR_OR_ZERO(cookie); if (ret) - goto err; + return ret; bkey_cookie_init(&cookie->k_i); cookie->k.p = iter.pos; cookie->v.cookie = cpu_to_le64(v + 1); - ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + return bch2_trans_update(trans, &iter, &cookie->k_i, 0); } int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum) @@ -278,31 +275,28 @@ int bch2_set_fs_needs_rebalance(struct bch_fs *c) static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work, - SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), - BTREE_ITER_intent); - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter); + CLASS(btree_iter, iter)(trans, BTREE_ID_rebalance_work, + SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), + BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; u64 v = k.k->type == KEY_TYPE_cookie ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie) : 0; - if (v == cookie) - ret = bch2_btree_delete_at(trans, &iter, 0); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + return v == cookie + ? bch2_btree_delete_at(trans, &iter, 0) + : 0; } static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans, struct btree_iter *work_iter) { return !kthread_should_stop() - ? bch2_btree_iter_peek(trans, work_iter) + ? 
bch2_btree_iter_peek(work_iter) : bkey_s_c_null; } @@ -331,12 +325,12 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, { struct bch_fs *c = trans->c; - bch2_trans_iter_exit(trans, extent_iter); + bch2_trans_iter_exit(extent_iter); bch2_trans_iter_init(trans, extent_iter, work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink, work_pos, BTREE_ITER_all_snapshots); - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, extent_iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter); if (bkey_err(k)) return k; @@ -530,7 +524,7 @@ static int do_rebalance(struct moving_context *ctxt) struct btree_trans *trans = ctxt->trans; struct bch_fs *c = trans->c; struct bch_fs_rebalance *r = &c->rebalance; - struct btree_iter rebalance_work_iter, extent_iter = {}; + struct btree_iter extent_iter = { NULL }; struct bkey_s_c k; u32 kick = r->kick; int ret = 0; @@ -540,9 +534,9 @@ static int do_rebalance(struct moving_context *ctxt) bch2_move_stats_init(&r->work_stats, "rebalance_work"); bch2_move_stats_init(&r->scan_stats, "rebalance_scan"); - bch2_trans_iter_init(trans, &rebalance_work_iter, - BTREE_ID_rebalance_work, POS_MIN, - BTREE_ITER_all_snapshots); + CLASS(btree_iter, rebalance_work_iter)(trans, + BTREE_ID_rebalance_work, POS_MIN, + BTREE_ITER_all_snapshots); while (!bch2_move_ratelimit(ctxt)) { if (!bch2_rebalance_enabled(c)) { @@ -572,11 +566,10 @@ static int do_rebalance(struct moving_context *ctxt) if (ret) break; - bch2_btree_iter_advance(trans, &rebalance_work_iter); + bch2_btree_iter_advance(&rebalance_work_iter); } - bch2_trans_iter_exit(trans, &extent_iter); - bch2_trans_iter_exit(trans, &rebalance_work_iter); + bch2_trans_iter_exit(&extent_iter); bch2_move_stats_exit(&r->scan_stats, c); if (!ret && @@ -769,8 +762,8 @@ static int check_rebalance_work_one(struct btree_trans *trans, struct bkey_s_c extent_k, rebalance_k; CLASS(printbuf, buf)(); - int ret = bkey_err(extent_k = bch2_btree_iter_peek(trans, extent_iter)) ?: - bkey_err(rebalance_k = bch2_btree_iter_peek(trans, rebalance_iter)); + int ret = bkey_err(extent_k = bch2_btree_iter_peek(extent_iter)) ?: + bkey_err(rebalance_k = bch2_btree_iter_peek(rebalance_iter)); if (ret) return ret; @@ -778,7 +771,7 @@ static int check_rebalance_work_one(struct btree_trans *trans, extent_iter->btree_id == BTREE_ID_reflink && (!rebalance_k.k || rebalance_k.k->p.inode >= BCACHEFS_ROOT_INO)) { - bch2_trans_iter_exit(trans, extent_iter); + bch2_trans_iter_exit(extent_iter); bch2_trans_iter_init(trans, extent_iter, BTREE_ID_extents, POS_MIN, BTREE_ITER_prefetch| @@ -834,9 +827,9 @@ static int check_rebalance_work_one(struct btree_trans *trans, } if (cmp <= 0) - bch2_btree_iter_advance(trans, extent_iter); + bch2_btree_iter_advance(extent_iter); if (cmp >= 0) - bch2_btree_iter_advance(trans, rebalance_iter); + bch2_btree_iter_advance(rebalance_iter); fsck_err: return ret; } @@ -844,21 +837,22 @@ fsck_err: int bch2_check_rebalance_work(struct bch_fs *c) { CLASS(btree_trans, trans)(c); - struct btree_iter rebalance_iter, extent_iter; - int ret = 0; - - bch2_trans_iter_init(trans, &extent_iter, - BTREE_ID_reflink, POS_MIN, - BTREE_ITER_prefetch); - bch2_trans_iter_init(trans, &rebalance_iter, - BTREE_ID_rebalance_work, POS_MIN, - BTREE_ITER_prefetch); + CLASS(btree_iter, extent_iter)(trans, BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch); + CLASS(btree_iter, rebalance_iter)(trans, BTREE_ID_rebalance_work, POS_MIN, + BTREE_ITER_prefetch); struct bkey_buf last_flushed; bch2_bkey_buf_init(&last_flushed); 
bkey_init(&last_flushed.k->k); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_rebalance_work)); + + int ret = 0; while (!ret) { + progress_update_iter(trans, &progress, &rebalance_iter); + bch2_trans_begin(trans); ret = check_rebalance_work_one(trans, &extent_iter, &rebalance_iter, &last_flushed); @@ -868,7 +862,5 @@ int bch2_check_rebalance_work(struct bch_fs *c) } bch2_bkey_buf_exit(&last_flushed, c); - bch2_trans_iter_exit(trans, &extent_iter); - bch2_trans_iter_exit(trans, &rebalance_iter); return ret < 0 ? ret : 0; } diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 58c159e5f10d..c57ff235a97a 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -67,13 +67,16 @@ int bch2_btree_lost_data(struct bch_fs *c, ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_backpointers_to_extents, 0, &write_sb) ?: ret; #endif + write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent); + write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_backpointer_to_missing_ptr, ext->errors_silent); + write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); + write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); + switch (btree) { case BTREE_ID_alloc: ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_alloc_info, 0, &write_sb) ?: ret; - write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); - write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent); write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent); write_sb |= !__test_and_set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); @@ -203,7 +206,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans, bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, BTREE_MAX_DEPTH, k->level, BTREE_ITER_intent); - int ret = bch2_btree_iter_traverse(trans, &iter); + int ret = bch2_btree_iter_traverse(&iter); if (ret) goto out; @@ -231,7 +234,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, new, BTREE_TRIGGER_norun); out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -266,7 +269,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans, bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, BTREE_MAX_DEPTH, k->level, iter_flags); - ret = bch2_btree_iter_traverse(trans, &iter); + ret = bch2_btree_iter_traverse(&iter); if (ret) goto out; @@ -294,10 +297,10 @@ static int bch2_journal_replay_key(struct btree_trans *trans, goto out; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, BTREE_MAX_DEPTH, 0, iter_flags); - ret = bch2_btree_iter_traverse(trans, &iter) ?: + ret = bch2_btree_iter_traverse(&iter) ?: bch2_btree_increase_depth(trans, iter.path, 0) ?: -BCH_ERR_transaction_restart_nested; goto out; @@ -319,7 +322,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, k->k, update_flags); out: - bch2_trans_iter_exit(trans, 
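bch2_check_lrus(), bch2_check_rebalance_work() and bch2_reconstruct_snapshots() now report progress through the new progress_update_iter() wrapper from progress.h, which simply forwards __func__ so each pass labels its own progress lines without repeating its name. The same pattern reduced to its essentials; the names below are illustrative, not the bcachefs API:

#include <stdio.h>

static void report_progress(const char *caller, unsigned long done, unsigned long total)
{
	fprintf(stderr, "%s: %lu%%, done %lu/%lu\n",
		caller, total ? done * 100 / total : 0UL, done, total);
}

/* callers write progress_update(done, total) and their function name comes along */
#define progress_update(done, total) report_progress(__func__, (done), (total))

static void check_everything(void)
{
	for (unsigned long i = 0; i <= 1000; i += 250)
		progress_update(i, 1000);	/* prints "check_everything: ..." */
}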
&iter); + bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c index b2cdd111fd0e..bd442652d0f5 100644 --- a/fs/bcachefs/recovery_passes.c +++ b/fs/bcachefs/recovery_passes.c @@ -639,6 +639,8 @@ void bch2_recovery_pass_status_to_text(struct printbuf *out, struct bch_fs *c) prt_printf(out, "Current pass:\t%s\n", bch2_recovery_passes[r->curr_pass]); prt_passes(out, "Current passes", r->passes_to_run); } + + prt_printf(out, "Pass done:\t%s\n", bch2_recovery_passes[r->pass_done]); } void bch2_fs_recovery_passes_init(struct bch_fs *c) diff --git a/fs/bcachefs/recovery_passes.h b/fs/bcachefs/recovery_passes.h index 4f2c2f811d5e..95e3612bb96c 100644 --- a/fs/bcachefs/recovery_passes.h +++ b/fs/bcachefs/recovery_passes.h @@ -26,6 +26,12 @@ static inline bool go_rw_in_recovery(struct bch_fs *c) (c->opts.fsck && !(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)))); } +static inline bool recovery_pass_will_run(struct bch_fs *c, enum bch_recovery_pass pass) +{ + return unlikely(test_bit(BCH_FS_in_recovery, &c->flags) && + c->recovery.passes_to_run & BIT_ULL(pass)); +} + int bch2_run_print_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass); int __bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *, diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c index 60abd89d7c9f..c083deb83ff7 100644 --- a/fs/bcachefs/reflink.c +++ b/fs/bcachefs/reflink.c @@ -277,13 +277,13 @@ struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *trans, int ret = bch2_indirect_extent_missing_error(trans, p, reflink_offset, missing_end, should_commit); if (ret) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return bkey_s_c_err(ret); } } else if (unlikely(REFLINK_P_ERROR(p.v))) { int ret = bch2_indirect_extent_not_missing(trans, p, should_commit); if (ret) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return bkey_s_c_err(ret); } } @@ -357,7 +357,7 @@ next: *idx = k.k->p.offset; err: fsck_err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -497,13 +497,12 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (orig->k.type == KEY_TYPE_inline_data) bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data); - struct btree_iter reflink_iter; - bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX, - BTREE_ITER_intent); - struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, &reflink_iter); + CLASS(btree_iter, reflink_iter)(trans, BTREE_ID_reflink, POS_MAX, + BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_prev(&reflink_iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; /* * XXX: we're assuming that 56 bits will be enough for the life of the @@ -516,7 +515,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, struct bkey_i *r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); ret = PTR_ERR_OR_ZERO(r_v); if (ret) - goto err; + return ret; bkey_init(&r_v->k); r_v->k.type = bkey_type_to_indirect(&orig->k); @@ -532,7 +531,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, ret = bch2_trans_update(trans, &reflink_iter, r_v, 0); if (ret) - goto err; + return ret; /* * orig is in a bkey_buf which statically allocates 5 64s for the val, @@ -555,21 +554,16 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (reflink_p_may_update_opts_field) SET_REFLINK_P_MAY_UPDATE_OPTIONS(&r_p->v, true); - ret = bch2_trans_update(trans, 
extent_iter, &r_p->k_i, - BTREE_UPDATE_internal_snapshot_node); -err: - bch2_trans_iter_exit(trans, &reflink_iter); - - return ret; + return bch2_trans_update(trans, extent_iter, &r_p->k_i, + BTREE_UPDATE_internal_snapshot_node); } -static struct bkey_s_c get_next_src(struct btree_trans *trans, - struct btree_iter *iter, struct bpos end) +static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end) { struct bkey_s_c k; int ret; - for_each_btree_key_max_continue_norestart(trans, *iter, end, 0, k, ret) { + for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) { if (bkey_extent_is_unwritten(k)) continue; @@ -578,7 +572,7 @@ static struct bkey_s_c get_next_src(struct btree_trans *trans, } if (bkey_ge(iter->pos, end)) - bch2_btree_iter_set_pos(trans, iter, end); + bch2_btree_iter_set_pos(iter, end); return ret ? bkey_s_c_err(ret) : bkey_s_c_null; } @@ -641,27 +635,27 @@ s64 bch2_remap_range(struct bch_fs *c, if (ret) continue; - bch2_btree_iter_set_snapshot(trans, &src_iter, src_snapshot); + bch2_btree_iter_set_snapshot(&src_iter, src_snapshot); ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol, &dst_snapshot); if (ret) continue; - bch2_btree_iter_set_snapshot(trans, &dst_iter, dst_snapshot); + bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot); if (dst_inum.inum < src_inum.inum) { /* Avoid some lock cycle transaction restarts */ - ret = bch2_btree_iter_traverse(trans, &dst_iter); + ret = bch2_btree_iter_traverse(&dst_iter); if (ret) continue; } dst_done = dst_iter.pos.offset - dst_start.offset; src_want = POS(src_start.inode, src_start.offset + dst_done); - bch2_btree_iter_set_pos(trans, &src_iter, src_want); + bch2_btree_iter_set_pos(&src_iter, src_want); - src_k = get_next_src(trans, &src_iter, src_end); + src_k = get_next_src(&src_iter, src_end); ret = bkey_err(src_k); if (ret) continue; @@ -722,8 +716,8 @@ s64 bch2_remap_range(struct bch_fs *c, true); bch2_disk_reservation_put(c, &disk_res); } - bch2_trans_iter_exit(trans, &dst_iter); - bch2_trans_iter_exit(trans, &src_iter); + bch2_trans_iter_exit(&dst_iter); + bch2_trans_iter_exit(&src_iter); BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end)); BUG_ON(bkey_gt(dst_iter.pos, dst_end)); @@ -733,7 +727,7 @@ s64 bch2_remap_range(struct bch_fs *c, do { struct bch_inode_unpacked inode_u; - struct btree_iter inode_iter = {}; + struct btree_iter inode_iter = { NULL }; bch2_trans_begin(trans); @@ -748,7 +742,7 @@ s64 bch2_remap_range(struct bch_fs *c, BCH_TRANS_COMMIT_no_enospc); } - bch2_trans_iter_exit(trans, &inode_iter); + bch2_trans_iter_exit(&inode_iter); } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart)); err: bch2_bkey_buf_exit(&new_src, c); diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h index fb72ad730518..b2b892687cdd 100644 --- a/fs/bcachefs/sb-members_format.h +++ b/fs/bcachefs/sb-members_format.h @@ -17,7 +17,7 @@ UUID_INIT(0xffffffff, 0xffff, 0xffff, \ 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef) -#define BCH_MIN_NR_NBUCKETS (1 << 6) +#define BCH_MIN_NR_NBUCKETS (1 << 9) #define BCH_IOPS_MEASUREMENTS() \ x(seqread, 0) \ diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c index 7a801513b134..fb37b15054c3 100644 --- a/fs/bcachefs/snapshot.c +++ b/fs/bcachefs/snapshot.c @@ -11,6 +11,7 @@ #include "errcode.h" #include "error.h" #include "fs.h" +#include "progress.h" #include "recovery_passes.h" #include "snapshot.h" @@ -73,7 +74,7 @@ __bch2_snapshot_tree_create(struct btree_trans *trans) s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree); ret = 
PTR_ERR_OR_ZERO(s_t); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret ? ERR_PTR(ret) : s_t; } @@ -142,7 +143,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor) guard(rcu)(); struct snapshot_table *t = rcu_dereference(c->snapshots); - if (unlikely(c->recovery.pass_done < BCH_RECOVERY_PASS_check_snapshots)) + if (unlikely(recovery_pass_will_run(c, BCH_RECOVERY_PASS_check_snapshots))) return __bch2_snapshot_is_ancestor_early(t, id, ancestor); if (likely(ancestor >= IS_ANCESTOR_BITMAP)) @@ -364,31 +365,32 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id, /* fsck: */ -static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child) +static u32 bch2_snapshot_child(struct snapshot_table *t, + u32 id, unsigned child) { - return snapshot_t(c, id)->children[child]; + return __snapshot_t(t, id)->children[child]; } -static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id) +static u32 bch2_snapshot_left_child(struct snapshot_table *t, u32 id) { - return bch2_snapshot_child(c, id, 0); + return bch2_snapshot_child(t, id, 0); } -static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id) +static u32 bch2_snapshot_right_child(struct snapshot_table *t, u32 id) { - return bch2_snapshot_child(c, id, 1); + return bch2_snapshot_child(t, id, 1); } -static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id) +static u32 bch2_snapshot_tree_next(struct snapshot_table *t, u32 id) { u32 n, parent; - n = bch2_snapshot_left_child(c, id); + n = bch2_snapshot_left_child(t, id); if (n) return n; - while ((parent = bch2_snapshot_parent(c, id))) { - n = bch2_snapshot_right_child(c, parent); + while ((parent = __bch2_snapshot_parent(t, id))) { + n = bch2_snapshot_right_child(t, parent); if (n && n != id) return n; id = parent; @@ -401,17 +403,18 @@ u32 bch2_snapshot_oldest_subvol(struct bch_fs *c, u32 snapshot_root, snapshot_id_list *skip) { guard(rcu)(); + struct snapshot_table *t = rcu_dereference(c->snapshots); u32 id, subvol = 0, s; retry: id = snapshot_root; - while (id && bch2_snapshot_exists(c, id)) { + while (id && __bch2_snapshot_exists(t, id)) { if (!(skip && snapshot_list_has_id(skip, id))) { - s = snapshot_t(c, id)->subvol; + s = __snapshot_t(t, id)->subvol; if (s && (!subvol || s < subvol)) subvol = s; } - id = bch2_snapshot_tree_next(c, id); + id = bch2_snapshot_tree_next(t, id); if (id == snapshot_root) break; } @@ -447,7 +450,7 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans, break; } } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (!ret && !found) { struct bkey_i_subvolume *u; @@ -560,7 +563,7 @@ static int check_snapshot_tree(struct btree_trans *trans, out: err: fsck_err: - bch2_trans_iter_exit(trans, &snapshot_iter); + bch2_trans_iter_exit(&snapshot_iter); return ret; } @@ -682,7 +685,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans, *s = u->v; } err: - bch2_trans_iter_exit(trans, &root_iter); + bch2_trans_iter_exit(&root_iter); return ret; } @@ -865,7 +868,7 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) break; } } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -895,7 +898,7 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) break; } } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return bch2_snapshot_table_make_room(c, id) ?: bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0); @@ -973,12 +976,16 @@ int 
bch2_reconstruct_snapshots(struct bch_fs *c) struct snapshot_tree_reconstruct r = {}; int ret = 0; + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, btree_has_snapshots_mask); + for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) { if (btree_type_has_snapshots(btree)) { r.btree = btree; ret = for_each_btree_key(trans, iter, btree, POS_MIN, BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({ + progress_update_iter(trans, &progress, &iter); get_snapshot_trees(c, &r, k.k->p); })); if (ret) @@ -1076,7 +1083,6 @@ int __bch2_get_snapshot_overwrites(struct btree_trans *trans, snapshot_id_list *s) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -1093,7 +1099,6 @@ int __bch2_get_snapshot_overwrites(struct btree_trans *trans, if (ret) break; } - bch2_trans_iter_exit(trans, &iter); if (ret) darray_exit(s); @@ -1125,7 +1130,7 @@ int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id) SET_BCH_SNAPSHOT_SUBVOL(&s->v, false); s->v.subvol = 0; err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1250,10 +1255,10 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) set_bkey_val_u64s(&s->k, 0); } err: - bch2_trans_iter_exit(trans, &tree_iter); - bch2_trans_iter_exit(trans, &p_iter); - bch2_trans_iter_exit(trans, &c_iter); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&tree_iter); + bch2_trans_iter_exit(&p_iter); + bch2_trans_iter_exit(&c_iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1263,35 +1268,30 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, unsigned nr_snapids) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_i_snapshot *n; - struct bkey_s_c k; - unsigned i, j; u32 depth = bch2_snapshot_depth(c, parent); - int ret; - bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, - POS_MIN, BTREE_ITER_intent); - k = bch2_btree_iter_peek(trans, &iter); - ret = bkey_err(k); + CLASS(btree_iter, iter)(trans, BTREE_ID_snapshots, + POS_MIN, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek(&iter); + int ret = bkey_err(k); if (ret) - goto err; + return ret; - for (i = 0; i < nr_snapids; i++) { - k = bch2_btree_iter_prev_slot(trans, &iter); + for (unsigned i = 0; i < nr_snapids; i++) { + k = bch2_btree_iter_prev_slot(&iter); ret = bkey_err(k); if (ret) - goto err; + return ret; if (!k.k || !k.k->p.offset) { - ret = bch_err_throw(c, ENOSPC_snapshot_create); - goto err; + return bch_err_throw(c, ENOSPC_snapshot_create); } n = bch2_bkey_alloc(trans, &iter, 0, snapshot); ret = PTR_ERR_OR_ZERO(n); if (ret) - goto err; + return ret; n->v.flags = 0; n->v.parent = cpu_to_le32(parent); @@ -1301,7 +1301,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, n->v.btime.lo = cpu_to_le64(bch2_current_time(c)); n->v.btime.hi = 0; - for (j = 0; j < ARRAY_SIZE(n->v.skip); j++) + for (unsigned j = 0; j < ARRAY_SIZE(n->v.skip); j++) n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent)); bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32); @@ -1310,13 +1310,12 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0); if (ret) - goto err; + return ret; new_snapids[i] = iter.pos.offset; } -err: - bch2_trans_iter_exit(trans, &iter); - return ret; + + return 0; } /* @@ -1357,7 +1356,7 @@ static int bch2_snapshot_node_create_children(struct 
btree_trans *trans, u32 par n_parent->v.subvol = 0; SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false); err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1424,38 +1423,22 @@ static inline u32 interior_delete_has_id(interior_delete_list *l, u32 id) return i ? i->live_child : 0; } -static unsigned __live_child(struct snapshot_table *t, u32 id, - snapshot_id_list *delete_leaves, - interior_delete_list *delete_interior) -{ - struct snapshot_t *s = __snapshot_t(t, id); - if (!s) - return 0; - - for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) - if (s->children[i] && - !snapshot_list_has_id(delete_leaves, s->children[i]) && - !interior_delete_has_id(delete_interior, s->children[i])) - return s->children[i]; - - for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) { - u32 live_child = s->children[i] - ? __live_child(t, s->children[i], delete_leaves, delete_interior) - : 0; - if (live_child) - return live_child; - } - - return 0; -} - -static unsigned live_child(struct bch_fs *c, u32 id) +static unsigned live_child(struct bch_fs *c, u32 start) { struct snapshot_delete *d = &c->snapshot_delete; guard(rcu)(); - return __live_child(rcu_dereference(c->snapshots), id, - &d->delete_leaves, &d->delete_interior); + struct snapshot_table *t = rcu_dereference(c->snapshots); + + for (u32 id = bch2_snapshot_tree_next(t, start); + id && id != start; + id = bch2_snapshot_tree_next(t, id)) + if (bch2_snapshot_is_leaf(c, id) && + !snapshot_list_has_id(&d->delete_leaves, id) && + !interior_delete_has_id(&d->delete_interior, id)) + return id; + + return 0; } static bool snapshot_id_dying(struct snapshot_delete *d, unsigned id) @@ -1483,23 +1466,19 @@ static int delete_dead_snapshots_process_key(struct btree_trans *trans, new->k.p.snapshot = live_child; - struct btree_iter dst_iter; - struct bkey_s_c dst_k = bch2_bkey_get_iter(trans, &dst_iter, - iter->btree_id, new->k.p, - BTREE_ITER_all_snapshots| - BTREE_ITER_intent); + CLASS(btree_iter, dst_iter)(trans, iter->btree_id, new->k.p, + BTREE_ITER_all_snapshots|BTREE_ITER_intent); + struct bkey_s_c dst_k = bch2_btree_iter_peek_slot(&dst_iter); ret = bkey_err(dst_k); if (ret) return ret; - ret = (bkey_deleted(dst_k.k) + return (bkey_deleted(dst_k.k) ? 
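The rewritten live_child() above no longer recurses through children: it walks the snapshot tree with bch2_snapshot_tree_next() until it finds a leaf that is on neither delete list, descending to a left child when possible and otherwise climbing until an ancestor offers an unvisited right child. A toy pre-order walk in the same spirit over an array-backed tree (ids are array indices, 0 means none); unlike the bcachefs helper it checks both children of the current node explicitly:

struct toy_node {
	unsigned parent;
	unsigned child[2];
};

/* pre-order successor of @id; returns 0 once the walk climbs past the root */
static unsigned tree_next(const struct toy_node *t, unsigned id)
{
	/* descend first */
	if (t[id].child[0])
		return t[id].child[0];
	if (t[id].child[1])
		return t[id].child[1];

	/* then climb until some ancestor has a sibling subtree we haven't visited */
	while (t[id].parent) {
		unsigned p = t[id].parent;

		if (t[p].child[1] && t[p].child[1] != id)
			return t[p].child[1];
		id = p;
	}
	return 0;
}

/* visit a whole tree rooted at a parentless node: for (id = root; id; id = tree_next(t, id)) { ... } */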
bch2_trans_update(trans, &dst_iter, new, BTREE_UPDATE_internal_snapshot_node) : 0) ?: bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(trans, &dst_iter); - return ret; } return 0; @@ -1526,7 +1505,7 @@ static bool skip_unrelated_snapshot_tree(struct btree_trans *trans, struct btree pos.snapshot = 0; if (iter->btree_id != BTREE_ID_inodes) pos.offset = U64_MAX; - bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(pos)); + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(pos)); } return ret; @@ -1604,7 +1583,7 @@ static int delete_dead_snapshot_keys_v2(struct btree_trans *trans) while (1) { struct bkey_s_c k; ret = lockrestart_do(trans, - bkey_err(k = bch2_btree_iter_peek(trans, &iter))); + bkey_err(k = bch2_btree_iter_peek(&iter))); if (ret) break; @@ -1627,12 +1606,12 @@ static int delete_dead_snapshot_keys_v2(struct btree_trans *trans) if (ret) break; - bch2_btree_iter_set_pos(trans, &iter, POS(0, k.k->p.offset + 1)); + bch2_btree_iter_set_pos(&iter, POS(0, k.k->p.offset + 1)); } else { - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); } } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret) goto err; @@ -1712,12 +1691,14 @@ static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n, interior_delete_list *skip) { guard(rcu)(); + struct snapshot_table *t = rcu_dereference(c->snapshots); + while (interior_delete_has_id(skip, id)) - id = __bch2_snapshot_parent(c, id); + id = __bch2_snapshot_parent(t, id); while (n--) { do { - id = __bch2_snapshot_parent(c, id); + id = __bch2_snapshot_parent(t, id); } while (interior_delete_has_id(skip, id)); } @@ -1944,7 +1925,6 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -1955,12 +1935,9 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, if (!bkey_eq(pos, k.k->p)) break; - if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) { - ret = 1; - break; - } + if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) + return 1; } - bch2_trans_iter_exit(trans, &iter); return ret; } diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h index 6dcb118b0fbd..fef32a0118c4 100644 --- a/fs/bcachefs/snapshot.h +++ b/fs/bcachefs/snapshot.h @@ -63,19 +63,19 @@ static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id) return __bch2_snapshot_parent_early(c, id); } -static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id) +static inline u32 __bch2_snapshot_parent(struct snapshot_table *t, u32 id) { - const struct snapshot_t *s = snapshot_t(c, id); + const struct snapshot_t *s = __snapshot_t(t, id); if (!s) return 0; u32 parent = s->parent; if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && parent && - s->depth != snapshot_t(c, parent)->depth + 1) + s->depth != __snapshot_t(t, parent)->depth + 1) panic("id %u depth=%u parent %u depth=%u\n", - id, snapshot_t(c, id)->depth, - parent, snapshot_t(c, parent)->depth); + id, __snapshot_t(t, id)->depth, + parent, __snapshot_t(t, parent)->depth); return parent; } @@ -83,14 +83,16 @@ static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id) static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id) { guard(rcu)(); - return __bch2_snapshot_parent(c, id); + return __bch2_snapshot_parent(rcu_dereference(c->snapshots), id); } static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n) { guard(rcu)(); + 
struct snapshot_table *t = rcu_dereference(c->snapshots); + while (n--) - id = __bch2_snapshot_parent(c, id); + id = __bch2_snapshot_parent(t, id); return id; } @@ -100,23 +102,29 @@ u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32); static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id) { guard(rcu)(); + struct snapshot_table *t = rcu_dereference(c->snapshots); u32 parent; - while ((parent = __bch2_snapshot_parent(c, id))) + while ((parent = __bch2_snapshot_parent(t, id))) id = parent; return id; } -static inline enum snapshot_id_state __bch2_snapshot_id_state(struct bch_fs *c, u32 id) +static inline enum snapshot_id_state __bch2_snapshot_id_state(struct snapshot_table *t, u32 id) { - const struct snapshot_t *s = snapshot_t(c, id); + const struct snapshot_t *s = __snapshot_t(t, id); return s ? s->state : SNAPSHOT_ID_empty; } static inline enum snapshot_id_state bch2_snapshot_id_state(struct bch_fs *c, u32 id) { guard(rcu)(); - return __bch2_snapshot_id_state(c, id); + return __bch2_snapshot_id_state(rcu_dereference(c->snapshots), id); +} + +static inline bool __bch2_snapshot_exists(struct snapshot_table *t, u32 id) +{ + return __bch2_snapshot_id_state(t, id) == SNAPSHOT_ID_live; } static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id) diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c index 3e08e55d2dc1..68392fb6532e 100644 --- a/fs/bcachefs/str_hash.c +++ b/fs/bcachefs/str_hash.c @@ -18,16 +18,14 @@ static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dir return ret; return !ret; } else { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, le64_to_cpu(d.v->d_inum), d.k->p.snapshot), 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; - ret = bkey_is_inode(k.k); - bch2_trans_iter_exit(trans, &iter); - return ret; + return bkey_is_inode(k.k); } } @@ -206,7 +204,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, bch_err_throw(c, transaction_restart_nested); err: fsck_err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -328,7 +326,7 @@ duplicate_entries: } out: fsck_err: - bch2_trans_iter_exit(trans, dup_iter); + bch2_trans_iter_exit(dup_iter); if (free_snapshots_seen) darray_exit(&s->ids); return ret; @@ -371,11 +369,11 @@ int __bch2_str_hash_check_key(struct btree_trans *trans, if (bkey_deleted(k.k)) goto bad_hash; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); fsck_err: return ret; bad_hash: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); /* * Before doing any repair, check hash_info itself: */ diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h index 353a927857f1..8c0fb44929cc 100644 --- a/fs/bcachefs/str_hash.h +++ b/fs/bcachefs/str_hash.h @@ -159,8 +159,11 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans, struct bkey_s_c k; int ret; - for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, - SPOS(inum.inum, desc.hash_key(info, key), snapshot), + bch2_trans_iter_init(trans, iter, + desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), + BTREE_ITER_slots|flags); + + for_each_btree_key_max_continue_norestart(*iter, POS(inum.inum, U64_MAX), BTREE_ITER_slots|flags, k, ret) { if (is_visible_key(desc, inum, k)) { @@ -173,7 +176,7 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans, break; } } - bch2_trans_iter_exit(trans, iter); + 
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index 3e08e55d2dc1..68392fb6532e 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -18,16 +18,14 @@ static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dir
 			return ret;
 
 		return !ret;
 	} else {
-		struct btree_iter iter;
-		struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+		CLASS(btree_iter, iter)(trans, BTREE_ID_inodes,
 				SPOS(0, le64_to_cpu(d.v->d_inum), d.k->p.snapshot), 0);
+		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 		int ret = bkey_err(k);
 		if (ret)
 			return ret;
 
-		ret = bkey_is_inode(k.k);
-		bch2_trans_iter_exit(trans, &iter);
-		return ret;
+		return bkey_is_inode(k.k);
 	}
 }
@@ -206,7 +204,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans,
 		bch_err_throw(c, transaction_restart_nested);
 err:
 fsck_err:
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	return ret;
 }
 
@@ -328,7 +326,7 @@ duplicate_entries:
 	}
 out:
 fsck_err:
-	bch2_trans_iter_exit(trans, dup_iter);
+	bch2_trans_iter_exit(dup_iter);
 	if (free_snapshots_seen)
 		darray_exit(&s->ids);
 	return ret;
@@ -371,11 +369,11 @@ int __bch2_str_hash_check_key(struct btree_trans *trans,
 		if (bkey_deleted(k.k))
 			goto bad_hash;
 	}
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 fsck_err:
 	return ret;
bad_hash:
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	/*
 	 * Before doing any repair, check hash_info itself:
 	 */
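The str_hash.c conversion above leans on CLASS(btree_iter, ...), which ties the iterator's lifetime to the enclosing scope so error paths can return directly. A sketch of the pattern (hypothetical function, mirroring the converted helper above):

	/* Sketch: CLASS(btree_iter, ...) exits the iterator when it goes out of scope. */
	static int example_inode_exists(struct btree_trans *trans, u64 inum, u32 snapshot)
	{
		CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, inum, snapshot), 0);

		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
		int ret = bkey_err(k);
		if (ret)
			return ret;		/* no explicit bch2_trans_iter_exit() needed */

		return bkey_is_inode(k.k);
	}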
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 353a927857f1..8c0fb44929cc 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -159,8 +159,11 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
 	struct bkey_s_c k;
 	int ret;
 
-	for_each_btree_key_max_norestart(trans, *iter, desc.btree_id,
-			SPOS(inum.inum, desc.hash_key(info, key), snapshot),
+	bch2_trans_iter_init(trans, iter,
+			desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot),
+			BTREE_ITER_slots|flags);
+
+	for_each_btree_key_max_continue_norestart(*iter, POS(inum.inum, U64_MAX),
 			BTREE_ITER_slots|flags, k, ret) {
 		if (is_visible_key(desc, inum, k)) {
@@ -173,7 +176,7 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
 			break;
 		}
 	}
-	bch2_trans_iter_exit(trans, iter);
+	bch2_trans_iter_exit(iter);
 
 	return bkey_s_c_err(ret ?: bch_err_throw(trans->c, ENOENT_str_hash_lookup));
 }
@@ -209,13 +212,16 @@ bch2_hash_hole(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
-	for_each_btree_key_max_norestart(trans, *iter, desc.btree_id,
-			SPOS(inum.inum, desc.hash_key(info, key), snapshot),
+	bch2_trans_iter_init(trans, iter, desc.btree_id,
+			SPOS(inum.inum, desc.hash_key(info, key), snapshot),
+			BTREE_ITER_slots|BTREE_ITER_intent);
+
+	for_each_btree_key_max_continue_norestart(*iter, POS(inum.inum, U64_MAX),
 			BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
 		if (!is_visible_key(desc, inum, k))
 			return 0;
-	bch2_trans_iter_exit(trans, iter);
+	bch2_trans_iter_exit(iter);
 
 	return ret ?: bch_err_throw(trans->c, ENOSPC_str_hash_create);
 }
@@ -230,11 +236,11 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_trans_copy_iter(trans, &iter, start);
+	bch2_trans_copy_iter(&iter, start);
 
-	bch2_btree_iter_advance(trans, &iter);
+	bch2_btree_iter_advance(&iter);
 
-	for_each_btree_key_continue_norestart(trans, iter, BTREE_ITER_slots, k, ret) {
+	for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
 		if (k.k->type != desc.key_type &&
 		    k.k->type != KEY_TYPE_hash_whiteout)
 			break;
@@ -246,7 +252,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
 		}
 	}
 
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	return ret;
 }
@@ -265,10 +271,13 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
 	bool found = false;
 	int ret;
 
-	for_each_btree_key_max_norestart(trans, *iter, desc.btree_id,
+	bch2_trans_iter_init(trans, iter, desc.btree_id,
 			SPOS(insert->k.p.inode,
 			     desc.hash_bkey(info, bkey_i_to_s_c(insert)),
 			     snapshot),
+			BTREE_ITER_slots|BTREE_ITER_intent|flags);
+
+	for_each_btree_key_max_continue_norestart(*iter, POS(insert->k.p.inode, U64_MAX),
 			BTREE_ITER_slots|BTREE_ITER_intent|flags,
 			k, ret) {
 		if (is_visible_key(desc, inum, k)) {
@@ -280,7 +289,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
 		}
 
 		if (!slot.path && !(flags & STR_HASH_must_replace))
-			bch2_trans_copy_iter(trans, &slot, iter);
+			bch2_trans_copy_iter(&slot, iter);
 
 		if (k.k->type != KEY_TYPE_hash_whiteout)
 			goto not_found;
@@ -289,14 +298,14 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
 	if (!ret)
 		ret = bch_err_throw(c, ENOSPC_str_hash_create);
out:
-	bch2_trans_iter_exit(trans, &slot);
-	bch2_trans_iter_exit(trans, iter);
+	bch2_trans_iter_exit(&slot);
+	bch2_trans_iter_exit(iter);
 	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
found:
 	found = true;
not_found:
 	if (found && (flags & STR_HASH_must_create)) {
-		bch2_trans_iter_exit(trans, &slot);
+		bch2_trans_iter_exit(&slot);
 		return k;
 	} else if (!found && (flags & STR_HASH_must_replace)) {
 		ret = bch_err_throw(c, ENOENT_str_hash_set_must_replace);
@@ -326,7 +335,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
 	if (ret)
 		return ret;
 	if (k.k) {
-		bch2_trans_iter_exit(trans, &iter);
+		bch2_trans_iter_exit(&iter);
 		return bch_err_throw(trans->c, EEXIST_str_hash_set);
 	}
@@ -389,7 +398,7 @@ int bch2_hash_delete(struct btree_trans *trans,
 		return ret;
 
 	ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	return ret;
 }
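The str_hash.h helpers now split explicit iterator setup from a continue-style loop, and a successful lookup hands the still-live iterator back to the caller, which becomes responsible for exiting it. A sketch of a caller under that contract (hypothetical scan, not part of the patch):

	/* Sketch: explicit init + continue-style loop; caller owns the iterator on success. */
	static int example_find_non_whiteout(struct btree_trans *trans, struct btree_iter *iter, u64 inum)
	{
		struct bkey_s_c k;
		int ret;

		bch2_trans_iter_init(trans, iter, BTREE_ID_xattrs,
				     SPOS(inum, 0, U32_MAX), BTREE_ITER_slots);

		for_each_btree_key_max_continue_norestart(*iter, POS(inum, U64_MAX),
							  BTREE_ITER_slots, k, ret)
			if (k.k->type != KEY_TYPE_hash_whiteout)
				return 0;	/* found: *iter stays live for the caller */

		bch2_trans_iter_exit(iter);
		return ret ?: bch_err_throw(trans->c, ENOENT_str_hash_lookup);
	}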
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 2d2d6b22df88..a38a58ef7a8c 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -46,7 +46,6 @@ static int check_subvol(struct btree_trans *trans,
 			struct bkey_s_c k)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter subvol_children_iter = {};
 	struct bch_subvolume subvol;
 	struct bch_snapshot snapshot;
 	CLASS(printbuf, buf)();
@@ -81,30 +80,28 @@ static int check_subvol(struct btree_trans *trans,
 			bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
 		ret = PTR_ERR_OR_ZERO(n);
 		if (ret)
-			goto err;
+			return ret;
 
 		n->v.fs_path_parent = 0;
 	}
 
 	if (subvol.fs_path_parent) {
-		struct bpos pos = subvolume_children_pos(k);
-
-		struct bkey_s_c subvol_children_k =
-			bch2_bkey_get_iter(trans, &subvol_children_iter,
-					   BTREE_ID_subvolume_children, pos, 0);
+		CLASS(btree_iter, subvol_children_iter)(trans,
+			BTREE_ID_subvolume_children, subvolume_children_pos(k), 0);
+		struct bkey_s_c subvol_children_k = bch2_btree_iter_peek_slot(&subvol_children_iter);
 		ret = bkey_err(subvol_children_k);
 		if (ret)
-			goto err;
+			return ret;
 
 		if (fsck_err_on(subvol_children_k.k->type != KEY_TYPE_set,
 				trans, subvol_children_not_set,
 				"subvolume not set in subvolume_children btree at %llu:%llu\n%s",
-				pos.inode, pos.offset,
+				subvol_children_iter.pos.inode, subvol_children_iter.pos.offset,
 				(printbuf_reset(&buf),
 				 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
-			ret = bch2_btree_bit_mod(trans, BTREE_ID_subvolume_children, pos, true);
+			ret = bch2_btree_bit_mod(trans, BTREE_ID_subvolume_children, subvol_children_iter.pos, true);
 			if (ret)
-				goto err;
+				return ret;
 		}
 	}
@@ -122,7 +119,7 @@ static int check_subvol(struct btree_trans *trans,
 			inode.bi_snapshot = le32_to_cpu(subvol.snapshot);
 			ret = __bch2_fsck_write_inode(trans, &inode);
 			if (ret)
-				goto err;
+				return ret;
 		}
 	} else if (bch2_err_matches(ret, ENOENT)) {
 		if (fsck_err(trans, subvol_to_missing_root,
@@ -142,10 +139,10 @@ static int check_subvol(struct btree_trans *trans,
 			inode.bi_parent_subvol = le32_to_cpu(subvol.fs_path_parent);
 			ret = __bch2_fsck_write_inode(trans, &inode);
 			if (ret)
-				goto err;
+				return ret;
 		}
 	} else {
-		goto err;
+		return ret;
 	}
 
 	if (!BCH_SUBVOLUME_SNAP(&subvol)) {
@@ -159,7 +156,7 @@ static int check_subvol(struct btree_trans *trans,
 				 "%s: snapshot tree %u not found", __func__, snapshot_tree);
 
 		if (ret)
-			goto err;
+			return ret;
 
 		if (fsck_err_on(le32_to_cpu(st.master_subvol) != k.k->p.offset,
 				trans, subvol_not_master_and_not_snapshot,
@@ -169,14 +166,12 @@ static int check_subvol(struct btree_trans *trans,
 			bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
 		ret = PTR_ERR_OR_ZERO(s);
 		if (ret)
-			goto err;
+			return ret;
 
 		SET_BCH_SUBVOLUME_SNAP(&s->v, true);
 	}
-err:
 fsck_err:
-	bch2_trans_iter_exit(trans, &subvol_children_iter);
 	return ret;
 }
@@ -297,11 +292,8 @@ int bch2_subvolume_trigger(struct btree_trans *trans,
 
 int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol)
 {
-	struct btree_iter iter;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
-	struct bkey_s_c k = bch2_btree_iter_peek(trans, &iter);
-	bch2_trans_iter_exit(trans, &iter);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
+	struct bkey_s_c k = bch2_btree_iter_peek(&iter);
 
 	return bkey_err(k) ?:
 		k.k && k.k->p.inode == subvol
 		? bch_err_throw(trans->c, ENOTEMPTY_subvol_not_empty)
@@ -373,7 +365,7 @@ int __bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
 	if (likely(!ret))
 		*snapid = le32_to_cpu(subvol.v->snapshot);
 
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	return ret;
 }
@@ -486,9 +478,9 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
 	ret =   bch2_btree_delete_at(trans, &subvol_iter, 0) ?:
 		bch2_snapshot_node_set_deleted(trans, snapid);
 err:
-	bch2_trans_iter_exit(trans, &snapshot_tree_iter);
-	bch2_trans_iter_exit(trans, &snapshot_iter);
-	bch2_trans_iter_exit(trans, &subvol_iter);
+	bch2_trans_iter_exit(&snapshot_tree_iter);
+	bch2_trans_iter_exit(&snapshot_iter);
+	bch2_trans_iter_exit(&subvol_iter);
 	return ret;
 }
@@ -590,7 +582,7 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
 	SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
 	n->v.fs_path_parent = 0;
 
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	return ret;
 }
@@ -602,7 +594,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
 			  bool ro)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter dst_iter, src_iter = {};
+	struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
 	struct bkey_i_subvolume *new_subvol = NULL;
 	struct bkey_i_subvolume *src_subvol = NULL;
 	u32 parent = 0, new_nodes[2], snapshot_subvols[2];
@@ -665,8 +657,8 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
 	*new_subvolid		= new_subvol->k.p.offset;
 	*new_snapshotid		= new_nodes[0];
 err:
-	bch2_trans_iter_exit(trans, &src_iter);
-	bch2_trans_iter_exit(trans, &dst_iter);
+	bch2_trans_iter_exit(&src_iter);
+	bch2_trans_iter_exit(&dst_iter);
 	return ret;
 }
@@ -702,33 +694,25 @@ int bch2_initialize_subvolumes(struct bch_fs *c)
 
 static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_inode_unpacked inode;
-	int ret;
-
-	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
-			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
-	ret = bkey_err(k);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
+	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
+	int ret = bkey_err(k);
 	if (ret)
 		return ret;
 
 	if (!bkey_is_inode(k.k)) {
 		struct bch_fs *c = trans->c;
 		bch_err(c, "root inode not found");
-		ret = bch_err_throw(c, ENOENT_inode);
-		goto err;
+		return bch_err_throw(c, ENOENT_inode);
 	}
 
+	struct bch_inode_unpacked inode;
 	ret = bch2_inode_unpack(k, &inode);
 	BUG_ON(ret);
 
 	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
 
-	ret = bch2_inode_write(trans, &iter, &inode);
-err:
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+	return bch2_inode_write(trans, &iter, &inode);
 }
 
 /* set bi_subvol on root inode */
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 075f55e25c70..b6d7c1f4a256 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -33,45 +33,41 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
 int bch2_subvol_is_ro(struct bch_fs *, u32);
 
 static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_max_type(struct btree_trans *trans, struct btree_iter *iter,
-					   struct bpos end, u32 subvolid, unsigned flags)
+bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end,
+					   u32 subvolid, unsigned flags)
 {
 	u32 snapshot;
-	int ret = bch2_subvolume_get_snapshot(trans, subvolid, &snapshot);
+	int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
 	if (ret)
 		return bkey_s_c_err(ret);
 
-	bch2_btree_iter_set_snapshot(trans, iter, snapshot);
-	return bch2_btree_iter_peek_max_type(trans, iter, end, flags);
+	bch2_btree_iter_set_snapshot(iter, snapshot);
+	return bch2_btree_iter_peek_max_type(iter, end, flags);
 }
 
 #define for_each_btree_key_in_subvolume_max_continue(_trans, _iter,		\
 			_end, _subvolid, _flags, _k, _do)			\
 ({										\
-	struct bkey_s_c _k;							\
 	int _ret3 = 0;								\
 										\
 	do {									\
 		_ret3 = lockrestart_do(_trans, ({				\
-			(_k) = bch2_btree_iter_peek_in_subvolume_max_type(trans, &(_iter),\
+			struct bkey_s_c _k = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter),\
 						_end, _subvolid, (_flags));	\
 			if (!(_k).k)						\
 				break;						\
 										\
 			bkey_err(_k) ?: (_do);					\
 		}));								\
-	} while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter)));		\
+	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));			\
 										\
-	bch2_trans_iter_exit((_trans), &(_iter));				\
 	_ret3;									\
 })
 
 #define for_each_btree_key_in_subvolume_max(_trans, _iter, _btree_id,		\
 			_start, _end, _subvolid, _flags, _k, _do)		\
 ({										\
-	struct btree_iter _iter;						\
-	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),			\
-			     (_start), (_flags));				\
+	CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags));	\
 										\
 	for_each_btree_key_in_subvolume_max_continue(_trans, _iter,		\
 			_end, _subvolid, _flags, _k, _do);			\
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 40fa87ce1d09..c88759964575 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -79,7 +79,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
 	} else {
 		darray_for_each(c->incompat_versions_requested, i)
 			if (version == *i)
-				return -BCH_ERR_may_not_use_incompat_feature;
+				return bch_err_throw(c, may_not_use_incompat_feature);
 
 		darray_push(&c->incompat_versions_requested, version);
 		CLASS(printbuf, buf)();
@@ -90,7 +90,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
 		prt_printf(&buf, "\n  set version_upgrade=incompat to enable");
 
 		bch_notice(c, "%s", buf.buf);
-		return -BCH_ERR_may_not_use_incompat_feature;
+		return bch_err_throw(c, may_not_use_incompat_feature);
 	}
 }
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 0fc0b2221036..b3b2d8353a36 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -729,6 +729,8 @@ void __bch2_fs_stop(struct bch_fs *c)
 		cancel_work_sync(&ca->io_error_work);
 
 	cancel_work_sync(&c->read_only_work);
+
+	flush_work(&c->btree_interior_update_work);
 }
 
 void bch2_fs_free(struct bch_fs *c)
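The super-io.c hunks swap a raw -BCH_ERR_* return for bch_err_throw(), which hands back the same private error code but routes it through the per-filesystem error helper. A sketch for contrast (hypothetical helper, not part of the patch):

	/* Sketch: same error value, but thrown through the per-filesystem helper. */
	static int example_require_feature(struct bch_fs *c, bool enabled)
	{
		if (!enabled)
			return bch_err_throw(c, may_not_use_incompat_feature);
		return 0;
	}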
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index ea27df30cfcb..baaaedf68422 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -31,76 +31,66 @@ static void delete_test_keys(struct bch_fs *c)
 
 static int test_delete(struct bch_fs *c, u64 nr)
 {
-	CLASS(btree_trans, trans)(c);
-	struct btree_iter iter;
 	struct bkey_i_cookie k;
-	int ret;
-
 	bkey_cookie_init(&k.k_i);
 	k.k.p.snapshot = U32_MAX;
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
-			     BTREE_ITER_intent);
+	CLASS(btree_trans, trans)(c);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent);
 
-	ret = commit_do(trans, NULL, NULL, 0,
-			bch2_btree_iter_traverse(trans, &iter) ?:
+	int ret = commit_do(trans, NULL, NULL, 0,
+			bch2_btree_iter_traverse(&iter) ?:
 			bch2_trans_update(trans, &iter, &k.k_i, 0));
 	bch_err_msg(c, ret, "update error");
 	if (ret)
-		goto err;
+		return ret;
 
 	pr_info("deleting once");
 	ret = commit_do(trans, NULL, NULL, 0,
-			bch2_btree_iter_traverse(trans, &iter) ?:
+			bch2_btree_iter_traverse(&iter) ?:
 			bch2_btree_delete_at(trans, &iter, 0));
 	bch_err_msg(c, ret, "delete error (first)");
 	if (ret)
-		goto err;
+		return ret;
 
 	pr_info("deleting twice");
 	ret = commit_do(trans, NULL, NULL, 0,
-			bch2_btree_iter_traverse(trans, &iter) ?:
+			bch2_btree_iter_traverse(&iter) ?:
 			bch2_btree_delete_at(trans, &iter, 0));
 	bch_err_msg(c, ret, "delete error (second)");
 	if (ret)
-		goto err;
-err:
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+		return ret;
+
+	return 0;
 }
 
 static int test_delete_written(struct bch_fs *c, u64 nr)
 {
-	CLASS(btree_trans, trans)(c);
-	struct btree_iter iter;
 	struct bkey_i_cookie k;
-	int ret;
-
 	bkey_cookie_init(&k.k_i);
 	k.k.p.snapshot = U32_MAX;
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
-			     BTREE_ITER_intent);
+	CLASS(btree_trans, trans)(c);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent);
 
-	ret = commit_do(trans, NULL, NULL, 0,
-			bch2_btree_iter_traverse(trans, &iter) ?:
+	int ret = commit_do(trans, NULL, NULL, 0,
+			bch2_btree_iter_traverse(&iter) ?:
 			bch2_trans_update(trans, &iter, &k.k_i, 0));
 	bch_err_msg(c, ret, "update error");
 	if (ret)
-		goto err;
+		return ret;
 
 	bch2_trans_unlock(trans);
 	bch2_journal_flush_all_pins(&c->journal);
 
 	ret = commit_do(trans, NULL, NULL, 0,
-			bch2_btree_iter_traverse(trans, &iter) ?:
+			bch2_btree_iter_traverse(&iter) ?:
 			bch2_btree_delete_at(trans, &iter, 0));
 	bch_err_msg(c, ret, "delete error");
 	if (ret)
-		goto err;
-err:
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+		return ret;
+
+	return 0;
 }
 
 static int test_iterate(struct bch_fs *c, u64 nr)
@@ -343,19 +333,15 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
 	delete_test_keys(c);
 
 	CLASS(btree_trans, trans)(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0);
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
-			     SPOS(0, 0, U32_MAX), 0);
-
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+	struct bkey_s_c k;
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	bch2_trans_iter_exit(trans, &iter);
 	return 0;
 }
@@ -364,19 +350,15 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
 	delete_test_keys(c);
 
 	CLASS(btree_trans, trans)(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
+	CLASS(btree_iter, iter)(trans, BTREE_ID_extents, SPOS(0, 0, U32_MAX), 0);
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
-			     SPOS(0, 0, U32_MAX), 0);
-
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+	struct bkey_s_c k;
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	bch2_trans_iter_exit(trans, &iter);
 	return 0;
 }
@@ -470,25 +452,21 @@ static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
 
 /* Test skipping over keys in unrelated snapshots: */
 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
 	struct bkey_i_cookie cookie;
-	int ret;
-
 	bkey_cookie_init(&cookie.k_i);
 	cookie.k.p.snapshot = snapid_hi;
-	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
+	int ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
 	if (ret)
 		return ret;
 
 	CLASS(btree_trans, trans)(c);
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
-			     SPOS(0, 0, snapid_lo), 0);
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, snapid_lo), 0);
+
+	struct bkey_s_c k;
+	ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k->p.snapshot != U32_MAX);
 
-	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
@@ -583,24 +561,18 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr)
 static int rand_lookup(struct bch_fs *c, u64 nr)
 {
 	CLASS(btree_trans, trans)(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret = 0;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
-			     SPOS(0, 0, U32_MAX), 0);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0);
 
 	for (u64 i = 0; i < nr; i++) {
-		bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX));
+		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
 
-		lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
-		ret = bkey_err(k);
+		struct bkey_s_c k;
+		int ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
 		if (ret)
-			break;
+			return ret;
 	}
 
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+	return 0;
 }
@@ -611,9 +583,9 @@ static int rand_mixed_trans(struct btree_trans *trans,
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, U32_MAX));
+	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
 
-	k = bch2_btree_iter_peek(trans, iter);
+	k = bch2_btree_iter_peek(iter);
 	ret = bkey_err(k);
 	bch_err_msg(trans->c, ret, "lookup error");
 	if (ret)
@@ -631,45 +603,33 @@ static int rand_mixed_trans(struct btree_trans *trans,
 static int rand_mixed(struct bch_fs *c, u64 nr)
 {
 	CLASS(btree_trans, trans)(c);
-	struct btree_iter iter;
-	struct bkey_i_cookie cookie;
-	int ret = 0;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
-			     SPOS(0, 0, U32_MAX), 0);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0);
 
 	for (u64 i = 0; i < nr; i++) {
 		u64 rand = test_rand();
-		ret = commit_do(trans, NULL, NULL, 0,
+		struct bkey_i_cookie cookie;
+		int ret = commit_do(trans, NULL, NULL, 0,
 				rand_mixed_trans(trans, &iter, &cookie, i, rand));
 		if (ret)
-			break;
+			return ret;
 	}
 
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+	return 0;
 }
 
 static int __do_delete(struct btree_trans *trans, struct bpos pos)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret = 0;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
-			     BTREE_ITER_intent);
-	k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX));
-	ret = bkey_err(k);
+	CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, pos,
+				BTREE_ITER_intent);
+	struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX));
+	int ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	if (!k.k)
-		goto err;
+		return 0;
 
-	ret = bch2_btree_delete_at(trans, &iter, 0);
-err:
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+	return bch2_btree_delete_at(trans, &iter, 0);
 }
 
 static int rand_delete(struct bch_fs *c, u64 nr)
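The converted tests pair scope-managed CLASS() transaction/iterator declarations with commit_do(), which retries the traverse-and-update closure on transaction restart. A sketch of the same shape (hypothetical test helper, not part of the patch):

	/* Sketch: scoped trans + iter, with commit_do() retrying on transaction restart. */
	static int example_insert_cookie(struct bch_fs *c, struct bpos pos)
	{
		struct bkey_i_cookie k;
		bkey_cookie_init(&k.k_i);
		k.k.p = pos;

		CLASS(btree_trans, trans)(c);
		CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent);

		return commit_do(trans, NULL, NULL, 0,
				 bch2_btree_iter_traverse(&iter) ?:
				 bch2_trans_update(trans, &iter, &k.k_i, 0));
	}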
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 903e20cd34fa..6094b568dd33 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -157,7 +157,7 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
 		else
 			memcpy(buffer, xattr_val(xattr.v), ret);
 	}
-	bch2_trans_iter_exit(trans, &iter);
+	bch2_trans_iter_exit(&iter);
 	return ret;
 }
 
@@ -168,7 +168,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
 		   int type, int flags)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter inode_iter = {};
+	struct btree_iter inode_iter = { NULL };
 	int ret;
 
 	ret =   bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
@@ -184,7 +184,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
 	inode_u->bi_ctime = bch2_current_time(c);
 
 	ret = bch2_inode_write(trans, &inode_iter, inode_u);
-	bch2_trans_iter_exit(trans, &inode_iter);
+	bch2_trans_iter_exit(&inode_iter);
 
 	if (ret)
 		return ret;
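Throughout this series, on-stack iterators that may not be initialized on every path are zero-initialized so the shared bch2_trans_iter_exit() call stays safe, as in bch2_xattr_set() above. A sketch of that shape (hypothetical helper; assumes bch2_inode_peek's usual trans/iter/inode/inum/flags signature):

	/* Sketch: zero-initialized iterator, so the common exit stays safe on early errors. */
	static int example_touch_inode(struct btree_trans *trans, subvol_inum inum)
	{
		struct btree_iter inode_iter = { NULL };
		struct bch_inode_unpacked inode_u;

		int ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inum, BTREE_ITER_intent) ?:
			  bch2_inode_write(trans, &inode_iter, &inode_u);

		bch2_trans_iter_exit(&inode_iter);
		return ret;
	}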