Diffstat (limited to 'fs/bcachefs')
46 files changed, 911 insertions, 1309 deletions
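The bulk of this patch converts open-coded iterator lifetimes — bch2_trans_iter_init()/bch2_bkey_get_iter() paired with bch2_trans_iter_exit() on every exit path — to scope-bound iterators declared with CLASS(btree_iter, ...), using the DEFINE_CLASS() wrapper added to btree_iter.h in the hunks below. The sketch that follows is an illustration of that pattern, not a verbatim hunk; it assumes the DEFINE_CLASS(btree_iter, ...) helper from this commit, and example_old()/example_new() are hypothetical names used only to contrast the two shapes.

	/* Before: manual iterator lifetime; every error path must reach the exit call */
	static int example_old(struct btree_trans *trans, struct bpos pos)
	{
		struct btree_iter iter;
		struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, pos,
						       BTREE_ITER_cached|BTREE_ITER_intent);
		int ret = bkey_err(k);
		if (ret)
			goto err;		/* must still fall through to bch2_trans_iter_exit() */
		/* ... use k ... */
	err:
		bch2_trans_iter_exit(&iter);
		return ret;
	}

	/* After: CLASS() ties the iterator to the enclosing scope; early returns are safe
	 * because bch2_trans_iter_exit() runs automatically when iter goes out of scope */
	static int example_new(struct btree_trans *trans, struct bpos pos)
	{
		CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, pos,
					BTREE_ITER_cached|BTREE_ITER_intent);
		struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
		int ret = bkey_err(k);
		if (ret)
			return ret;		/* no explicit exit needed */
		/* ... use k ... */
		return 0;
	}

This is why so many hunks below simply replace "goto out"/"goto err" cleanup paths with direct returns, and why the remaining blocks that still need an explicit lifetime are wrapped in braces so the CLASS() destructor fires before fsck_err: labels.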
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 1c2cd841e8a0..3fc728efbf5c 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -473,13 +473,14 @@ struct bkey_i_alloc_v4 * bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter, struct bpos pos) { - struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos, - BTREE_ITER_with_updates| - BTREE_ITER_cached| - BTREE_ITER_intent); + bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos, + BTREE_ITER_with_updates| + BTREE_ITER_cached| + BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); int ret = bkey_err(k); if (unlikely(ret)) - return ERR_PTR(ret); + goto err; struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k); ret = PTR_ERR_OR_ZERO(a); @@ -495,29 +496,24 @@ __flatten struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, pos, - BTREE_ITER_with_updates| - BTREE_ITER_cached| - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, pos, + BTREE_ITER_with_updates| + BTREE_ITER_cached| + BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (unlikely(ret)) return ERR_PTR(ret); if ((void *) k.v >= trans->mem && - (void *) k.v < trans->mem + trans->mem_top) { - bch2_trans_iter_exit(&iter); + (void *) k.v < trans->mem + trans->mem_top) return container_of(bkey_s_c_to_alloc_v4(k).v, struct bkey_i_alloc_v4, v); - } struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k); - if (IS_ERR(a)) { - bch2_trans_iter_exit(&iter); + if (IS_ERR(a)) return a; - } ret = bch2_trans_update_ip(trans, &iter, &a->k_i, flags, _RET_IP_); - bch2_trans_iter_exit(&iter); return unlikely(ret) ? 
ERR_PTR(ret) : a; } @@ -744,8 +740,8 @@ static int bch2_bucket_do_index(struct btree_trans *trans, return 0; } - struct btree_iter iter; - struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_intent); + struct bkey_s_c old = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(old); if (ret) return ret; @@ -755,30 +751,25 @@ static int bch2_bucket_do_index(struct btree_trans *trans, trans, alloc_k, set, btree == BTREE_ID_need_discard, false); - ret = bch2_btree_bit_mod_iter(trans, &iter, set); + return bch2_btree_bit_mod_iter(trans, &iter, set); fsck_err: - bch2_trans_iter_exit(&iter); return ret; } static noinline int bch2_bucket_gen_update(struct btree_trans *trans, struct bpos bucket, u8 gen) { - struct btree_iter iter; - unsigned offset; - struct bpos pos = alloc_gens_pos(bucket, &offset); - struct bkey_i_bucket_gens *g; - struct bkey_s_c k; - int ret; - - g = bch2_trans_kmalloc(trans, sizeof(*g)); - ret = PTR_ERR_OR_ZERO(g); + struct bkey_i_bucket_gens *g = bch2_trans_kmalloc(trans, sizeof(*g)); + int ret = PTR_ERR_OR_ZERO(g); if (ret) return ret; - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos, - BTREE_ITER_intent| - BTREE_ITER_with_updates); + unsigned offset; + struct bpos pos = alloc_gens_pos(bucket, &offset); + + CLASS(btree_iter, iter)(trans, BTREE_ID_bucket_gens, pos, + BTREE_ITER_intent|BTREE_ITER_with_updates); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) return ret; @@ -1353,8 +1344,8 @@ struct check_discard_freespace_key_async { static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0); + CLASS(btree_iter, iter)(trans, pos.btree, pos.pos, 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; @@ -1797,16 +1788,12 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct bpos pos = need_discard_iter->pos; - struct btree_iter iter = { NULL }; - struct bkey_s_c k; - struct bkey_i_alloc_v4 *a; - CLASS(printbuf, buf)(); bool discard_locked = false; int ret = 0; if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) { s->open++; - goto out; + return 0; } u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal, @@ -1814,30 +1801,29 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, if (seq_ready > c->journal.flushed_seq_ondisk) { if (seq_ready > c->journal.flushing_seq) s->need_journal_commit++; - goto out; + return 0; } - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, - need_discard_iter->pos, - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, need_discard_iter->pos, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) - goto out; + return ret; - a = bch2_alloc_to_v4_mut(trans, k); + struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k); ret = PTR_ERR_OR_ZERO(a); if (ret) - goto out; + return ret; if (a->v.data_type != BCH_DATA_need_discard) { if (need_discard_or_freespace_err(trans, k, true, true, true)) { ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false); if (ret) - goto out; + return ret; goto commit; } - goto out; + return 0; } if (!fastpath) { @@ -1890,7 +1876,6 @@ fsck_err: discard_in_flight_remove(ca, iter.pos.offset); if (!ret) s->seen++; - bch2_trans_iter_exit(&iter); 
return ret; } @@ -1954,9 +1939,8 @@ static int bch2_do_discards_fast_one(struct btree_trans *trans, struct bpos *discard_pos_done, struct discard_buckets_state *s) { - struct btree_iter need_discard_iter; - struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter, - BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); + CLASS(btree_iter, need_discard_iter)(trans, BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); + struct bkey_s_c discard_k = bch2_btree_iter_peek_slot(&need_discard_iter); int ret = bkey_err(discard_k); if (ret) return ret; @@ -1965,12 +1949,10 @@ static int bch2_do_discards_fast_one(struct btree_trans *trans, trans, discarding_bucket_not_in_need_discard_btree, "attempting to discard bucket %u:%llu not in need_discard btree", ca->dev_idx, bucket)) - goto out; + return 0; - ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); -out: + return bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); fsck_err: - bch2_trans_iter_exit(&need_discard_iter); return ret; } @@ -2106,7 +2088,6 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); struct bpos bucket = u64_to_bucket(lru_k.k->p.offset); - struct btree_iter alloc_iter = {}; int ret = 0; if (*nr_to_invalidate <= 0) @@ -2117,54 +2098,53 @@ static int invalidate_one_bucket(struct btree_trans *trans, "lru key points to nonexistent device:bucket %llu:%llu", bucket.inode, bucket.offset)) return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); - goto out; + return 0; } if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset)) return 0; - struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, - BTREE_ID_alloc, bucket, - BTREE_ITER_cached); - ret = bkey_err(alloc_k); - if (ret) - return ret; + { + CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, BTREE_ITER_cached); + struct bkey_s_c alloc_k = bch2_btree_iter_peek_slot(&alloc_iter); + ret = bkey_err(alloc_k); + if (ret) + return ret; - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert); + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert); - /* We expect harmless races here due to the btree write buffer: */ - if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a)) - goto out; + /* We expect harmless races here due to the btree write buffer: */ + if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a)) + return 0; - /* - * Impossible since alloc_lru_idx_read() only returns nonzero if the - * bucket is supposed to be on the cached bucket LRU (i.e. - * BCH_DATA_cached) - * - * bch2_lru_validate() also disallows lru keys with lru_pos_time() == 0 - */ - BUG_ON(a->data_type != BCH_DATA_cached); - BUG_ON(a->dirty_sectors); + /* + * Impossible since alloc_lru_idx_read() only returns nonzero if the + * bucket is supposed to be on the cached bucket LRU (i.e. 
+ * BCH_DATA_cached) + * + * bch2_lru_validate() also disallows lru keys with lru_pos_time() == 0 + */ + BUG_ON(a->data_type != BCH_DATA_cached); + BUG_ON(a->dirty_sectors); - if (!a->cached_sectors) { - bch2_check_bucket_backpointer_mismatch(trans, ca, bucket.offset, - true, last_flushed); - goto out; - } + if (!a->cached_sectors) { + bch2_check_bucket_backpointer_mismatch(trans, ca, bucket.offset, + true, last_flushed); + return 0; + } - unsigned cached_sectors = a->cached_sectors; - u8 gen = a->gen; + unsigned cached_sectors = a->cached_sectors; + u8 gen = a->gen; - ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed); - if (ret) - goto out; + ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed); + if (ret) + return ret; - trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors); - --*nr_to_invalidate; -out: + trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors); + --*nr_to_invalidate; + } fsck_err: - bch2_trans_iter_exit(&alloc_iter); return ret; } diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index 70895afc0d0d..0a5b3d31d52c 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -285,8 +285,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct bch_dev *ca = req->ca; - struct btree_iter iter, citer; - struct bkey_s_c k, ck; + struct bkey_s_c k; struct open_bucket *ob = NULL; u64 first_bucket = ca->mi.first_bucket; u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; @@ -306,7 +305,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans, again: for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor), BTREE_ITER_slots, k, ret) { - u64 bucket = k.k->p.offset; + u64 bucket = alloc_cursor = k.k->p.offset; if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) break; @@ -333,29 +332,23 @@ again: continue; /* now check the cached key to serialize concurrent allocs of the bucket */ - ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached); + CLASS(btree_iter, citer)(trans, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached|BTREE_ITER_nopreserve); + struct bkey_s_c ck = bch2_btree_iter_peek_slot(&citer); ret = bkey_err(ck); if (ret) break; a = bch2_alloc_to_v4(ck, &a_convert); - if (a->data_type != BCH_DATA_free) - goto next; - - req->counters.buckets_seen++; + if (a->data_type == BCH_DATA_free) { + req->counters.buckets_seen++; - ob = may_alloc_bucket(c, req, k.k->p) - ? __try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl) - : NULL; -next: - bch2_set_btree_iter_dontneed(&citer); - bch2_trans_iter_exit(&citer); - if (ob) - break; + ob = may_alloc_bucket(c, req, k.k->p) + ? 
__try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl) + : NULL; + if (ob) + break; + } } - bch2_trans_iter_exit(&iter); - - alloc_cursor = iter.pos.offset; if (!ob && ret) ob = ERR_PTR(ret); @@ -375,7 +368,6 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, struct closure *cl) { struct bch_dev *ca = req->ca; - struct btree_iter iter; struct bkey_s_c k; struct open_bucket *ob = NULL; u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; @@ -430,7 +422,6 @@ next: break; } fail: - bch2_trans_iter_exit(&iter); BUG_ON(ob && ret); diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index 42c321d42721..45d3db41225a 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -154,12 +154,10 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, struct bkey_i_backpointer *bp, bool insert) { - struct btree_iter bp_iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, - bp->k.p, - BTREE_ITER_intent| - BTREE_ITER_slots| - BTREE_ITER_with_updates); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp->k.p, + BTREE_ITER_intent| + BTREE_ITER_with_updates); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&bp_iter); int ret = bkey_err(k); if (ret) return ret; @@ -170,7 +168,7 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, memcmp(bkey_s_c_to_backpointer(k).v, &bp->v, sizeof(bp->v)))) { ret = backpointer_mod_err(trans, orig_k, bp, k, insert); if (ret) - goto err; + return ret; } if (!insert) { @@ -178,10 +176,7 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, set_bkey_val_u64s(&bp->k, 0); } - ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0); -err: - bch2_trans_iter_exit(&bp_iter); - return ret; + return bch2_trans_update(trans, &bp_iter, &bp->k_i, 0); } static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos) @@ -384,8 +379,6 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st return 0; struct bch_fs *c = trans->c; - struct btree_iter alloc_iter = { NULL }; - struct bkey_s_c alloc_k; CLASS(printbuf, buf)(); int ret = 0; @@ -393,34 +386,35 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) { ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); if (ret) - goto out; + return ret; if (fsck_err(trans, backpointer_to_missing_device, "backpointer for missing device:\n%s", (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) ret = bch2_backpointer_del(trans, k.k->p); - goto out; + return ret; } - alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, bucket, 0); - ret = bkey_err(alloc_k); - if (ret) - goto out; - - if (alloc_k.k->type != KEY_TYPE_alloc_v4) { - ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); + { + CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, 0); + struct bkey_s_c alloc_k = bch2_btree_iter_peek_slot(&alloc_iter); + ret = bkey_err(alloc_k); if (ret) - goto out; + return ret; - if (fsck_err(trans, backpointer_to_missing_alloc, - "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", - alloc_iter.pos.inode, alloc_iter.pos.offset, - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - ret = bch2_backpointer_del(trans, k.k->p); + if (alloc_k.k->type != KEY_TYPE_alloc_v4) { + ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); + if (ret) + return ret; + + if (fsck_err(trans, backpointer_to_missing_alloc, + "backpointer for 
nonexistent alloc key: %llu:%llu:0\n%s", + alloc_iter.pos.inode, alloc_iter.pos.offset, + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + ret = bch2_backpointer_del(trans, k.k->p); + } } -out: fsck_err: - bch2_trans_iter_exit(&alloc_iter); return ret; } @@ -542,17 +536,17 @@ static int check_bp_exists(struct btree_trans *trans, bpos_gt(bp->k.p, s->bp_end)) return 0; - struct btree_iter bp_iter; - struct bkey_s_c bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, bp->k.p, 0); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp->k.p, 0); + struct bkey_s_c bp_k = bch2_btree_iter_peek_slot(&bp_iter); int ret = bkey_err(bp_k); if (ret) - goto err; + return ret; if (bp_k.k->type != KEY_TYPE_backpointer || memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp->v, sizeof(bp->v))) { ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed); if (ret) - goto err; + return ret; goto check_existing_bp; } @@ -560,7 +554,6 @@ out: err: fsck_err: bch2_trans_iter_exit(&other_extent_iter); - bch2_trans_iter_exit(&bp_iter); return ret; check_existing_bp: /* Do we have a backpointer for a different extent? */ @@ -894,7 +887,6 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b if (!ca) return 0; - struct btree_iter iter; struct bkey_s_c bp_k; int ret = 0; for_each_btree_key_max_norestart(trans, iter, BTREE_ID_backpointers, @@ -910,7 +902,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b bp.v->pad)) { ret = bch2_backpointer_del(trans, bp_k.k->p); if (ret) - break; + return ret; need_commit = true; continue; @@ -925,7 +917,6 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b sectors[alloc_counter] += bp.v->bucket_len; }; - bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -1173,17 +1164,13 @@ static int check_bucket_backpointer_pos_mismatch(struct btree_trans *trans, bool *had_mismatch, struct bkey_buf *last_flushed) { - struct btree_iter alloc_iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &alloc_iter, - BTREE_ID_alloc, bucket, - BTREE_ITER_cached); + CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&alloc_iter); int ret = bkey_err(k); if (ret) return ret; - ret = check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed); - bch2_trans_iter_exit(&alloc_iter); - return ret; + return check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed); } int bch2_check_bucket_backpointer_mismatch(struct btree_trans *trans, diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 45c15bdaa6f4..cdf593c59922 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -845,8 +845,8 @@ struct bch_fs { unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)]; u64 btrees_lost_data; } sb; - DARRAY(enum bcachefs_metadata_version) - incompat_versions_requested; + + unsigned long incompat_versions_requested[BITS_TO_LONGS(BCH_VERSION_MINOR(bcachefs_metadata_version_current))]; struct unicode_map *cf_encoding; diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 2f7c384a8c81..8716eedd43fc 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -214,7 +214,7 @@ void bch2_node_pin(struct bch_fs *c, struct btree *b) struct btree_cache *bc = &c->btree_cache; guard(mutex)(&bc->lock); - if (b != btree_node_root(c, b) && !btree_node_pinned(b)) { + if (!btree_node_is_root(c, b) && !btree_node_pinned(b)) { set_btree_node_pinned(b); 
list_move(&b->list, &bc->live[1].list); bc->live[0].nr--; diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h index 649e9dfd178a..035b2cb25077 100644 --- a/fs/bcachefs/btree_cache.h +++ b/fs/bcachefs/btree_cache.h @@ -144,6 +144,14 @@ static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b) return r ? r->b : NULL; } +static inline bool btree_node_is_root(struct bch_fs *c, struct btree *b) +{ + struct btree *root = btree_node_root(c, b); + + BUG_ON(b != root && b->c.level >= root->c.level); + return b == root; +} + const char *bch2_btree_id_str(enum btree_id); /* avoid */ void bch2_btree_id_to_text(struct printbuf *, enum btree_id); void bch2_btree_id_level_to_text(struct printbuf *, enum btree_id, unsigned); diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index ce3c7750a922..e27536d315b1 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -282,6 +282,38 @@ fsck_err: return ret; } +static int btree_check_root_boundaries(struct btree_trans *trans, struct btree *b) +{ + struct bch_fs *c = trans->c; + struct printbuf buf = PRINTBUF; + int ret = 0; + + BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 && + !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key, + b->data->min_key)); + + if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), + trans, btree_node_topology_bad_root_min_key, + "btree root with incorrect min_key%s", buf.buf)) { + ret = set_node_min(c, b, POS_MIN); + if (ret) + goto err; + } + + if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), + trans, btree_node_topology_bad_root_max_key, + "btree root with incorrect min_key%s", buf.buf)) { + ret = set_node_max(c, b, SPOS_MAX); + if (ret) + goto err; + } + +err: +fsck_err: + printbuf_exit(&buf); + return ret; +} + static int btree_repair_node_end(struct btree_trans *trans, struct btree *b, struct btree *child, struct bpos *pulled_from_scan) { @@ -586,7 +618,8 @@ recover: struct btree *b = r->b; btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); - ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan); + ret = btree_check_root_boundaries(trans, b) ?: + bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan); six_unlock_read(&b->c.lock); if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) { diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 7e1046fe478f..a67babf69d39 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -275,9 +275,6 @@ static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) static int __bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { struct btree_trans *trans = iter->trans; - struct btree_iter copy; - struct bkey_s_c prev; - int ret = 0; if (!(iter->flags & BTREE_ITER_filter_snapshots)) return 0; @@ -289,16 +286,16 @@ static int __bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c iter->snapshot, k.k->p.snapshot)); - bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos, - BTREE_ITER_nopreserve| - BTREE_ITER_all_snapshots); - prev = bch2_btree_iter_prev(©); + CLASS(btree_iter, copy)(trans, iter->btree_id, iter->pos, + BTREE_ITER_nopreserve| + BTREE_ITER_all_snapshots); + struct bkey_s_c prev = bch2_btree_iter_prev(©); if (!prev.k) - goto out; + return 0; - ret = bkey_err(prev); + int ret = bkey_err(prev); if (ret) - goto out; + return ret; if (bkey_eq(prev.k->p, k.k->p) && bch2_snapshot_is_ancestor(trans->c, iter->snapshot, @@ -314,9 +311,8 @@ static int __bch2_btree_iter_verify_ret(struct 
btree_iter *iter, struct bkey_s_c iter->snapshot, buf1.buf, buf2.buf); } -out: - bch2_trans_iter_exit(©); - return ret; + + return 0; } void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, @@ -2455,7 +2451,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en } if (bkey_whiteout(k.k) && - !(iter->flags & BTREE_ITER_key_cache_fill)) { + !(iter->flags & BTREE_ITER_nofilter_whiteouts)) { search_key = bkey_successor(iter, k.k->p); continue; } @@ -2871,7 +2867,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) if (unlikely(k.k->type == KEY_TYPE_whiteout && (iter->flags & BTREE_ITER_filter_snapshots) && - !(iter->flags & BTREE_ITER_key_cache_fill))) + !(iter->flags & BTREE_ITER_nofilter_whiteouts))) iter->k.type = KEY_TYPE_deleted; } else { struct bpos next; @@ -3127,11 +3123,12 @@ void bch2_trans_iter_exit(struct btree_iter *iter) void bch2_trans_iter_init_outlined(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree_id, struct bpos pos, - enum btree_iter_update_trigger_flags flags) + enum btree_iter_update_trigger_flags flags, + unsigned long ip) { bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, bch2_btree_iter_flags(trans, btree_id, 0, flags), - _RET_IP_); + ip); } void bch2_trans_node_iter_init(struct btree_trans *trans, diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 9fcfd00c88f7..b117cb5d7f94 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -535,7 +535,8 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans, void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *, enum btree_id, struct bpos, - enum btree_iter_update_trigger_flags); + enum btree_iter_update_trigger_flags, + unsigned long ip); static inline void bch2_trans_iter_init(struct btree_trans *trans, struct btree_iter *iter, @@ -546,11 +547,25 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans, __builtin_constant_p(flags)) bch2_trans_iter_init_common(trans, iter, btree, pos, 0, 0, bch2_btree_iter_flags(trans, btree, 0, flags), - _THIS_IP_); + _RET_IP_); else - bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags); + bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags, _RET_IP_); } +#define bch2_trans_iter_class_init(_trans, _btree, _pos, _flags) \ +({ \ + struct btree_iter iter; \ + bch2_trans_iter_init(_trans, &iter, (_btree), (_pos), (_flags)); \ + iter; \ +}) + +DEFINE_CLASS(btree_iter, struct btree_iter, + bch2_trans_iter_exit(&_T), + bch2_trans_iter_class_init(trans, btree, pos, flags), + struct btree_trans *trans, + enum btree_id btree, struct bpos pos, + enum btree_iter_update_trigger_flags flags); + void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *, enum btree_id, struct bpos, unsigned, unsigned, @@ -639,7 +654,7 @@ static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans, k = bch2_btree_iter_peek_slot(iter); if (!bkey_err(k) && type && k.k->type != type) - k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch); + k = bkey_s_c_err(bch_err_throw(trans->c, ENOENT_bkey_type_mismatch)); if (unlikely(bkey_err(k))) bch2_trans_iter_exit(iter); return k; @@ -653,9 +668,18 @@ static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans, return __bch2_bkey_get_iter(trans, iter, btree, pos, flags, 0); } -#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\ - bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter, \ - 
_btree_id, _pos, _flags, KEY_TYPE_##_type)) +static inline struct bkey_s_c __bch2_bkey_get_typed(struct btree_iter *iter, + enum bch_bkey_type type) +{ + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); + + if (!bkey_err(k) && type && k.k->type != type) + k = bkey_s_c_err(bch_err_throw(iter->trans->c, ENOENT_bkey_type_mismatch)); + return k; +} + +#define bch2_bkey_get_typed(_iter, _type) \ + bkey_s_c_to_##_type(__bch2_bkey_get_typed(_iter, KEY_TYPE_##_type)) static inline void __bkey_val_copy(void *dst_v, unsigned dst_size, struct bkey_s_c src_k) { @@ -677,14 +701,11 @@ static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans, enum bch_bkey_type type, unsigned val_size, void *val) { - struct btree_iter iter; - struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree, pos, flags, type); + CLASS(btree_iter, iter)(trans, btree, pos, flags); + struct bkey_s_c k = __bch2_bkey_get_typed(&iter, type); int ret = bkey_err(k); - if (!ret) { + if (!ret) __bkey_val_copy(val, val_size, k); - bch2_trans_iter_exit(&iter); - } - return ret; } @@ -806,7 +827,7 @@ transaction_restart: \ if (!_ret2) \ bch2_trans_verify_not_restarted(_trans, _restart_count);\ \ - _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \ + _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \ }) #define for_each_btree_key_max_continue(_trans, _iter, \ @@ -832,48 +853,37 @@ transaction_restart: \ #define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \ for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do) -#define for_each_btree_key_max(_trans, _iter, _btree_id, \ - _start, _end, _flags, _k, _do) \ -({ \ - bch2_trans_begin(trans); \ - \ - struct btree_iter _iter; \ - bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - \ - int _ret = for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\ - bch2_trans_iter_exit(&(_iter)); \ - _ret; \ +#define for_each_btree_key_max(_trans, _iter, _btree_id, \ + _start, _end, _flags, _k, _do) \ +({ \ + bch2_trans_begin(trans); \ + \ + CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \ + for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do); \ }) -#define for_each_btree_key(_trans, _iter, _btree_id, \ - _start, _flags, _k, _do) \ - for_each_btree_key_max(_trans, _iter, _btree_id, _start, \ - SPOS_MAX, _flags, _k, _do) +#define for_each_btree_key(_trans, _iter, _btree_id, _start, _flags, _k, _do) \ + for_each_btree_key_max(_trans, _iter, _btree_id, _start, SPOS_MAX, _flags, _k, _do) -#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \ - _start, _flags, _k, _do) \ -({ \ - struct btree_iter _iter; \ - struct bkey_s_c _k; \ - int _ret3 = 0; \ - \ - bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - \ - do { \ - _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_prev_type(&(_iter), \ - (_flags)); \ - if (!(_k).k) \ - break; \ - \ - bkey_err(_k) ?: (_do); \ - })); \ - } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \ - \ - bch2_trans_iter_exit(&(_iter)); \ - _ret3; \ +#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \ + _start, _flags, _k, _do) \ +({ \ + int _ret3 = 0; \ + \ + CLASS(btree_iter, iter)((_trans), (_btree_id), (_start), (_flags)); \ + \ + do { \ + _ret3 = lockrestart_do(_trans, ({ \ + struct bkey_s_c _k = \ + bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\ + if (!(_k).k) \ + break; \ + \ + bkey_err(_k) ?: (_do); \ + })); \ + } while (!_ret3 && 
bch2_btree_iter_rewind(&(_iter))); \ + \ + _ret3; \ }) #define for_each_btree_key_commit(_trans, _iter, _btree_id, \ @@ -902,36 +912,35 @@ transaction_restart: \ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); -#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \ - _start, _end, _flags, _k, _ret) \ - for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\ - !((_ret) = bkey_err(_k)) && (_k).k; \ +#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \ + _start, _end, _flags, _k, _ret) \ + for (CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ + !((_ret) = bkey_err(_k)) && (_k).k; \ bch2_btree_iter_advance(&(_iter))) -#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\ - for (; \ - (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ - !((_ret) = bkey_err(_k)) && (_k).k; \ - bch2_btree_iter_advance(&(_iter))) - -#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \ - _start, _flags, _k, _ret) \ - for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\ +#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \ + _start, _flags, _k, _ret) \ + for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start, \ SPOS_MAX, _flags, _k, _ret) -#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \ - _start, _flags, _k, _ret) \ - for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ - (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \ - !((_ret) = bkey_err(_k)) && (_k).k; \ - bch2_btree_iter_rewind(&(_iter))) +#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret) \ + for (; \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ + !((_ret) = bkey_err(_k)) && (_k).k; \ + bch2_btree_iter_advance(&(_iter))) -#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \ +#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \ for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret) +#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \ + _start, _flags, _k, _ret) \ + for (CLASS(btree_iter, _iter)((_trans), (_btree_id), \ + (_start), (_flags)); \ + (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \ + !((_ret) = bkey_err(_k)) && (_k).k; \ + bch2_btree_iter_rewind(&(_iter))) + /* * This should not be used in a fastpath, without first trying _do in * nonblocking mode - it will cause excessive transaction restarts and diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index bf376865d0ae..4890cbc88e7c 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -322,19 +322,17 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, } struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k; - int ret; - bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos, - BTREE_ITER_intent| - BTREE_ITER_key_cache_fill| - BTREE_ITER_cached_nofill); + CLASS(btree_iter, iter)(trans, ck_path->btree_id, ck_path->pos, + BTREE_ITER_intent| + BTREE_ITER_nofilter_whiteouts| + BTREE_ITER_key_cache_fill| + BTREE_ITER_cached_nofill); iter.flags &= ~BTREE_ITER_with_journal; - k = bch2_btree_iter_peek_slot(&iter); - ret = bkey_err(k); + struct bkey_s_c k = 
bch2_btree_iter_peek_slot(&iter); + int ret = bkey_err(k); if (ret) - goto err; + return ret; /* Recheck after btree lookup, before allocating: */ ck_path = trans->paths + ck_path_idx; @@ -344,15 +342,13 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, ret = btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k); if (ret) - goto err; + return ret; if (trace_key_cache_fill_enabled()) do_trace_key_cache_fill(trans, ck_path, k); out: /* We're not likely to need this iterator again: */ bch2_set_btree_iter_dontneed(&iter); -err: - bch2_trans_iter_exit(&iter); return ret; } @@ -424,35 +420,34 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct journal *j = &c->journal; - struct btree_iter c_iter, b_iter; struct bkey_cached *ck = NULL; int ret; - bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos, - BTREE_ITER_slots| - BTREE_ITER_intent| - BTREE_ITER_all_snapshots); - bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos, - BTREE_ITER_cached| - BTREE_ITER_intent); + CLASS(btree_iter, b_iter)(trans, key.btree_id, key.pos, + BTREE_ITER_slots| + BTREE_ITER_intent| + BTREE_ITER_all_snapshots); + CLASS(btree_iter, c_iter)(trans, key.btree_id, key.pos, + BTREE_ITER_cached| + BTREE_ITER_intent); b_iter.flags &= ~BTREE_ITER_with_key_cache; ret = bch2_btree_iter_traverse(&c_iter); if (ret) - goto out; + return ret; ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b; if (!ck) - goto out; + return 0; if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { if (evict) goto evict; - goto out; + return 0; } if (journal_seq && ck->journal.seq != journal_seq) - goto out; + return 0; trans->journal_res.seq = ck->journal.seq; @@ -528,8 +523,6 @@ evict: } } out: - bch2_trans_iter_exit(&b_iter); - bch2_trans_iter_exit(&c_iter); return ret; } diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index ffa250008d91..e893eb938bb3 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -229,6 +229,7 @@ struct btree_node_iter { x(snapshot_field) \ x(all_snapshots) \ x(filter_snapshots) \ + x(nofilter_whiteouts) \ x(nopreserve) \ x(cached_nofill) \ x(key_cache_fill) \ @@ -839,15 +840,15 @@ static inline bool btree_node_type_has_triggers(enum btree_node_type type) return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_TRIGGERS; } -static inline bool btree_id_is_extents(enum btree_id btree) -{ - const u64 mask = 0 +static const u64 btree_is_extents_mask = 0 #define x(name, nr, flags, ...) 
|((!!((flags) & BTREE_IS_extents)) << nr) - BCH_BTREE_IDS() +BCH_BTREE_IDS() #undef x - ; +; - return BIT_ULL(btree) & mask; +static inline bool btree_id_is_extents(enum btree_id btree) +{ + return BIT_ULL(btree) & btree_is_extents_mask; } static inline bool btree_node_type_is_extents(enum btree_node_type type) @@ -866,6 +867,11 @@ static inline bool btree_type_has_snapshots(enum btree_id btree) return BIT_ULL(btree) & btree_has_snapshots_mask; } +static inline bool btree_id_is_extents_snapshots(enum btree_id btree) +{ + return BIT_ULL(btree) & btree_has_snapshots_mask & btree_is_extents_mask; +} + static inline bool btree_type_has_snapshot_field(enum btree_id btree) { const u64 mask = 0 diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c index d2efb4b42bb4..6f3b57573cba 100644 --- a/fs/bcachefs/btree_update.c +++ b/fs/bcachefs/btree_update.c @@ -95,7 +95,6 @@ static noinline int extent_back_merge(struct btree_trans *trans, static int need_whiteout_for_snapshot(struct btree_trans *trans, enum btree_id btree_id, struct bpos pos) { - struct btree_iter iter; struct bkey_s_c k; u32 snapshot = pos.snapshot; int ret; @@ -117,7 +116,6 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans, break; } } - bch2_trans_iter_exit(&iter); return ret; } @@ -131,10 +129,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, darray_for_each(*s, id) { pos.snapshot = *id; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, pos, - BTREE_ITER_not_extents| - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_not_extents|BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); if (ret) break; @@ -143,7 +139,6 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i)); ret = PTR_ERR_OR_ZERO(update); if (ret) { - bch2_trans_iter_exit(&iter); break; } @@ -154,7 +149,6 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, update, BTREE_UPDATE_internal_snapshot_node); } - bch2_trans_iter_exit(&iter); if (ret) break; @@ -221,7 +215,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans, return ret; } - if (bkey_le(old.k->p, new.k->p)) { + if (!back_split) { update = bch2_trans_kmalloc(trans, sizeof(*update)); if ((ret = PTR_ERR_OR_ZERO(update))) return ret; @@ -244,9 +238,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans, BTREE_UPDATE_internal_snapshot_node|flags); if (ret) return ret; - } - - if (back_split) { + } else { update = bch2_bkey_make_mut_noupdate(trans, old); if ((ret = PTR_ERR_OR_ZERO(update))) return ret; @@ -268,18 +260,16 @@ static int bch2_trans_update_extent(struct btree_trans *trans, struct bkey_i *insert, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - struct bkey_s_c k; enum btree_id btree_id = orig_iter->btree_id; - int ret = 0; - bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k), - BTREE_ITER_intent| - BTREE_ITER_with_updates| - BTREE_ITER_not_extents); - k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); - if ((ret = bkey_err(k))) - goto err; + CLASS(btree_iter, iter)(trans, btree_id, bkey_start_pos(&insert->k), + BTREE_ITER_intent| + BTREE_ITER_with_updates| + BTREE_ITER_not_extents); + struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); + int ret = bkey_err(k); + if (ret) + return ret; if 
(!k.k) goto out; @@ -287,7 +277,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, if (bch2_bkey_maybe_mergable(k.k, &insert->k)) { ret = extent_front_merge(trans, &iter, k, &insert, flags); if (ret) - goto err; + return ret; } goto next; @@ -298,7 +288,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert)); if (ret) - goto err; + return ret; if (done) goto out; @@ -306,7 +296,7 @@ next: bch2_btree_iter_advance(&iter); k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); if ((ret = bkey_err(k))) - goto err; + return ret; if (!k.k) goto out; } @@ -314,15 +304,12 @@ next: if (bch2_bkey_maybe_mergable(&insert->k, k.k)) { ret = extent_back_merge(trans, &iter, insert, k); if (ret) - goto err; + return ret; } out: - if (!bkey_deleted(&insert->k)) - ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags); -err: - bch2_trans_iter_exit(&iter); - - return ret; + return !bkey_deleted(&insert->k) + ? bch2_btree_insert_nonextent(trans, btree_id, insert, flags) + : 0; } static inline struct btree_insert_entry * @@ -629,29 +616,21 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans, enum btree_id btree, struct bkey_i *k, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - int ret; - - bch2_trans_iter_init(trans, &iter, btree, k->k.p, - BTREE_ITER_cached| - BTREE_ITER_not_extents| - BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(&iter) ?: + CLASS(btree_iter, iter)(trans, btree, k->k.p, + BTREE_ITER_cached| + BTREE_ITER_not_extents| + BTREE_ITER_intent); + return bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, k, flags); - bch2_trans_iter_exit(&iter); - return ret; } -int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id, +int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id btree, struct bkey_i *k, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k), - BTREE_ITER_intent|flags); - int ret = bch2_btree_iter_traverse(&iter) ?: - bch2_trans_update(trans, &iter, k, flags); - bch2_trans_iter_exit(&iter); - return ret; + CLASS(btree_iter, iter)(trans, btree, bkey_start_pos(&k->k), + BTREE_ITER_intent|flags); + return bch2_btree_iter_traverse(&iter) ?: + bch2_trans_update(trans, &iter, k, flags); } /** @@ -693,30 +672,24 @@ int bch2_btree_delete(struct btree_trans *trans, enum btree_id btree, struct bpos pos, enum btree_iter_update_trigger_flags flags) { - struct btree_iter iter; - int ret; - - bch2_trans_iter_init(trans, &iter, btree, pos, - BTREE_ITER_cached| - BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(&iter) ?: + CLASS(btree_iter, iter)(trans, btree, pos, + BTREE_ITER_cached| + BTREE_ITER_intent); + return bch2_btree_iter_traverse(&iter) ?: bch2_btree_delete_at(trans, &iter, flags); - bch2_trans_iter_exit(&iter); - - return ret; } -int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id, +int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id btree, struct bpos start, struct bpos end, enum btree_iter_update_trigger_flags flags, u64 *journal_seq) { u32 restart_count = trans->restart_count; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; - bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent|flags); + CLASS(btree_iter, iter)(trans, btree, start, BTREE_ITER_intent|flags); + while ((k = bch2_btree_iter_peek_max(&iter, end)).k) { 
struct disk_reservation disk_res = bch2_disk_reservation_init(trans->c, 0); @@ -767,7 +740,6 @@ err: if (ret) break; } - bch2_trans_iter_exit(&iter); return ret ?: trans_was_restarted(trans, restart_count); } @@ -808,13 +780,10 @@ int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter, int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, struct bpos pos, bool set) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, pos, BTREE_ITER_intent); - int ret = bch2_btree_iter_traverse(&iter) ?: - bch2_btree_bit_mod_iter(trans, &iter, set); - bch2_trans_iter_exit(&iter); - return ret; + return bch2_btree_iter_traverse(&iter) ?: + bch2_btree_bit_mod_iter(trans, &iter, set); } int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree, diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 6790e0254a63..663739db82b1 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -370,72 +370,52 @@ static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _iter, _k, _flags,\ KEY_TYPE_##_type, sizeof(struct bkey_i_##_type))) -static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans, - struct btree_iter *iter, - unsigned btree_id, struct bpos pos, - enum btree_iter_update_trigger_flags flags, +static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_iter *iter, unsigned type, unsigned min_bytes) { - struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter, - btree_id, pos, flags|BTREE_ITER_intent, type); - struct bkey_i *ret = IS_ERR(k.k) + struct bkey_s_c k = __bch2_bkey_get_typed(iter, type); + return IS_ERR(k.k) ? 
ERR_CAST(k.k) - : __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes); - if (IS_ERR(ret)) - bch2_trans_iter_exit(iter); - return ret; + : __bch2_bkey_make_mut_noupdate(iter->trans, k, 0, min_bytes); } -static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans, - struct btree_iter *iter, - unsigned btree_id, struct bpos pos, - enum btree_iter_update_trigger_flags flags) +static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_iter *iter) { - return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0); + return __bch2_bkey_get_mut_noupdate(iter, 0, 0); } static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans, - struct btree_iter *iter, - unsigned btree_id, struct bpos pos, + enum btree_id btree, struct bpos pos, enum btree_iter_update_trigger_flags flags, unsigned type, unsigned min_bytes) { - struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter, - btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes); - int ret; - + CLASS(btree_iter, iter)(trans, btree, pos, flags|BTREE_ITER_intent); + struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(&iter, type, min_bytes); if (IS_ERR(mut)) return mut; - - ret = bch2_trans_update(trans, iter, mut, flags); - if (ret) { - bch2_trans_iter_exit(iter); + int ret = bch2_trans_update(trans, &iter, mut, flags); + if (ret) return ERR_PTR(ret); - } - return mut; } static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans, - struct btree_iter *iter, unsigned btree_id, struct bpos pos, enum btree_iter_update_trigger_flags flags, unsigned min_bytes) { - return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes); + return __bch2_bkey_get_mut(trans, btree_id, pos, flags, 0, min_bytes); } static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans, - struct btree_iter *iter, unsigned btree_id, struct bpos pos, enum btree_iter_update_trigger_flags flags) { - return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0); + return __bch2_bkey_get_mut(trans, btree_id, pos, flags, 0, 0); } -#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\ - bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter, \ - _btree_id, _pos, _flags, \ +#define bch2_bkey_get_mut_typed(_trans, _btree_id, _pos, _flags, _type) \ + bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _btree_id, _pos, _flags, \ KEY_TYPE_##_type, sizeof(struct bkey_i_##_type))) static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter, diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 5f4f82967105..76897cf15946 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -66,6 +66,10 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) bkey_init(&prev.k->k); bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); + /* + * Don't use btree_node_is_root(): we're called by btree split, after + * creating a new root but before setting it + */ if (b == btree_node_root(c, b)) { if (!bpos_eq(b->data->min_key, POS_MIN)) { bch2_log_msg_start(c, &buf); @@ -1655,7 +1659,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, int ret = 0; bch2_verify_btree_nr_keys(b); - BUG_ON(!parent && (b != btree_node_root(c, b))); + BUG_ON(!parent && !btree_node_is_root(c, b)); BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1)); ret = bch2_btree_node_check_topology(trans, b); @@ 
-2527,7 +2531,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, if (ret) goto err; } else { - BUG_ON(btree_node_root(c, b) != b); + BUG_ON(!btree_node_is_root(c, b)); struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(new_key->k.u64s)); diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 036b718ae975..afad11831e1d 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -203,19 +203,14 @@ static int btree_write_buffered_insert(struct btree_trans *trans, struct btree_write_buffered_key *wb) { - struct btree_iter iter; - int ret; - - bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k), - BTREE_ITER_cached|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, wb->btree, bkey_start_pos(&wb->k.k), + BTREE_ITER_cached|BTREE_ITER_intent); trans->journal_res.seq = wb->journal_seq; - ret = bch2_btree_iter_traverse(&iter) ?: + return bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, &wb->k, BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(&iter); - return ret; } static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb) diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 0a357005e9e8..87a6f4dce296 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -663,24 +663,23 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans, struct bch_fs *c = trans->c; if (flags & BTREE_TRIGGER_transactional) { - struct btree_iter iter; - struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter, - BTREE_ID_stripes, POS(0, p.ec.idx), - BTREE_ITER_with_updates, stripe); + struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, + BTREE_ID_stripes, POS(0, p.ec.idx), + BTREE_ITER_with_updates, + stripe); int ret = PTR_ERR_OR_ZERO(s); if (unlikely(ret)) { bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans, "pointer to nonexistent stripe %llu", (u64) p.ec.idx); - goto err; + return ret; } if (!bch2_ptr_matches_stripe(&s->v, p)) { bch2_trans_inconsistent(trans, "stripe pointer doesn't match stripe %llu", (u64) p.ec.idx); - ret = bch_err_throw(c, trigger_stripe_pointer); - goto err; + return bch_err_throw(c, trigger_stripe_pointer); } stripe_blockcount_set(&s->v, p.ec.block, @@ -692,10 +691,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans, acc.type = BCH_DISK_ACCOUNTING_replicas; bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i)); acc.replicas.data_type = data_type; - ret = bch2_disk_accounting_mod(trans, &acc, §ors, 1, false); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_disk_accounting_mod(trans, &acc, §ors, 1, false); } if (flags & BTREE_TRIGGER_gc) { diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 91edec7706b2..01838a3a189d 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -258,11 +258,10 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, struct bch_write_op *op) { struct bch_fs *c = op->c; - struct btree_iter iter; struct data_update *m = container_of(op, struct data_update, op); int ret = 0; - bch2_trans_iter_init(trans, &iter, m->btree_id, + CLASS(btree_iter, iter)(trans, m->btree_id, bkey_start_pos(&bch2_keylist_front(&op->insert_keys)->k), BTREE_ITER_slots|BTREE_ITER_intent); @@ -487,7 +486,6 @@ nowork: goto next; } out: - bch2_trans_iter_exit(&iter); BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart)); return ret; } diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index 
3bcbb677a808..cb44b35e0f1d 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -633,7 +633,6 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir, int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 snapshot) { - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -647,7 +646,6 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty); break; } - bch2_trans_iter_exit(&iter); return ret; } @@ -721,7 +719,6 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr, struct bch_inode_unpacked *inode) { - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -737,31 +734,28 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr, ret = bch_err_throw(trans->c, ENOENT_inode); found: bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr); - bch2_trans_iter_exit(&iter); return ret; } int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bch_inode_unpacked dir_inode; - struct bch_hash_info dir_hash_info; - int ret; - ret = lookup_first_inode(trans, pos.inode, &dir_inode); + struct bch_inode_unpacked dir_inode; + int ret = lookup_first_inode(trans, pos.inode, &dir_inode); if (ret) goto err; - dir_hash_info = bch2_hash_info_init(c, &dir_inode); + { + struct bch_hash_info dir_hash_info = bch2_hash_info_init(c, &dir_inode); - bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, pos, BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(&iter) ?: - bch2_hash_delete_at(trans, bch2_dirent_hash_desc, - &dir_hash_info, &iter, - BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(&iter); + ret = bch2_btree_iter_traverse(&iter) ?: + bch2_hash_delete_at(trans, bch2_dirent_hash_desc, + &dir_hash_info, &iter, + BTREE_UPDATE_internal_snapshot_node); + } err: bch_err_fn(c, ret); return ret; diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index e735b1e9b275..c2840cb674b2 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -785,23 +785,15 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, static int get_stripe_key_trans(struct btree_trans *trans, u64 idx, struct ec_stripe_buf *stripe) { - struct btree_iter iter; - struct bkey_s_c k; - int ret; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, - POS(0, idx), BTREE_ITER_slots); - ret = bkey_err(k); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_slots); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); + int ret = bkey_err(k); if (ret) - goto err; - if (k.k->type != KEY_TYPE_stripe) { - ret = -ENOENT; - goto err; - } + return ret; + if (k.k->type != KEY_TYPE_stripe) + return -ENOENT; bkey_reassemble(&stripe->key, k); -err: - bch2_trans_iter_exit(&iter); - return ret; + return 0; } /* recovery read path: */ @@ -950,13 +942,11 @@ static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s) static int ec_stripe_delete(struct btree_trans *trans, u64 idx) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_stripes, POS(0, idx), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; /* * We expect write buffer 
races here @@ -965,10 +955,9 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx) if (k.k->type == KEY_TYPE_stripe && !bch2_stripe_is_open(trans->c, idx) && stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1) - ret = bch2_btree_delete_at(trans, &iter, 0); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_btree_delete_at(trans, &iter, 0); + + return 0; } /* @@ -1009,20 +998,17 @@ static int ec_stripe_key_update(struct btree_trans *trans, struct bch_fs *c = trans->c; bool create = !old; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, - new->k.p, BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, new->k.p, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe), c, "error %s stripe: got existing key type %s", create ? "creating" : "updating", - bch2_bkey_types[k.k->type])) { - ret = -EINVAL; - goto err; - } + bch2_bkey_types[k.k->type])) + return -EINVAL; if (k.k->type == KEY_TYPE_stripe) { const struct bch_stripe *v = bkey_s_c_to_stripe(k).v; @@ -1042,8 +1028,7 @@ static int ec_stripe_key_update(struct btree_trans *trans, prt_str(&buf, "\nnew: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i)); bch2_fs_inconsistent(c, "%s", buf.buf); - ret = -EINVAL; - goto err; + return -EINVAL; } /* @@ -1061,10 +1046,7 @@ static int ec_stripe_key_update(struct btree_trans *trans, } } - ret = bch2_trans_update(trans, &iter, &new->k_i, 0); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_trans_update(trans, &iter, &new->k_i, 0); } static int ec_stripe_update_extent(struct btree_trans *trans, @@ -1785,20 +1767,19 @@ static int __get_existing_stripe(struct btree_trans *trans, { struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_stripes, POS(0, idx), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_nopreserve); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; /* We expect write buffer races here */ if (k.k->type != KEY_TYPE_stripe) - goto out; + return 0; struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k); if (stripe_lru_pos(s.v) <= 1) - goto out; + return 0; if (s.v->disk_label == head->disk_label && s.v->algorithm == head->algo && @@ -1806,13 +1787,10 @@ static int __get_existing_stripe(struct btree_trans *trans, le16_to_cpu(s.v->sectors) == head->blocksize && bch2_try_open_stripe(c, head->s, idx)) { bkey_reassemble(&stripe->key, k); - ret = 1; + return 1; } -out: - bch2_set_btree_iter_dontneed(&iter); -err: - bch2_trans_iter_exit(&iter); - return ret; + + return 0; } static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new *s) @@ -1871,7 +1849,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri if (may_create_new_stripe(c)) return -1; - struct btree_iter lru_iter; struct bkey_s_c lru_k; int ret = 0; @@ -1883,7 +1860,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri if (ret) break; } - bch2_trans_iter_exit(&lru_iter); if (!ret) ret = bch_err_throw(c, stripe_alloc_blocked); if (ret == 1) @@ -1898,7 +1874,6 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st struct ec_stripe_new *s) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct 
bkey_s_c k; struct bpos min_pos = POS(0, 1); struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint)); @@ -1919,6 +1894,8 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st */ for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos, BTREE_ITER_slots|BTREE_ITER_intent, k, ret) { + c->ec_stripe_hint = iter.pos.offset; + if (bkey_gt(k.k->p, POS(0, U32_MAX))) { if (start_pos.offset) { start_pos = min_pos; @@ -1931,28 +1908,18 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st } if (bkey_deleted(k.k) && - bch2_try_open_stripe(c, s, k.k->p.offset)) + bch2_try_open_stripe(c, s, k.k->p.offset)) { + ret = ec_stripe_mem_alloc(trans, &iter); + if (ret) + bch2_stripe_close(c, s); + s->new_stripe.key.k.p = iter.pos; break; + } } - c->ec_stripe_hint = iter.pos.offset; - if (ret) - goto err; - - ret = ec_stripe_mem_alloc(trans, &iter); - if (ret) { - bch2_stripe_close(c, s); - goto err; - } - - s->new_stripe.key.k.p = iter.pos; -out: - bch2_trans_iter_exit(&iter); + bch2_disk_reservation_put(c, &s->res); return ret; -err: - bch2_disk_reservation_put(c, &s->res); - goto out; } struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, @@ -2146,17 +2113,13 @@ static int bch2_invalidate_stripe_to_dev_from_alloc(struct btree_trans *trans, s return bch_err_throw(c, invalidate_stripe_to_dev); } - struct btree_iter iter; - struct bkey_s_c_stripe s = - bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe), - BTREE_ITER_slots, stripe); + CLASS(btree_iter, iter)(trans, BTREE_ID_stripes, POS(0, a->stripe), 0); + struct bkey_s_c_stripe s = bch2_bkey_get_typed(&iter, stripe); int ret = bkey_err(s); if (ret) return ret; - ret = bch2_invalidate_stripe_to_dev(trans, &iter, s.s_c, k_a.k->p.inode, flags); - bch2_trans_iter_exit(&iter); - return ret; + return bch2_invalidate_stripe_to_dev(trans, &iter, s.s_c, k_a.k->p.inode, flags); } int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx, unsigned flags) diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c index 0c1f6f2ec02c..c4b0ea1adaa8 100644 --- a/fs/bcachefs/extent_update.c +++ b/fs/bcachefs/extent_update.c @@ -68,7 +68,6 @@ static int count_iters_for_insert(struct btree_trans *trans, u64 idx = REFLINK_P_IDX(p.v); unsigned sectors = bpos_min(*end, p.k->p).offset - bkey_start_offset(p.k); - struct btree_iter iter; struct bkey_s_c r_k; for_each_btree_key_norestart(trans, iter, @@ -88,11 +87,9 @@ static int count_iters_for_insert(struct btree_trans *trans, r_k.k->p.offset - idx); *end = bpos_min(*end, pos); - ret = 1; - break; + return 1; } } - bch2_trans_iter_exit(&iter); break; } diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c index b5b3a92cee00..0005569ecace 100644 --- a/fs/bcachefs/fs-io-buffered.c +++ b/fs/bcachefs/fs-io-buffered.c @@ -157,7 +157,6 @@ static void bchfs_read(struct btree_trans *trans, struct readpages_iter *readpages_iter) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_buf sk; int flags = BCH_READ_retry_if_stale| BCH_READ_may_promote; @@ -167,7 +166,7 @@ static void bchfs_read(struct btree_trans *trans, bch2_bkey_buf_init(&sk); bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inum.inum, rbio->bio.bi_iter.bi_sector), BTREE_ITER_slots); while (1) { @@ -251,7 +250,6 @@ err: !bch2_err_matches(ret, BCH_ERR_transaction_restart)) break; } - bch2_trans_iter_exit(&iter); if 
(ret) { CLASS(printbuf, buf)(); diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c index 8d5b2468f4cd..79823234160f 100644 --- a/fs/bcachefs/fs-io-direct.c +++ b/fs/bcachefs/fs-io-direct.c @@ -253,11 +253,9 @@ static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum, unsigned nr_replicas, bool compressed) { CLASS(btree_trans, trans)(c); - struct btree_iter iter; struct bkey_s_c k; u64 end = offset + size; u32 snapshot; - bool ret = true; int err; retry: bch2_trans_begin(trans); @@ -269,24 +267,21 @@ retry: for_each_btree_key_norestart(trans, iter, BTREE_ID_extents, SPOS(inum.inum, offset, snapshot), BTREE_ITER_slots, k, err) { + offset = iter.pos.offset; + if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end))) break; if (k.k->p.snapshot != snapshot || nr_replicas > bch2_bkey_replicas(c, k) || - (!compressed && bch2_bkey_sectors_compressed(k))) { - ret = false; - break; - } + (!compressed && bch2_bkey_sectors_compressed(k))) + return false; } - - offset = iter.pos.offset; - bch2_trans_iter_exit(&iter); err: if (bch2_err_matches(err, BCH_ERR_transaction_restart)) goto retry; - return err ? false : ret; + return !err; } static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio) diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 92fe1de6e4a9..de0d965f3fde 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -626,15 +626,14 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, u64 start_sector, u64 end_sector) { struct bch_fs *c = inode->v.i_sb->s_fs_info; - CLASS(btree_trans, trans)(c); - struct btree_iter iter; struct bpos end_pos = POS(inode->v.i_ino, end_sector); struct bch_io_opts opts; int ret = 0; bch2_inode_opts_get(&opts, c, &inode->ei_inode); - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, + CLASS(btree_trans, trans)(c); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inode->v.i_ino, start_sector), BTREE_ITER_slots|BTREE_ITER_intent); @@ -747,7 +746,6 @@ bkey_err: bch2_quota_reservation_put(c, inode, "a_res); } - bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index bf75eed72e2d..3b289f696612 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -1397,21 +1397,20 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, if (ret) return ret; - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - SPOS(inode->ei_inum.inum, start, snapshot), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, + SPOS(inode->ei_inum.inum, start, snapshot), 0); struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(inode->ei_inum.inum, end)); ret = bkey_err(k); if (ret) - goto err; + return ret; u64 pagecache_end = k.k ? 
max(start, bkey_start_offset(k.k)) : end; ret = bch2_next_fiemap_pagecache_extent(trans, inode, start, pagecache_end, cur); if (ret) - goto err; + return ret; struct bpos pagecache_start = bkey_start_pos(&cur->kbuf.k->k); @@ -1447,7 +1446,7 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, &cur->kbuf); if (ret) - goto err; + return ret; struct bkey_i *k = cur->kbuf.k; sectors = min_t(unsigned, sectors, k->k.size - offset_into_extent); @@ -1459,9 +1458,8 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, k->k.p = iter.pos; k->k.p.offset += k->k.size; } -err: - bch2_trans_iter_exit(&iter); - return ret; + + return 0; } static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, @@ -1948,8 +1946,6 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child struct bch_inode_info *inode = to_bch_ei(child->d_inode); struct bch_inode_info *dir = to_bch_ei(parent->d_inode); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_iter iter1; - struct btree_iter iter2; struct bkey_s_c k; struct bkey_s_c_dirent d; struct bch_inode_unpacked inode_u; @@ -1963,10 +1959,10 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child return -EINVAL; CLASS(btree_trans, trans)(c); - bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents, - POS(dir->ei_inode.bi_inum, 0), 0); - bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents, - POS(dir->ei_inode.bi_inum, 0), 0); + CLASS(btree_iter, iter1)(trans, BTREE_ID_dirents, + POS(dir->ei_inode.bi_inum, 0), 0); + CLASS(btree_iter, iter2)(trans, BTREE_ID_dirents, + POS(dir->ei_inode.bi_inum, 0), 0); retry: bch2_trans_begin(trans); @@ -2039,8 +2035,6 @@ err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_iter_exit(&iter1); - bch2_trans_iter_exit(&iter2); return ret; } diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index de87a0e820bd..6ccea09243ab 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -137,7 +137,6 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans, static int find_snapshot_tree_subvol(struct btree_trans *trans, u32 tree_id, u32 *subvol) { - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -151,13 +150,11 @@ static int find_snapshot_tree_subvol(struct btree_trans *trans, if (s.v->subvol) { *subvol = le32_to_cpu(s.v->subvol); - goto found; + return 0; } } - ret = bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol); -found: - bch2_trans_iter_exit(&iter); - return ret; + + return ret ?: bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol); } /* Get lost+found, create if it doesn't exist: */ @@ -193,8 +190,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, return ret; if (!subvol.inode) { - struct btree_iter iter; - struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter, + struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, BTREE_ID_subvolumes, POS(0, subvolid), 0, subvolume); ret = PTR_ERR_OR_ZERO(subvol); @@ -202,7 +198,6 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, return ret; subvol->v.inode = cpu_to_le64(reattaching_inum); - bch2_trans_iter_exit(&iter); } subvol_inum root_inum = { @@ -333,11 +328,11 @@ static inline bool inode_should_reattach(struct bch_inode_unpacked *inode) static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot) { - struct btree_iter iter; - struct bkey_s_c k = 
bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents, - SPOS(d_pos.inode, d_pos.offset, snapshot), - BTREE_ITER_intent| - BTREE_ITER_with_updates); + CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, + SPOS(d_pos.inode, d_pos.offset, snapshot), + BTREE_ITER_intent| + BTREE_ITER_with_updates); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; @@ -350,16 +345,15 @@ static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); ret = PTR_ERR_OR_ZERO(k); if (ret) - goto err; + return ret; bkey_init(&k->k); k->k.type = KEY_TYPE_whiteout; k->k.p = iter.pos; - ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node); + return bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node); } -err: - bch2_trans_iter_exit(&iter); - return ret; + + return 0; } static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode) @@ -373,9 +367,8 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * if (inode->bi_subvol) { inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL; - struct btree_iter subvol_iter; struct bkey_i_subvolume *subvol = - bch2_bkey_get_mut_typed(trans, &subvol_iter, + bch2_bkey_get_mut_typed(trans, BTREE_ID_subvolumes, POS(0, inode->bi_subvol), 0, subvolume); ret = PTR_ERR_OR_ZERO(subvol); @@ -383,7 +376,6 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * return ret; subvol->v.fs_path_parent = BCACHEFS_ROOT_SUBVOL; - bch2_trans_iter_exit(&subvol_iter); u64 root_inum; ret = subvol_lookup(trans, inode->bi_parent_subvol, @@ -455,7 +447,6 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * */ if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) { CLASS(snapshot_id_list, whiteouts_done)(); - struct btree_iter iter; struct bkey_s_c k; darray_init(&whiteouts_done); @@ -474,19 +465,16 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * struct bch_inode_unpacked child_inode; ret = bch2_inode_unpack(k, &child_inode); if (ret) - break; + return ret; if (!inode_should_reattach(&child_inode)) { - ret = maybe_delete_dirent(trans, - SPOS(lostfound.bi_inum, inode->bi_dir_offset, - dirent_snapshot), - k.k->p.snapshot); + ret = maybe_delete_dirent(trans, + SPOS(lostfound.bi_inum, inode->bi_dir_offset, + dirent_snapshot), + k.k->p.snapshot) ?: + snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); if (ret) - break; - - ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); - if (ret) - break; + return ret; } else { iter.snapshot = k.k->p.snapshot; child_inode.bi_dir = inode->bi_dir; @@ -495,10 +483,9 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * ret = bch2_inode_write_flags(trans, &iter, &child_inode, BTREE_UPDATE_internal_snapshot_node); if (ret) - break; + return ret; } } - bch2_trans_iter_exit(&iter); } return ret; @@ -508,7 +495,11 @@ static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans, struct btree_iter *iter, struct bpos pos) { - return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent); + bch2_trans_iter_init(trans, iter, BTREE_ID_dirents, pos, 0); + struct bkey_s_c_dirent d = bch2_bkey_get_typed(iter, dirent); + if (bkey_err(d.s_c)) + bch2_trans_iter_exit(iter); + return d; } static int remove_backpointer(struct btree_trans *trans, @@ -607,8 +598,7 @@ static int reconstruct_subvol(struct 
btree_trans *trans, u32 snapshotid, u32 sub if (ret) return ret; - struct btree_iter iter; - struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter, + struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshots, POS(0, snapshotid), 0, snapshot); ret = PTR_ERR_OR_ZERO(s); @@ -620,9 +610,8 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub s->v.subvol = cpu_to_le32(subvolid); SET_BCH_SNAPSHOT_SUBVOL(&s->v, true); - bch2_trans_iter_exit(&iter); - struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter, + struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshot_trees, POS(0, snapshot_tree), 0, snapshot_tree); ret = PTR_ERR_OR_ZERO(st); @@ -632,8 +621,6 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub if (!st->v.master_subvol) st->v.master_subvol = cpu_to_le32(subvolid); - - bch2_trans_iter_exit(&iter); return 0; } @@ -645,11 +632,8 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 switch (btree) { case BTREE_ID_extents: { - struct btree_iter iter = {}; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0)); - bch2_trans_iter_exit(&iter); int ret = bkey_err(k); if (ret) return ret; @@ -847,7 +831,6 @@ static int get_inodes_all_snapshots(struct btree_trans *trans, struct inode_walker *w, u64 inum) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -867,7 +850,6 @@ static int get_inodes_all_snapshots(struct btree_trans *trans, if (ret) break; } - bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -883,7 +865,6 @@ static int get_visible_inodes(struct btree_trans *trans, u64 inum) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -907,7 +888,6 @@ static int get_visible_inodes(struct btree_trans *trans, if (ret) break; } - bch2_trans_iter_exit(&iter); return ret; } @@ -943,9 +923,10 @@ lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, str bkey_init(&whiteout.k); whiteout.k.type = KEY_TYPE_whiteout; whiteout.k.p = SPOS(0, i->inode.bi_inum, k.k->p.snapshot); - ret = bch2_btree_insert_nonextent(trans, BTREE_ID_inodes, - &whiteout, - BTREE_UPDATE_internal_snapshot_node); + ret = bch2_btree_insert_trans(trans, BTREE_ID_inodes, + &whiteout, + BTREE_ITER_cached| + BTREE_UPDATE_internal_snapshot_node); } if (ret) @@ -1046,11 +1027,9 @@ static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans, static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0); - int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set; - bch2_trans_iter_exit(&iter); - return ret; + CLASS(btree_iter, iter)(trans, BTREE_ID_deleted_inodes, p, 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); + return bkey_err(k) ?: k.k->type == KEY_TYPE_set; } static int check_inode_dirent_inode(struct btree_trans *trans, @@ -1348,7 +1327,6 @@ static int find_oldest_inode_needs_reattach(struct btree_trans *trans, struct bch_inode_unpacked *inode) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -1380,7 +1358,6 @@ static int find_oldest_inode_needs_reattach(struct btree_trans *trans, *inode = 
parent_inode; } - bch2_trans_iter_exit(&iter); return ret; } @@ -1463,13 +1440,12 @@ static int check_key_has_inode(struct btree_trans *trans, { struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); - struct btree_iter iter2 = {}; int ret = PTR_ERR_OR_ZERO(i); if (ret) return ret; if (k.k->type == KEY_TYPE_whiteout) - goto out; + return 0; bool have_inode = i && !i->whiteout; @@ -1477,7 +1453,7 @@ static int check_key_has_inode(struct btree_trans *trans, goto reconstruct; if (have_inode && btree_matches_i_mode(iter->btree_id, i->inode.bi_mode)) - goto out; + return 0; prt_printf(&buf, ", "); @@ -1557,7 +1533,6 @@ static int check_key_has_inode(struct btree_trans *trans, out: err: fsck_err: - bch2_trans_iter_exit(&iter2); bch_err_fn(c, ret); return ret; delete: @@ -1583,7 +1558,6 @@ static int maybe_reconstruct_inum_btree(struct btree_trans *trans, u64 inum, u32 snapshot, enum btree_id btree) { - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -1594,7 +1568,6 @@ static int maybe_reconstruct_inum_btree(struct btree_trans *trans, ret = 1; break; } - bch2_trans_iter_exit(&iter); if (ret <= 0) return ret; @@ -1740,15 +1713,15 @@ static int overlapping_extents_found(struct btree_trans *trans, { struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); - struct btree_iter iter1, iter2 = {}; + struct btree_iter iter2 = {}; struct bkey_s_c k1, k2; int ret; BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2))); - bch2_trans_iter_init(trans, &iter1, btree, pos1, - BTREE_ITER_all_snapshots| - BTREE_ITER_not_extents); + CLASS(btree_iter, iter1)(trans, btree, pos1, + BTREE_ITER_all_snapshots| + BTREE_ITER_not_extents); k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX)); ret = bkey_err(k1); if (ret) @@ -1844,7 +1817,6 @@ static int overlapping_extents_found(struct btree_trans *trans, fsck_err: err: bch2_trans_iter_exit(&iter2); - bch2_trans_iter_exit(&iter1); return ret; } @@ -1910,6 +1882,7 @@ static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *it return 0; } +noinline_for_stack static int check_extent(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct inode_walker *inode, @@ -2126,7 +2099,6 @@ static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_wa /* find a subvolume that's a descendent of @snapshot: */ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid) { - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -2138,14 +2110,11 @@ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *su if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) { bch2_trans_iter_exit(&iter); *subvolid = k.k->p.offset; - goto found; + return 0; } } - if (!ret) - ret = -ENOENT; -found: - bch2_trans_iter_exit(&iter); - return ret; + + return ret ?: -ENOENT; } noinline_for_stack @@ -2205,15 +2174,13 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent); ret = PTR_ERR_OR_ZERO(new_dirent); if (ret) - goto err; + return ret; new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol); } - struct bkey_s_c_subvolume s = - bch2_bkey_get_iter_typed(trans, &subvol_iter, - BTREE_ID_subvolumes, POS(0, target_subvol), - 0, subvolume); + bch2_trans_iter_init(trans, &subvol_iter, BTREE_ID_subvolumes, POS(0, target_subvol), 0); + struct bkey_s_c_subvolume s = bch2_bkey_get_typed(&subvol_iter, subvolume); ret = bkey_err(s.s_c); if (ret && 
!bch2_err_matches(ret, ENOENT)) goto err; @@ -2424,8 +2391,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - struct btree_iter delete_iter; - bch2_trans_iter_init(trans, &delete_iter, + CLASS(btree_iter, delete_iter)(trans, BTREE_ID_dirents, SPOS(k.k->p.inode, k.k->p.offset, *i), BTREE_ITER_intent); @@ -2434,7 +2400,6 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, hash_info, &delete_iter, BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(&delete_iter); if (ret) return ret; @@ -2628,7 +2593,6 @@ int bch2_check_root(struct bch_fs *c) static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct btree_iter parent_iter = {}; CLASS(darray_u32, subvol_path)(); CLASS(printbuf, buf)(); int ret = 0; @@ -2636,6 +2600,8 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, if (k.k->type != KEY_TYPE_subvolume) return 0; + CLASS(btree_iter, parent_iter)(trans, BTREE_ID_subvolumes, POS_MIN, 0); + subvol_inum start = { .subvol = k.k->p.offset, .inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode), @@ -2644,7 +2610,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) { ret = darray_push(&subvol_path, k.k->p.offset); if (ret) - goto err; + return ret; struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); @@ -2663,20 +2629,18 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, ret = bch2_inum_to_path(trans, start, &buf); if (ret) - goto err; + return ret; if (fsck_err(trans, subvol_loop, "%s", buf.buf)) ret = reattach_subvol(trans, s); break; } - bch2_trans_iter_exit(&parent_iter); - bch2_trans_iter_init(trans, &parent_iter, - BTREE_ID_subvolumes, POS(0, parent), 0); + bch2_btree_iter_set_pos(&parent_iter, POS(0, parent)); k = bch2_btree_iter_peek_slot(&parent_iter); ret = bkey_err(k); if (ret) - goto err; + return ret; if (fsck_err_on(k.k->type != KEY_TYPE_subvolume, trans, subvol_unreachable, @@ -2684,13 +2648,10 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = reattach_subvol(trans, s); - break; + return reattach_subvol(trans, s); } } fsck_err: -err: - bch2_trans_iter_exit(&parent_iter); return ret; } @@ -2713,25 +2674,23 @@ static int bch2_bi_depth_renumber_one(struct btree_trans *trans, u64 inum, u32 snapshot, u32 new_depth) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inum, snapshot), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, inum, snapshot), 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); struct bch_inode_unpacked inode; int ret = bkey_err(k) ?: !bkey_is_inode(k.k) ? 
-BCH_ERR_ENOENT_inode : bch2_inode_unpack(k, &inode); if (ret) - goto err; + return ret; if (inode.bi_depth != new_depth) { inode.bi_depth = new_depth; - ret = __bch2_fsck_write_inode(trans, &inode) ?: - bch2_trans_commit(trans, NULL, NULL, 0); + return __bch2_fsck_write_inode(trans, &inode) ?: + bch2_trans_commit(trans, NULL, NULL, 0); } -err: - bch2_trans_iter_exit(&iter); - return ret; + + return 0; } static int bch2_bi_depth_renumber(struct btree_trans *trans, darray_u64 *path, @@ -2756,7 +2715,6 @@ static int bch2_bi_depth_renumber(struct btree_trans *trans, darray_u64 *path, static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) { struct bch_fs *c = trans->c; - struct btree_iter inode_iter = {}; CLASS(darray_u64, path)(); CLASS(printbuf, buf)(); u32 snapshot = inode_k.k->p.snapshot; @@ -2771,6 +2729,8 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) if (ret) return ret; + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, POS_MIN, 0); + /* * If we're running full fsck, check_dirents() will have already ran, * and we shouldn't see any missing backpointers here - otherwise that's @@ -2804,9 +2764,8 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) if (ret) return ret; - bch2_trans_iter_exit(&inode_iter); - inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, - SPOS(0, inode.bi_dir, snapshot), 0); + bch2_btree_iter_set_pos(&inode_iter, SPOS(0, inode.bi_dir, snapshot)); + inode_k = bch2_btree_iter_peek_slot(&inode_iter); struct bch_inode_unpacked parent_inode; ret = bkey_err(inode_k) ?: @@ -2863,7 +2822,6 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) ret = bch2_bi_depth_renumber(trans, &path, snapshot, min_bi_depth); out: fsck_err: - bch2_trans_iter_exit(&inode_iter); bch_err_fn(c, ret); return ret; } diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 3f983a5b780c..d5e5190f0663 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -345,12 +345,12 @@ int __bch2_inode_peek(struct btree_trans *trans, if (ret) return ret; - struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes, - SPOS(0, inum.inum, snapshot), - flags|BTREE_ITER_cached); + bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, SPOS(0, inum.inum, snapshot), + flags|BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); ret = bkey_err(k); if (ret) - return ret; + goto err; ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode; if (ret) @@ -373,19 +373,15 @@ int bch2_inode_find_by_inum_snapshot(struct btree_trans *trans, struct bch_inode_unpacked *inode, unsigned flags) { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inode_nr, snapshot), flags); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, inode_nr, snapshot), flags); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; - ret = bkey_is_inode(k.k) + return bkey_is_inode(k.k) ? 
bch2_inode_unpack(k, inode) : -BCH_ERR_ENOENT_inode; -err: - bch2_trans_iter_exit(&iter); - return ret; } int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans, @@ -424,7 +420,6 @@ int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum, int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, struct bch_inode_unpacked *root) { - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -433,15 +428,11 @@ int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, BTREE_ITER_all_snapshots, k, ret) { if (k.k->p.offset != inum) break; - if (bkey_is_inode(k.k)) { - ret = bch2_inode_unpack(k, root); - goto out; - } + if (bkey_is_inode(k.k)) + return bch2_inode_unpack(k, root); } /* We're only called when we know we have an inode for @inum */ BUG_ON(!ret); -out: - bch2_trans_iter_exit(&iter); return ret; } @@ -472,9 +463,10 @@ int __bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked bch2_inode_pack(inode_p, inode); inode_p->inode.k.p.snapshot = inode->bi_snapshot; - return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes, - &inode_p->inode.k_i, - BTREE_UPDATE_internal_snapshot_node); + return bch2_btree_insert_trans(trans, BTREE_ID_inodes, + &inode_p->inode.k_i, + BTREE_ITER_cached| + BTREE_UPDATE_internal_snapshot_node); } int bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode) @@ -696,10 +688,11 @@ bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter struct bkey_s_c k; int ret = 0; - for_each_btree_key_max_norestart(trans, *iter, btree, - bpos_successor(pos), - SPOS(pos.inode, pos.offset, U32_MAX), - flags|BTREE_ITER_all_snapshots, k, ret) + bch2_trans_iter_init(trans, iter, btree, bpos_successor(pos), + flags|BTREE_ITER_all_snapshots); + + for_each_btree_key_max_continue_norestart(*iter, SPOS(pos.inode, pos.offset, U32_MAX), + flags|BTREE_ITER_all_snapshots, k, ret) if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot)) return k; @@ -727,7 +720,6 @@ again: int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -740,7 +732,6 @@ int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) ret = 1; break; } - bch2_trans_iter_exit(&iter); return ret; } @@ -961,11 +952,10 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m cursor_idx &= ~(~0ULL << c->opts.shard_inode_numbers_bits); - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_logged_ops, - POS(LOGGED_OPS_INUM_inode_cursors, cursor_idx), - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_logged_ops, + POS(LOGGED_OPS_INUM_inode_cursors, cursor_idx), + BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ERR_PTR(ret); @@ -974,9 +964,8 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m k.k->type == KEY_TYPE_inode_alloc_cursor ? 
bch2_bkey_make_mut_typed(trans, &iter, &k, 0, inode_alloc_cursor) : bch2_bkey_alloc(trans, &iter, 0, inode_alloc_cursor); - ret = PTR_ERR_OR_ZERO(cursor); - if (ret) - goto err; + if (IS_ERR(cursor)) + return cursor; if (c->opts.inodes_32bit) { *min = BLOCKDEV_INODE_MAX; @@ -997,9 +986,8 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m cursor->v.idx = cpu_to_le64(*min); le32_add_cpu(&cursor->v.gen, 1); } -err: - bch2_trans_iter_exit(&iter); - return ret ? ERR_PTR(ret) : cursor; + + return cursor; } /* @@ -1080,7 +1068,6 @@ found_slot: static int bch2_inode_delete_keys(struct btree_trans *trans, subvol_inum inum, enum btree_id id) { - struct btree_iter iter; struct bkey_s_c k; struct bkey_i delete; struct bpos end = POS(inum.inum, U64_MAX); @@ -1091,8 +1078,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans, * We're never going to be deleting partial extents, no need to use an * extent iterator: */ - bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, id, POS(inum.inum, 0), BTREE_ITER_intent); while (1) { bch2_trans_begin(trans); @@ -1127,7 +1113,6 @@ err: break; } - bch2_trans_iter_exit(&iter); return ret; } @@ -1306,9 +1291,6 @@ static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum { struct bch_fs *c = trans->c; struct btree_iter iter = { NULL }; - struct bkey_i_inode_generation delete; - struct bch_inode_unpacked inode_u; - struct bkey_s_c k; int ret; do { @@ -1330,8 +1312,8 @@ static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum retry: bch2_trans_begin(trans); - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inum, snapshot), BTREE_ITER_intent); + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + SPOS(0, inum, snapshot), BTREE_ITER_intent); ret = bkey_err(k); if (ret) goto err; @@ -1344,12 +1326,14 @@ retry: goto err; } + struct bch_inode_unpacked inode_u; bch2_inode_unpack(k, &inode_u); /* Subvolume root? */ if (inode_u.bi_subvol) bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum); + struct bkey_i_inode_generation delete; bkey_inode_generation_init(&delete.k_i); delete.k.p = iter.pos; delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); @@ -1409,12 +1393,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, bool from_deleted_inodes) { struct bch_fs *c = trans->c; - struct btree_iter inode_iter; - struct bkey_s_c k; CLASS(printbuf, buf)(); int ret; - k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached); + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, pos, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&inode_iter); ret = bkey_err(k); if (ret) return ret; @@ -1426,11 +1409,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; ret = bch2_inode_unpack(k, inode); if (ret) - goto out; + return ret; if (S_ISDIR(inode->bi_mode)) { ret = bch2_empty_dir_snapshot(trans, pos.offset, 0, pos.snapshot); @@ -1441,7 +1424,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; } ret = inode->bi_flags & BCH_INODE_unlinked ? 
0 : bch_err_throw(c, inode_not_unlinked); @@ -1451,7 +1434,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; ret = !(inode->bi_flags & BCH_INODE_has_child_snapshot) ? 0 : bch_err_throw(c, inode_has_child_snapshot); @@ -1462,11 +1445,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, pos.offset, pos.snapshot)) goto delete; if (ret) - goto out; + return ret; ret = bch2_inode_has_child_snapshots(trans, k.k->p); if (ret < 0) - goto out; + return ret; if (ret) { if (fsck_err(trans, inode_has_child_snapshots_wrong, @@ -1477,13 +1460,12 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, inode->bi_flags |= BCH_INODE_has_child_snapshot; ret = __bch2_fsck_write_inode(trans, inode); if (ret) - goto out; + return ret; } if (!from_deleted_inodes) { - ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: bch_err_throw(c, inode_has_child_snapshot); - goto out; } goto delete; @@ -1494,20 +1476,15 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, if (test_bit(BCH_FS_clean_recovery, &c->flags) && !fsck_err(trans, deleted_inode_but_clean, "filesystem marked as clean but have deleted inode %llu:%u", - pos.offset, pos.snapshot)) { - ret = 0; - goto out; - } + pos.offset, pos.snapshot)) + return 0; ret = 1; } -out: fsck_err: - bch2_trans_iter_exit(&inode_iter); return ret; delete: - ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false); - goto out; + return bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false); } static int may_delete_deleted_inum(struct btree_trans *trans, subvol_inum inum, diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index d3496eb8d682..fa0b06e17d17 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -222,16 +222,11 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end, s64 *i_sectors_delta) { CLASS(btree_trans, trans)(c); - - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - POS(inum.inum, start), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inum.inum, start), + BTREE_ITER_intent); int ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta); - bch2_trans_iter_exit(&iter); - return bch2_err_matches(ret, BCH_ERR_transaction_restart) ? 
0 : ret; } @@ -268,7 +263,6 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans, u64 *i_sectors_delta) { struct bch_fs *c = trans->c; - struct btree_iter fpunch_iter; struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k); subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) }; u64 new_i_size = le64_to_cpu(op->v.new_i_size); @@ -280,14 +274,15 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans, if (ret) goto err; - bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents, - POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9), - BTREE_ITER_intent); - ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta); - bch2_trans_iter_exit(&fpunch_iter); + { + CLASS(btree_iter, fpunch_iter)(trans, BTREE_ID_extents, + POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9), + BTREE_ITER_intent); + ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta); - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - ret = 0; + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + ret = 0; + } err: if (warn_errors) bch_err_fn(c, ret); diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index 571b1b9c0fa1..c4f0f9d8f959 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -559,15 +559,14 @@ static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_re if (flags & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) return 0; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, bkey_start_pos(read_k.k), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, btree, bkey_start_pos(read_k.k), BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; if (!bkey_and_val_eq(k, read_k)) - goto out; + return 0; struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_extent_flags)); @@ -576,17 +575,17 @@ static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_re bch2_bkey_extent_flags_set(c, new, flags|BIT_ULL(BCH_EXTENT_FLAG_poisoned)) ?: bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node) ?: bch2_trans_commit(trans, NULL, NULL, 0); + if (ret) + return ret; /* * Propagate key change back to data update path, in particular so it * knows the extent has been poisoned and it's safe to change the * checksum */ - if (u && !ret) + if (u) bch2_bkey_buf_copy(&u->k, c, new); -out: - bch2_trans_iter_exit(&iter); - return ret; + return 0; } static noinline int bch2_read_retry_nodecode(struct btree_trans *trans, @@ -755,56 +754,48 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, { struct bch_fs *c = rbio->c; u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset; - struct bch_extent_crc_unpacked new_crc; - struct btree_iter iter; - struct bkey_i *new; - struct bkey_s_c k; int ret = 0; if (crc_is_compressed(rbio->pick.crc)) return 0; - k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos, - BTREE_ITER_slots|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, rbio->data_btree, rbio->data_pos, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); if ((ret = bkey_err(k))) - goto out; + return ret; if (bversion_cmp(k.k->bversion, rbio->version) || !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset)) - goto out; + return 0; /* Extent was merged? 
*/ if (bkey_start_offset(k.k) < data_offset || k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size) - goto out; + return 0; + struct bch_extent_crc_unpacked new_crc; if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version, rbio->pick.crc, NULL, &new_crc, bkey_start_offset(k.k) - data_offset, k.k->size, rbio->pick.crc.csum_type)) { bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)"); - ret = 0; - goto out; + return 0; } /* * going to be temporarily appending another checksum entry: */ - new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + - sizeof(struct bch_extent_crc128)); + struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + + sizeof(struct bch_extent_crc128)); if ((ret = PTR_ERR_OR_ZERO(new))) - goto out; + return ret; bkey_reassemble(new, k); if (!bch2_bkey_narrow_crcs(new, new_crc)) - goto out; + return 0; - ret = bch2_trans_update(trans, &iter, new, - BTREE_UPDATE_internal_snapshot_node); -out: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node); } static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) @@ -1030,13 +1021,10 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, struct bch_extent_ptr ptr) { struct bch_fs *c = trans->c; - struct btree_iter iter; CLASS(printbuf, buf)(); - int ret; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, - PTR_BUCKET_POS(ca, &ptr), - BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, + PTR_BUCKET_POS(ca, &ptr), + BTREE_ITER_cached); int gen = bucket_gen_get(ca, iter.pos.offset); if (gen >= 0) { @@ -1048,7 +1036,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, prt_printf(&buf, "memory gen: %u", gen); - ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); + int ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); if (!ret) { prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, k); @@ -1066,8 +1054,6 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, } bch2_fs_inconsistent(c, "%s", buf.buf); - - bch2_trans_iter_exit(&iter); } int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, @@ -1411,7 +1397,6 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, unsigned flags) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_buf sk; struct bkey_s_c k; enum btree_id data_btree; @@ -1420,9 +1405,9 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, EBUG_ON(rbio->data_update); bch2_bkey_buf_init(&sk); - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - POS(inum.inum, bvec_iter.bi_sector), - BTREE_ITER_slots); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, + POS(inum.inum, bvec_iter.bi_sector), + BTREE_ITER_slots); while (1) { data_btree = BTREE_ID_extents; @@ -1514,7 +1499,6 @@ err: bch2_rbio_done(rbio); } - bch2_trans_iter_exit(&iter); bch2_bkey_buf_exit(&sk, c); return ret; } diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index 6b9f8b5e55dc..1d83dcc9731e 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -220,13 +220,13 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, */ unsigned inode_update_flags = BTREE_UPDATE_nojournal; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, - extent_iter->pos.inode, - extent_iter->snapshot), - BTREE_ITER_intent| - 
BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, + SPOS(0, + extent_iter->pos.inode, + extent_iter->snapshot), + BTREE_ITER_intent| + BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (unlikely(ret)) return ret; @@ -238,7 +238,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8); ret = PTR_ERR_OR_ZERO(k_mut); if (unlikely(ret)) - goto err; + return ret; bkey_reassemble(k_mut, k); @@ -246,7 +246,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, k_mut = bch2_inode_to_v3(trans, k_mut); ret = PTR_ERR_OR_ZERO(k_mut); if (unlikely(ret)) - goto err; + return ret; } struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut); @@ -291,12 +291,9 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, inode_update_flags = 0; } - ret = bch2_trans_update(trans, &iter, &inode->k_i, - BTREE_UPDATE_internal_snapshot_node| - inode_update_flags); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_trans_update(trans, &iter, &inode->k_i, + BTREE_UPDATE_internal_snapshot_node| + inode_update_flags); } int bch2_extent_update(struct btree_trans *trans, @@ -374,7 +371,6 @@ static int bch2_write_index_default(struct bch_write_op *op) struct bkey_buf sk; struct keylist *keys = &op->insert_keys; struct bkey_i *k = bch2_keylist_front(keys); - struct btree_iter iter; subvol_inum inum = { .subvol = op->subvol, .inum = k->k.p.inode, @@ -399,15 +395,14 @@ static int bch2_write_index_default(struct bch_write_op *op) if (ret) break; - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - bkey_start_pos(&sk.k->k), - BTREE_ITER_slots|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, + bkey_start_pos(&sk.k->k), + BTREE_ITER_slots|BTREE_ITER_intent); ret = bch2_extent_update(trans, inum, &iter, sk.k, &op->res, op->new_i_size, &op->i_sectors_delta, op->flags & BCH_WRITE_check_enospc); - bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; diff --git a/fs/bcachefs/logged_ops.h b/fs/bcachefs/logged_ops.h index 30ae9ef737dd..6dea6e2ac7a8 100644 --- a/fs/bcachefs/logged_ops.h +++ b/fs/bcachefs/logged_ops.h @@ -10,7 +10,7 @@ static inline int bch2_logged_op_update(struct btree_trans *trans, struct bkey_i *op) { - return bch2_btree_insert_nonextent(trans, BTREE_ID_logged_ops, op, 0); + return bch2_btree_insert_trans(trans, BTREE_ID_logged_ops, op, BTREE_ITER_cached); } int bch2_resume_logged_ops(struct bch_fs *); diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c index 39ae70e5c81b..b9c0834498dd 100644 --- a/fs/bcachefs/lru.c +++ b/fs/bcachefs/lru.c @@ -88,10 +88,8 @@ int bch2_lru_check_set(struct btree_trans *trans, { struct bch_fs *c = trans->c; CLASS(printbuf, buf)(); - struct btree_iter lru_iter; - struct bkey_s_c lru_k = - bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru, - lru_pos(lru_id, dev_bucket, time), 0); + CLASS(btree_iter, lru_iter)(trans, BTREE_ID_lru, lru_pos(lru_id, dev_bucket, time), 0); + struct bkey_s_c lru_k = bch2_btree_iter_peek_slot(&lru_iter); int ret = bkey_err(lru_k); if (ret) return ret; @@ -99,7 +97,7 @@ int bch2_lru_check_set(struct btree_trans *trans, if (lru_k.k->type != KEY_TYPE_set) { ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed); if (ret) - goto err; + return ret; if (fsck_err(trans, alloc_key_to_missing_lru_entry, "missing %s lru entry\n%s", @@ -107,12 
+105,10 @@ int bch2_lru_check_set(struct btree_trans *trans, (bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) { ret = bch2_lru_set(trans, lru_id, dev_bucket, time); if (ret) - goto err; + return ret; } } -err: fsck_err: - bch2_trans_iter_exit(&lru_iter); return ret; } @@ -171,11 +167,11 @@ static int bch2_check_lru_key(struct btree_trans *trans, struct bbpos bp = lru_pos_to_bp(lru_k); - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, bp.btree, bp.pos, 0); + CLASS(btree_iter, iter)(trans, bp.btree, bp.pos, 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; enum bch_lru_type type = lru_type(lru_k); u64 idx = bkey_lru_type_idx(c, type, k); @@ -183,7 +179,7 @@ static int bch2_check_lru_key(struct btree_trans *trans, if (lru_pos_time(lru_k.k->p) != idx) { ret = bch2_btree_write_buffer_maybe_flush(trans, lru_k, last_flushed); if (ret) - goto err; + return ret; if (fsck_err(trans, lru_entry_bad, "incorrect lru entry: lru %s time %llu\n" @@ -193,11 +189,9 @@ static int bch2_check_lru_key(struct btree_trans *trans, lru_pos_time(lru_k.k->p), (bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf), (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) - ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); + return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); } -err: fsck_err: - bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index ae9fb58702ba..a38996f5366f 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -511,25 +511,22 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans, *io_opts = bch2_opts_to_inode_opts(c->opts); /* reflink btree? */ - if (!extent_k.k->p.inode) - goto out; - - struct btree_iter inode_iter; - struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, - SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), - BTREE_ITER_cached); - int ret = bkey_err(inode_k); - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - return ret; + if (extent_k.k->p.inode) { + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, + SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), + BTREE_ITER_cached); + struct bkey_s_c inode_k = bch2_btree_iter_peek_slot(&inode_iter); + int ret = bkey_err(inode_k); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ret; - if (!ret && bkey_is_inode(inode_k.k)) { - struct bch_inode_unpacked inode; - bch2_inode_unpack(inode_k, &inode); - bch2_inode_opts_get(io_opts, c, &inode); + if (!ret && bkey_is_inode(inode_k.k)) { + struct bch_inode_unpacked inode; + bch2_inode_unpack(inode_k, &inode); + bch2_inode_opts_get(io_opts, c, &inode); + } } - bch2_trans_iter_exit(&inode_iter); - /* seem to be spinning here? 
*/ -out: + return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k); } @@ -853,7 +850,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, struct bch_fs *c = trans->c; bool is_kthread = current->flags & PF_KTHREAD; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); - struct btree_iter iter = {}, bp_iter = {}; + struct btree_iter iter = {}; struct bkey_buf sk; struct bkey_s_c k; struct bkey_buf last_flushed; @@ -878,7 +875,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, */ bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp_start, 0); ret = bch2_btree_write_buffer_tryflush(trans); if (!bch2_err_matches(ret, EROFS)) @@ -996,7 +993,6 @@ next: bch2_check_bucket_backpointer_mismatch(trans, ca, check_mismatch_done++, copygc, &last_flushed); err: - bch2_trans_iter_exit(&bp_iter); bch2_bkey_buf_exit(&sk, c); bch2_bkey_buf_exit(&last_flushed, c); return ret; diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index f391eceef4f4..b0cbe3c1aab6 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -64,23 +64,22 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset)) return 0; - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, - b->k.bucket, BTREE_ITER_cached); + CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, b->k.bucket, BTREE_ITER_cached); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; CLASS(bch2_dev_bucket_tryget, ca)(c, k.k->p); if (!ca) - goto out; + return 0; if (bch2_bucket_bitmap_test(&ca->bucket_backpointer_mismatch, b->k.bucket.offset)) - goto out; + return 0; if (ca->mi.state != BCH_MEMBER_STATE_rw || !bch2_dev_is_online(ca)) - goto out; + return 0; struct bch_alloc_v4 _a; const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a); @@ -88,10 +87,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, b->sectors = bch2_bucket_sectors_dirty(*a); u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); - ret = lru_idx && lru_idx <= time; -out: - bch2_trans_iter_exit(&iter); - return ret; + return lru_idx && lru_idx <= time; } static void move_bucket_free(struct buckets_in_flight *list, diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c index cfed2041c2c3..d1019052f182 100644 --- a/fs/bcachefs/namei.c +++ b/fs/bcachefs/namei.c @@ -383,9 +383,8 @@ bool bch2_reinherit_attrs(struct bch_inode_unpacked *dst_u, static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_parent) { - struct btree_iter iter; struct bkey_i_subvolume *s = - bch2_bkey_get_mut_typed(trans, &iter, + bch2_bkey_get_mut_typed(trans, BTREE_ID_subvolumes, POS(0, subvol), BTREE_ITER_cached, subvolume); int ret = PTR_ERR_OR_ZERO(s); @@ -393,7 +392,6 @@ static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_p return ret; s->v.fs_path_parent = cpu_to_le32(new_parent); - bch2_trans_iter_exit(&iter); return 0; } @@ -687,10 +685,9 @@ static int __bch2_inum_to_path(struct btree_trans *trans, goto disconnected; } - struct btree_iter d_iter; - struct bkey_s_c_dirent d = bch2_bkey_get_iter_typed(trans, &d_iter, - BTREE_ID_dirents, SPOS(inode.bi_dir, inode.bi_dir_offset, snapshot), - 0, dirent); + CLASS(btree_iter, d_iter)(trans, BTREE_ID_dirents, + SPOS(inode.bi_dir, inode.bi_dir_offset, snapshot), 0); + struct bkey_s_c_dirent 
d = bch2_bkey_get_typed(&d_iter, dirent); ret = bkey_err(d.s_c); if (ret) goto disconnected; @@ -700,8 +697,6 @@ static int __bch2_inum_to_path(struct btree_trans *trans, prt_bytes_reversed(path, dirent_name.name, dirent_name.len); prt_char(path, '/'); - - bch2_trans_iter_exit(&d_iter); } if (orig_pos == path->pos) @@ -779,10 +774,9 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans, return __bch2_fsck_write_inode(trans, target); } - struct bkey_s_c_dirent bp_dirent = - bch2_bkey_get_iter_typed(trans, &bp_iter, BTREE_ID_dirents, - SPOS(target->bi_dir, target->bi_dir_offset, target->bi_snapshot), - 0, dirent); + bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_dirents, + SPOS(target->bi_dir, target->bi_dir_offset, target->bi_snapshot), 0); + struct bkey_s_c_dirent bp_dirent = bch2_bkey_get_typed(&bp_iter, dirent); ret = bkey_err(bp_dirent); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c index 64a7f5eeeb5c..eaa43ad9baa6 100644 --- a/fs/bcachefs/quota.c +++ b/fs/bcachefs/quota.c @@ -798,10 +798,9 @@ static int bch2_set_quota_trans(struct btree_trans *trans, struct bkey_i_quota *new_quota, struct qc_dqblk *qdq) { - struct btree_iter iter; - struct bkey_s_c k = - bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p, - BTREE_ITER_slots|BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_quotas, new_quota->k.p, + BTREE_ITER_slots|BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (unlikely(ret)) return ret; @@ -819,9 +818,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans, if (qdq->d_fieldmask & QC_INO_HARD) new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit); - ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0); - bch2_trans_iter_exit(&iter); - return ret; + return bch2_trans_update(trans, &iter, &new_quota->k_i, 0); } static int bch2_set_quota(struct super_block *sb, struct kqid qid, diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index f2918804fab5..c0c5fe961a83 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -235,14 +235,13 @@ static const char * const bch2_rebalance_state_strs[] = { int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work, - SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_rebalance_work, + SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), + BTREE_ITER_intent); struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; u64 v = k.k->type == KEY_TYPE_cookie ? 
le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie) @@ -251,16 +250,13 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) struct bkey_i_cookie *cookie = bch2_trans_kmalloc(trans, sizeof(*cookie)); ret = PTR_ERR_OR_ZERO(cookie); if (ret) - goto err; + return ret; bkey_cookie_init(&cookie->k_i); cookie->k.p = iter.pos; cookie->v.cookie = cpu_to_le64(v + 1); - ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_trans_update(trans, &iter, &cookie->k_i, 0); } int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum) @@ -279,24 +275,21 @@ int bch2_set_fs_needs_rebalance(struct bch_fs *c) static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work, - SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), - BTREE_ITER_intent); + CLASS(btree_iter, iter)(trans, BTREE_ID_rebalance_work, + SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), + BTREE_ITER_intent); struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; u64 v = k.k->type == KEY_TYPE_cookie ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie) : 0; - if (v == cookie) - ret = bch2_btree_delete_at(trans, &iter, 0); -err: - bch2_trans_iter_exit(&iter); - return ret; + return v == cookie + ? bch2_btree_delete_at(trans, &iter, 0) + : 0; } static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans, @@ -531,7 +524,7 @@ static int do_rebalance(struct moving_context *ctxt) struct btree_trans *trans = ctxt->trans; struct bch_fs *c = trans->c; struct bch_fs_rebalance *r = &c->rebalance; - struct btree_iter rebalance_work_iter, extent_iter = { NULL }; + struct btree_iter extent_iter = { NULL }; struct bkey_s_c k; u32 kick = r->kick; int ret = 0; @@ -541,9 +534,9 @@ static int do_rebalance(struct moving_context *ctxt) bch2_move_stats_init(&r->work_stats, "rebalance_work"); bch2_move_stats_init(&r->scan_stats, "rebalance_scan"); - bch2_trans_iter_init(trans, &rebalance_work_iter, - BTREE_ID_rebalance_work, POS_MIN, - BTREE_ITER_all_snapshots); + CLASS(btree_iter, rebalance_work_iter)(trans, + BTREE_ID_rebalance_work, POS_MIN, + BTREE_ITER_all_snapshots); while (!bch2_move_ratelimit(ctxt)) { if (!bch2_rebalance_enabled(c)) { @@ -577,7 +570,6 @@ static int do_rebalance(struct moving_context *ctxt) } bch2_trans_iter_exit(&extent_iter); - bch2_trans_iter_exit(&rebalance_work_iter); bch2_move_stats_exit(&r->scan_stats, c); if (!ret && @@ -845,15 +837,10 @@ fsck_err: int bch2_check_rebalance_work(struct bch_fs *c) { CLASS(btree_trans, trans)(c); - struct btree_iter rebalance_iter, extent_iter; - int ret = 0; - - bch2_trans_iter_init(trans, &extent_iter, - BTREE_ID_reflink, POS_MIN, - BTREE_ITER_prefetch); - bch2_trans_iter_init(trans, &rebalance_iter, - BTREE_ID_rebalance_work, POS_MIN, - BTREE_ITER_prefetch); + CLASS(btree_iter, extent_iter)(trans, BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch); + CLASS(btree_iter, rebalance_iter)(trans, BTREE_ID_rebalance_work, POS_MIN, + BTREE_ITER_prefetch); struct bkey_buf last_flushed; bch2_bkey_buf_init(&last_flushed); @@ -862,6 +849,7 @@ int bch2_check_rebalance_work(struct bch_fs *c) struct progress_indicator_state progress; bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_rebalance_work)); + int ret = 0; while (!ret) { progress_update_iter(trans, &progress, &rebalance_iter); @@ -874,7 +862,5 @@ int bch2_check_rebalance_work(struct bch_fs *c) } 
bch2_bkey_buf_exit(&last_flushed, c); - bch2_trans_iter_exit(&extent_iter); - bch2_trans_iter_exit(&rebalance_iter); return ret < 0 ? ret : 0; } diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c index 07cebc697b38..238a362de19e 100644 --- a/fs/bcachefs/reflink.c +++ b/fs/bcachefs/reflink.c @@ -264,32 +264,32 @@ struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *trans, u64 reflink_offset = REFLINK_P_IDX(p.v) + *offset_into_extent; - struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_reflink, - POS(0, reflink_offset), iter_flags); - if (bkey_err(k)) - return k; + bch2_trans_iter_init(trans, iter, BTREE_ID_reflink, POS(0, reflink_offset), iter_flags); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); + int ret = bkey_err(k); + if (ret) + goto err; if (unlikely(!bkey_extent_is_reflink_data(k.k))) { u64 missing_end = min(k.k->p.offset, REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad)); BUG_ON(reflink_offset == missing_end); - int ret = bch2_indirect_extent_missing_error(trans, p, reflink_offset, - missing_end, should_commit); - if (ret) { - bch2_trans_iter_exit(iter); - return bkey_s_c_err(ret); - } + ret = bch2_indirect_extent_missing_error(trans, p, reflink_offset, + missing_end, should_commit); + if (ret) + goto err; } else if (unlikely(REFLINK_P_ERROR(p.v))) { - int ret = bch2_indirect_extent_not_missing(trans, p, should_commit); - if (ret) { - bch2_trans_iter_exit(iter); - return bkey_s_c_err(ret); - } + ret = bch2_indirect_extent_not_missing(trans, p, should_commit); + if (ret) + goto err; } *offset_into_extent = reflink_offset - bkey_start_offset(k.k); return k; +err: + bch2_trans_iter_exit(iter); + return bkey_s_c_err(ret); } /* reflink pointer trigger */ @@ -497,13 +497,12 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (orig->k.type == KEY_TYPE_inline_data) bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data); - struct btree_iter reflink_iter; - bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX, - BTREE_ITER_intent); + CLASS(btree_iter, reflink_iter)(trans, BTREE_ID_reflink, POS_MAX, + BTREE_ITER_intent); struct bkey_s_c k = bch2_btree_iter_peek_prev(&reflink_iter); int ret = bkey_err(k); if (ret) - goto err; + return ret; /* * XXX: we're assuming that 56 bits will be enough for the life of the @@ -516,7 +515,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, struct bkey_i *r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); ret = PTR_ERR_OR_ZERO(r_v); if (ret) - goto err; + return ret; bkey_init(&r_v->k); r_v->k.type = bkey_type_to_indirect(&orig->k); @@ -532,7 +531,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, ret = bch2_trans_update(trans, &reflink_iter, r_v, 0); if (ret) - goto err; + return ret; /* * orig is in a bkey_buf which statically allocates 5 64s for the val, @@ -555,12 +554,8 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (reflink_p_may_update_opts_field) SET_REFLINK_P_MAY_UPDATE_OPTIONS(&r_p->v, true); - ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, - BTREE_UPDATE_internal_snapshot_node); -err: - bch2_trans_iter_exit(&reflink_iter); - - return ret; + return bch2_trans_update(trans, extent_iter, &r_p->k_i, + BTREE_UPDATE_internal_snapshot_node); } static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end) diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h index dd4ee46606d7..5317b1bfe2e5 100644 --- a/fs/bcachefs/sb-errors_format.h +++ 
b/fs/bcachefs/sb-errors_format.h @@ -76,6 +76,8 @@ enum bch_fsck_flags { x(btree_node_read_error, 62, FSCK_AUTOFIX) \ x(btree_node_topology_bad_min_key, 63, FSCK_AUTOFIX) \ x(btree_node_topology_bad_max_key, 64, FSCK_AUTOFIX) \ + x(btree_node_topology_bad_root_min_key, 323, FSCK_AUTOFIX) \ + x(btree_node_topology_bad_root_max_key, 324, FSCK_AUTOFIX) \ x(btree_node_topology_overwritten_by_prev_node, 65, FSCK_AUTOFIX) \ x(btree_node_topology_overwritten_by_next_node, 66, FSCK_AUTOFIX) \ x(btree_node_topology_interior_node_empty, 67, FSCK_AUTOFIX) \ @@ -334,7 +336,7 @@ enum bch_fsck_flags { x(dirent_stray_data_after_cf_name, 305, 0) \ x(rebalance_work_incorrectly_set, 309, FSCK_AUTOFIX) \ x(rebalance_work_incorrectly_unset, 310, FSCK_AUTOFIX) \ - x(MAX, 323, 0) + x(MAX, 325, 0) enum bch_sb_error_id { #define x(t, n, ...) BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c index dfdc54ffd57d..5a1f81749661 100644 --- a/fs/bcachefs/snapshot.c +++ b/fs/bcachefs/snapshot.c @@ -431,9 +431,7 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans, u32 snapshot_root, u32 *subvol_id) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; - bool found = false; int ret; for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, @@ -446,28 +444,23 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans, continue; if (!BCH_SUBVOLUME_SNAP(s.v)) { *subvol_id = s.k->p.offset; - found = true; - break; + return 0; } } - bch2_trans_iter_exit(&iter); - - if (!ret && !found) { - struct bkey_i_subvolume *u; + if (ret) + return ret; - *subvol_id = bch2_snapshot_oldest_subvol(c, snapshot_root, NULL); + *subvol_id = bch2_snapshot_oldest_subvol(c, snapshot_root, NULL); - u = bch2_bkey_get_mut_typed(trans, &iter, - BTREE_ID_subvolumes, POS(0, *subvol_id), - 0, subvolume); - ret = PTR_ERR_OR_ZERO(u); - if (ret) - return ret; - - SET_BCH_SUBVOLUME_SNAP(&u->v, false); - } + struct bkey_i_subvolume *u = + bch2_bkey_get_mut_typed(trans, BTREE_ID_subvolumes, POS(0, *subvol_id), + 0, subvolume); + ret = PTR_ERR_OR_ZERO(u); + if (ret) + return ret; - return ret; + SET_BCH_SUBVOLUME_SNAP(&u->v, false); + return 0; } static int check_snapshot_tree(struct btree_trans *trans, @@ -475,27 +468,21 @@ static int check_snapshot_tree(struct btree_trans *trans, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct bkey_s_c_snapshot_tree st; - struct bch_snapshot s; - struct bch_subvolume subvol; CLASS(printbuf, buf)(); - struct btree_iter snapshot_iter = {}; - u32 root_id; - int ret; if (k.k->type != KEY_TYPE_snapshot_tree) return 0; - st = bkey_s_c_to_snapshot_tree(k); - root_id = le32_to_cpu(st.v->root_snapshot); + struct bkey_s_c_snapshot_tree st = bkey_s_c_to_snapshot_tree(k); + u32 root_id = le32_to_cpu(st.v->root_snapshot); - struct bkey_s_c_snapshot snapshot_k = - bch2_bkey_get_iter_typed(trans, &snapshot_iter, BTREE_ID_snapshots, - POS(0, root_id), 0, snapshot); - ret = bkey_err(snapshot_k); + CLASS(btree_iter, snapshot_iter)(trans, BTREE_ID_snapshots, POS(0, root_id), 0); + struct bkey_s_c_snapshot snapshot_k = bch2_bkey_get_typed(&snapshot_iter, snapshot); + int ret = bkey_err(snapshot_k); if (ret && !bch2_err_matches(ret, ENOENT)) - goto err; + return ret; + struct bch_snapshot s; if (!ret) bkey_val_copy(&s, snapshot_k); @@ -509,17 +496,16 @@ static int check_snapshot_tree(struct btree_trans *trans, ret ? 
prt_printf(&buf, "(%s)", bch2_err_str(ret)) : bch2_bkey_val_to_text(&buf, c, snapshot_k.s_c), - buf.buf))) { - ret = bch2_btree_delete_at(trans, iter, 0); - goto err; - } + buf.buf))) + return bch2_btree_delete_at(trans, iter, 0); if (!st.v->master_subvol) - goto out; + return 0; + struct bch_subvolume subvol; ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol), false, &subvol); if (ret && !bch2_err_matches(ret, ENOENT)) - goto err; + return ret; if (fsck_err_on(ret, trans, snapshot_tree_to_missing_subvol, @@ -544,26 +530,21 @@ static int check_snapshot_tree(struct btree_trans *trans, ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id); bch_err_fn(c, ret); - if (bch2_err_matches(ret, ENOENT)) { /* nothing to be done here */ - ret = 0; - goto err; - } + if (bch2_err_matches(ret, ENOENT)) /* nothing to be done here */ + return 0; if (ret) - goto err; + return ret; u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree); ret = PTR_ERR_OR_ZERO(u); if (ret) - goto err; + return ret; u->v.master_subvol = cpu_to_le32(subvol_id); st = snapshot_tree_i_to_s_c(u); } -out: -err: fsck_err: - bch2_trans_iter_exit(&snapshot_iter); return ret; } @@ -641,22 +622,19 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans, struct bch_snapshot *s) { struct bch_fs *c = trans->c; - struct btree_iter root_iter; - struct bch_snapshot_tree s_t; - struct bkey_s_c_snapshot root; struct bkey_i_snapshot *u; - u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id; - int ret; + u32 root_id = bch2_snapshot_root(c, k.k->p.offset); - root = bch2_bkey_get_iter_typed(trans, &root_iter, - BTREE_ID_snapshots, POS(0, root_id), - BTREE_ITER_with_updates, snapshot); - ret = bkey_err(root); + CLASS(btree_iter, root_iter)(trans, BTREE_ID_snapshots, POS(0, root_id), + BTREE_ITER_with_updates); + struct bkey_s_c_snapshot root = bch2_bkey_get_typed(&root_iter, snapshot); + int ret = bkey_err(root); if (ret) - goto err; + return ret; - tree_id = le32_to_cpu(root.v->tree); + u32 tree_id = le32_to_cpu(root.v->tree); + struct bch_snapshot_tree s_t; ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t); if (ret && !bch2_err_matches(ret, ENOENT)) return ret; @@ -668,7 +646,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans, bch2_snapshot_oldest_subvol(c, root_id, NULL), &tree_id); if (ret) - goto err; + return ret; u->v.tree = cpu_to_le32(tree_id); if (k.k->p.offset == root_id) @@ -679,14 +657,13 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans, u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot); ret = PTR_ERR_OR_ZERO(u); if (ret) - goto err; + return ret; u->v.tree = cpu_to_le32(tree_id); *s = u->v; } -err: - bch2_trans_iter_exit(&root_iter); - return ret; + + return 0; } static int check_snapshot(struct btree_trans *trans, @@ -855,7 +832,6 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) struct bch_fs *c = trans->c; /* Do we need to reconstruct the snapshot_tree entry as well? 
*/ - struct btree_iter iter; struct bkey_s_c k; int ret = 0; u32 tree_id = 0; @@ -868,7 +844,6 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) break; } } - bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -898,7 +873,6 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) break; } } - bch2_trans_iter_exit(&iter); return bch2_snapshot_table_make_room(c, id) ?: bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0); @@ -1083,7 +1057,6 @@ int __bch2_get_snapshot_overwrites(struct btree_trans *trans, snapshot_id_list *s) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret = 0; @@ -1100,7 +1073,6 @@ int __bch2_get_snapshot_overwrites(struct btree_trans *trans, if (ret) break; } - bch2_trans_iter_exit(&iter); if (ret) darray_exit(s); @@ -1112,28 +1084,21 @@ int __bch2_get_snapshot_overwrites(struct btree_trans *trans, */ int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id) { - struct btree_iter iter; struct bkey_i_snapshot *s = - bch2_bkey_get_mut_typed(trans, &iter, - BTREE_ID_snapshots, POS(0, id), - 0, snapshot); + bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshots, POS(0, id), 0, snapshot); int ret = PTR_ERR_OR_ZERO(s); - if (unlikely(ret)) { - bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), - trans->c, "missing snapshot %u", id); + bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, "missing snapshot %u", id); + if (unlikely(ret)) return ret; - } /* already deleted? */ if (BCH_SNAPSHOT_WILL_DELETE(&s->v)) - goto err; + return 0; SET_BCH_SNAPSHOT_WILL_DELETE(&s->v, true); SET_BCH_SNAPSHOT_SUBVOL(&s->v, false); s->v.subvol = 0; -err: - bch2_trans_iter_exit(&iter); - return ret; + return 0; } static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s) @@ -1145,22 +1110,17 @@ static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s) static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) { struct bch_fs *c = trans->c; - struct btree_iter iter, p_iter = {}; - struct btree_iter c_iter = {}; - struct btree_iter tree_iter = {}; u32 parent_id, child_id; unsigned i; - int ret = 0; struct bkey_i_snapshot *s = - bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id), - BTREE_ITER_intent, snapshot); - ret = PTR_ERR_OR_ZERO(s); + bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshots, POS(0, id), 0, snapshot); + int ret = PTR_ERR_OR_ZERO(s); bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c, "missing snapshot %u", id); if (ret) - goto err; + return ret; BUG_ON(BCH_SNAPSHOT_DELETED(&s->v)); BUG_ON(s->v.children[1]); @@ -1169,16 +1129,14 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) child_id = le32_to_cpu(s->v.children[0]); if (parent_id) { - struct bkey_i_snapshot *parent; - - parent = bch2_bkey_get_mut_typed(trans, &p_iter, - BTREE_ID_snapshots, POS(0, parent_id), - 0, snapshot); + struct bkey_i_snapshot *parent = + bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshots, POS(0, parent_id), + 0, snapshot); ret = PTR_ERR_OR_ZERO(parent); bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c, "missing snapshot %u", parent_id); if (unlikely(ret)) - goto err; + return ret; /* find entry in parent->children for node being deleted */ for (i = 0; i < 2; i++) @@ -1188,7 +1146,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) if (bch2_fs_inconsistent_on(i == 2, c, "snapshot %u missing child pointer to %u", parent_id, id)) - goto err; + return ret; parent->v.children[i] 
= cpu_to_le32(child_id); @@ -1196,16 +1154,14 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) } if (child_id) { - struct bkey_i_snapshot *child; - - child = bch2_bkey_get_mut_typed(trans, &c_iter, - BTREE_ID_snapshots, POS(0, child_id), - 0, snapshot); + struct bkey_i_snapshot *child = + bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshots, POS(0, child_id), + 0, snapshot); ret = PTR_ERR_OR_ZERO(child); bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c, "missing snapshot %u", child_id); if (unlikely(ret)) - goto err; + return ret; child->v.parent = cpu_to_le32(parent_id); @@ -1222,16 +1178,15 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) * snapshot_tree entry to point to the new root, or delete it if * this is the last snapshot ID in this tree: */ - struct bkey_i_snapshot_tree *s_t; BUG_ON(s->v.children[1]); - s_t = bch2_bkey_get_mut_typed(trans, &tree_iter, + struct bkey_i_snapshot_tree *s_t = bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s->v.tree)), 0, snapshot_tree); ret = PTR_ERR_OR_ZERO(s_t); if (ret) - goto err; + return ret; if (s->v.children[0]) { s_t->v.root_snapshot = s->v.children[0]; @@ -1256,12 +1211,8 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id) s->k.type = KEY_TYPE_deleted; set_bkey_val_u64s(&s->k, 0); } -err: - bch2_trans_iter_exit(&tree_iter); - bch2_trans_iter_exit(&p_iter); - bch2_trans_iter_exit(&c_iter); - bch2_trans_iter_exit(&iter); - return ret; + + return 0; } static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, @@ -1270,35 +1221,29 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, unsigned nr_snapids) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_i_snapshot *n; - struct bkey_s_c k; - unsigned i, j; u32 depth = bch2_snapshot_depth(c, parent); - int ret; - bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, - POS_MIN, BTREE_ITER_intent); - k = bch2_btree_iter_peek(&iter); - ret = bkey_err(k); + CLASS(btree_iter, iter)(trans, BTREE_ID_snapshots, POS_MIN, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek(&iter); + int ret = bkey_err(k); if (ret) - goto err; + return ret; - for (i = 0; i < nr_snapids; i++) { + for (unsigned i = 0; i < nr_snapids; i++) { k = bch2_btree_iter_prev_slot(&iter); ret = bkey_err(k); if (ret) - goto err; + return ret; if (!k.k || !k.k->p.offset) { - ret = bch_err_throw(c, ENOSPC_snapshot_create); - goto err; + return bch_err_throw(c, ENOSPC_snapshot_create); } n = bch2_bkey_alloc(trans, &iter, 0, snapshot); ret = PTR_ERR_OR_ZERO(n); if (ret) - goto err; + return ret; n->v.flags = 0; n->v.parent = cpu_to_le32(parent); @@ -1308,7 +1253,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, n->v.btime.lo = cpu_to_le64(bch2_current_time(c)); n->v.btime.hi = 0; - for (j = 0; j < ARRAY_SIZE(n->v.skip); j++) + for (unsigned j = 0; j < ARRAY_SIZE(n->v.skip); j++) n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent)); bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32); @@ -1317,13 +1262,12 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0); if (ret) - goto err; + return ret; new_snapids[i] = iter.pos.offset; } -err: - bch2_trans_iter_exit(&iter); - return ret; + + return 0; } /* @@ -1334,14 +1278,9 @@ static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 par 
u32 *snapshot_subvols, unsigned nr_snapids) { - struct btree_iter iter; - struct bkey_i_snapshot *n_parent; - int ret = 0; - - n_parent = bch2_bkey_get_mut_typed(trans, &iter, - BTREE_ID_snapshots, POS(0, parent), - 0, snapshot); - ret = PTR_ERR_OR_ZERO(n_parent); + struct bkey_i_snapshot *n_parent = + bch2_bkey_get_mut_typed(trans, BTREE_ID_snapshots, POS(0, parent), 0, snapshot); + int ret = PTR_ERR_OR_ZERO(n_parent); if (unlikely(ret)) { if (bch2_err_matches(ret, ENOENT)) bch_err(trans->c, "snapshot %u not found", parent); @@ -1350,22 +1289,19 @@ static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 par if (n_parent->v.children[0] || n_parent->v.children[1]) { bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children"); - ret = -EINVAL; - goto err; + return -EINVAL; } ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree), new_snapids, snapshot_subvols, nr_snapids); if (ret) - goto err; + return ret; n_parent->v.children[0] = cpu_to_le32(new_snapids[0]); n_parent->v.children[1] = cpu_to_le32(new_snapids[1]); n_parent->v.subvol = 0; SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false); -err: - bch2_trans_iter_exit(&iter); - return ret; + return 0; } /* @@ -1474,23 +1410,19 @@ static int delete_dead_snapshots_process_key(struct btree_trans *trans, new->k.p.snapshot = live_child; - struct btree_iter dst_iter; - struct bkey_s_c dst_k = bch2_bkey_get_iter(trans, &dst_iter, - iter->btree_id, new->k.p, - BTREE_ITER_all_snapshots| - BTREE_ITER_intent); + CLASS(btree_iter, dst_iter)(trans, iter->btree_id, new->k.p, + BTREE_ITER_all_snapshots|BTREE_ITER_intent); + struct bkey_s_c dst_k = bch2_btree_iter_peek_slot(&dst_iter); ret = bkey_err(dst_k); if (ret) return ret; - ret = (bkey_deleted(dst_k.k) + return (bkey_deleted(dst_k.k) ? 
bch2_trans_update(trans, &dst_iter, new, BTREE_UPDATE_internal_snapshot_node) : 0) ?: bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(&dst_iter); - return ret; } return 0; @@ -1937,7 +1869,6 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; int ret; @@ -1948,12 +1879,9 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, if (!bkey_eq(pos, k.k->p)) break; - if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) { - ret = 1; - break; - } + if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) + return 1; } - bch2_trans_iter_exit(&iter); return ret; } diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c index a6503ec58acc..ce2a54902a64 100644 --- a/fs/bcachefs/str_hash.c +++ b/fs/bcachefs/str_hash.c @@ -18,16 +18,14 @@ static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dir return ret; return !ret; } else { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, le64_to_cpu(d.v->d_inum), d.k->p.snapshot), 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); int ret = bkey_err(k); if (ret) return ret; - ret = bkey_is_inode(k.k); - bch2_trans_iter_exit(&iter); - return ret; + return bkey_is_inode(k.k); } } @@ -123,7 +121,6 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, struct bch_inode_unpacked *snapshot_root) { struct bch_fs *c = trans->c; - struct btree_iter iter; struct bkey_s_c k; CLASS(printbuf, buf)(); bool need_commit = false; @@ -180,7 +177,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, } if (ret) - goto err; + return ret; if (!need_commit) { printbuf_reset(&buf); @@ -198,15 +195,12 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, prt_printf(&buf, " %llx %llx", hash_info->siphash_key.k0, hash_info->siphash_key.k1); #endif bch2_print_str(c, KERN_ERR, buf.buf); - ret = bch_err_throw(c, fsck_repair_unimplemented); - goto err; + return bch_err_throw(c, fsck_repair_unimplemented); } ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: bch_err_throw(c, transaction_restart_nested); -err: fsck_err: - bch2_trans_iter_exit(&iter); return ret; } @@ -351,10 +345,14 @@ int __bch2_str_hash_check_key(struct btree_trans *trans, if (hash_k.k->p.offset < hash) goto bad_hash; - for_each_btree_key_norestart(trans, iter, desc->btree_id, - SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot), - BTREE_ITER_slots| - BTREE_ITER_with_updates, k, ret) { + bch2_trans_iter_init(trans, &iter, desc->btree_id, + SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot), + BTREE_ITER_slots| + BTREE_ITER_with_updates); + + for_each_btree_key_continue_norestart(iter, + BTREE_ITER_slots| + BTREE_ITER_with_updates, k, ret) { if (bkey_eq(k.k->p, hash_k.k->p)) break; diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h index 7b4e7e9eb993..8c0fb44929cc 100644 --- a/fs/bcachefs/str_hash.h +++ b/fs/bcachefs/str_hash.h @@ -159,8 +159,11 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans, struct bkey_s_c k; int ret; - for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, - SPOS(inum.inum, desc.hash_key(info, key), snapshot), + bch2_trans_iter_init(trans, iter, + desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), + BTREE_ITER_slots|flags); + + for_each_btree_key_max_continue_norestart(*iter, 
POS(inum.inum, U64_MAX), BTREE_ITER_slots|flags, k, ret) { if (is_visible_key(desc, inum, k)) { @@ -209,8 +212,11 @@ bch2_hash_hole(struct btree_trans *trans, if (ret) return ret; - for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, - SPOS(inum.inum, desc.hash_key(info, key), snapshot), + bch2_trans_iter_init(trans, iter, desc.btree_id, + SPOS(inum.inum, desc.hash_key(info, key), snapshot), + BTREE_ITER_slots|BTREE_ITER_intent); + + for_each_btree_key_max_continue_norestart(*iter, POS(inum.inum, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent, k, ret) if (!is_visible_key(desc, inum, k)) @@ -265,10 +271,13 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans, bool found = false; int ret; - for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, + bch2_trans_iter_init(trans, iter, desc.btree_id, SPOS(insert->k.p.inode, desc.hash_bkey(info, bkey_i_to_s_c(insert)), snapshot), + BTREE_ITER_slots|BTREE_ITER_intent|flags); + + for_each_btree_key_max_continue_norestart(*iter, POS(insert->k.p.inode, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent|flags, k, ret) { if (is_visible_key(desc, inum, k)) { diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index b6d0dc0a46de..6023ae46ca72 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -46,7 +46,6 @@ static int check_subvol(struct btree_trans *trans, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct btree_iter subvol_children_iter = {}; struct bch_subvolume subvol; struct bch_snapshot snapshot; CLASS(printbuf, buf)(); @@ -81,30 +80,28 @@ static int check_subvol(struct btree_trans *trans, bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume); ret = PTR_ERR_OR_ZERO(n); if (ret) - goto err; + return ret; n->v.fs_path_parent = 0; } if (subvol.fs_path_parent) { - struct bpos pos = subvolume_children_pos(k); - - struct bkey_s_c subvol_children_k = - bch2_bkey_get_iter(trans, &subvol_children_iter, - BTREE_ID_subvolume_children, pos, 0); + CLASS(btree_iter, subvol_children_iter)(trans, + BTREE_ID_subvolume_children, subvolume_children_pos(k), 0); + struct bkey_s_c subvol_children_k = bch2_btree_iter_peek_slot(&subvol_children_iter); ret = bkey_err(subvol_children_k); if (ret) - goto err; + return ret; if (fsck_err_on(subvol_children_k.k->type != KEY_TYPE_set, trans, subvol_children_not_set, "subvolume not set in subvolume_children btree at %llu:%llu\n%s", - pos.inode, pos.offset, + subvol_children_iter.pos.inode, subvol_children_iter.pos.offset, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - ret = bch2_btree_bit_mod(trans, BTREE_ID_subvolume_children, pos, true); + ret = bch2_btree_bit_mod(trans, BTREE_ID_subvolume_children, subvol_children_iter.pos, true); if (ret) - goto err; + return ret; } } @@ -122,7 +119,7 @@ static int check_subvol(struct btree_trans *trans, inode.bi_snapshot = le32_to_cpu(subvol.snapshot); ret = __bch2_fsck_write_inode(trans, &inode); if (ret) - goto err; + return ret; } } else if (bch2_err_matches(ret, ENOENT)) { if (fsck_err(trans, subvol_to_missing_root, @@ -142,10 +139,10 @@ static int check_subvol(struct btree_trans *trans, inode.bi_parent_subvol = le32_to_cpu(subvol.fs_path_parent); ret = __bch2_fsck_write_inode(trans, &inode); if (ret) - goto err; + return ret; } } else { - goto err; + return ret; } if (!BCH_SUBVOLUME_SNAP(&subvol)) { @@ -159,7 +156,7 @@ static int check_subvol(struct btree_trans *trans, "%s: snapshot tree %u not found", __func__, snapshot_tree); if (ret) - goto err; + return ret; if 
(fsck_err_on(le32_to_cpu(st.master_subvol) != k.k->p.offset, trans, subvol_not_master_and_not_snapshot, @@ -169,14 +166,12 @@ static int check_subvol(struct btree_trans *trans, bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume); ret = PTR_ERR_OR_ZERO(s); if (ret) - goto err; + return ret; SET_BCH_SUBVOLUME_SNAP(&s->v, true); } } -err: fsck_err: - bch2_trans_iter_exit(&subvol_children_iter); return ret; } @@ -297,11 +292,8 @@ int bch2_subvolume_trigger(struct btree_trans *trans, int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol) { - struct btree_iter iter; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_subvolume_children, POS(subvol, 0), 0); struct bkey_s_c k = bch2_btree_iter_peek(&iter); - bch2_trans_iter_exit(&iter); return bkey_err(k) ?: k.k && k.k->p.inode == subvol ? bch_err_throw(trans->c, ENOTEMPTY_subvol_not_empty) @@ -358,22 +350,16 @@ int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot, int __bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid, u32 *snapid, bool warn) { - struct btree_iter iter; - struct bkey_s_c_subvolume subvol; - int ret; - - subvol = bch2_bkey_get_iter_typed(trans, &iter, - BTREE_ID_subvolumes, POS(0, subvolid), - BTREE_ITER_cached|BTREE_ITER_with_updates, - subvolume); - ret = bkey_err(subvol); + CLASS(btree_iter, iter)(trans, BTREE_ID_subvolumes, POS(0, subvolid), + BTREE_ITER_cached|BTREE_ITER_with_updates); + struct bkey_s_c_subvolume subvol = bch2_bkey_get_typed(&iter, subvolume); + int ret = bkey_err(subvol); if (bch2_err_matches(ret, ENOENT)) ret = bch2_subvolume_missing(trans->c, subvolid) ?: ret; if (likely(!ret)) *snapid = le32_to_cpu(subvol.v->snapshot); - bch2_trans_iter_exit(&iter); return ret; } @@ -434,42 +420,35 @@ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_d */ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid) { - struct btree_iter subvol_iter = {}, snapshot_iter = {}, snapshot_tree_iter = {}; - - struct bkey_s_c_subvolume subvol = - bch2_bkey_get_iter_typed(trans, &subvol_iter, - BTREE_ID_subvolumes, POS(0, subvolid), - BTREE_ITER_cached|BTREE_ITER_intent, - subvolume); + CLASS(btree_iter, subvol_iter)(trans, BTREE_ID_subvolumes, POS(0, subvolid), + BTREE_ITER_cached|BTREE_ITER_intent); + struct bkey_s_c_subvolume subvol = bch2_bkey_get_typed(&subvol_iter, subvolume); int ret = bkey_err(subvol); if (bch2_err_matches(ret, ENOENT)) ret = bch2_subvolume_missing(trans->c, subvolid) ?: ret; if (ret) - goto err; + return ret; u32 snapid = le32_to_cpu(subvol.v->snapshot); - struct bkey_s_c_snapshot snapshot = - bch2_bkey_get_iter_typed(trans, &snapshot_iter, - BTREE_ID_snapshots, POS(0, snapid), - 0, snapshot); + CLASS(btree_iter, snapshot_iter)(trans, BTREE_ID_snapshots, POS(0, snapid), 0); + struct bkey_s_c_snapshot snapshot = bch2_bkey_get_typed(&snapshot_iter, snapshot); ret = bkey_err(snapshot); bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, "missing snapshot %u", snapid); if (ret) - goto err; + return ret; u32 treeid = le32_to_cpu(snapshot.v->tree); + CLASS(btree_iter, snapshot_tree_iter)(trans, BTREE_ID_snapshot_trees, POS(0, treeid), 0); struct bkey_s_c_snapshot_tree snapshot_tree = - bch2_bkey_get_iter_typed(trans, &snapshot_tree_iter, - BTREE_ID_snapshot_trees, POS(0, treeid), - 0, snapshot_tree); + bch2_bkey_get_typed(&snapshot_tree_iter, snapshot_tree); ret = bkey_err(snapshot_tree); 
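Several of the rewritten return paths in these hunks, for example bch2_subvol_has_children() above and the tail of __bch2_subvolume_delete() below, chain steps with the GNU "a ?: b" operator: evaluate a, and only fall through to b when a is zero (success). A small self-contained sketch of the idiom follows; step_one and step_two are hypothetical stand-ins, not bcachefs functions, and the extension requires gcc or clang.

/* Error chaining with the GNU conditional-with-omitted-operand extension. */
#include <stdio.h>

static int step_one(int fail) { return fail ? -5 : 0; }
static int step_two(void)     { return 0; }

static int do_both(int fail)
{
	/* same as: int ret = step_one(fail); return ret ? ret : step_two(); */
	return step_one(fail) ?: step_two();
}

int main(void)
{
	printf("%d\n", do_both(0));	/* 0: both steps ran */
	printf("%d\n", do_both(1));	/* -5: second step skipped */
	return 0;
}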
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, "missing snapshot tree %u", treeid); if (ret) - goto err; + return ret; if (le32_to_cpu(snapshot_tree.v->master_subvol) == subvolid) { struct bkey_i_snapshot_tree *snapshot_tree_mut = @@ -478,18 +457,13 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid) 0, snapshot_tree); ret = PTR_ERR_OR_ZERO(snapshot_tree_mut); if (ret) - goto err; + return ret; snapshot_tree_mut->v.master_subvol = 0; } - ret = bch2_btree_delete_at(trans, &subvol_iter, 0) ?: + return bch2_btree_delete_at(trans, &subvol_iter, 0) ?: bch2_snapshot_node_set_deleted(trans, snapid); -err: - bch2_trans_iter_exit(&snapshot_tree_iter); - bch2_trans_iter_exit(&snapshot_iter); - bch2_trans_iter_exit(&subvol_iter); - return ret; } static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid) @@ -565,13 +539,8 @@ static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid) { - struct btree_iter iter; - struct bkey_i_subvolume *n; - struct subvolume_unlink_hook *h; - int ret = 0; - - h = bch2_trans_kmalloc(trans, sizeof(*h)); - ret = PTR_ERR_OR_ZERO(h); + struct subvolume_unlink_hook *h = bch2_trans_kmalloc(trans, sizeof(*h)); + int ret = PTR_ERR_OR_ZERO(h); if (ret) return ret; @@ -579,9 +548,9 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid) h->subvol = subvolid; bch2_trans_commit_hook(trans, &h->h); - n = bch2_bkey_get_mut_typed(trans, &iter, - BTREE_ID_subvolumes, POS(0, subvolid), - BTREE_ITER_cached, subvolume); + struct bkey_i_subvolume *n = + bch2_bkey_get_mut_typed(trans, BTREE_ID_subvolumes, POS(0, subvolid), + BTREE_ITER_cached, subvolume); ret = PTR_ERR_OR_ZERO(n); if (bch2_err_matches(ret, ENOENT)) ret = bch2_subvolume_missing(trans->c, subvolid) ?: ret; @@ -590,7 +559,6 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid) SET_BCH_SUBVOLUME_UNLINKED(&n->v, true); n->v.fs_path_parent = 0; - bch2_trans_iter_exit(&iter); return ret; } @@ -602,7 +570,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode, bool ro) { struct bch_fs *c = trans->c; - struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL }; + struct btree_iter dst_iter; struct bkey_i_subvolume *new_subvol = NULL; struct bkey_i_subvolume *src_subvol = NULL; u32 parent = 0, new_nodes[2], snapshot_subvols[2]; @@ -621,9 +589,8 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode, if (src_subvolid) { /* Creating a snapshot: */ - src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, - BTREE_ID_subvolumes, POS(0, src_subvolid), - BTREE_ITER_cached, subvolume); + src_subvol = bch2_bkey_get_mut_typed(trans, BTREE_ID_subvolumes, POS(0, src_subvolid), + BTREE_ITER_cached, subvolume); ret = PTR_ERR_OR_ZERO(src_subvol); if (bch2_err_matches(ret, ENOENT)) ret = bch2_subvolume_missing(trans->c, src_subvolid) ?: ret; @@ -639,12 +606,8 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode, if (ret) goto err; - if (src_subvolid) { + if (src_subvolid) src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]); - ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0); - if (ret) - goto err; - } new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume); ret = PTR_ERR_OR_ZERO(new_subvol); @@ -665,7 +628,6 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode, *new_subvolid = new_subvol->k.p.offset; *new_snapshotid = new_nodes[0]; err: - bch2_trans_iter_exit(&src_iter); 
bch2_trans_iter_exit(&dst_iter); return ret; } @@ -702,33 +664,25 @@ int bch2_initialize_subvolumes(struct bch_fs *c) static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans) { - struct btree_iter iter; - struct bkey_s_c k; - struct bch_inode_unpacked inode; - int ret; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0); - ret = bkey_err(k); + CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0); + struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); + int ret = bkey_err(k); if (ret) return ret; if (!bkey_is_inode(k.k)) { struct bch_fs *c = trans->c; bch_err(c, "root inode not found"); - ret = bch_err_throw(c, ENOENT_inode); - goto err; + return bch_err_throw(c, ENOENT_inode); } + struct bch_inode_unpacked inode; ret = bch2_inode_unpack(k, &inode); BUG_ON(ret); inode.bi_subvol = BCACHEFS_ROOT_SUBVOL; - ret = bch2_inode_write(trans, &iter, &inode); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_inode_write(trans, &iter, &inode); } /* set bi_subvol on root inode */ diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h index b39ff39b252d..b6d7c1f4a256 100644 --- a/fs/bcachefs/subvolume.h +++ b/fs/bcachefs/subvolume.h @@ -48,12 +48,11 @@ bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos #define for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \ _end, _subvolid, _flags, _k, _do) \ ({ \ - struct bkey_s_c _k; \ int _ret3 = 0; \ \ do { \ _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter), \ + struct bkey_s_c _k = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter),\ _end, _subvolid, (_flags)); \ if (!(_k).k) \ break; \ @@ -68,14 +67,10 @@ bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos #define for_each_btree_key_in_subvolume_max(_trans, _iter, _btree_id, \ _start, _end, _subvolid, _flags, _k, _do) \ ({ \ - struct btree_iter _iter; \ - bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ - (_start), (_flags)); \ + CLASS(btree_iter, _iter)((_trans), (_btree_id), (_start), (_flags)); \ \ - int _ret = for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \ + for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \ _end, _subvolid, _flags, _k, _do); \ - bch2_trans_iter_exit(&(_iter)); \ - _ret; \ }) int bch2_subvolume_unlink(struct btree_trans *, u32); diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index c88759964575..be7ed612d28f 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -68,28 +68,33 @@ enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_meta int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version) { - guard(mutex)(&c->sb_lock); - if (((c->sb.features & BIT_ULL(BCH_FEATURE_incompat_version_field)) && version <= c->sb.version_incompat_allowed)) { - SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb, - max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version)); - bch2_write_super(c); + guard(mutex)(&c->sb_lock); + + if (version > c->sb.version_incompat) { + SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb, + max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version)); + bch2_write_super(c); + } return 0; } else { - darray_for_each(c->incompat_versions_requested, i) - if (version == *i) - return bch_err_throw(c, may_not_use_incompat_feature); + BUILD_BUG_ON(BCH_VERSION_MAJOR(bcachefs_metadata_version_current) != 1); - darray_push(&c->incompat_versions_requested, version); - 
CLASS(printbuf, buf)(); - prt_str(&buf, "requested incompat feature "); - bch2_version_to_text(&buf, version); - prt_str(&buf, " currently not enabled, allowed up to "); - bch2_version_to_text(&buf, version); - prt_printf(&buf, "\n set version_upgrade=incompat to enable"); + unsigned minor = BCH_VERSION_MINOR(version); + + if (!test_bit(minor, c->incompat_versions_requested) && + !test_and_set_bit(minor, c->incompat_versions_requested)) { + CLASS(printbuf, buf)(); + prt_str(&buf, "requested incompat feature "); + bch2_version_to_text(&buf, version); + prt_str(&buf, " currently not enabled, allowed up to "); + bch2_version_to_text(&buf, version); + prt_printf(&buf, "\n set version_upgrade=incompat to enable"); + + bch_notice(c, "%s", buf.buf); + } - bch_notice(c, "%s", buf.buf); return bch_err_throw(c, may_not_use_incompat_feature); } } diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index b3b2d8353a36..b0019488f586 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -653,7 +653,6 @@ static void __bch2_fs_free(struct bch_fs *c) free_percpu(c->online_reserved); } - darray_exit(&c->incompat_versions_requested); darray_exit(&c->btree_roots_extra); free_percpu(c->pcpu); free_percpu(c->usage); diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c index 4628ff84eefc..baaaedf68422 100644 --- a/fs/bcachefs/tests.c +++ b/fs/bcachefs/tests.c @@ -31,23 +31,19 @@ static void delete_test_keys(struct bch_fs *c) static int test_delete(struct bch_fs *c, u64 nr) { - CLASS(btree_trans, trans)(c); - struct btree_iter iter; struct bkey_i_cookie k; - int ret; - bkey_cookie_init(&k.k_i); k.k.p.snapshot = U32_MAX; - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p, - BTREE_ITER_intent); + CLASS(btree_trans, trans)(c); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent); - ret = commit_do(trans, NULL, NULL, 0, + int ret = commit_do(trans, NULL, NULL, 0, bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, &k.k_i, 0)); bch_err_msg(c, ret, "update error"); if (ret) - goto err; + return ret; pr_info("deleting once"); ret = commit_do(trans, NULL, NULL, 0, @@ -55,7 +51,7 @@ static int test_delete(struct bch_fs *c, u64 nr) bch2_btree_delete_at(trans, &iter, 0)); bch_err_msg(c, ret, "delete error (first)"); if (ret) - goto err; + return ret; pr_info("deleting twice"); ret = commit_do(trans, NULL, NULL, 0, @@ -63,31 +59,26 @@ static int test_delete(struct bch_fs *c, u64 nr) bch2_btree_delete_at(trans, &iter, 0)); bch_err_msg(c, ret, "delete error (second)"); if (ret) - goto err; -err: - bch2_trans_iter_exit(&iter); - return ret; + return ret; + + return 0; } static int test_delete_written(struct bch_fs *c, u64 nr) { - CLASS(btree_trans, trans)(c); - struct btree_iter iter; struct bkey_i_cookie k; - int ret; - bkey_cookie_init(&k.k_i); k.k.p.snapshot = U32_MAX; - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p, - BTREE_ITER_intent); + CLASS(btree_trans, trans)(c); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, k.k.p, BTREE_ITER_intent); - ret = commit_do(trans, NULL, NULL, 0, + int ret = commit_do(trans, NULL, NULL, 0, bch2_btree_iter_traverse(&iter) ?: bch2_trans_update(trans, &iter, &k.k_i, 0)); bch_err_msg(c, ret, "update error"); if (ret) - goto err; + return ret; bch2_trans_unlock(trans); bch2_journal_flush_all_pins(&c->journal); @@ -97,10 +88,9 @@ static int test_delete_written(struct bch_fs *c, u64 nr) bch2_btree_delete_at(trans, &iter, 0)); bch_err_msg(c, ret, "delete error"); if (ret) - goto err; -err: - bch2_trans_iter_exit(&iter); - 
return ret; + return ret; + + return 0; } static int test_iterate(struct bch_fs *c, u64 nr) @@ -343,19 +333,15 @@ static int test_peek_end(struct bch_fs *c, u64 nr) delete_test_keys(c); CLASS(btree_trans, trans)(c); - struct btree_iter iter; - struct bkey_s_c k; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, - SPOS(0, 0, U32_MAX), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0); + struct bkey_s_c k; lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); - bch2_trans_iter_exit(&iter); return 0; } @@ -364,19 +350,15 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr) delete_test_keys(c); CLASS(btree_trans, trans)(c); - struct btree_iter iter; - struct bkey_s_c k; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, - SPOS(0, 0, U32_MAX), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_extents, SPOS(0, 0, U32_MAX), 0); + struct bkey_s_c k; lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); - bch2_trans_iter_exit(&iter); return 0; } @@ -470,25 +452,21 @@ static int test_extent_create_overlapping(struct bch_fs *c, u64 inum) /* Test skipping over keys in unrelated snapshots: */ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi) { - struct btree_iter iter; - struct bkey_s_c k; struct bkey_i_cookie cookie; - int ret; - bkey_cookie_init(&cookie.k_i); cookie.k.p.snapshot = snapid_hi; - ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0); + int ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0); if (ret) return ret; CLASS(btree_trans, trans)(c); - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, - SPOS(0, 0, snapid_lo), 0); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, snapid_lo), 0); + + struct bkey_s_c k; + ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k->p.snapshot != U32_MAX); - bch2_trans_iter_exit(&iter); return ret; } @@ -583,24 +561,18 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr) static int rand_lookup(struct bch_fs *c, u64 nr) { CLASS(btree_trans, trans)(c); - struct btree_iter iter; - struct bkey_s_c k; - int ret = 0; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, - SPOS(0, 0, U32_MAX), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0); for (u64 i = 0; i < nr; i++) { bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX)); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter))); - ret = bkey_err(k); + struct bkey_s_c k; + int ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter))); if (ret) - break; + return ret; } - bch2_trans_iter_exit(&iter); - return ret; + return 0; } static int rand_mixed_trans(struct btree_trans *trans, @@ -631,45 +603,33 @@ static int rand_mixed_trans(struct btree_trans *trans, static int rand_mixed(struct bch_fs *c, u64 nr) { CLASS(btree_trans, trans)(c); - struct btree_iter iter; - struct bkey_i_cookie cookie; - int ret = 0; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, - SPOS(0, 0, U32_MAX), 0); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0); for (u64 i = 0; i < nr; i++) { u64 rand = 
test_rand(); - ret = commit_do(trans, NULL, NULL, 0, + struct bkey_i_cookie cookie; + int ret = commit_do(trans, NULL, NULL, 0, rand_mixed_trans(trans, &iter, &cookie, i, rand)); if (ret) - break; + return ret; } - bch2_trans_iter_exit(&iter); - return ret; + return 0; } static int __do_delete(struct btree_trans *trans, struct bpos pos) { - struct btree_iter iter; - struct bkey_s_c k; - int ret = 0; - - bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos, - BTREE_ITER_intent); - k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)); - ret = bkey_err(k); + CLASS(btree_iter, iter)(trans, BTREE_ID_xattrs, pos, + BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)); + int ret = bkey_err(k); if (ret) - goto err; + return ret; if (!k.k) - goto err; + return 0; - ret = bch2_btree_delete_at(trans, &iter, 0); -err: - bch2_trans_iter_exit(&iter); - return ret; + return bch2_btree_delete_at(trans, &iter, 0); } static int rand_delete(struct bch_fs *c, u64 nr) |
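The super-io.c hunk further up replaces the darray of previously requested incompatible versions with a per-minor-version bitmap queried through test_bit()/test_and_set_bit(), so the "requested incompat feature ... currently not enabled" notice is printed at most once per version. Below is a rough userspace analogue of that warn-once pattern, assuming made-up names (MAX_ID, set_once, request_feature) and a plain non-atomic bitmap; it is a sketch of the idea, not the bcachefs implementation.

/* Warn once per id using a bitmap, loosely mirroring the super-io.c change. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ID 64
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long seen[(MAX_ID + BITS_PER_WORD - 1) / BITS_PER_WORD];

/* returns true if the bit was already set, setting it either way */
static bool set_once(unsigned id)
{
	unsigned long mask = 1UL << (id % BITS_PER_WORD);
	unsigned long *word = &seen[id / BITS_PER_WORD];
	bool was_set = (*word & mask) != 0;

	*word |= mask;
	return was_set;
}

static void request_feature(unsigned id)
{
	if (!set_once(id))
		printf("feature %u requested but not enabled\n", id);
	/* repeated requests for the same id stay quiet */
}

int main(void)
{
	request_feature(3);
	request_feature(3);	/* silent */
	request_feature(7);
	return 0;
}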