Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r-- | fs/bcachefs/btree_iter.c | 340
1 file changed, 181 insertions, 159 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 7463946898c0..8962c481e310 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -240,8 +240,10 @@ void __bch2_trans_verify_paths(struct btree_trans *trans) __bch2_btree_path_verify(trans, path); } -static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter) +static void __bch2_btree_iter_verify(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached); BUG_ON((iter->flags & BTREE_ITER_is_extents) && @@ -270,12 +272,9 @@ static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) bkey_gt(iter->pos, iter->k.p))); } -static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, - struct btree_iter *iter, struct bkey_s_c k) +static int __bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { - struct btree_iter copy; - struct bkey_s_c prev; - int ret = 0; + struct btree_trans *trans = iter->trans; if (!(iter->flags & BTREE_ITER_filter_snapshots)) return 0; @@ -287,16 +286,16 @@ static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, iter->snapshot, k.k->p.snapshot)); - bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos, - BTREE_ITER_nopreserve| - BTREE_ITER_all_snapshots); - prev = bch2_btree_iter_prev(trans, &copy); + CLASS(btree_iter, copy)(trans, iter->btree_id, iter->pos, + BTREE_ITER_nopreserve| + BTREE_ITER_all_snapshots); + struct bkey_s_c prev = bch2_btree_iter_prev(&copy); if (!prev.k) - goto out; + return 0; - ret = bkey_err(prev); + int ret = bkey_err(prev); if (ret) - goto out; + return ret; if (bkey_eq(prev.k->p, k.k->p) && bch2_snapshot_is_ancestor(trans->c, iter->snapshot, @@ -312,9 +311,8 @@ static int __bch2_btree_iter_verify_ret(struct btree_trans *trans, iter->snapshot, buf1.buf, buf2.buf); } -out: - bch2_trans_iter_exit(trans, &copy); - return ret; + + return 0; } void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, @@ -364,11 +362,10 @@ static inline void bch2_btree_path_verify(struct btree_trans *trans, __bch2_btree_path_verify(trans, path); } -static inline void bch2_btree_iter_verify(struct btree_trans *trans, - struct btree_iter *iter) +static inline void bch2_btree_iter_verify(struct btree_iter *iter) { if (static_branch_unlikely(&bch2_debug_check_iterators)) - __bch2_btree_iter_verify(trans, iter); + __bch2_btree_iter_verify(iter); } static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) @@ -377,11 +374,11 @@ static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) __bch2_btree_iter_verify_entry_exit(iter); } -static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter, +static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return static_branch_unlikely(&bch2_debug_check_iterators) - ? __bch2_btree_iter_verify_ret(trans, iter, k) + ?
__bch2_btree_iter_verify_ret(iter, k) : 0; } @@ -891,7 +888,7 @@ static noinline void btree_node_mem_ptr_set(struct btree_trans *trans, static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, struct btree_path *path, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; struct btree_path_level *l = path_l(path); @@ -903,7 +900,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, k = bch2_btree_and_journal_iter_peek(&jiter); if (!k.k) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "node not found at pos "); bch2_bpos_to_text(&buf, path->pos); @@ -911,7 +908,6 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, bch2_btree_pos_to_text(&buf, c, l->b); ret = bch2_fs_topology_error(c, "%s", buf.buf); - printbuf_exit(&buf); goto err; } @@ -930,7 +926,7 @@ static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans, struct btree_path *path) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "node not found at pos "); bch2_bpos_to_text(&buf, path->pos); @@ -944,7 +940,7 @@ static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans, static __always_inline int btree_path_down(struct btree_trans *trans, struct btree_path *path, - unsigned flags, + enum btree_iter_update_trigger_flags flags, unsigned long trace_ip) { struct bch_fs *c = trans->c; @@ -1152,7 +1148,7 @@ static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans, */ int bch2_btree_path_traverse_one(struct btree_trans *trans, btree_path_idx_t path_idx, - unsigned flags, + enum btree_iter_update_trigger_flags flags, unsigned long trace_ip) { struct btree_path *path = &trans->paths[path_idx]; @@ -1451,7 +1447,7 @@ void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_ static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) { #ifdef CONFIG_BCACHEFS_DEBUG - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_prt_backtrace(&buf, &trans->last_restarted_trace); panic("in transaction restart: %s, last restarted by\n%s", bch2_err_str(trans->restarted), @@ -1601,13 +1597,13 @@ void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans) static noinline __cold void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); + bch2_log_msg_start(trans->c, &buf); __bch2_trans_paths_to_text(&buf, trans, nosort); bch2_trans_updates_to_text(&buf, trans); bch2_print_str(trans->c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } noinline __cold @@ -1620,22 +1616,19 @@ noinline __cold static void bch2_trans_update_max_paths(struct btree_trans *trans) { struct btree_transaction_stats *s = btree_trans_stats(trans); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths); bch2_trans_paths_to_text(&buf, trans); if (!buf.allocation_failure) { - mutex_lock(&s->lock); + guard(mutex)(&s->lock); if (nr > s->nr_max_paths) { s->nr_max_paths = nr; swap(s->max_paths_text, buf.buf); } - mutex_unlock(&s->lock); } - printbuf_exit(&buf); - trans->nr_paths_max = nr; } @@ -1643,11 +1636,10 @@ noinline __cold int __bch2_btree_trans_too_many_iters(struct btree_trans *trans) { if (trace_trans_restart_too_many_iters_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); 
bch2_trans_paths_to_text(&buf, trans); trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf); - printbuf_exit(&buf); } count_event(trans->c, trans_restart_too_many_iters); @@ -1737,7 +1729,8 @@ static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans, btree_path_idx_t bch2_path_get(struct btree_trans *trans, enum btree_id btree_id, struct bpos pos, unsigned locks_want, unsigned level, - unsigned flags, unsigned long ip) + enum btree_iter_update_trigger_flags flags, + unsigned long ip) { struct btree_path *path; bool cached = flags & BTREE_ITER_cached; @@ -1868,8 +1861,10 @@ hole: return (struct bkey_s_c) { u, NULL }; } -void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter) +void bch2_set_btree_iter_dontneed(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + if (!iter->path || trans->restarted) return; @@ -1881,14 +1876,17 @@ void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter * /* Btree iterators: */ int __must_check -__bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) +__bch2_btree_iter_traverse(struct btree_iter *iter) { - return bch2_btree_path_traverse(trans, iter->path, iter->flags); + return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); } int __must_check -bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) +bch2_btree_iter_traverse(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + int ret; + bch2_trans_verify_not_unlocked_or_in_restart(trans); iter->path = bch2_btree_path_set_pos(trans, iter->path, @@ -1896,7 +1894,7 @@ bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) iter->flags & BTREE_ITER_intent, btree_iter_ip_allocated(iter)); - int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags); if (ret) return ret; @@ -1908,14 +1906,14 @@ bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter) /* Iterate across nodes (leaf and interior nodes) */ -struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans, - struct btree_iter *iter) +struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; struct btree *b = NULL; int ret; EBUG_ON(trans->paths[iter->path].cached); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (ret) @@ -1937,7 +1935,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans, btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter)); out: bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); return b; err: @@ -1946,26 +1944,26 @@ err: } /* Only kept for -tools */ -struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans, - struct btree_iter *iter) +struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter) { struct btree *b; - while (b = bch2_btree_iter_peek_node(trans, iter), + while (b = bch2_btree_iter_peek_node(iter), bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart)) - bch2_trans_begin(trans); + bch2_trans_begin(iter->trans); return b; } -struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter) +struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; struct btree *b = NULL; int 
ret; EBUG_ON(trans->paths[iter->path].cached); bch2_trans_verify_not_unlocked_or_in_restart(trans); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (ret) @@ -2039,7 +2037,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_ EBUG_ON(btree_iter_path(trans, iter)->uptodate); out: bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); return b; err: @@ -2049,7 +2047,7 @@ err: /* Iterate across keys (in leaf nodes only) */ -inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter) +inline bool bch2_btree_iter_advance(struct btree_iter *iter) { struct bpos pos = iter->k.p; bool ret = !(iter->flags & BTREE_ITER_all_snapshots @@ -2058,11 +2056,11 @@ inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter if (ret && !(iter->flags & BTREE_ITER_is_extents)) pos = bkey_successor(iter, pos); - bch2_btree_iter_set_pos(trans, iter, pos); + bch2_btree_iter_set_pos(iter, pos); return ret; } -inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter) +inline bool bch2_btree_iter_rewind(struct btree_iter *iter) { struct bpos pos = bkey_start_pos(&iter->k); bool ret = !(iter->flags & BTREE_ITER_all_snapshots @@ -2071,7 +2069,7 @@ inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter if (ret && !(iter->flags & BTREE_ITER_is_extents)) pos = bkey_predecessor(iter, pos); - bch2_btree_iter_set_pos(trans, iter, pos); + bch2_btree_iter_set_pos(iter, pos); return ret; } @@ -2203,9 +2201,9 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans, * bkey_s_c_null: */ static noinline -struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter, - struct bpos pos) +struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos) { + struct btree_trans *trans = iter->trans; struct bch_fs *c = trans->c; struct bkey u; struct bkey_s_c k; @@ -2251,14 +2249,14 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btr return k; } -static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter, - struct bpos search_key) +static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key) { + struct btree_trans *trans = iter->trans; struct bkey_s_c k, k2; int ret; EBUG_ON(btree_iter_path(trans, iter)->cached); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); while (1) { iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, @@ -2268,7 +2266,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { /* ensure that iter->k is consistent with iter->pos: */ - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); break; } @@ -2278,7 +2276,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct if (unlikely(!l->b)) { /* No btree nodes at requested level: */ - bch2_btree_iter_set_pos(trans, iter, SPOS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; break; } @@ -2290,10 +2288,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && k.k && !bkey_deleted(k.k) && - (k2 = 
btree_trans_peek_key_cache(trans, iter, k.k->p)).k) { + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { k = k2; if (bkey_err(k)) { - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); break; } } @@ -2326,13 +2324,13 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct search_key = bpos_successor(l->b->key.k.p); } else { /* End of btree: */ - bch2_btree_iter_set_pos(trans, iter, SPOS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; break; } } - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); if (trace___btree_iter_peek_enabled()) { CLASS(printbuf, buf)(); @@ -2353,15 +2351,14 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct /** * bch2_btree_iter_peek_max() - returns first key greater than or equal to * iterator's current position - * @trans: btree transaction object * @iter: iterator to peek from * @end: search limit: returns keys less than or equal to @end * * Returns: key if found, or an error extractable with bkey_err(). */ -struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter, - struct bpos end) +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end) { + struct btree_trans *trans = iter->trans; struct bpos search_key = btree_iter_search_key(iter); struct bkey_s_c k; struct bpos iter_pos = iter->pos; @@ -2383,7 +2380,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree } while (1) { - k = __bch2_btree_iter_peek(trans, iter, search_key); + k = __bch2_btree_iter_peek(iter, search_key); if (unlikely(!k.k)) goto end; if (unlikely(bkey_err(k))) @@ -2453,10 +2450,27 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree continue; } - if (bkey_whiteout(k.k) && - !(iter->flags & BTREE_ITER_key_cache_fill)) { - search_key = bkey_successor(iter, k.k->p); - continue; + if (!(iter->flags & BTREE_ITER_nofilter_whiteouts)) { + /* + * KEY_TYPE_extent_whiteout indicates that there + * are no extents that overlap with this + * whiteout - meaning bkey_start_pos() is + * monotonically increasing when including + * KEY_TYPE_extent_whiteout (not + * KEY_TYPE_whiteout). + * + * Without this @end wouldn't be able to + * terminate searches and we'd have to scan + * through tons of whiteouts: + */ + if (k.k->type == KEY_TYPE_extent_whiteout && + bkey_ge(k.k->p, end)) + goto end; + + if (bkey_extent_whiteout(k.k)) { + search_key = bkey_successor(iter, k.k->p); + continue; + } } } @@ -2497,9 +2511,9 @@ out_no_locked: if (!(iter->flags & BTREE_ITER_all_snapshots)) iter->pos.snapshot = iter->snapshot; - ret = bch2_btree_iter_verify_ret(trans, iter, k); + ret = bch2_btree_iter_verify_ret(iter, k); if (unlikely(ret)) { - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); } @@ -2520,7 +2534,7 @@ out_no_locked: return k; end: - bch2_btree_iter_set_pos(trans, iter, end); + bch2_btree_iter_set_pos(iter, end); k = bkey_s_c_null; goto out_no_locked; } @@ -2528,25 +2542,24 @@ end: /** * bch2_btree_iter_next() - returns first key greater than iterator's current * position - * @trans: btree transaction object * @iter: iterator to peek from * * Returns: key if found, or an error extractable with bkey_err(). 
*/ -struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) { - if (!bch2_btree_iter_advance(trans, iter)) + if (!bch2_btree_iter_advance(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek(trans, iter); + return bch2_btree_iter_peek(iter); } -static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter, - struct bpos search_key) +static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key) { + struct btree_trans *trans = iter->trans; struct bkey_s_c k, k2; - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); while (1) { iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, @@ -2556,7 +2569,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { /* ensure that iter->k is consistent with iter->pos: */ - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); break; } @@ -2566,7 +2579,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st if (unlikely(!l->b)) { /* No btree nodes at requested level: */ - bch2_btree_iter_set_pos(trans, iter, SPOS_MAX); + bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; break; } @@ -2583,10 +2596,10 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && k.k && !bkey_deleted(k.k) && - (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) { + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { k = k2; if (bkey_err(k2)) { - bch2_btree_iter_set_pos(trans, iter, iter->pos); + bch2_btree_iter_set_pos(iter, iter->pos); break; } } @@ -2607,27 +2620,25 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, st search_key = bpos_predecessor(path->l[0].b->data->min_key); } else { /* Start of btree: */ - bch2_btree_iter_set_pos(trans, iter, POS_MIN); + bch2_btree_iter_set_pos(iter, POS_MIN); k = bkey_s_c_null; break; } } - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); return k; } /** * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to * iterator's current position - * @trans: btree transaction object * @iter: iterator to peek from * @end: search limit: returns keys greater than or equal to @end * * Returns: key if found, or an error extractable with bkey_err(). 
*/ -struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter, - struct bpos end) +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end) { if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) && !bkey_eq(iter->pos, POS_MAX) && @@ -2642,7 +2653,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct * real visible extents - easiest to just use peek_slot() (which * internally uses peek() for extents) */ - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); if (bkey_err(k)) return k; @@ -2652,6 +2663,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct return k; } + struct btree_trans *trans = iter->trans; struct bpos search_key = iter->pos; struct bkey_s_c k; btree_path_idx_t saved_path = 0; @@ -2667,7 +2679,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct } while (1) { - k = __bch2_btree_iter_peek_prev(trans, iter, search_key); + k = __bch2_btree_iter_peek_prev(iter, search_key); if (unlikely(!k.k)) goto end; if (unlikely(bkey_err(k))) @@ -2716,7 +2728,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct saved_path = 0; } - if (!bkey_whiteout(k.k)) { + if (!bkey_extent_whiteout(k.k)) { saved_path = btree_path_clone(trans, iter->path, iter->flags & BTREE_ITER_intent, _THIS_IP_); @@ -2729,7 +2741,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct continue; } - if (bkey_whiteout(k.k)) { + if (bkey_extent_whiteout(k.k)) { search_key = bkey_predecessor(iter, k.k->p); search_key.snapshot = U32_MAX; continue; @@ -2749,7 +2761,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct } /* Extents can straddle iter->pos: */ - iter->pos = bpos_min(iter->pos, k.k->p);; + iter->pos = bpos_min(iter->pos, k.k->p); if (iter->flags & BTREE_ITER_filter_snapshots) iter->pos.snapshot = iter->snapshot; @@ -2758,7 +2770,7 @@ out_no_locked: bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_intent); bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); if (trace_btree_iter_peek_prev_min_enabled()) { CLASS(printbuf, buf)(); @@ -2774,7 +2786,7 @@ out_no_locked: } return k; end: - bch2_btree_iter_set_pos(trans, iter, end); + bch2_btree_iter_set_pos(iter, end); k = bkey_s_c_null; goto out_no_locked; } @@ -2782,27 +2794,27 @@ end: /** * bch2_btree_iter_prev() - returns first key less than iterator's current * position - * @trans: btree transaction object * @iter: iterator to peek from * * Returns: key if found, or an error extractable with bkey_err(). 
*/ -struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) { - if (!bch2_btree_iter_rewind(trans, iter)) + if (!bch2_btree_iter_rewind(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek_prev(trans, iter); + return bch2_btree_iter_peek_prev(iter); } -struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; struct bpos search_key; struct bkey_s_c k, k2; int ret; bch2_trans_verify_not_unlocked_or_in_restart(trans); - bch2_btree_iter_verify(trans, iter); + bch2_btree_iter_verify(iter); bch2_btree_iter_verify_entry_exit(iter); EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache)); @@ -2820,7 +2832,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre goto out2; } - bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos)); + bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); } search_key = btree_iter_search_key(iter); @@ -2863,15 +2875,16 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && !bkey_deleted(k.k) && - (k2 = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) { + (k2 = btree_trans_peek_key_cache(iter, iter->pos)).k) { k = k2; - if (!bkey_err(k)) - iter->k = *k.k; + if (bkey_err(k)) + goto out; + iter->k = *k.k; } - if (unlikely(k.k->type == KEY_TYPE_whiteout && + if (unlikely(bkey_extent_whiteout(k.k) && (iter->flags & BTREE_ITER_filter_snapshots) && - !(iter->flags & BTREE_ITER_key_cache_fill))) + !(iter->flags & BTREE_ITER_nofilter_whiteouts))) iter->k.type = KEY_TYPE_deleted; } else { struct bpos next; @@ -2882,31 +2895,40 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre EBUG_ON(btree_iter_path(trans, iter)->level); - if (iter->flags & BTREE_ITER_intent) { - struct btree_iter iter2; + struct btree_iter iter2; - bch2_trans_copy_iter(trans, &iter2, iter); - k = bch2_btree_iter_peek_max(trans, &iter2, end); + bch2_trans_copy_iter(&iter2, iter); + iter2.flags |= BTREE_ITER_nofilter_whiteouts; - if (k.k && !bkey_err(k)) { - swap(iter->key_cache_path, iter2.key_cache_path); - iter->k = iter2.k; - k.k = &iter->k; + while (1) { + k = bch2_btree_iter_peek_max(&iter2, end); + if ((iter2.flags & BTREE_ITER_is_extents) && + k.k && + !bkey_err(k) && + k.k->type == KEY_TYPE_whiteout) { + bch2_btree_iter_set_pos(&iter2, k.k->p); + continue; } - bch2_trans_iter_exit(trans, &iter2); - } else { - struct bpos pos = iter->pos; - k = bch2_btree_iter_peek_max(trans, iter, end); - if (unlikely(bkey_err(k))) - bch2_btree_iter_set_pos(trans, iter, pos); - else - iter->pos = pos; + break; + } + + if (k.k && !bkey_err(k)) { + swap(iter->key_cache_path, iter2.key_cache_path); + iter->k = iter2.k; + k.k = &iter->k; } + bch2_trans_iter_exit(&iter2); if (unlikely(bkey_err(k))) goto out; + if (unlikely(k.k && + bkey_extent_whiteout(k.k) && + (iter->flags & BTREE_ITER_filter_snapshots) && + !(iter->flags & BTREE_ITER_nofilter_whiteouts))) + iter->k.type = KEY_TYPE_deleted; + next = k.k ? 
bkey_start_pos(k.k) : POS_MAX; if (bkey_lt(iter->pos, next)) { @@ -2928,8 +2950,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre } out: bch2_btree_iter_verify_entry_exit(iter); - bch2_btree_iter_verify(trans, iter); - ret = bch2_btree_iter_verify_ret(trans, iter, k); + bch2_btree_iter_verify(iter); + ret = bch2_btree_iter_verify_ret(iter, k); if (unlikely(ret)) k = bkey_s_c_err(ret); out2: @@ -2949,31 +2971,31 @@ out2: return k; } -struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) { - if (!bch2_btree_iter_advance(trans, iter)) + if (!bch2_btree_iter_advance(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek_slot(trans, iter); + return bch2_btree_iter_peek_slot(iter); } -struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) { - if (!bch2_btree_iter_rewind(trans, iter)) + if (!bch2_btree_iter_rewind(iter)) return bkey_s_c_null; - return bch2_btree_iter_peek_slot(trans, iter); + return bch2_btree_iter_peek_slot(iter); } /* Obsolete, but still used by rust wrapper in -tools */ -struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter) { struct bkey_s_c k; - while (btree_trans_too_many_iters(trans) || - (k = bch2_btree_iter_peek_type(trans, iter, iter->flags), + while (btree_trans_too_many_iters(iter->trans) || + (k = bch2_btree_iter_peek_type(iter, iter->flags), bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart))) - bch2_trans_begin(trans); + bch2_trans_begin(iter->trans); return k; } @@ -3105,8 +3127,10 @@ static inline void btree_path_list_add(struct btree_trans *trans, btree_trans_verify_sorted_refs(trans); } -void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) +void bch2_trans_iter_exit(struct btree_iter *iter) { + struct btree_trans *trans = iter->trans; + if (iter->update_path) bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent); @@ -3119,16 +3143,18 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) iter->path = 0; iter->update_path = 0; iter->key_cache_path = 0; + iter->trans = NULL; } void bch2_trans_iter_init_outlined(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree_id, struct bpos pos, - unsigned flags) + enum btree_iter_update_trigger_flags flags, + unsigned long ip) { bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, bch2_btree_iter_flags(trans, btree_id, 0, flags), - _RET_IP_); + ip); } void bch2_trans_node_iter_init(struct btree_trans *trans, @@ -3137,7 +3163,7 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, struct bpos pos, unsigned locks_want, unsigned depth, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { flags |= BTREE_ITER_not_extents; flags |= BTREE_ITER_snapshot_field; @@ -3158,9 +3184,10 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, BUG_ON(iter->min_depth != depth); } -void bch2_trans_copy_iter(struct btree_trans *trans, - struct btree_iter *dst, struct btree_iter *src) +void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src) { + struct btree_trans *trans = src->trans; + *dst = *src; #ifdef TRACK_PATH_ALLOCATED dst->ip_allocated = _RET_IP_; @@ -3196,14 +3223,13 @@ void *__bch2_trans_kmalloc(struct 
btree_trans *trans, size_t size, unsigned long if (WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX)) { #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "bump allocator exceeded BTREE_TRANS_MEM_MAX (%u)\n", BTREE_TRANS_MEM_MAX); bch2_trans_kmalloc_trace_to_text(&buf, &trans->trans_kmalloc_trace); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); #endif } @@ -3213,7 +3239,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long struct btree_transaction_stats *s = btree_trans_stats(trans); if (new_bytes > s->max_mem) { - mutex_lock(&s->lock); + guard(mutex)(&s->lock); #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE darray_resize(&s->trans_kmalloc_trace, trans->trans_kmalloc_trace.nr); s->trans_kmalloc_trace.nr = min(s->trans_kmalloc_trace.size, @@ -3225,7 +3251,6 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long s->trans_kmalloc_trace.nr); #endif s->max_mem = new_bytes; - mutex_unlock(&s->lock); } if (trans->used_mempool || new_bytes > BTREE_TRANS_MEM_MAX) { @@ -3535,7 +3560,7 @@ static void check_btree_paths_leaked(struct btree_trans *trans) struct btree_path *path; unsigned i; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "btree paths leaked from %s!\n", trans->fn); @@ -3547,7 +3572,6 @@ static void check_btree_paths_leaked(struct btree_trans *trans) bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } } #else @@ -3672,11 +3696,11 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) /* trans->paths is rcu protected vs. freeing */ guard(rcu)(); - out->atomic++; + guard(printbuf_atomic)(out); struct btree_path *paths = rcu_dereference(trans->paths); if (!paths) - goto out; + return; unsigned long *paths_allocated = trans_paths_allocated(paths); @@ -3712,8 +3736,6 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) bch2_btree_bkey_cached_common_to_text(out, b); prt_newline(out); } -out: - --out->atomic; } void bch2_fs_btree_iter_exit(struct bch_fs *c)
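
Calling-convention note: the recurring change in this patch is that a btree_iter now records the transaction it belongs to (iter->trans), so the iterator helpers (traverse, peek, prev, exit) drop their explicit trans argument, and scope-based cleanup, CLASS() and guard() from the kernel's cleanup machinery, replaces the manual PRINTBUF/printbuf_exit() and mutex_lock()/mutex_unlock() pairs. The sketch below is illustrative only and is not part of the patch: the function name and the use of BTREE_ID_extents with flags 0 are made up, and it assumes the in-tree bcachefs headers that provide the CLASS(btree_iter, ...) constructor used in the hunks above.

/*
 * Illustrative sketch (hypothetical helper, not from this patch): a lookup
 * written against the new calling convention.
 */
static int example_peek_slot(struct btree_trans *trans, struct bpos pos)
{
	/*
	 * Old convention, removed by this patch:
	 *
	 *	struct btree_iter iter;
	 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, pos, 0);
	 *	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);
	 *	...
	 *	bch2_trans_iter_exit(trans, &iter);
	 */

	/* New convention: constructor-style init; the iterator carries trans: */
	CLASS(btree_iter, iter)(trans, BTREE_ID_extents, pos, 0);

	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	/* ... use k ... */

	return 0; /* iterator is released automatically at scope exit */
}

The same pattern drives the other hunks: CLASS(printbuf, buf)() replaces struct printbuf buf = PRINTBUF plus printbuf_exit(&buf), and guard(mutex)(&s->lock) replaces the mutex_lock()/mutex_unlock() pair, which is why the early returns added throughout the file no longer need goto-based unwinding.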