Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r-- | fs/bcachefs/btree_iter.c | 104
1 file changed, 66 insertions, 38 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index a67babf69d39..76f430f93dc1 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -650,7 +650,7 @@ static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, str
 			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
 
 			if (unlikely(trans->journal_replay_not_finished)) {
-				struct bkey_i *j_k =
+				const struct bkey_i *j_k =
 					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
 								    i->k->k.p);
 
@@ -848,7 +848,7 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p
 			break;
 
 		bch2_btree_and_journal_iter_advance(jiter);
-		k = bch2_btree_and_journal_iter_peek(jiter);
+		k = bch2_btree_and_journal_iter_peek(c, jiter);
 		if (!k.k)
 			break;
 
@@ -898,7 +898,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
 	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
 
-	k = bch2_btree_and_journal_iter_peek(&jiter);
+	k = bch2_btree_and_journal_iter_peek(c, &jiter);
 	if (!k.k) {
 		CLASS(printbuf, buf)();
 
@@ -2120,10 +2120,10 @@ void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_
 		}
 }
 
-static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
-					      struct btree_iter *iter,
-					      struct bpos search_pos,
-					      struct bpos end_pos)
+static const struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+						    struct btree_iter *iter,
+						    struct bpos search_pos,
+						    struct bpos end_pos)
 {
 	struct btree_path *path = btree_iter_path(trans, iter);
 
@@ -2139,7 +2139,7 @@ struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
 					      struct btree_iter *iter)
 {
 	struct btree_path *path = btree_iter_path(trans, iter);
-	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos, path->pos);
+	const struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos, path->pos);
 
 	if (k) {
 		iter->k = k->k;
@@ -2156,7 +2156,7 @@ void btree_trans_peek_journal(struct btree_trans *trans,
 			      struct bkey_s_c *k)
 {
 	struct btree_path *path = btree_iter_path(trans, iter);
-	struct bkey_i *next_journal =
+	const struct bkey_i *next_journal =
 		bch2_btree_journal_peek(trans, iter, search_key,
 				k->k ? k->k->p : path_l(path)->b->key.k.p);
 	if (next_journal) {
@@ -2165,10 +2165,10 @@ void btree_trans_peek_journal(struct btree_trans *trans,
 	}
 }
 
-static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
-						   struct btree_iter *iter,
-						   struct bpos search_key,
-						   struct bpos end_pos)
+static const struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
+							 struct btree_iter *iter,
+							 struct bpos search_key,
+							 struct bpos end_pos)
 {
 	struct btree_path *path = btree_iter_path(trans, iter);
 
@@ -2186,7 +2186,7 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans,
 				   struct bkey_s_c *k)
 {
 	struct btree_path *path = btree_iter_path(trans, iter);
-	struct bkey_i *next_journal =
+	const struct bkey_i *next_journal =
 		bch2_btree_journal_peek_prev(trans, iter, search_key,
 				k->k ? k->k->p : path_l(path)->b->data->min_key);
 
@@ -2366,7 +2366,9 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en
 
 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
 	bch2_btree_iter_verify_entry_exit(iter);
-	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
+	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
+		!(iter->flags & BTREE_ITER_nofilter_whiteouts) &&
+		bkey_eq(end, POS_MAX));
 
 	ret = trans_maybe_inject_restart(trans, _RET_IP_);
 	if (unlikely(ret)) {
@@ -2450,10 +2452,27 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en
 				continue;
 			}
 
-			if (bkey_whiteout(k.k) &&
-			    !(iter->flags & BTREE_ITER_nofilter_whiteouts)) {
-				search_key = bkey_successor(iter, k.k->p);
-				continue;
+			if (!(iter->flags & BTREE_ITER_nofilter_whiteouts)) {
+				/*
+				 * KEY_TYPE_extent_whiteout indicates that there
+				 * are no extents that overlap with this
+				 * whiteout - meaning bkey_start_pos() is
+				 * monotonically increasing when including
+				 * KEY_TYPE_extent_whiteout (not
+				 * KEY_TYPE_whiteout).
+				 *
+				 * Without this @end wouldn't be able to
+				 * terminate searches and we'd have to scan
+				 * through tons of whiteouts:
+				 */
+				if (k.k->type == KEY_TYPE_extent_whiteout &&
+				    bkey_ge(k.k->p, end))
+					goto end;
+
+				if (bkey_extent_whiteout(k.k)) {
+					search_key = bkey_successor(iter, k.k->p);
+					continue;
+				}
 			}
 		}
 
@@ -2711,7 +2730,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
 				saved_path = 0;
 			}
 
-			if (!bkey_whiteout(k.k)) {
+			if (!bkey_extent_whiteout(k.k)) {
 				saved_path = btree_path_clone(trans, iter->path,
 							iter->flags & BTREE_ITER_intent,
 							_THIS_IP_);
@@ -2724,7 +2743,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
 				continue;
 			}
 
-			if (bkey_whiteout(k.k)) {
+			if (bkey_extent_whiteout(k.k)) {
 				search_key = bkey_predecessor(iter, k.k->p);
 				search_key.snapshot = U32_MAX;
 				continue;
@@ -2865,7 +2884,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 			iter->k = *k.k;
 		}
 
-		if (unlikely(k.k->type == KEY_TYPE_whiteout &&
+		if (unlikely(bkey_extent_whiteout(k.k) &&
 			     (iter->flags & BTREE_ITER_filter_snapshots) &&
 			     !(iter->flags & BTREE_ITER_nofilter_whiteouts)))
 			iter->k.type = KEY_TYPE_deleted;
@@ -2878,31 +2897,40 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
 		EBUG_ON(btree_iter_path(trans, iter)->level);
 
-		if (iter->flags & BTREE_ITER_intent) {
-			struct btree_iter iter2;
+		struct btree_iter iter2;
 
-			bch2_trans_copy_iter(&iter2, iter);
-			k = bch2_btree_iter_peek_max(&iter2, end);
+		bch2_trans_copy_iter(&iter2, iter);
+		iter2.flags |= BTREE_ITER_nofilter_whiteouts;
 
-			if (k.k && !bkey_err(k)) {
-				swap(iter->key_cache_path, iter2.key_cache_path);
-				iter->k = iter2.k;
-				k.k = &iter->k;
+		while (1) {
+			k = bch2_btree_iter_peek_max(&iter2, end);
+			if ((iter2.flags & BTREE_ITER_is_extents) &&
+			    k.k &&
+			    !bkey_err(k) &&
+			    k.k->type == KEY_TYPE_whiteout) {
+				bch2_btree_iter_set_pos(&iter2, k.k->p);
+				continue;
 			}
-			bch2_trans_iter_exit(&iter2);
-		} else {
-			struct bpos pos = iter->pos;
-			k = bch2_btree_iter_peek_max(iter, end);
-			if (unlikely(bkey_err(k)))
-				bch2_btree_iter_set_pos(iter, pos);
-			else
-				iter->pos = pos;
+			break;
+		}
+
+		if (k.k && !bkey_err(k)) {
+			swap(iter->key_cache_path, iter2.key_cache_path);
+			iter->k = iter2.k;
+			k.k = &iter->k;
 		}
+		bch2_trans_iter_exit(&iter2);
 
 		if (unlikely(bkey_err(k)))
 			goto out;
 
+		if (unlikely(k.k &&
+			     bkey_extent_whiteout(k.k) &&
+			     (iter->flags & BTREE_ITER_filter_snapshots) &&
+			     !(iter->flags & BTREE_ITER_nofilter_whiteouts)))
+			iter->k.type = KEY_TYPE_deleted;
+
 		next = k.k ? bkey_start_pos(k.k) : POS_MAX;
 
 		if (bkey_lt(iter->pos, next)) {
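
As context for the bch2_btree_iter_peek_max() hunk above: the early "goto end" relies on start positions staying monotonic once KEY_TYPE_extent_whiteout keys are included, which is what lets a forward scan stop at the first extent whiteout at or past the end position instead of walking every remaining whiteout. The following is a standalone sketch of that early-termination idea, not bcachefs code; the names sketch_key and sketch_peek_max are hypothetical and only the control flow mirrors the patch.

/* Toy model of the early-termination logic - not bcachefs code. */
#include <stdbool.h>
#include <stdio.h>

struct sketch_key {
	unsigned	pos;		/* key position, stands in for bkey.p */
	bool		whiteout;	/* stands in for KEY_TYPE_extent_whiteout */
};

/*
 * Scan forward from @search and return the index of the first visible
 * (non-whiteout) key before @end, or -1 if there is none.
 */
static int sketch_peek_max(const struct sketch_key *keys, int nr,
			   unsigned search, unsigned end)
{
	for (int i = 0; i < nr; i++) {
		if (keys[i].pos < search)
			continue;

		if (keys[i].whiteout) {
			/*
			 * Positions are monotonic even across extent
			 * whiteouts, so a whiteout at or past @end means
			 * nothing visible can remain in range - stop here
			 * instead of scanning the remaining whiteouts.
			 */
			if (keys[i].pos >= end)
				return -1;
			continue;	/* filter the whiteout, keep going */
		}

		return keys[i].pos < end ? i : -1;
	}
	return -1;
}

int main(void)
{
	const struct sketch_key keys[] = {
		{ 10, false }, { 20, true }, { 30, true }, { 40, false },
	};
	int i = sketch_peek_max(keys, 4, 15, 25);

	if (i < 0)
		printf("no visible key before end\n");
	else
		printf("found key at pos %u\n", keys[i].pos);
	return 0;
}

With this sample data the scan stops at the whiteout at position 30 rather than continuing to the key at 40, which is the behaviour the new end-position check in the patch is after.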