summary refs log tree commit diff
path: root/fs/bcachefs/recovery.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2022-05-21 13:10:39 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-05-12 19:42:05 -0400
commit6fe2a9894e314e365333cea325f6e707f84f56e2 (patch)
treeed508a307122ad8dc5b81d49cde8500244dc3fd4 /fs/bcachefs/recovery.c
parent5951c3d8a5c376f54b5816ad9b659941672580fc (diff)
bcachefs: Fix journal_keys_search() overhead
Previously, on every btree_iter_peek() operation we were searching the journal keys, doing a full binary search - which was slow. This patch fixes that by saving our position in the journal keys, so that we only do a full binary search when moving our position backwards or a large jump forwards.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs/bcachefs/recovery.c')
-rw-r--r-- fs/bcachefs/recovery.c | 54
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 2e782d5d968e..edb04f65a148 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -86,9 +86,9 @@ static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t i
return keys->d + idx_to_pos(keys, idx);
}
-size_t bch2_journal_key_search(struct journal_keys *keys,
- enum btree_id id, unsigned level,
- struct bpos pos)
+static size_t __bch2_journal_key_search(struct journal_keys *keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
{
size_t l = 0, r = keys->nr, m;
@@ -106,26 +106,42 @@ size_t bch2_journal_key_search(struct journal_keys *keys,
BUG_ON(l &&
__journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
- return idx_to_pos(keys, l);
+ return l;
+}
+
+static size_t bch2_journal_key_search(struct journal_keys *keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}
struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
unsigned level, struct bpos pos,
- struct bpos end_pos)
+ struct bpos end_pos, size_t *idx)
{
struct journal_keys *keys = &c->journal_keys;
- size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);
-
- while (idx < keys->size &&
- keys->d[idx].btree_id == btree_id &&
- keys->d[idx].level == level &&
- bpos_cmp(keys->d[idx].k->k.p, end_pos) <= 0) {
- if (!keys->d[idx].overwritten)
- return keys->d[idx].k;
-
- idx++;
- if (idx == keys->gap)
- idx += keys->size - keys->nr;
+ unsigned iters = 0;
+ struct journal_key *k;
+search:
+ if (!*idx)
+ *idx = __bch2_journal_key_search(keys, btree_id, level, pos);
+
+ while (*idx < keys->nr &&
+ (k = idx_to_key(keys, *idx),
+ k->btree_id == btree_id &&
+ k->level == level &&
+ bpos_cmp(k->k->k.p, end_pos) <= 0)) {
+ if (bpos_cmp(k->k->k.p, pos) >= 0 &&
+ !k->overwritten)
+ return k->k;
+
+ (*idx)++;
+ iters++;
+ if (iters == 10) {
+ *idx = 0;
+ goto search;
+ }
}
return NULL;
@@ -134,7 +150,9 @@ struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree
struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
unsigned level, struct bpos pos)
{
- return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos);
+ size_t idx = 0;
+
+ return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}
static void journal_iters_fix(struct bch_fs *c)