author		Kent Overstreet <kent.overstreet@gmail.com>	2019-11-09 23:50:52 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:08:32 -0400
commit		ad44bdc351faeacb9b7294f1689ac76babf379ad (patch)
tree		934bff1cc441503b9ee92e7702c14c45f08388a8 /fs/bcachefs/bkey_sort.c
parent		aef90ce085123c3d0c3f110b4c50b77d007b2d5d (diff)
bcachefs: bkey noops
For upcoming inline data extents, we're going to need to be able to shorten the value of existing bkeys in the btree - and to make that work we need to be able to pad out the space the value previously took up with something.

This patch changes the various code that iterates over bkeys to handle k->u64s == 0 as meaning "skip the next 8 bytes".

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
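The hunks below switch this file's iteration over to a noop-skipping helper, bkey_next_skip_noops(), whose body lives outside this diff. As a rough sketch of the idea (assuming the usual bcachefs struct bkey_packed layout, where u64s counts the key's 64-bit words), skipping a noop just means stepping forward one u64 at a time whenever u64s reads zero:

/*
 * Sketch of a noop-skipping successor along the lines of the
 * bkey_next_skip_noops() used below: advance to the next key, then
 * step over any 8-byte noop (a bkey whose u64s field is 0), never
 * walking past 'end'.
 */
static inline struct bkey_packed *bkey_next_skip_noops(struct bkey_packed *k,
							struct bkey_packed *end)
{
	k = bkey_next(k);

	while (k != end && !k->u64s)
		k = (void *) ((u64 *) k + 1);

	return k;
}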
Diffstat (limited to 'fs/bcachefs/bkey_sort.c')
-rw-r--r--	fs/bcachefs/bkey_sort.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c
index f5c0507ad79d..5f9f3d2e6906 100644
--- a/fs/bcachefs/bkey_sort.c
+++ b/fs/bcachefs/bkey_sort.c
@@ -75,6 +75,10 @@ static void sort_key_next(struct btree_node_iter_large *iter,
 {
 	i->k += __btree_node_offset_to_key(b, i->k)->u64s;
 
+	while (i->k != i->end &&
+	       !__btree_node_offset_to_key(b, i->k)->u64s)
+		i->k++;
+
 	if (i->k == i->end)
 		*i = iter->data[--iter->used];
 }
@@ -119,7 +123,7 @@ static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
 static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
 {
-	iter->data->k = bkey_next(iter->data->k);
+	iter->data->k = bkey_next_skip_noops(iter->data->k, iter->data->end);
 	BUG_ON(iter->data->k > iter->data->end);
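The other half mentioned in the commit message - padding out the space freed when a value is shortened - is not part of this file's changes. A purely hypothetical illustration (pad_value_with_noops is an illustrative name, not a helper from this patch): since a noop is a u64 whose bkey header reads u64s == 0, zeroing the freed words is all the padding the noop-aware iterators above need in order to skip them.

/*
 * Hypothetical sketch, not code from this patch: shrink a packed key's
 * value in place and fill the freed u64s with noops by zeroing them,
 * so that noop-skipping iteration steps over the reclaimed space.
 */
static void pad_value_with_noops(struct bkey_packed *k, unsigned new_u64s)
{
	unsigned old_u64s = k->u64s;

	BUG_ON(new_u64s > old_u64s);

	k->u64s = new_u64s;
	memset((u64 *) k + new_u64s, 0,
	       (old_u64s - new_u64s) * sizeof(u64));
}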