summary refs log tree commit diff
path: root/fs/bcachefs/btree_iter.h
diff options
context:
space:
mode:
author	Kent Overstreet <kent.overstreet@linux.dev>	2023-12-10 23:37:45 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2024-01-01 11:47:43 -0500
commit	ccb7b08fbbb85e9b0bb2867497d98172a5737ad5 (patch)
tree	9cc04e7750d9226ca3160b7837e8e76ae45f111a /fs/bcachefs/btree_iter.h
parent	4c5289e6323ca9d0d46b3663ace2fb44bb2594b7 (diff)
bcachefs: trans_for_each_path() no longer uses path->idx
path->idx is now a code smell: we should be using path_idx_t, since it's stable across btree path reallocation.

This is also a bit faster, using the same loop counter vs. fetching path->idx from each path we iterate over.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_iter.h')
-rw-r--r--	fs/bcachefs/btree_iter.h	36
1 file changed, 17 insertions, 19 deletions
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index b70aacafac20..a75d0e7d122a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -64,22 +64,6 @@ static inline void btree_trans_sort_paths(struct btree_trans *trans)
}
static inline struct btree_path *
-__trans_next_path(struct btree_trans *trans, unsigned idx)
-{
- idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, idx);
- if (idx == BTREE_ITER_MAX)
- return NULL;
- EBUG_ON(idx > BTREE_ITER_MAX);
- EBUG_ON(trans->paths[idx].idx != idx);
- return &trans->paths[idx];
-}
-
-#define trans_for_each_path(_trans, _path) \
- for (_path = __trans_next_path((_trans), 1); \
- (_path); \
- _path = __trans_next_path((_trans), (_path)->idx + 1))
-
-static inline struct btree_path *
__trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
{
*idx = find_next_bit(trans->paths_allocated, BTREE_ITER_MAX, *idx);
@@ -102,6 +86,19 @@ __trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
#define trans_for_each_path_safe(_trans, _path, _idx) \
trans_for_each_path_safe_from(_trans, _path, _idx, 1)
+static inline struct btree_path *
+__trans_next_path(struct btree_trans *trans, unsigned *idx)
+{
+ struct btree_path *path = __trans_next_path_safe(trans, idx);
+ EBUG_ON(path && path->idx != *idx);
+ return path;
+}
+
+#define trans_for_each_path(_trans, _path, _iter) \
+ for (_iter = 1; \
+ (_path = __trans_next_path((_trans), &_iter)); \
+ _iter++)
+
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
unsigned idx = path ? path->sorted_idx + 1 : 0;
@@ -156,10 +153,11 @@ static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
unsigned idx)
{
- struct btree_path *path = __trans_next_path(trans, idx);
+ struct btree_path *path = __trans_next_path(trans, &idx);
- while (path && !__path_has_node(path, b))
- path = __trans_next_path(trans, path->idx + 1);
+ while ((path = __trans_next_path(trans, &idx)) &&
+ !__path_has_node(path, b))
+ idx++;
return path;
}