author     Kent Overstreet <kent.overstreet@gmail.com>   2021-09-09 19:06:29 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>   2021-09-09 19:10:07 -0400
commit     2b8c1bb0910534e8687ea3e5abf6d8bbba758247 (patch)
tree       4e63e051d884896b1fb49d50a9f0c71fb143b7c8 /libbcachefs/btree_iter.h
parent     6c42566c6204bb5dcd6af3b97257e548b9d2db67 (diff)
Update bcachefs sources to 3f3f969859 bcachefs: Fix some compiler warnings
Diffstat (limited to 'libbcachefs/btree_iter.h')
-rw-r--r--   libbcachefs/btree_iter.h   268
1 file changed, 119 insertions(+), 149 deletions(-)
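
The patch below renames the transaction-internal iterator state to btree_path and turns btree_iter into a thin, caller-owned handle onto a path. As a rough before/after sketch of the caller-facing change, using only names that appear in the hunks below (the btree ID, position, and zero flags argument are illustrative, error handling omitted):

    /* Before this patch (sketch): iterators allocated out of the btree_trans */
    struct btree_iter *old_iter;

    old_iter = bch2_trans_get_iter(trans, BTREE_ID_extents, POS_MIN, 0);
    /* ... use old_iter ... */
    bch2_trans_iter_put(trans, old_iter);

    /* After this patch (sketch): the iterator lives on the caller's stack */
    struct btree_iter iter;

    bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS_MIN, 0);
    /* ... use &iter ... */
    bch2_trans_iter_exit(trans, &iter);
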
diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h
index 39124e68..be1bb489 100644
--- a/libbcachefs/btree_iter.h
+++ b/libbcachefs/btree_iter.h
@@ -5,40 +5,49 @@
#include "bset.h"
#include "btree_types.h"
-static inline void btree_iter_set_dirty(struct btree_iter *iter,
- enum btree_iter_uptodate u)
+static inline void __btree_path_get(struct btree_path *path, bool intent)
{
- iter->uptodate = max_t(unsigned, iter->uptodate, u);
+ path->ref++;
+ path->intent_ref += intent;
}
-static inline struct btree *btree_iter_node(struct btree_iter *iter,
+static inline bool __btree_path_put(struct btree_path *path, bool intent)
+{
+ EBUG_ON(!path->ref);
+ EBUG_ON(!path->intent_ref && intent);
+ path->intent_ref -= intent;
+ return --path->ref == 0;
+}
+
+static inline void btree_path_set_dirty(struct btree_path *path,
+ enum btree_path_uptodate u)
+{
+ path->uptodate = max_t(unsigned, path->uptodate, u);
+}
+
+static inline struct btree *btree_path_node(struct btree_path *path,
unsigned level)
{
- return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
+ return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}
-static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
+static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
const struct btree *b, unsigned level)
{
/*
* We don't compare the low bits of the lock sequence numbers because
- * @iter might have taken a write lock on @b, and we don't want to skip
- * the linked iterator if the sequence numbers were equal before taking
- * that write lock. The lock sequence number is incremented by taking
- * and releasing write locks and is even when unlocked:
+ * @path might have taken a write lock on @b, and we don't want to skip
+ * the linked path if the sequence numbers were equal before taking that
+ * write lock. The lock sequence number is incremented by taking and
+ * releasing write locks and is even when unlocked:
*/
- return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+ return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
-static inline struct btree *btree_node_parent(struct btree_iter *iter,
+static inline struct btree *btree_node_parent(struct btree_path *path,
struct btree *b)
{
- return btree_iter_node(iter, b->c.level + 1);
-}
-
-static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
-{
- return hweight64(trans->iters_linked) > 1;
+ return btree_path_node(path, b->c.level + 1);
}
static inline int btree_iter_err(const struct btree_iter *iter)
@@ -46,97 +55,105 @@ static inline int btree_iter_err(const struct btree_iter *iter)
return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}
-/* Iterate over iters within a transaction: */
+/* Iterate over paths within a transaction: */
-static inline struct btree_iter *
-__trans_next_iter(struct btree_trans *trans, unsigned idx)
+static inline struct btree_path *
+__trans_next_path(struct btree_trans *trans, unsigned idx)
{
u64 l;
if (idx == BTREE_ITER_MAX)
return NULL;
- l = trans->iters_linked >> idx;
+ l = trans->paths_allocated >> idx;
if (!l)
return NULL;
idx += __ffs64(l);
EBUG_ON(idx >= BTREE_ITER_MAX);
- EBUG_ON(trans->iters[idx].idx != idx);
- return &trans->iters[idx];
+ EBUG_ON(trans->paths[idx].idx != idx);
+ return &trans->paths[idx];
}
-#define trans_for_each_iter(_trans, _iter) \
- for (_iter = __trans_next_iter((_trans), 0); \
- (_iter); \
- _iter = __trans_next_iter((_trans), (_iter)->idx + 1))
+#define trans_for_each_path(_trans, _path) \
+ for (_path = __trans_next_path((_trans), 0); \
+ (_path); \
+ _path = __trans_next_path((_trans), (_path)->idx + 1))
-static inline struct btree_iter *next_btree_iter(struct btree_trans *trans, struct btree_iter *iter)
+static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
- unsigned idx = iter ? iter->sorted_idx + 1 : 0;
+ unsigned idx = path ? path->sorted_idx + 1 : 0;
EBUG_ON(idx > trans->nr_sorted);
return idx < trans->nr_sorted
- ? trans->iters + trans->sorted[idx]
+ ? trans->paths + trans->sorted[idx]
: NULL;
}
-static inline struct btree_iter *prev_btree_iter(struct btree_trans *trans, struct btree_iter *iter)
+static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
- EBUG_ON(iter->sorted_idx >= trans->nr_sorted);
- return iter->sorted_idx
- ? trans->iters + trans->sorted[iter->sorted_idx - 1]
+ EBUG_ON(path->sorted_idx >= trans->nr_sorted);
+ return path->sorted_idx
+ ? trans->paths + trans->sorted[path->sorted_idx - 1]
: NULL;
}
-#define trans_for_each_iter_inorder(_trans, _iter) \
- for (_iter = next_btree_iter(trans, NULL); \
- (_iter); \
- _iter = next_btree_iter((_trans), (_iter)))
+#define trans_for_each_path_inorder(_trans, _path, _i) \
+ for (_i = 0; \
+ ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) < (_trans)->nr_sorted;\
+ _i++)
-static inline bool __iter_has_node(const struct btree_iter *iter,
+static inline bool __path_has_node(const struct btree_path *path,
const struct btree *b)
{
- return iter->l[b->c.level].b == b &&
- btree_node_lock_seq_matches(iter, b, b->c.level);
+ return path->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(path, b, b->c.level);
}
-static inline struct btree_iter *
-__trans_next_iter_with_node(struct btree_trans *trans, struct btree *b,
+static inline struct btree_path *
+__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
unsigned idx)
{
- struct btree_iter *iter = __trans_next_iter(trans, idx);
+ struct btree_path *path = __trans_next_path(trans, idx);
- while (iter && !__iter_has_node(iter, b))
- iter = __trans_next_iter(trans, iter->idx + 1);
+ while (path && !__path_has_node(path, b))
+ path = __trans_next_path(trans, path->idx + 1);
- return iter;
+ return path;
}
-#define trans_for_each_iter_with_node(_trans, _b, _iter) \
- for (_iter = __trans_next_iter_with_node((_trans), (_b), 0); \
- (_iter); \
- _iter = __trans_next_iter_with_node((_trans), (_b), \
- (_iter)->idx + 1))
+#define trans_for_each_path_with_node(_trans, _b, _path) \
+ for (_path = __trans_next_path_with_node((_trans), (_b), 0); \
+ (_path); \
+ _path = __trans_next_path_with_node((_trans), (_b), \
+ (_path)->idx + 1))
+
+struct btree_path * __must_check
+bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *, bool);
+int __must_check bch2_btree_path_traverse(struct btree_trans *,
+ struct btree_path *, unsigned);
+struct btree_path *bch2_path_get(struct btree_trans *, bool, enum btree_id,
+ struct bpos, unsigned, unsigned, bool);
+inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
#ifdef CONFIG_BCACHEFS_DEBUG
-void bch2_btree_trans_verify_iters(struct btree_trans *, struct btree *);
-void bch2_btree_trans_verify_locks(struct btree_trans *);
+void bch2_trans_verify_paths(struct btree_trans *);
+void bch2_trans_verify_locks(struct btree_trans *);
#else
-static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
- struct btree *b) {}
-static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
+static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
+static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif
-void bch2_btree_iter_fix_key_modified(struct btree_iter *, struct btree *,
- struct bkey_packed *);
-void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
- struct btree_node_iter *, struct bkey_packed *,
- unsigned, unsigned);
+void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
+ struct btree *, struct bkey_packed *);
+void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
+ struct btree *, struct btree_node_iter *,
+ struct bkey_packed *, unsigned, unsigned);
+
+bool bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
-bool bch2_btree_iter_relock_intent(struct btree_iter *);
-bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
+void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
@@ -149,35 +166,36 @@ static inline int btree_trans_restart(struct btree_trans *trans)
return -EINTR;
}
-bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
+bool __bch2_btree_path_upgrade(struct btree_trans *,
+ struct btree_path *, unsigned);
-static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
+static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
+ struct btree_path *path,
unsigned new_locks_want)
{
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
- return iter->locks_want < new_locks_want
- ? __bch2_btree_iter_upgrade(iter, new_locks_want)
- : iter->uptodate <= BTREE_ITER_NEED_PEEK;
+ return path->locks_want < new_locks_want
+ ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+ : path->uptodate == BTREE_ITER_UPTODATE;
}
-void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);
+void __bch2_btree_path_downgrade(struct btree_path *, unsigned);
-static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
+static inline void bch2_btree_path_downgrade(struct btree_path *path)
{
- unsigned new_locks_want = iter->level + !!(iter->flags & BTREE_ITER_INTENT);
+ unsigned new_locks_want = path->level + !!path->intent_ref;
- if (iter->locks_want > new_locks_want)
- __bch2_btree_iter_downgrade(iter, new_locks_want);
+ if (path->locks_want > new_locks_want)
+ __bch2_btree_path_downgrade(path, new_locks_want);
}
void bch2_trans_downgrade(struct btree_trans *);
-void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
-void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
-
-void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
+void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
+void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
@@ -206,7 +224,8 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos
iter->k.p.offset = iter->pos.offset = new_pos.offset;
iter->k.p.snapshot = iter->pos.snapshot = new_pos.snapshot;
iter->k.size = 0;
- iter->should_be_locked = false;
+ if (iter->path->ref == 1)
+ iter->path->should_be_locked = false;
}
static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
@@ -215,16 +234,6 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it
iter->pos = bkey_start_pos(&iter->k);
}
-static inline struct btree_iter *idx_to_btree_iter(struct btree_trans *trans, unsigned idx)
-{
- return idx != U8_MAX ? trans->iters + idx : NULL;
-}
-
-static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
-{
- return idx_to_btree_iter(iter->trans, iter->child_idx);
-}
-
/*
* Unlocks before scheduling
* Note: does not revalidate iterator
@@ -242,11 +251,11 @@ static inline int bch2_trans_cond_resched(struct btree_trans *trans)
#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
_locks_want, _depth, _flags, _b) \
- for (iter = bch2_trans_get_node_iter((_trans), (_btree_id), \
+ for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
_start, _locks_want, _depth, _flags), \
- _b = bch2_btree_iter_peek_node(_iter); \
+ _b = bch2_btree_iter_peek_node(&(_iter)); \
(_b); \
- (_b) = bch2_btree_iter_next_node(_iter))
+ (_b) = bch2_btree_iter_next_node(&(_iter)))
#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
_flags, _b) \
@@ -276,75 +285,36 @@ static inline int bkey_err(struct bkey_s_c k)
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
- for ((_iter) = bch2_trans_get_iter((_trans), (_btree_id), \
- (_start), (_flags)), \
- (_k) = __bch2_btree_iter_peek(_iter, _flags); \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)), \
+ (_k) = __bch2_btree_iter_peek(&(_iter), _flags); \
!((_ret) = bkey_err(_k)) && (_k).k; \
- (_k) = __bch2_btree_iter_next(_iter, _flags))
+ (_k) = __bch2_btree_iter_next(&(_iter), _flags))
#define for_each_btree_key_continue(_iter, _flags, _k, _ret) \
- for ((_k) = __bch2_btree_iter_peek(_iter, _flags); \
+ for ((_k) = __bch2_btree_iter_peek(&(_iter), _flags); \
!((_ret) = bkey_err(_k)) && (_k).k; \
- (_k) = __bch2_btree_iter_next(_iter, _flags))
+ (_k) = __bch2_btree_iter_next(&(_iter), _flags))
/* new multiple iterator interface: */
-int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
-int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
+void bch2_dump_trans_paths_updates(struct btree_trans *);
-void bch2_trans_unlink_iters(struct btree_trans *);
+void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
+void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
+ unsigned, struct bpos, unsigned);
+void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
+ enum btree_id, struct bpos,
+ unsigned, unsigned, unsigned);
+void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
-struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
- struct bpos, unsigned,
- unsigned, unsigned);
-
-static inline struct btree_iter *
-bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
- struct bpos pos, unsigned flags)
-{
- struct btree_iter *iter =
- __bch2_trans_get_iter(trans, btree_id, pos,
- (flags & BTREE_ITER_INTENT) != 0, 0,
- flags);
- iter->ip_allocated = _THIS_IP_;
- return iter;
-}
-
-struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
- struct btree_iter *);
-static inline struct btree_iter *
-bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
-{
- struct btree_iter *iter =
- __bch2_trans_copy_iter(trans, src);
-
- iter->ip_allocated = _THIS_IP_;
- return iter;
-}
-
-struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
- enum btree_id, struct bpos,
- unsigned, unsigned, unsigned);
-
-static inline bool btree_iter_live(struct btree_trans *trans, struct btree_iter *iter)
+static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
- return (trans->iters_live & (1ULL << iter->idx)) != 0;
+ iter->path->preserve = false;
}
-static inline bool btree_iter_keep(struct btree_trans *trans, struct btree_iter *iter)
-{
- return btree_iter_live(trans, iter) ||
- (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
-}
-
-static inline void set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
-{
- trans->iters_touched &= ~(1ULL << iter->idx);
-}
-
-void bch2_trans_begin(struct btree_trans *);
-
void *bch2_trans_kmalloc(struct btree_trans *, size_t);
+void bch2_trans_begin(struct btree_trans *);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);
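
For completeness, a hedged sketch of iterating keys with the reworked for_each_btree_key() declared above: the macro now initializes the caller's stack iterator itself, so the caller only pairs the loop with bch2_trans_iter_exit(). example_scan() is a hypothetical wrapper, not part of this patch; the btree ID, start position, and flags are illustrative.

    /* Hypothetical caller -- not part of this patch. */
    static int example_scan(struct btree_trans *trans)
    {
    	struct btree_iter iter;
    	struct bkey_s_c k;
    	int ret;

    	for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN,
    			   BTREE_ITER_PREFETCH, k, ret) {
    		/* inspect k; ret is set by bkey_err() in the loop condition */
    	}
    	bch2_trans_iter_exit(trans, &iter);

    	return ret;
    }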