Diffstat (limited to 'drivers/md/bcache/btree_iter.h')
-rw-r--r--   drivers/md/bcache/btree_iter.h   67
1 file changed, 48 insertions(+), 19 deletions(-)
diff --git a/drivers/md/bcache/btree_iter.h b/drivers/md/bcache/btree_iter.h
index 96356558812f..7f8aa4710ffe 100644
--- a/drivers/md/bcache/btree_iter.h
+++ b/drivers/md/bcache/btree_iter.h
@@ -9,6 +9,27 @@ struct btree_iter_level {
u32 lock_seq;
};
+struct btree_iter_state {
+ /* Bitmasks for read/intent locks held per level */
+ u8 nodes_locked;
+ u8 nodes_intent_locked;
+
+ /*
+ * NOTE: Never set l[level].node to NULL except in btree_iter_lock_root().
+ *
+ * This is because l[iter->level].node == NULL is how
+ * btree_iter_next_node() knows that it's finished with a depth first
+ * traversal. Just unlocking a node (with btree_node_unlock()) is fine,
+ * and if you really don't want that node used again (e.g. btree_split()
+ * freed it) decrementing lock_seq will cause bch_btree_node_relock() to
+ * always fail (but since freeing a btree node takes a write lock on the
+ * node, which increments the node's lock seq, that's not actually
+ * necessary in that example).
+ */
+
+ struct btree_iter_level l[BTREE_MAX_DEPTH];
+};
+
struct btree_iter {
struct cache_set *c;
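
This first hunk moves the per-level lock bookkeeping out of struct btree_iter
into its own struct btree_iter_state, so that the iterator can carry two of
them (the s[2] array in the next hunk). As a minimal sketch of how the
nodes_locked/nodes_intent_locked bitmasks work, one bit per btree level; the
helper names below are illustrative, not part of this patch:

    #include <stdbool.h>
    #include <stdint.h>

    struct iter_state_sketch {
        uint8_t nodes_locked;         /* bit n set: a lock is held on level n */
        uint8_t nodes_intent_locked;  /* bit n set: that lock is an intent lock */
    };

    static inline bool level_locked(const struct iter_state_sketch *s,
                                    unsigned level)
    {
        return s->nodes_locked & (1U << level);
    }

    static inline void mark_level_locked(struct iter_state_sketch *s,
                                         unsigned level, bool intent)
    {
        s->nodes_locked |= 1U << level;
        if (intent)
            s->nodes_intent_locked |= 1U << level;
    }

    static inline void mark_level_unlocked(struct iter_state_sketch *s,
                                           unsigned level)
    {
        s->nodes_locked &= ~(1U << level);
        s->nodes_intent_locked &= ~(1U << level);
    }
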
@@ -52,25 +73,33 @@ struct btree_iter {
/* Current btree depth */
u8 level;
- /* Bitmasks for read/intent locks held per level */
- u8 nodes_locked;
- u8 nodes_intent_locked;
+ unsigned s_idx:1,
+ have_alternate;
- /*
- * NOTE: Never set iter->nodes to NULL except in btree_iter_lock_root().
- *
- * This is because iter->nodes[iter->level] == NULL is how
- * btree_iter_next_node() knows that it's finished with a depth first
- * traversal. Just unlocking a node (with btree_node_unlock()) is fine,
- * and if you really don't want that node used again (e.g. btree_split()
- * freed it) decrementing lock_seq will cause btree_node_relock() to
- * always fail (but since freeing a btree node takes a write lock on the
- * node, which increments the node's lock seq, that's not actually
- * necessary in that example).
- */
- struct btree_iter_level l[BTREE_MAX_DEPTH];
+ struct btree_iter_state s[2];
};
+static inline struct btree_iter_state *iter_s(struct btree_iter *iter)
+{
+ return &iter->s[iter->s_idx];
+}
+
+static inline struct btree_iter_state *iter_a(struct btree_iter *iter)
+{
+ return &iter->s[iter->s_idx ^ 1];
+}
+
+static inline struct btree *btree_iter_leaf(struct btree_iter *iter)
+{
+ return iter_s(iter)->l[0].node;
+}
+
+static inline bool btree_iter_has_node(struct btree_iter_state *_iter,
+ struct btree *b)
+{
+ return _iter->l[b->level].node == b;
+}
+
/**
* for_each_linked_btree_iter - iterate over all iterators linked with @_iter
*/
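
With the state split out, struct btree_iter now holds two complete copies:
iter_s() returns the current one and iter_a() the alternate, selected by the
one-bit s_idx (have_alternate presumably flags that the alternate state is
populated; its users aren't in the context shown here). A hypothetical sketch,
not part of this patch, of the cheap state flip this layout allows:

    /*
     * Hypothetical helper (assumed, not from this patch): make the alternate
     * state current in O(1) by toggling s_idx, rather than copying all
     * BTREE_MAX_DEPTH btree_iter_level entries from one state to the other.
     */
    static inline void btree_iter_swap_state(struct btree_iter *iter)
    {
        iter->s_idx ^= 1;    /* iter_s() and iter_a() trade places */
    }
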
@@ -97,8 +126,8 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b,
* sequence number is incremented by taking and releasing write
* locks and is even when unlocked:
*/
- } while (linked->l[b->level].node != b ||
- linked->l[b->level].lock_seq >> 1 != b->lock.state.seq >> 1);
+ } while (!btree_iter_has_node(iter_s(linked), b) ||
+ iter_s(linked)->l[b->level].lock_seq >> 1 != b->lock.state.seq >> 1);
return linked;
}
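
The lock_seq check works because of the convention restated in the comment: the
sequence number is bumped on both write lock and write unlock, so it is odd
exactly while a node is write locked, and a completed write cycle advances it
by two. Comparing seq >> 1 discards the currently-write-locked bit, so the
match isn't spoiled by the write lock that @_iter itself may hold on @b; it
fails only once some write lock has been taken and released since the linked
iterator recorded its seq. As a standalone illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only; mirrors the seq >> 1 comparison in the hunk above. */
    static inline bool lock_seq_still_valid(uint32_t iter_seq, uint32_t node_seq)
    {
        return iter_seq >> 1 == node_seq >> 1;
    }

    /*
     * E.g. an iterator that recorded seq 6 (even: unlocked) still matches a
     * node at seq 7 (write lock currently held), but not at seq 8 (a write
     * lock has since been taken and released).
     */
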
@@ -110,7 +139,7 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b,
* @_b is assumed to be locked by @_iter
*
* Filters out iterators that don't have a valid btree_node iterator for @_b -
- * i.e. iterators for which btree_node_relock() would not succeed.
+ * i.e. iterators for which bch_btree_node_relock() would not succeed.
*/
#define for_each_linked_btree_node(_iter, _b, _linked) \
for ((_linked) = (_iter); \
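
A hedged usage sketch of the macro, assuming the calling convention its
comment describes (the macro body continues beyond what is quoted above): with
@b locked by the main iterator, visit each linked iterator that still holds a
valid, relockable reference to @b, here just counting them:

    /* Sketch only: count the linked iterators that still have @b locked. */
    static unsigned nr_linked_with_node(struct btree_iter *iter, struct btree *b)
    {
        struct btree_iter *linked;
        unsigned nr = 0;

        /* skips iterators for which bch_btree_node_relock() would fail */
        for_each_linked_btree_node(iter, b, linked)
            nr++;

        return nr;
    }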