author    Kent Overstreet <kent.overstreet@gmail.com>    2016-05-31 02:39:06 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>    2016-07-30 01:13:55 -0800
commit    50cf040486ea853c44f57ee4b700071b0a46c002 (patch)
tree      ff2d2d92cdbd79d5ea18a64b9186edab3927322a
parent    d9ec478c57c165528011840dd8420d4da823dd1a (diff)
bcache: btree_iter_level
-rw-r--r--  drivers/md/bcache/btree_gc.c      |   6
-rw-r--r--  drivers/md/bcache/btree_io.c      |   2
-rw-r--r--  drivers/md/bcache/btree_iter.c    | 156
-rw-r--r--  drivers/md/bcache/btree_iter.h    |  68
-rw-r--r--  drivers/md/bcache/btree_locking.h |   4
-rw-r--r--  drivers/md/bcache/btree_update.c  |  32
-rw-r--r--  drivers/md/bcache/extents.c       |  20
7 files changed, 148 insertions(+), 140 deletions(-)
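
The patch itself is one mechanical refactor: the three parallel per-level arrays in struct btree_iter collapse into a single array of a new struct btree_iter_level, so all the state for one btree level lives together. A minimal sketch of the shape change, with field types copied from the btree_iter.h hunk below:

    /* Before: level i's state is spread across three parallel arrays. */
    u32                     lock_seq[BTREE_MAX_DEPTH];
    struct btree            *nodes[BTREE_MAX_DEPTH];
    struct btree_node_iter  node_iters[BTREE_MAX_DEPTH];

    /* After: one struct per level, one array of them. */
    struct btree_iter_level {
            struct btree            *node;
            struct btree_node_iter  node_iter;
            u32                     lock_seq;
    };

    struct btree_iter_level l[BTREE_MAX_DEPTH];

    /* Every access site changes the same way:
     *      iter->nodes[i]      becomes  iter->l[i].node
     *      iter->node_iters[i] becomes  iter->l[i].node_iter
     *      iter->lock_seq[i]   becomes  iter->l[i].lock_seq
     */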
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index a7fdec3e65a5..886d3c3acd76 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -451,7 +451,7 @@ static void recalc_packed_keys(struct btree *b)
static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
struct btree_iter *iter)
{
- struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
+ struct btree *parent = iter->l[old_nodes[0]->level + 1].node;
struct cache_set *c = iter->c;
unsigned i, nr_old_nodes, nr_new_nodes, u64s = 0;
unsigned blocks = btree_blocks(c) * 2 / 3;
@@ -625,7 +625,7 @@ next:
BUG_ON(!bch_keylist_empty(&keylist));
- BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
+ BUG_ON(iter->l[old_nodes[0]->level].node != old_nodes[0]);
BUG_ON(!bch_btree_iter_node_replace(iter, new_nodes[0]));
@@ -716,7 +716,7 @@ static int bch_coalesce_btree(struct cache_set *c, enum btree_id btree_id)
* and the nodes in our sliding window might not have the same
* parent anymore - blow away the sliding window:
*/
- if (iter.nodes[iter.level + 1] &&
+ if (iter.l[iter.level + 1].node &&
!btree_node_intent_locked(&iter, iter.level + 1))
memset(merge + 1, 0,
(GC_MERGE_NODES - 1) * sizeof(merge[0]));
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index f90fc417aed1..052364a042cd 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -144,7 +144,7 @@ void bch_btree_init_next(struct cache_set *c, struct btree *b,
{
bool did_sort;
- BUG_ON(iter && iter->nodes[b->level] != b);
+ BUG_ON(iter && iter->l[b->level].node != b);
did_sort = btree_node_compact(c, b, iter);
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index 59483bbe57e9..864ffa158f42 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -13,7 +13,7 @@
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
- return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
+ return iter->l[l].node && iter->l[l].node != BTREE_ITER_NOT_END;
}
/* Btree node locking: */
@@ -26,13 +26,13 @@ void btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
struct btree_iter *linked;
- EBUG_ON(iter->nodes[b->level] != b);
- EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
+ EBUG_ON(iter->l[b->level].node != b);
+ EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);
for_each_linked_btree_node(iter, b, linked)
- linked->lock_seq[b->level] += 2;
+ linked->l[b->level].lock_seq += 2;
- iter->lock_seq[b->level] += 2;
+ iter->l[b->level].lock_seq += 2;
six_unlock_write(&b->lock);
}
@@ -42,14 +42,14 @@ void btree_node_lock_write(struct btree *b, struct btree_iter *iter)
struct btree_iter *linked;
unsigned readers = 0;
- EBUG_ON(iter->nodes[b->level] != b);
- EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);
+ EBUG_ON(iter->l[b->level].node != b);
+ EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);
if (six_trylock_write(&b->lock))
return;
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[b->level] == b &&
+ if (linked->l[b->level].node == b &&
btree_node_read_locked(linked, b->level))
readers++;
@@ -90,7 +90,7 @@ void __btree_node_lock_write(struct btree *b, struct btree_iter *iter)
static bool btree_lock_upgrade(struct btree_iter *iter, unsigned level)
{
struct btree_iter *linked;
- struct btree *b = iter->nodes[level];
+ struct btree *b = iter->l[level].node;
if (btree_node_intent_locked(iter, level))
return true;
@@ -100,13 +100,13 @@ static bool btree_lock_upgrade(struct btree_iter *iter, unsigned level)
if (btree_node_locked(iter, level)
? six_trylock_convert(&b->lock, SIX_LOCK_read, SIX_LOCK_intent)
- : six_relock_intent(&b->lock, iter->lock_seq[level]))
+ : six_relock_intent(&b->lock, iter->l[level].lock_seq))
goto success;
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b &&
+ if (linked->l[level].node == b &&
btree_node_intent_locked(linked, level) &&
- iter->lock_seq[level] == b->lock.state.seq) {
+ iter->l[level].lock_seq == b->lock.state.seq) {
btree_node_unlock(iter, level);
six_lock_increment(&b->lock, SIX_LOCK_intent);
goto success;
@@ -130,7 +130,7 @@ bool bch_btree_iter_upgrade(struct btree_iter *iter)
for (i = iter->level;
i < min_t(int, iter->locks_want, BTREE_MAX_DEPTH);
i++)
- if (iter->nodes[i] && !btree_lock_upgrade(iter, i)) {
+ if (iter->l[i].node && !btree_lock_upgrade(iter, i)) {
do {
btree_node_unlock(iter, i);
@@ -139,7 +139,7 @@ bool bch_btree_iter_upgrade(struct btree_iter *iter)
* btree_iter_traverse() fails, so that we keep
* going up and get all the intent locks we need
*/
- iter->lock_seq[i]--;
+ iter->l[i].lock_seq--;
} while (--i >= 0);
return false;
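
Note the failure path above: after unlocking level i, the iterator deliberately corrupts the saved sequence number with lock_seq--. Relocking only succeeds on an exact seq match, so the stale value can never match again, btree_node_relock() is guaranteed to fail, and the next btree_iter_traverse() walks back up and retakes the intent locks it needs. A small self-contained model of why the decrement is sticky (illustrative only, not the kernel's six-lock code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Model of a six lock's seq: even when unlocked, and it only ever
     * grows (one write-lock/unlock cycle adds 2).  Relocking demands an
     * exact match against the seq saved when the node was locked. */
    static bool can_relock(uint32_t saved_seq, uint32_t cur_seq)
    {
            return saved_seq == cur_seq;
    }

    int main(void)
    {
            uint32_t seq   = 4;     /* unlocked: even */
            uint32_t saved = seq;   /* what the iterator remembered */

            saved--;                /* the lock_seq-- trick */

            /* saved is now odd; the real seq stays even whenever the
             * node is unlocked and only increases, so no future relock
             * can ever succeed: */
            for (uint32_t cur = seq; cur < seq + 10; cur += 2)
                    assert(!can_relock(saved, cur));
            return 0;
    }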
@@ -161,7 +161,7 @@ int bch_btree_iter_unlock(struct btree_iter *iter)
bool btree_node_relock(struct btree_iter *iter, unsigned level)
{
struct btree_iter *linked;
- struct btree *b = iter->nodes[level];
+ struct btree *b = iter->l[level].node;
enum six_lock_type type = btree_lock_want(iter, level);
if (btree_node_locked(iter, level))
@@ -171,15 +171,15 @@ bool btree_node_relock(struct btree_iter *iter, unsigned level)
return false;
if (is_btree_node(iter, level) &&
- six_relock_type(&b->lock, iter->lock_seq[level], type)) {
+ six_relock_type(&b->lock, iter->l[level].lock_seq, type)) {
mark_btree_node_locked(iter, level, type);
return true;
}
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b &&
+ if (linked->l[level].node == b &&
btree_node_locked_type(linked, level) == type &&
- iter->lock_seq[level] == b->lock.state.seq) {
+ iter->l[level].lock_seq == b->lock.state.seq) {
six_lock_increment(&b->lock, type);
mark_btree_node_locked(iter, level, type);
return true;
@@ -228,13 +228,13 @@ void bch_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- if (iter->nodes[b->level] == b)
- __bch_btree_iter_verify(&iter->node_iters[b->level],
+ if (iter->l[b->level].node == b)
+ __bch_btree_iter_verify(&iter->l[b->level].node_iter,
b, iter->pos,
iter->is_extents);
for_each_linked_btree_node(iter, b, linked)
- __bch_btree_iter_verify(&linked->node_iters[b->level],
+ __bch_btree_iter_verify(&linked->l[b->level].node_iter,
b, linked->pos,
linked->is_extents);
}
@@ -309,18 +309,18 @@ void bch_btree_node_iter_fix(struct btree_iter *iter,
{
struct btree_iter *linked;
- if (node_iter != &iter->node_iters[b->level])
+ if (node_iter != &iter->l[b->level].node_iter)
__bch_btree_node_iter_fix(iter, &b->keys, node_iter,
k, overwrote);
- if (iter->nodes[b->level] == b)
+ if (iter->l[b->level].node == b)
__bch_btree_node_iter_fix(iter, &b->keys,
- &iter->node_iters[b->level],
+ &iter->l[b->level].node_iter,
k, overwrote);
for_each_linked_btree_node(iter, b, linked)
__bch_btree_node_iter_fix(linked, &b->keys,
- &linked->node_iters[b->level],
+ &linked->l[b->level].node_iter,
k, overwrote);
bch_btree_iter_verify(iter, b);
}
@@ -328,10 +328,10 @@ void bch_btree_node_iter_fix(struct btree_iter *iter,
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
{
- const struct bkey_format *f = &iter->nodes[iter->level]->keys.format;
+ struct btree_iter_level *l = &iter->l[iter->level];
+ const struct bkey_format *f = &l->node->keys.format;
struct bkey_packed *k =
- bch_btree_node_iter_peek_all(&iter->node_iters[iter->level],
- &iter->nodes[iter->level]->keys);
+ bch_btree_node_iter_peek_all(&l->node_iter, &l->node->keys);
struct bkey_s_c ret;
if (!k)
@@ -340,17 +340,17 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
ret = bkey_disassemble(f, k, &iter->k);
if (debug_check_bkeys(iter->c))
- bkey_debugcheck(iter->c, iter->nodes[iter->level], ret);
+ bkey_debugcheck(iter->c, l->node, ret);
return ret;
}
static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
{
- const struct bkey_format *f = &iter->nodes[iter->level]->keys.format;
+ struct btree_iter_level *l = &iter->l[iter->level];
+ const struct bkey_format *f = &l->node->keys.format;
struct bkey_packed *k =
- bch_btree_node_iter_peek(&iter->node_iters[iter->level],
- &iter->nodes[iter->level]->keys);
+ bch_btree_node_iter_peek(&l->node_iter, &l->node->keys);
struct bkey_s_c ret;
if (!k)
@@ -359,27 +359,38 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
ret = bkey_disassemble(f, k, &iter->k);
if (debug_check_bkeys(iter->c))
- bkey_debugcheck(iter->c, iter->nodes[iter->level], ret);
+ bkey_debugcheck(iter->c, l->node, ret);
return ret;
}
static inline void __btree_iter_advance(struct btree_iter *iter)
{
- bch_btree_node_iter_advance(&iter->node_iters[iter->level],
- &iter->nodes[iter->level]->keys);
+ struct btree_iter_level *l = &iter->l[iter->level];
+
+ bch_btree_node_iter_advance(&l->node_iter, &l->node->keys);
+}
+
+static inline void btree_iter_node_iter_init(struct btree_iter *iter,
+ struct btree_iter_level *l,
+ struct bpos pos)
+{
+ bch_btree_node_iter_init(&l->node_iter, &l->node->keys,
+ pos, iter->is_extents);
}
static inline void btree_iter_node_set(struct btree_iter *iter,
struct btree *b,
struct bpos pos)
{
+ struct btree_iter_level *l = &iter->l[b->level];
+
BUG_ON(b->lock.state.seq & 1);
- iter->lock_seq[b->level] = b->lock.state.seq;
- iter->nodes[b->level] = b;
- bch_btree_node_iter_init(&iter->node_iters[b->level], &b->keys,
- pos, iter->is_extents);
+ l->lock_seq = b->lock.state.seq;
+ l->node = b;
+
+ btree_iter_node_iter_init(iter, l, pos);
}
static bool btree_iter_pos_in_node(struct btree_iter *iter, struct btree *b)
@@ -441,9 +452,9 @@ void bch_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
unsigned level = b->level;
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[level] == b) {
+ if (linked->l[level].node == b) {
btree_node_unlock(linked, level);
- linked->nodes[level] = BTREE_ITER_NOT_END;
+ linked->l[level].node = BTREE_ITER_NOT_END;
}
}
@@ -451,10 +462,10 @@ void bch_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
unsigned level = b->level;
- if (iter->nodes[level] == b) {
+ if (iter->l[level].node == b) {
BUG_ON(b->lock.state.intent_lock != 1);
btree_node_unlock(iter, level);
- iter->nodes[level] = BTREE_ITER_NOT_END;
+ iter->l[level].node = BTREE_ITER_NOT_END;
}
}
@@ -467,13 +478,13 @@ void bch_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
struct btree_iter *linked;
for_each_linked_btree_node(iter, b, linked)
- bch_btree_node_iter_init(&linked->node_iters[b->level],
- &linked->nodes[b->level]->keys,
- linked->pos, linked->is_extents);
+ btree_iter_node_iter_init(linked,
+ &linked->l[b->level],
+ linked->pos);
- bch_btree_node_iter_init(&iter->node_iters[b->level],
- &iter->nodes[b->level]->keys,
- iter->pos, iter->is_extents);
+ btree_iter_node_iter_init(iter,
+ &iter->l[b->level],
+ iter->pos);
}
static void btree_iter_verify_locking(struct btree_iter *iter, unsigned level)
@@ -524,7 +535,7 @@ static inline void btree_iter_lock_root(struct btree_iter *iter, struct bpos pos
(b != c->btree_roots[iter->btree_id].b))) {
iter->level = level;
if (level + 1 < BTREE_MAX_DEPTH)
- iter->nodes[level + 1] = NULL;
+ iter->l[level + 1].node = NULL;
btree_iter_node_set(iter, b, pos);
break;
}
@@ -568,7 +579,7 @@ static void btree_iter_up(struct btree_iter *iter)
static int __must_check __bch_btree_iter_traverse(struct btree_iter *iter,
unsigned l, struct bpos pos)
{
- if (!iter->nodes[iter->level])
+ if (!iter->l[iter->level].node)
return 0;
iter->at_end_of_leaf = false;
@@ -577,11 +588,11 @@ retry:
* If the current node isn't locked, go up until we have a locked node
* or run out of nodes:
*/
- while (iter->nodes[iter->level] &&
+ while (iter->l[iter->level].node &&
!(is_btree_node(iter, iter->level) &&
btree_node_relock(iter, iter->level) &&
btree_iter_pos_cmp(pos,
- &iter->nodes[iter->level]->key.k,
+ &iter->l[iter->level].node->key.k,
iter->is_extents)))
btree_iter_up(iter);
@@ -590,7 +601,7 @@ retry:
* root) - advance its node iterator if necessary:
*/
if (iter->level < BTREE_MAX_DEPTH &&
- iter->nodes[iter->level]) {
+ iter->l[iter->level].node) {
struct bkey_s_c k;
while ((k = __btree_iter_peek_all(iter)).k &&
@@ -606,7 +617,7 @@ retry:
*/
while (iter->level > l)
if (iter->level < BTREE_MAX_DEPTH &&
- iter->nodes[iter->level]) {
+ iter->l[iter->level].node) {
struct closure cl;
int ret;
@@ -655,7 +666,7 @@ struct btree *bch_btree_iter_peek_node(struct btree_iter *iter)
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].node;
EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
iter->pos = b->key.k.p;
@@ -673,7 +684,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter)
btree_iter_up(iter);
if (iter->level >= BTREE_MAX_DEPTH ||
- !iter->nodes[iter->level])
+ !iter->l[iter->level].node)
return NULL;
/* parent node usually won't be locked: redo traversal if necessary */
@@ -681,7 +692,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter)
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].node;
if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
struct bpos pos = bkey_successor(iter->pos);
@@ -690,7 +701,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter)
if (ret)
return NULL;
- b = iter->nodes[iter->level];
+ b = iter->l[iter->level].node;
}
iter->pos = b->key.k.p;
@@ -738,14 +749,14 @@ void bch_btree_iter_advance_pos(struct btree_iter *iter)
/* XXX: expensive */
void bch_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
+ struct btree_iter_level *l = &iter->l[iter->level];
+
/* incapable of rewinding across nodes: */
- BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
+ BUG_ON(bkey_cmp(pos, l->node->data->min_key) < 0);
iter->pos = pos;
- bch_btree_node_iter_init(&iter->node_iters[iter->level],
- &iter->nodes[iter->level]->keys,
- pos, iter->is_extents);
+ btree_iter_node_iter_init(iter, l, pos);
}
struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter)
@@ -771,7 +782,7 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter)
return k;
}
- pos = iter->nodes[0]->key.k.p;
+ pos = iter->l[0].node->key.k.p;
if (!bkey_cmp(pos, POS_MAX))
return (struct bkey_s_c) { NULL, NULL };
@@ -807,7 +818,7 @@ recheck:
}
if (!k.k)
- k.k = &iter->nodes[0]->key.k;
+ k.k = &iter->l[0].node->key.k;
bch_key_resize(&n,
min_t(u64, KEY_SIZE_MAX,
@@ -850,8 +861,8 @@ void __bch_btree_iter_init(struct btree_iter *iter, struct cache_set *c,
iter->error = 0;
iter->c = c;
iter->pos = pos;
- iter->nodes[iter->level] = BTREE_ITER_NOT_END;
- iter->nodes[iter->level + 1] = NULL;
+ iter->l[iter->level].node = BTREE_ITER_NOT_END;
+ iter->l[iter->level + 1].node = NULL;
iter->next = iter;
}
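
__bch_btree_iter_init() above also shows the sentinel convention that is_btree_node() (top of this file) and bch_btree_iter_node_drop() rely on. As a reading of the diff, the three possible values of iter->l[level].node are:

    /* Sentinel convention for iter->l[level].node:
     *   NULL                - no node at this level (above the root)
     *   BTREE_ITER_NOT_END  - the level exists, but no node is
     *                         currently pointed at / locked; must be
     *                         retraversed before use
     *   any other value     - a real btree node (see is_btree_node())
     */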
@@ -886,16 +897,7 @@ void bch_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
dst->is_extents = src->is_extents;
dst->btree_id = src->btree_id;
dst->pos = src->pos;
-
- memcpy(dst->lock_seq,
- src->lock_seq,
- sizeof(src->lock_seq));
- memcpy(dst->nodes,
- src->nodes,
- sizeof(src->nodes));
- memcpy(dst->node_iters,
- src->node_iters,
- sizeof(src->node_iters));
+ memcpy(dst->l, src->l, sizeof(src->l));
bch_btree_iter_upgrade(dst);
}
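
The payoff of the new layout shows up in bch_btree_iter_copy() above: three memcpy() calls collapse into one, and any field later added to struct btree_iter_level gets copied automatically instead of needing a fourth memcpy. Condensed from the hunk:

    /* Before: each parallel array copied separately; forgetting one of
     * these when adding a per-level field would be a silent bug. */
    memcpy(dst->lock_seq,   src->lock_seq,   sizeof(src->lock_seq));
    memcpy(dst->nodes,      src->nodes,      sizeof(src->nodes));
    memcpy(dst->node_iters, src->node_iters, sizeof(src->node_iters));

    /* After: one copy of the whole per-level array. */
    memcpy(dst->l, src->l, sizeof(src->l));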
diff --git a/drivers/md/bcache/btree_iter.h b/drivers/md/bcache/btree_iter.h
index c06e692b895e..0a13b7df9b12 100644
--- a/drivers/md/bcache/btree_iter.h
+++ b/drivers/md/bcache/btree_iter.h
@@ -3,22 +3,33 @@
#include "btree_types.h"
+struct btree_iter_level {
+ struct btree *node;
+ struct btree_node_iter node_iter;
+ u32 lock_seq;
+};
+
struct btree_iter {
- /* Current btree depth */
- u8 level;
+ struct cache_set *c;
/*
- * Used in bch_btree_iter_traverse(), to indicate whether we're
- * searching for @pos or the first key strictly greater than @pos
+ * Circular linked list of linked iterators: linked iterators share
+ * locks (e.g. two linked iterators may have the same node intent
+ * locked, or read and write locked, at the same time), and insertions
+ * through one iterator won't invalidate the other linked iterators.
*/
- u8 is_extents;
+ struct btree_iter *next;
- /* Bitmasks for read/intent locks held per level */
- u8 nodes_locked;
- u8 nodes_intent_locked;
+ /*
+ * Current unpacked key - so that bch_btree_iter_next()/
+ * bch_btree_iter_next_with_holes() can correctly advance pos.
+ */
+ struct bkey k;
- /* Btree level below which we start taking intent locks */
- u8 locks_want;
+ /* Current position of the iterator */
+ struct bpos pos;
+
+ s8 error;
enum btree_id btree_id:8;
@@ -29,13 +40,21 @@ struct btree_iter {
u8 at_end_of_leaf;
-	s8 error;
+ /*
+ * Used in bch_btree_iter_traverse(), to indicate whether we're
+ * searching for @pos or the first key strictly greater than @pos
+ */
+ u8 is_extents;
- struct cache_set *c;
+ /* Btree level below which we start taking intent locks */
+ u8 locks_want;
- /* Current position of the iterator */
- struct bpos pos;
+ /* Current btree depth */
+ u8 level;
- u32 lock_seq[BTREE_MAX_DEPTH];
+ /* Bitmasks for read/intent locks held per level */
+ u8 nodes_locked;
+ u8 nodes_intent_locked;
/*
* NOTE: Never set iter->nodes to NULL except in btree_iter_lock_root().
@@ -49,22 +68,7 @@ struct btree_iter {
* node, which increments the node's lock seq, that's not actually
* necessary in that example).
*/
- struct btree *nodes[BTREE_MAX_DEPTH];
- struct btree_node_iter node_iters[BTREE_MAX_DEPTH];
-
- /*
- * Current unpacked key - so that bch_btree_iter_next()/
- * bch_btree_iter_next_with_holes() can correctly advance pos.
- */
- struct bkey k;
-
- /*
- * Circular linked list of linked iterators: linked iterators share
- * locks (e.g. two linked iterators may have the same node intent
- * locked, or read and write locked, at the same time), and insertions
- * through one iterator won't invalidate the other linked iterators.
- */
- struct btree_iter *next;
+ struct btree_iter_level l[BTREE_MAX_DEPTH];
};
/**
@@ -93,8 +97,8 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b,
* sequence number is incremented by taking and releasing write
* locks and is even when unlocked:
*/
- } while (linked->nodes[b->level] != b ||
- linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);
+ } while (linked->l[b->level].node != b ||
+ linked->l[b->level].lock_seq >> 1 != b->lock.state.seq >> 1);
return linked;
}
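
The seq >> 1 comparison in __next_linked_btree_node() leans on the invariant spelled out in the comment above it: the lock sequence number is even when unlocked, odd while write-locked, and one write-lock/unlock cycle adds 2. Shifting out the low bit compares generations while ignoring whether the node happens to be write-locked at this instant (which it is when the walk runs from btree_node_unlock_write()). A self-contained sketch of the arithmetic (the helper name is made up for illustration):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Generation compare: drop the transient "write-locked" low bit. */
    static bool same_generation(uint32_t saved, uint32_t cur)
    {
            return (saved >> 1) == (cur >> 1);
    }

    int main(void)
    {
            uint32_t seq   = 6;     /* even: unlocked */
            uint32_t saved = seq;   /* a linked iterator's lock_seq */

            seq += 1;                               /* write lock taken */
            assert(same_generation(saved, seq));    /* still matches */

            seq += 1;               /* write lock released */
            saved += 2;             /* linked->l[level].lock_seq += 2 */
            assert(same_generation(saved, seq));    /* kept in step */
            return 0;
    }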
diff --git a/drivers/md/bcache/btree_locking.h b/drivers/md/bcache/btree_locking.h
index fb6ce606eea4..f6789bb4a20b 100644
--- a/drivers/md/bcache/btree_locking.h
+++ b/drivers/md/bcache/btree_locking.h
@@ -104,7 +104,7 @@ static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level,
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
- __btree_node_unlock(iter, level, iter->nodes[level]);
+ __btree_node_unlock(iter, level, iter->l[level].node);
}
static inline void btree_node_lock_type(struct btree *b, struct btree_iter *iter,
@@ -116,7 +116,7 @@ static inline void btree_node_lock_type(struct btree *b, struct btree_iter *iter
return;
for_each_linked_btree_iter(iter, linked)
- if (linked->nodes[b->level] == b &&
+ if (linked->l[b->level].node == b &&
btree_node_locked_type(linked, b->level) == type) {
six_lock_increment(&b->lock, type);
return;
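
This hunk completes the linked-iterator story from btree_iter.h: before blocking on a node's lock, btree_node_lock_type() scans the linked iterators, and if one of them already holds this node with the wanted type it takes another reference via six_lock_increment() instead of deadlocking against its own lock group. Condensed (the slow-path acquisition after the loop lies outside the hunk):

    for_each_linked_btree_iter(iter, linked)
            if (linked->l[b->level].node == b &&
                btree_node_locked_type(linked, b->level) == type) {
                    /* a linked iterator already holds this node with
                     * the right type: share the lock rather than
                     * blocking on ourselves */
                    six_lock_increment(&b->lock, type);
                    return;
            }

    /* ...otherwise fall through and really take the lock... */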
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index b816c712cd10..d6a694667d70 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -723,7 +723,8 @@ void bch_btree_journal_key(struct btree_iter *iter,
struct journal_res *res)
{
struct cache_set *c = iter->c;
- struct btree *b = iter->nodes[0];
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree *b = l->node;
EBUG_ON(iter->level || b->level);
@@ -820,7 +821,7 @@ struct async_split *__bch_async_split_alloc(struct btree *nodes[],
*
* So far this is the only place where we have this issue:
*/
- if (iter->nodes[nodes[i]->level] == nodes[i])
+ if (iter->l[nodes[i]->level].node == nodes[i])
btree_node_lock_write(nodes[i], iter);
else
six_lock_write(&nodes[i]->lock);
@@ -853,7 +854,7 @@ struct async_split *__bch_async_split_alloc(struct btree *nodes[],
journal_pin_add(&c->journal, pin_list, &as->journal, NULL);
for (i = 0; i < nr_nodes; i++) {
- if (iter->nodes[nodes[i]->level] == nodes[i])
+ if (iter->l[nodes[i]->level].node == nodes[i])
btree_node_unlock_write(nodes[i], iter);
else
six_unlock_write(&nodes[i]->lock);
@@ -1118,7 +1119,7 @@ bch_btree_insert_keys_interior(struct btree *b,
}
/* Don't screw up @iter's position: */
- node_iter = iter->node_iters[b->level];
+ node_iter = iter->l[b->level].node_iter;
/*
* btree_split(), btree_gc_coalesce() will insert keys before
@@ -1267,7 +1268,7 @@ static void btree_split(struct btree *b, struct btree_iter *iter,
struct async_split *as)
{
struct cache_set *c = iter->c;
- struct btree *parent = iter->nodes[b->level + 1];
+ struct btree *parent = iter->l[b->level + 1].node;
struct btree *n1, *n2 = NULL, *n3 = NULL;
uint64_t start_time = local_clock();
unsigned u64s_to_insert = b->level
@@ -1445,7 +1446,7 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags,
{
struct btree_iter *linked;
struct cache_set *c = iter->c;
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].node;
struct btree_reserve *reserve;
struct async_split *as;
int ret = 0;
@@ -1497,7 +1498,7 @@ out_get_locks:
for (i = 0; i < BTREE_MAX_DEPTH; i++) {
btree_node_unlock(linked, i);
- linked->lock_seq[i]--;
+ linked->l[i].lock_seq--;
}
linked->locks_want = U8_MAX;
}
@@ -1515,8 +1516,7 @@ btree_insert_key(struct btree_insert_trans *trans,
struct journal_res *res,
unsigned flags)
{
- struct btree_iter *iter = insert->iter;
- struct btree *b = iter->nodes[0];
+ struct btree *b = insert->iter->l[0].node;
s64 oldsize = bch_count_data(&b->keys);
enum btree_insert_ret ret;
@@ -1539,7 +1539,7 @@ static bool same_leaf_as_prev(struct btree_insert_trans *trans,
* point to the same leaf node they'll always be adjacent now:
*/
return i != trans->entries &&
- i[0].iter->nodes[0] == i[-1].iter->nodes[0];
+ i[0].iter->l[0].node == i[-1].iter->l[0].node;
}
#define trans_for_each_entry(trans, i) \
@@ -1551,7 +1551,7 @@ static void multi_lock_write(struct btree_insert_trans *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- btree_node_lock_for_insert(i->iter->nodes[0], i->iter);
+ btree_node_lock_for_insert(i->iter->l[0].node, i->iter);
}
static void multi_unlock_write(struct btree_insert_trans *trans)
@@ -1560,7 +1560,7 @@ static void multi_unlock_write(struct btree_insert_trans *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- btree_node_unlock_write(i->iter->nodes[0], i->iter);
+ btree_node_unlock_write(i->iter->l[0].node, i->iter);
}
static int btree_trans_entry_cmp(const void *_l, const void *_r)
@@ -1631,7 +1631,7 @@ retry:
if (!i->done) {
u64s += i->k->k.u64s;
if (!bch_btree_node_insert_fits(c,
- i->iter->nodes[0], u64s))
+ i->iter->l[0].node, u64s))
goto unlock_split;
}
}
@@ -1677,7 +1677,7 @@ retry:
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- bch_btree_node_write_lazy(i->iter->nodes[0], i->iter);
+ bch_btree_node_write_lazy(i->iter->l[0].node, i->iter);
out:
percpu_ref_put(&c->writes);
return ret;
@@ -1955,7 +1955,7 @@ int bch_btree_delete_range(struct cache_set *c, enum btree_id id,
delete.k.p = iter.pos;
delete.k.version = version;
- if (iter.nodes[0]->keys.ops->is_extents) {
+ if (iter.is_extents) {
/*
* The extents btree is special - KEY_TYPE_DISCARD is
* used for deletions, not KEY_TYPE_DELETED. This is an
@@ -1991,7 +1991,7 @@ int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
struct closure *cl)
{
struct cache_set *c = iter->c;
- struct btree *n, *parent = iter->nodes[b->level + 1];
+ struct btree *n, *parent = iter->l[b->level + 1].node;
struct btree_reserve *reserve;
struct async_split *as;
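
One pattern in the transaction path above deserves a callout: entries are sorted (btree_trans_entry_cmp) so that entries landing in the same leaf end up adjacent, and same_leaf_as_prev() then lets multi_lock_write()/multi_unlock_write() take each leaf's write lock exactly once instead of trying to write-lock the same node twice. Condensed from the hunks above:

    /* sorted => same-leaf entries are adjacent => only the first entry
     * of each run locks / unlocks the node */
    trans_for_each_entry(trans, i)
            if (!same_leaf_as_prev(trans, i))
                    btree_node_lock_for_insert(i->iter->l[0].node, i->iter);

    /* ...do the inserts... */

    trans_for_each_entry(trans, i)
            if (!same_leaf_as_prev(trans, i))
                    btree_node_unlock_write(i->iter->l[0].node, i->iter);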
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 0693de44874a..af6159cad808 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -113,13 +113,14 @@ bch_insert_fixup_key(struct btree_insert_trans *trans,
struct journal_res *res)
{
struct btree_iter *iter = insert->iter;
+ struct btree_iter_level *l = &iter->l[0];
BUG_ON(iter->level);
bch_btree_journal_key(iter, insert->k, res);
bch_btree_bset_insert_key(iter,
- iter->nodes[0],
- &iter->node_iters[0],
+ l->node,
+ &l->node_iter,
insert->k);
trans->did_work = true;
@@ -819,7 +820,7 @@ static void bch_add_sectors(struct btree_iter *iter, struct bkey_s_c k,
struct bucket_stats_cache_set *stats)
{
struct cache_set *c = iter->c;
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].node;
EBUG_ON(iter->level);
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
@@ -998,7 +999,7 @@ static enum btree_insert_ret extent_insert_should_stop(struct btree_insert_trans
u64 start_time,
unsigned nr_done)
{
- struct btree *b = insert->iter->nodes[0];
+ struct btree *b = insert->iter->l[0].node;
/*
* Check if we have sufficient space in both the btree node and the
* journal reservation:
@@ -1031,7 +1032,7 @@ static void extent_do_insert(struct btree_iter *iter,
struct journal_res *res, unsigned flags,
struct bucket_stats_cache_set *stats)
{
- struct btree_keys *b = &iter->nodes[0]->keys;
+ struct btree_keys *b = &iter->l[0].node->keys;
struct bset_tree *t = bset_tree_last(b);
struct bset *i = t->data;
struct bkey_packed *prev, *where =
@@ -1053,7 +1054,7 @@ static void extent_do_insert(struct btree_iter *iter,
bch_extent_merge_inline(iter, bkey_to_packed(insert), where, false))
return;
- bch_btree_bset_insert(iter, iter->nodes[0], insert_iter, insert);
+ bch_btree_bset_insert(iter, iter->l[0].node, insert_iter, insert);
}
static void extent_insert_committed(struct btree_insert_trans *trans,
@@ -1139,7 +1140,7 @@ extent_insert_advance_pos(struct btree_insert_trans *trans,
struct journal_res *res, unsigned flags,
struct bucket_stats_cache_set *stats)
{
- struct btree *b = insert->iter->nodes[0];
+ struct btree *b = insert->iter->l[0].node;
struct bpos next_pos =
bpos_min(insert->k->k.p, k.k ? k.k->p : b->key.k.p);
@@ -1227,8 +1228,9 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
{
struct btree_iter *iter = insert->iter;
struct cache_set *c = iter->c;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree *b = l->node;
+ struct btree_node_iter *node_iter = &l->node_iter;
struct btree_node_iter node_insert_iter = *node_iter;
const struct bkey_format *f = &b->keys.format;
struct bkey_packed *_k;