Diffstat (limited to 'drivers/md/bcache/btree_update.c')
-rw-r--r--  drivers/md/bcache/btree_update.c  39
1 file changed, 20 insertions, 19 deletions
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index 2d15e4d4c994..b37ac35fe015 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -191,7 +191,7 @@ static void __btree_node_free(struct cache_set *c, struct btree *b,
/*
* By using six_unlock_write() directly instead of
* btree_node_unlock_write(), we don't update the iterator's sequence
- * numbers and cause future btree_node_relock() calls to fail:
+ * numbers and cause future bch_btree_node_relock() calls to fail:
*/
six_unlock_write(&b->lock);
}
@@ -723,7 +723,7 @@ void bch_btree_journal_key(struct btree_iter *iter,
struct journal_res *res)
{
struct cache_set *c = iter->c;
- struct btree_iter_level *l = &iter->l[0];
+ struct btree_iter_level *l = &iter_s(iter)->l[0];
struct btree *b = l->node;
EBUG_ON(iter->level || b->level);
@@ -821,7 +821,7 @@ struct async_split *__bch_async_split_alloc(struct btree *nodes[],
*
* So far this is the only place where we have this issue:
*/
- if (iter->l[nodes[i]->level].node == nodes[i])
+ if (btree_iter_has_node(iter_s(iter), nodes[i]))
btree_node_lock_write(nodes[i], iter);
else
six_lock_write(&nodes[i]->lock);
@@ -854,7 +854,7 @@ struct async_split *__bch_async_split_alloc(struct btree *nodes[],
journal_pin_add(&c->journal, pin_list, &as->journal, NULL);
for (i = 0; i < nr_nodes; i++) {
- if (iter->l[nodes[i]->level].node == nodes[i])
+ if (btree_iter_has_node(iter_s(iter), nodes[i]))
btree_node_unlock_write(nodes[i], iter);
else
six_unlock_write(&nodes[i]->lock);
@@ -1105,7 +1105,7 @@ bch_btree_insert_keys_interior(struct btree *b,
struct bkey_i *insert = bch_keylist_front(insert_keys);
struct bkey_packed *k;
- BUG_ON(!btree_node_intent_locked(iter, btree_node_root(b)->level));
+ BUG_ON(!btree_node_intent_locked(iter_s(iter), btree_node_root(b)->level));
BUG_ON(!b->level);
BUG_ON(!as || as->b);
verify_keys_sorted(insert_keys);
@@ -1119,7 +1119,7 @@ bch_btree_insert_keys_interior(struct btree *b,
}
/* Don't screw up @iter's position: */
- node_iter = iter->l[b->level].node_iter;
+ node_iter = iter_s(iter)->l[b->level].node_iter;
/*
* btree_split(), btree_gc_coalesce() will insert keys before
@@ -1268,14 +1268,14 @@ static void btree_split(struct btree *b, struct btree_iter *iter,
struct async_split *as)
{
struct cache_set *c = iter->c;
- struct btree *parent = iter->l[b->level + 1].node;
+ struct btree *parent = iter_s(iter)->l[b->level + 1].node;
struct btree *n1, *n2 = NULL, *n3 = NULL;
uint64_t start_time = local_clock();
unsigned u64s_to_insert = b->level
? bch_keylist_nkeys(insert_keys) : 0;
BUG_ON(!parent && (b != btree_node_root(b)));
- BUG_ON(!btree_node_intent_locked(iter, btree_node_root(b)->level));
+ BUG_ON(!btree_node_intent_locked(iter_s(iter), btree_node_root(b)->level));
bch_async_split_will_free_node(as, b);
@@ -1446,7 +1446,7 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags,
{
struct btree_iter *linked;
struct cache_set *c = iter->c;
- struct btree *b = iter->l[0].node;
+ struct btree *b = iter_s(iter)->l[0].node;
struct btree_reserve *reserve;
struct async_split *as;
int ret = 0;
@@ -1497,8 +1497,8 @@ out_get_locks:
unsigned i;
for (i = 0; i < BTREE_MAX_DEPTH; i++) {
- btree_node_unlock(linked, i);
- linked->l[i].lock_seq--;
+ btree_node_unlock(iter_s(linked), i);
+ iter_s(linked)->l[i].lock_seq--;
}
linked->locks_want = U8_MAX;
}
@@ -1516,7 +1516,8 @@ btree_insert_key(struct btree_insert_trans *trans,
struct journal_res *res,
unsigned flags)
{
- struct btree *b = insert->iter->l[0].node;
+ struct btree_iter_level *l = &iter_s(insert->iter)->l[0];
+ struct btree *b = l->node;
s64 oldsize = bch_count_data(&b->keys);
enum btree_insert_ret ret;
@@ -1539,7 +1540,7 @@ static bool same_leaf_as_prev(struct btree_insert_trans *trans,
* point to the same leaf node they'll always be adjacent now:
*/
return i != trans->entries &&
- i[0].iter->l[0].node == i[-1].iter->l[0].node;
+ btree_iter_leaf(i[0].iter) == btree_iter_leaf(i[-1].iter);
}
#define trans_for_each_entry(trans, i) \
@@ -1551,7 +1552,7 @@ static void multi_lock_write(struct btree_insert_trans *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- btree_node_lock_for_insert(i->iter->l[0].node, i->iter);
+ btree_node_lock_for_insert(btree_iter_leaf(i->iter), i->iter);
}
static void multi_unlock_write(struct btree_insert_trans *trans)
@@ -1560,7 +1561,7 @@ static void multi_unlock_write(struct btree_insert_trans *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- btree_node_unlock_write(i->iter->l[0].node, i->iter);
+ btree_node_unlock_write(btree_iter_leaf(i->iter), i->iter);
}
static int btree_trans_entry_cmp(const void *_l, const void *_r)
@@ -1633,7 +1634,7 @@ retry:
if (!i->done) {
u64s += i->k->k.u64s;
if (!bch_btree_node_insert_fits(c,
- i->iter->l[0].node, u64s))
+ btree_iter_leaf(i->iter), u64s))
goto unlock_split;
}
}
@@ -1679,7 +1680,7 @@ retry:
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- bch_btree_node_write_lazy(i->iter->l[0].node, i->iter);
+ bch_btree_node_write_lazy(btree_iter_leaf(i->iter), i->iter);
out:
percpu_ref_put(&c->writes);
return ret;
@@ -1965,7 +1966,7 @@ int bch_btree_delete_range(struct cache_set *c,
delete.k.p = iter.pos;
delete.k.version = version;
- if (iter.is_extents) {
+ if (btree_iter_leaf(&iter)->keys.ops->is_extents) {
/*
* The extents btree is special - KEY_TYPE_DISCARD is
* used for deletions, not KEY_TYPE_DELETED. This is an
@@ -2002,7 +2003,7 @@ int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
struct closure *cl)
{
struct cache_set *c = iter->c;
- struct btree *n, *parent = iter->l[b->level + 1].node;
+ struct btree *n, *parent = iter_s(iter)->l[b->level + 1].node;
struct btree_reserve *reserve;
struct async_split *as;
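
The refactor above funnels every direct iter->l[...] access through accessor helpers (iter_s(), btree_iter_leaf(), btree_iter_has_node()) whose definitions live in a header that is not part of this diff. The sketch below only illustrates what those helpers plausibly look like, inferred from the call sites in the hunks above; the iter_s() internals (an s[] array selected by an idx field), the stub struct layouts, and the BTREE_MAX_DEPTH value are assumptions, not the actual bcache definitions.

#include <linux/types.h>

/*
 * Hypothetical sketch of the accessors introduced by this patch, inferred
 * from their call sites above.  The real definitions are in a header not
 * shown in this diff; the struct layouts and iter_s() internals here are
 * assumptions, and struct btree / struct btree_iter are reduced to stubs.
 */

#define BTREE_MAX_DEPTH		4		/* assumed; actual value not in this diff */

struct btree {					/* stub: only the field used below */
	unsigned		level;
};

struct btree_iter_level {
	struct btree		*node;		/* node held at this level */
	u32			lock_seq;	/* six-lock sequence, used by relock */
	/* the real struct also embeds a struct btree_node_iter (see the -1119 hunk) */
};

struct btree_iter_state {
	struct btree_iter_level	l[BTREE_MAX_DEPTH];
};

struct btree_iter {				/* stub: assumed layout */
	struct cache_set	*c;
	unsigned		idx;		/* assumed: selects the live state */
	struct btree_iter_state	s[2];		/* assumed: current/saved states */
};

/* Live per-level state of the iterator: */
static inline struct btree_iter_state *iter_s(struct btree_iter *iter)
{
	return &iter->s[iter->idx];
}

/* Leaf node the iterator currently points at (level 0): */
static inline struct btree *btree_iter_leaf(struct btree_iter *iter)
{
	return iter_s(iter)->l[0].node;
}

/* True if the iterator state holds @b at b->level: */
static inline bool btree_iter_has_node(struct btree_iter_state *s,
				       struct btree *b)
{
	return s->l[b->level].node == b;
}

Routing the accesses through iter_s() presumably lets the iterator carry more than one copy of its per-level state (for example across a split and retry) without the call sites changed in this patch having to know which copy is live.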