Diffstat (limited to 'drivers/md/bcache/btree_cache.c')
 drivers/md/bcache/btree_cache.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/drivers/md/bcache/btree_cache.c b/drivers/md/bcache/btree_cache.c
index c36366cd777c..eaa482231b43 100644
--- a/drivers/md/bcache/btree_cache.c
+++ b/drivers/md/bcache/btree_cache.c
@@ -560,9 +560,10 @@ err:
/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
- const struct bkey_i *k,
- unsigned level,
- struct closure *cl)
+ struct btree_iter_state *_iter,
+ const struct bkey_i *k,
+ unsigned level,
+ struct closure *cl)
{
struct cache_set *c = iter->c;
struct btree *b;
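
The new struct btree_iter_state parameter is never defined in this diff. Judging from the helpers that now take it (btree_node_read_locked(), btree_node_unlock(), mark_btree_node_locked(), __btree_node_unlock()), it appears to split the per-level lock bookkeeping out of struct btree_iter. A minimal sketch of that split, with purely illustrative field names:

/*
 * Sketch only: struct btree_iter_state is not shown in this diff.
 * Field names are guesses, chosen to match how the lock-tracking
 * helpers are invoked in the hunks below.
 */
struct btree_iter_state {
	u8		nodes_locked;		/* bit n set: level n locked */
	u8		nodes_intent_locked;	/* bit n set: lock is intent */
	struct btree	*nodes[BTREE_MAX_DEPTH];
};

static inline bool btree_node_read_locked(struct btree_iter_state *_iter,
					  unsigned level)
{
	/* read-locked == locked at this level, but not with intent */
	return (_iter->nodes_locked & (1U << level)) &&
	       !(_iter->nodes_intent_locked & (1U << level));
}
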
@@ -598,13 +599,13 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
* But the deadlock described below doesn't exist in this case,
* so it's safe to not drop the parent lock until here:
*/
- if (btree_node_read_locked(iter, level + 1))
- btree_node_unlock(iter, level + 1);
+ if (btree_node_read_locked(_iter, level + 1))
+ btree_node_unlock(_iter, level + 1);
bch_btree_node_read(c, b);
six_unlock_write(&b->lock);
- mark_btree_node_locked(iter, level, btree_lock_want(iter, level));
+ mark_btree_node_locked(_iter, level, btree_lock_want(iter, level));
if (btree_lock_want(iter, level) == SIX_LOCK_read)
BUG_ON(!six_trylock_convert(&b->lock,
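
The six_trylock_convert() call is cut off by the hunk boundary; presumably it downgrades the node lock from intent to read when that is all the traversal asked for. A hedged sketch of the completed call, with the reasoning for BUG_ON() rather than an error path:

/*
 * Assumed completion of the truncated call above: an intent -> read
 * downgrade. An intent lock excludes writers but not readers, so the
 * holder can always take the read side; failure here would indicate
 * corrupted lock state, not a recoverable condition.
 */
if (btree_lock_want(iter, level) == SIX_LOCK_read)
	BUG_ON(!six_trylock_convert(&b->lock,
				    SIX_LOCK_intent,
				    SIX_LOCK_read));
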
@@ -624,11 +625,12 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
* the @write parameter.
*/
struct btree *bch_btree_node_get(struct btree_iter *iter,
+ struct btree_iter_state *_iter,
const struct bkey_i *k, unsigned level,
struct closure *cl)
{
- int i = 0;
struct btree *b;
+ int i = 0;
BUG_ON(level >= BTREE_MAX_DEPTH);
retry:
@@ -642,7 +644,7 @@ retry:
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch_btree_node_fill(iter, k, level, cl);
+ b = bch_btree_node_fill(iter, _iter, k, level, cl);
/* We raced and found the btree node in the cache */
if (!b)
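
The freed-node race this hunk alludes to, spelled out as an annotation (not code from the tree):

/*
 * Without the parent lock held across the fill:
 *
 *   traversal thread                 another thread
 *   ----------------                 --------------
 *   cache lookup for k: miss
 *   parent lock dropped too early
 *                                    deletes k from the parent,
 *                                    frees the node, reuses the space
 *   bch_btree_node_fill() reads
 *   stale sectors from disk: bug
 *
 * Holding the parent lock pins k's validity for the duration of the
 * read. If the fill instead races with another thread filling the
 * same node, it returns NULL and the cache lookup is simply retried.
 */
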
@@ -657,8 +659,8 @@ retry:
* But we still have to drop read locks before we return, for
* deadlock avoidance:
*/
- if (btree_node_read_locked(iter, level + 1))
- btree_node_unlock(iter, level + 1);
+ if (btree_node_read_locked(_iter, level + 1))
+ btree_node_unlock(_iter, level + 1);
} else {
/*
* There's a potential deadlock with splits and insertions into
@@ -688,12 +690,12 @@ retry:
* the parent was modified, when the pointer to the node we want
* was removed - and we'll bail out:
*/
- if (btree_node_read_locked(iter, level + 1))
- btree_node_unlock(iter, level + 1);
+ if (btree_node_read_locked(_iter, level + 1))
+ btree_node_unlock(_iter, level + 1);
if (!btree_node_lock(b, iter, level,
PTR_HASH(&b->key) != PTR_HASH(k))) {
- if (!btree_node_relock(iter, level + 1)) {
+ if (!bch_btree_node_relock(iter, _iter, level + 1)) {
trace_bcache_btree_intent_lock_fail(b, iter);
return ERR_PTR(-EINTR);
}
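
This lock-or-bail pattern is the core of the deadlock avoidance described in the comment above: rather than blocking while a splitting thread holds the child and wants the parent, the lock attempt gives up, the parent is re-taken, and the lookup restarts. A sketch of the whole pattern; the retry edge after a successful relock is an assumption, since the hunk ends before showing it:

/*
 * Sketch, not tree code. The predicate passed to btree_node_lock()
 * aborts the attempt if b was recycled for a different node while we
 * waited (the pointer hashes no longer match).
 */
if (!btree_node_lock(b, iter, level,
		     PTR_HASH(&b->key) != PTR_HASH(k))) {
	/* couldn't lock safely: re-take the parent and look up again */
	if (!bch_btree_node_relock(iter, _iter, level + 1)) {
		trace_bcache_btree_intent_lock_fail(b, iter);
		return ERR_PTR(-EINTR);	/* caller restarts traversal */
	}
	goto retry;	/* assumed: not visible in this hunk */
}
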
@@ -712,7 +714,7 @@ retry:
}
if (btree_node_read_error(b)) {
- __btree_node_unlock(iter, level, b);
+ __btree_node_unlock(_iter, level, b);
return ERR_PTR(-EIO);
}
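
Taken together, the hunks change bch_btree_node_get()'s contract: callers now thread the lock-state object through alongside the iterator, and must treat -EINTR as a transient "restart the traversal" signal rather than a hard failure. A hypothetical call site; where the state object actually lives is not shown in this diff, so &iter->state is a placeholder:

/*
 * Hypothetical caller after this change. "&iter->state" and the
 * "restart" label are placeholders, not names from the tree.
 */
struct btree *b = bch_btree_node_get(iter, &iter->state, k, level, cl);

if (IS_ERR(b)) {
	if (PTR_ERR(b) == -EINTR)
		goto restart;		/* redo the traversal from the top */
	return PTR_ERR(b);		/* e.g. -EIO: the node read failed */
}
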