summaryrefslogtreecommitdiff
path: root/libbcachefs/btree_cache.c
diff options
context:
space:
mode:
author: Kent Overstreet <kent.overstreet@gmail.com>  2021-07-11 13:56:18 -0400
committer: Kent Overstreet <kent.overstreet@gmail.com>  2021-07-11 13:57:11 -0400
commit: 700d013b5280b72a1fb3830d8f70ecce5decb0ab (patch)
tree: 9b74fdad5894ac8a9af1b0a0ab9a69ed7a3e32ee /libbcachefs/btree_cache.c
parent: 8e6f35cbf344dbefbfe6420e06a47f64079f72ad (diff)
Update bcachefs sources to 400c2f8d96 bcachefs: Mask out unknown compat features when going read-write
Diffstat (limited to 'libbcachefs/btree_cache.c')
-rw-r--r--  libbcachefs/btree_cache.c | 99
1 file changed, 63 insertions(+), 36 deletions(-)
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 12bc2946..73bfd01f 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -13,6 +13,8 @@
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
+struct lock_class_key bch2_btree_node_lock_key;
+
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;
@@ -98,7 +100,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c)
return NULL;
bkey_btree_ptr_init(&b->key);
- six_lock_init(&b->c.lock);
+ __six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
b->byte_order = ilog2(btree_bytes(c));
@@ -184,6 +186,17 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
int ret = 0;
lockdep_assert_held(&bc->lock);
+wait_on_io:
+ if (b->flags & ((1U << BTREE_NODE_dirty)|
+ (1U << BTREE_NODE_read_in_flight)|
+ (1U << BTREE_NODE_write_in_flight))) {
+ if (!flush)
+ return -ENOMEM;
+
+ /* XXX: waiting on IO with btree cache lock held */
+ bch2_btree_node_wait_on_read(b);
+ bch2_btree_node_wait_on_write(b);
+ }
if (!six_trylock_intent(&b->c.lock))
return -ENOMEM;
@@ -191,25 +204,26 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
if (!six_trylock_write(&b->c.lock))
goto out_unlock_intent;
+ /* recheck under lock */
+ if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
+ (1U << BTREE_NODE_write_in_flight))) {
+ if (!flush)
+ goto out_unlock;
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
+
if (btree_node_noevict(b))
goto out_unlock;
if (!btree_node_may_write(b))
goto out_unlock;
- if (btree_node_dirty(b) &&
- test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
- goto out_unlock;
-
- if (btree_node_dirty(b) ||
- btree_node_write_in_flight(b) ||
- btree_node_read_in_flight(b)) {
- if (!flush)
+ if (btree_node_dirty(b)) {
+ if (!flush ||
+ test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
goto out_unlock;
-
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
-
/*
* Using the underscore version because we don't want to compact
* bsets after the write, since this node is about to be evicted
@@ -221,8 +235,9 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
else
__bch2_btree_node_write(c, b);
- /* wait for any in flight btree write */
- btree_node_wait_on_io(b);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
}
out:
if (b->hash_val && !ret)
@@ -572,6 +587,7 @@ got_node:
}
BUG_ON(btree_node_hashed(b));
+ BUG_ON(btree_node_dirty(b));
BUG_ON(btree_node_write_in_flight(b));
out:
b->flags = 0;
@@ -625,6 +641,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
+ u32 seq;
BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
/*
@@ -654,31 +671,31 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
return NULL;
}
+ set_btree_node_read_in_flight(b);
+
+ six_unlock_write(&b->c.lock);
+ seq = b->c.lock.state.seq;
+ six_unlock_intent(&b->c.lock);
+
/* Unlock before doing IO: */
if (iter && sync)
bch2_trans_unlock(iter->trans);
bch2_btree_node_read(c, b, sync);
- six_unlock_write(&b->c.lock);
-
- if (!sync) {
- six_unlock_intent(&b->c.lock);
+ if (!sync)
return NULL;
- }
/*
* XXX: this will probably always fail because btree_iter_relock()
* currently fails for iterators that aren't pointed at a valid btree
* node
*/
- if (iter && !bch2_trans_relock(iter->trans)) {
- six_unlock_intent(&b->c.lock);
+ if (iter && !bch2_trans_relock(iter->trans))
return ERR_PTR(-EINTR);
- }
- if (lock_type == SIX_LOCK_read)
- six_lock_downgrade(&b->c.lock);
+ if (!six_relock_type(&b->c.lock, lock_type, seq))
+ return ERR_PTR(-EINTR);
return b;
}
@@ -822,11 +839,12 @@ lock_node:
}
if (unlikely(btree_node_read_in_flight(b))) {
+ u32 seq = b->c.lock.state.seq;
+
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(iter->trans);
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
+ bch2_btree_node_wait_on_read(b);
/*
* XXX: check if this always fails - btree_iter_relock()
@@ -835,7 +853,9 @@ lock_node:
*/
if (iter && !bch2_trans_relock(iter->trans))
return ERR_PTR(-EINTR);
- goto retry;
+
+ if (!six_relock_type(&b->c.lock, lock_type, seq))
+ goto retry;
}
prefetch(b->aux_data);
@@ -914,8 +934,7 @@ lock_node:
}
/* XXX: waiting on IO with btree locks held: */
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
+ __bch2_btree_node_wait_on_read(b);
prefetch(b->aux_data);
@@ -970,16 +989,24 @@ void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
b = btree_cache_find(bc, k);
if (!b)
return;
+wait_on_io:
+ /* not allowed to wait on io with btree locks held: */
+
+ /* XXX we're called from btree_gc which will be holding other btree
+ * nodes locked
+ * */
+ __bch2_btree_node_wait_on_read(b);
+ __bch2_btree_node_wait_on_write(b);
six_lock_intent(&b->c.lock, NULL, NULL);
six_lock_write(&b->c.lock, NULL, NULL);
- wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
- TASK_UNINTERRUPTIBLE);
- __bch2_btree_node_write(c, b);
-
- /* wait for any in flight btree write */
- btree_node_wait_on_io(b);
+ if (btree_node_dirty(b)) {
+ __bch2_btree_node_write(c, b);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
BUG_ON(btree_node_dirty(b));