Diffstat (limited to 'libbcachefs/btree_key_cache.c')
 libbcachefs/btree_key_cache.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index 13df0d40..441b04c3 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -283,7 +283,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
return NULL;
init:
INIT_LIST_HEAD(&ck->list);
- __six_lock_init(&ck->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
+ bch2_btree_lock_init(&ck->c);
if (pcpu_readers)
six_lock_pcpu_alloc(&ck->c.lock);
@@ -948,6 +948,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
struct bucket_table *tbl;
struct bkey_cached *ck, *n;
struct rhash_head *pos;
+ LIST_HEAD(items);
unsigned i;
#ifdef __KERNEL__
int cpu;
@@ -968,7 +969,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
for (i = 0; i < tbl->size; i++)
rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
bkey_cached_evict(bc, ck);
- list_add(&ck->list, &bc->freed_nonpcpu);
+ list_add(&ck->list, &items);
}
rcu_read_unlock();
}
@@ -980,14 +981,17 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
for (i = 0; i < f->nr; i++) {
ck = f->objs[i];
- list_add(&ck->list, &bc->freed_nonpcpu);
+ list_add(&ck->list, &items);
}
}
#endif
- list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
+ list_splice(&bc->freed_pcpu, &items);
+ list_splice(&bc->freed_nonpcpu, &items);
- list_for_each_entry_safe(ck, n, &bc->freed_nonpcpu, list) {
+ mutex_unlock(&bc->lock);
+
+ list_for_each_entry_safe(ck, n, &items, list) {
cond_resched();
bch2_journal_pin_drop(&c->journal, &ck->journal);
@@ -1009,8 +1013,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
atomic_long_read(&bc->nr_keys));
- mutex_unlock(&bc->lock);
-
if (bc->table_init_done)
rhashtable_destroy(&bc->table);
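
The second hunk changes bch2_fs_btree_key_cache_exit() so that evicted and freed entries are spliced onto a function-local LIST_HEAD(items), bc->lock is dropped, and only then does the per-entry teardown loop run. Below is a minimal, standalone userspace sketch of that pattern, not bcachefs code: struct cache, struct entry and cache_exit() are hypothetical names, and a plain pthread mutex plus singly-linked list stand in for the kernel's mutex and list_head.

```c
/*
 * Userspace sketch of the teardown pattern from the hunk above:
 * detach everything onto a local list under the lock, release the
 * lock, then do the (potentially slow) per-entry work unlocked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry	*next;
	int		id;
};

struct cache {
	pthread_mutex_t	lock;
	struct entry	*freed;		/* entries waiting to be reclaimed */
};

static void cache_exit(struct cache *c)
{
	struct entry *items, *e, *n;

	pthread_mutex_lock(&c->lock);
	items = c->freed;		/* splice onto a local list */
	c->freed = NULL;
	pthread_mutex_unlock(&c->lock);	/* teardown no longer needs the lock */

	for (e = items; e; e = n) {
		n = e->next;
		/* per-entry teardown (journal pin drop, lock exit, kfree
		 * in the real code) happens outside the cache lock */
		printf("freeing entry %d\n", e->id);
		free(e);
	}
}

int main(void)
{
	struct cache c = { .lock = PTHREAD_MUTEX_INITIALIZER, .freed = NULL };

	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));
		e->id = i;
		e->next = c.freed;
		c.freed = e;
	}

	cache_exit(&c);
	return 0;
}
```

The design point mirrors the diff: once the entries are off the shared lists, nothing else can reach them, so holding bc->lock across the freeing loop (and the mutex_unlock() that used to sit after it) is unnecessary.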