diff options
Diffstat (limited to 'libbcachefs/btree_cache.c')
-rw-r--r-- | libbcachefs/btree_cache.c | 86 |
1 file changed, 45 insertions(+), 41 deletions(-)
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c index 91e0aa79..3b1d694d 100644 --- a/libbcachefs/btree_cache.c +++ b/libbcachefs/btree_cache.c @@ -15,6 +15,7 @@ #include <linux/prefetch.h> #include <linux/sched/mm.h> +#include <linux/seq_buf.h> #include <linux/swap.h> const char * const bch2_btree_node_flags[] = { @@ -77,15 +78,14 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) { struct btree_cache *bc = &c->btree_cache; - mutex_lock(&bc->lock); - __bch2_btree_node_to_freelist(bc, b); - mutex_unlock(&bc->lock); + scoped_guard(mutex, &bc->lock) + __bch2_btree_node_to_freelist(bc, b); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); } -static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) +void __btree_node_data_free(struct btree *b) { BUG_ON(!list_empty(&b->list)); BUG_ON(btree_node_hashed(b)); @@ -112,16 +112,17 @@ static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) munmap(b->aux_data, btree_aux_data_bytes(b)); #endif b->aux_data = NULL; - - btree_node_to_freedlist(bc, b); } static void btree_node_data_free(struct btree_cache *bc, struct btree *b) { BUG_ON(list_empty(&b->list)); list_del_init(&b->list); + + __btree_node_data_free(b); + --bc->nr_freeable; - __btree_node_data_free(bc, b); + btree_node_to_freedlist(bc, b); } static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg, @@ -185,10 +186,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp) struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) { - struct btree_cache *bc = &c->btree_cache; - struct btree *b; - - b = __btree_node_mem_alloc(c, GFP_KERNEL); + struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL); if (!b) return NULL; @@ -198,8 +196,6 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) } bch2_btree_lock_init(&b->c, 0, GFP_KERNEL); - - __bch2_btree_node_to_freelist(bc, b); return b; } @@ -218,14 +214,13 @@ void bch2_node_pin(struct bch_fs *c, 
struct btree *b) { struct btree_cache *bc = &c->btree_cache; - mutex_lock(&bc->lock); - if (b != btree_node_root(c, b) && !btree_node_pinned(b)) { + guard(mutex)(&bc->lock); + if (!btree_node_is_root(c, b) && !btree_node_pinned(b)) { set_btree_node_pinned(b); list_move(&b->list, &bc->live[1].list); bc->live[0].nr--; bc->live[1].nr++; } - mutex_unlock(&bc->lock); } void bch2_btree_cache_unpin(struct bch_fs *c) @@ -233,7 +228,7 @@ void bch2_btree_cache_unpin(struct bch_fs *c) struct btree_cache *bc = &c->btree_cache; struct btree *b, *n; - mutex_lock(&bc->lock); + guard(mutex)(&bc->lock); c->btree_cache.pinned_nodes_mask[0] = 0; c->btree_cache.pinned_nodes_mask[1] = 0; @@ -243,8 +238,6 @@ void bch2_btree_cache_unpin(struct bch_fs *c) bc->live[0].nr++; bc->live[1].nr--; } - - mutex_unlock(&bc->lock); } /* Btree in memory cache - hash table */ @@ -299,11 +292,8 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, b->c.level = level; b->c.btree_id = id; - mutex_lock(&bc->lock); - int ret = __bch2_btree_node_hash_insert(bc, b); - mutex_unlock(&bc->lock); - - return ret; + guard(mutex)(&bc->lock); + return __bch2_btree_node_hash_insert(bc, b); } void bch2_btree_node_update_key_early(struct btree_trans *trans, @@ -320,7 +310,7 @@ void bch2_btree_node_update_key_early(struct btree_trans *trans, b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true); if (!IS_ERR_OR_NULL(b)) { - mutex_lock(&c->btree_cache.lock); + guard(mutex)(&c->btree_cache.lock); __bch2_btree_node_hash_remove(&c->btree_cache, b); @@ -328,7 +318,6 @@ void bch2_btree_node_update_key_early(struct btree_trans *trans, ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); BUG_ON(ret); - mutex_unlock(&c->btree_cache.lock); six_unlock_read(&b->c.lock); } @@ -444,7 +433,8 @@ retry_unlocked: } if (b->hash_val && !ret) - trace_and_count(c, btree_cache_reap, c, b); + trace_btree_node(c, b, btree_cache_reap); + return 0; } @@ -521,10 +511,11 @@ restart: if (btree_node_accessed(b)) { 
clear_btree_node_accessed(b); bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; - --touched;; + --touched; } else if (!btree_node_reclaim(c, b)) { __bch2_btree_node_hash_remove(bc, b); - __btree_node_data_free(bc, b); + __btree_node_data_free(b); + btree_node_to_freedlist(bc, b); freed++; bc->nr_freed++; @@ -575,6 +566,19 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink, return btree_cache_can_free(list); } +static void bch2_btree_cache_shrinker_to_text(struct seq_buf *s, struct shrinker *shrink) +{ + struct btree_cache_list *list = shrink->private_data; + struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); + + char *cbuf; + size_t buflen = seq_buf_get_buf(s, &cbuf); + struct printbuf out = PRINTBUF_EXTERN(cbuf, buflen); + + bch2_btree_cache_to_text(&out, bc); + seq_buf_commit(s, out.pos); +} + void bch2_fs_btree_cache_exit(struct bch_fs *c) { struct btree_cache *bc = &c->btree_cache; @@ -652,9 +656,12 @@ int bch2_fs_btree_cache_init(struct bch_fs *c) bch2_recalc_btree_reserve(c); - for (i = 0; i < bc->nr_reserve; i++) - if (!__bch2_btree_node_mem_alloc(c)) + for (i = 0; i < bc->nr_reserve; i++) { + struct btree *b = __bch2_btree_node_mem_alloc(c); + if (!b) goto err; + __bch2_btree_node_to_freelist(bc, b); + } list_splice_init(&bc->live[0].list, &bc->freeable); @@ -666,6 +673,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c) bc->live[0].shrink = shrink; shrink->count_objects = bch2_btree_cache_count; shrink->scan_objects = bch2_btree_cache_scan; + shrink->to_text = bch2_btree_cache_shrinker_to_text; shrink->seeks = 2; shrink->private_data = &bc->live[0]; shrinker_register(shrink); @@ -676,6 +684,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c) bc->live[1].shrink = shrink; shrink->count_objects = bch2_btree_cache_count; shrink->scan_objects = bch2_btree_cache_scan; + shrink->to_text = bch2_btree_cache_shrinker_to_text; shrink->seeks = 8; shrink->private_data = &bc->live[1]; shrinker_register(shrink); @@ 
-795,7 +804,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea goto got_node; } - b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN); + b = __btree_node_mem_alloc(c, GFP_NOWAIT); if (b) { bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT); } else { @@ -833,7 +842,7 @@ got_node: mutex_unlock(&bc->lock); - if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) { + if (btree_node_data_alloc(c, b, GFP_NOWAIT)) { bch2_trans_unlock(trans); if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN)) goto err; @@ -913,20 +922,18 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, } if (unlikely(!bkey_is_btree_ptr(&k->k))) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf); - printbuf_exit(&buf); return ERR_PTR(ret); } if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf); - printbuf_exit(&buf); return ERR_PTR(ret); } @@ -1001,11 +1008,10 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) { - struct printbuf buf = PRINTBUF; - if (c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations) return; + CLASS(printbuf, buf)(); prt_printf(&buf, "btree node header doesn't match ptr: "); bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); @@ -1021,8 +1027,6 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) bch2_bpos_to_text(&buf, b->data->max_key); bch2_fs_topology_error(c, "%s", buf.buf); - - printbuf_exit(&buf); } static inline void btree_check_header(struct bch_fs *c, struct btree *b) 
|