Diffstat (limited to 'libbcachefs/btree_key_cache.c')
-rw-r--r--   libbcachefs/btree_key_cache.c   138
1 file changed, 87 insertions, 51 deletions
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index d605ff18..a21dc485 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -12,6 +12,8 @@
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
+static struct kmem_cache *bch2_key_cache;
+
static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -76,10 +78,13 @@ static void bkey_cached_free(struct btree_key_cache *bc,
{
struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
+ BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
+
ck->btree_trans_barrier_seq =
start_poll_synchronize_srcu(&c->btree_trans_barrier);
- list_move(&ck->list, &bc->freed);
+ list_move_tail(&ck->list, &bc->freed);
+ bc->nr_freed++;
kfree(ck->k);
ck->k = NULL;
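
The free path above no longer destroys an entry immediately: it stamps the entry with an SRCU cookie from start_poll_synchronize_srcu() and parks it on the tail of bc->freed, so it can only be reused or returned to the slab once that grace period has elapsed. Below is a minimal, self-contained sketch of that deferred-free idea, using a plain counter in place of SRCU; the names are hypothetical and this is not the bcachefs code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long grace_completed;               /* grace periods known to be done */

static unsigned long start_grace_period(void)       /* stands in for start_poll_synchronize_srcu() */
{
	return grace_completed + 1;
}

static bool grace_period_done(unsigned long cookie) /* stands in for poll_state_synchronize_srcu() */
{
	return grace_completed >= cookie;
}

struct cached_obj {
	unsigned long barrier_seq;                   /* cookie taken when the object was freed */
	struct cached_obj *next;
};

static struct cached_obj *freed_list;

static void obj_free(struct cached_obj *obj)
{
	obj->barrier_seq = start_grace_period();     /* don't destroy yet, just park it */
	obj->next = freed_list;
	freed_list = obj;
}

static void reclaim_freed(void)
{
	struct cached_obj **p = &freed_list;

	while (*p) {
		struct cached_obj *obj = *p;

		if (!grace_period_done(obj->barrier_seq)) {
			p = &obj->next;
			continue;
		}
		*p = obj->next;                      /* grace period elapsed: safe to destroy */
		free(obj);
	}
}

int main(void)
{
	struct cached_obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	obj_free(o);
	reclaim_freed();                             /* too early, nothing is destroyed */
	grace_completed++;                           /* pretend all readers have drained */
	reclaim_freed();                             /* now the object is destroyed */
	printf("freed list empty: %s\n", freed_list ? "no" : "yes");
	return 0;
}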
@@ -94,9 +99,20 @@ bkey_cached_alloc(struct btree_key_cache *c)
{
struct bkey_cached *ck;
- list_for_each_entry(ck, &c->freed, list)
- if (bkey_cached_lock_for_evict(ck))
+ list_for_each_entry_reverse(ck, &c->freed, list)
+ if (bkey_cached_lock_for_evict(ck)) {
+ c->nr_freed--;
return ck;
+ }
+
+ ck = kmem_cache_alloc(bch2_key_cache, GFP_NOFS|__GFP_ZERO);
+ if (likely(ck)) {
+ INIT_LIST_HEAD(&ck->list);
+ six_lock_init(&ck->c.lock);
+ BUG_ON(!six_trylock_intent(&ck->c.lock));
+ BUG_ON(!six_trylock_write(&ck->c.lock));
+ return ck;
+ }
list_for_each_entry(ck, &c->clean, list)
if (bkey_cached_lock_for_evict(ck)) {
@@ -104,16 +120,7 @@ bkey_cached_alloc(struct btree_key_cache *c)
return ck;
}
- ck = kzalloc(sizeof(*ck), GFP_NOFS);
- if (!ck)
- return NULL;
-
- INIT_LIST_HEAD(&ck->list);
- six_lock_init(&ck->c.lock);
- BUG_ON(!six_trylock_intent(&ck->c.lock));
- BUG_ON(!six_trylock_write(&ck->c.lock));
-
- return ck;
+ return NULL;
}
static struct bkey_cached *
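
With this pair of hunks, bkey_cached_alloc() prefers recycling an entry from the freed list, then a fresh kmem_cache allocation, and only falls back to evicting a clean entry when both fail (the old code tried the heap allocation last). A compilable sketch of that fallback order, with hypothetical names rather than the real bcachefs structures:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
};

struct cache {
	struct entry *freed;                 /* torn down, ready to be reused    */
	struct entry *clean;                 /* still valid, but cheap to evict  */
};

static struct entry *pop(struct entry **list)
{
	struct entry *e = *list;

	if (e)
		*list = e->next;
	return e;
}

static struct entry *entry_alloc(struct cache *c)
{
	struct entry *e;

	e = pop(&c->freed);                  /* 1) cheapest: recycle a freed entry            */
	if (e)
		return e;

	e = calloc(1, sizeof(*e));           /* 2) otherwise allocate a brand-new one         */
	if (e)
		return e;

	return pop(&c->clean);               /* 3) under memory pressure: steal a clean entry */
}

int main(void)
{
	struct cache c = { 0 };
	struct entry *e = entry_alloc(&c);

	printf("allocated: %s\n", e ? "yes" : "no");
	free(e);
	return 0;
}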
@@ -132,8 +139,7 @@ btree_key_cache_create(struct btree_key_cache *c,
ck->key.btree_id = btree_id;
ck->key.pos = pos;
ck->valid = false;
-
- BUG_ON(ck->flags);
+ ck->flags = 1U << BKEY_CACHED_ACCESSED;
if (rhashtable_lookup_insert_fast(&c->table,
&ck->hash,
@@ -290,6 +296,9 @@ fill:
goto err;
}
+ if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
+
iter->uptodate = BTREE_ITER_NEED_PEEK;
bch2_btree_iter_downgrade(iter);
return ret;
@@ -451,6 +460,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bkey_cached *ck = (void *) iter->l[0].b;
+ bool kick_reclaim = false;
BUG_ON(insert->u64s > ck->u64s);
@@ -475,11 +485,18 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
set_bit(BKEY_CACHED_DIRTY, &ck->flags);
c->btree_key_cache.nr_dirty++;
+
+ if (bch2_nr_btree_keys_need_flush(c))
+ kick_reclaim = true;
+
mutex_unlock(&c->btree_key_cache.lock);
}
bch2_journal_pin_update(&c->journal, trans->journal_res.seq,
&ck->journal, btree_key_cache_journal_flush);
+
+ if (kick_reclaim)
+ journal_reclaim_kick(&c->journal);
return true;
}
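
The insert path now only records, while holding btree_key_cache.lock, that journal reclaim ought to run, and performs the actual journal_reclaim_kick() after the lock is dropped, keeping the wakeup out of the critical section. A small sketch of that "decide under the lock, act after unlocking" pattern, with hypothetical names (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_dirty;

static bool too_many_dirty(void)             /* stands in for bch2_nr_btree_keys_need_flush() */
{
	return nr_dirty > 1024;
}

static void kick_background_flush(void)      /* stands in for journal_reclaim_kick() */
{
	/* wake a worker thread; deliberately not called with cache_lock held */
}

static void mark_entry_dirty(void)
{
	bool kick = false;

	pthread_mutex_lock(&cache_lock);
	nr_dirty++;
	if (too_many_dirty())
		kick = true;                 /* only record the decision under the lock */
	pthread_mutex_unlock(&cache_lock);

	if (kick)
		kick_background_flush();     /* the wakeup happens outside the lock */
}

int main(void)
{
	mark_entry_dirty();
	return 0;
}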
@@ -509,28 +526,34 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
flags = memalloc_nofs_save();
+ /*
+ * Newest freed entries are at the end of the list - once we hit one
+ * that's too new to be freed, we can bail out:
+ */
list_for_each_entry_safe(ck, t, &bc->freed, list) {
- scanned++;
+ if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
+ ck->btree_trans_barrier_seq))
+ break;
- if (poll_state_synchronize_srcu(&c->btree_trans_barrier,
- ck->btree_trans_barrier_seq)) {
- list_del(&ck->list);
- kfree(ck);
- freed++;
- }
-
- if (scanned >= nr)
- goto out;
+ list_del(&ck->list);
+ kmem_cache_free(bch2_key_cache, ck);
+ bc->nr_freed--;
+ scanned++;
+ freed++;
}
- list_for_each_entry_safe(ck, t, &bc->clean, list) {
- scanned++;
+ if (scanned >= nr)
+ goto out;
- if (bkey_cached_lock_for_evict(ck)) {
+ list_for_each_entry_safe(ck, t, &bc->clean, list) {
+ if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
+ else if (bkey_cached_lock_for_evict(ck)) {
bkey_cached_evict(bc, ck);
bkey_cached_free(bc, ck);
}
+ scanned++;
if (scanned >= nr) {
if (&t->list != &bc->clean)
list_move_tail(&bc->clean, &t->list);
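
The rewritten scan loop gives every clean entry one "second chance": an entry whose BKEY_CACHED_ACCESSED bit is set merely has the bit cleared, and only entries untouched since the previous scan are evicted, which approximates LRU without tracking exact access order. A self-contained illustration of that scheme (hypothetical names, not the bcachefs code):

#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES 4

struct entry {
	bool accessed;  /* set on lookup, cleared by the scan */
	bool present;
};

static unsigned long scan(struct entry *e, unsigned long nr)
{
	unsigned long i, freed = 0;

	for (i = 0; i < NR_ENTRIES && nr; i++) {
		if (!e[i].present)
			continue;
		nr--;
		if (e[i].accessed) {
			e[i].accessed = false;   /* second chance */
		} else {
			e[i].present = false;    /* evict */
			freed++;
		}
	}
	return freed;
}

int main(void)
{
	struct entry e[NR_ENTRIES] = {
		{ .accessed = true,  .present = true },
		{ .accessed = false, .present = true },
		{ .accessed = true,  .present = true },
		{ .accessed = false, .present = true },
	};

	/* First pass evicts only the two untouched entries. */
	printf("freed %lu\n", scan(e, NR_ENTRIES));
	/* Second pass evicts the rest, since their accessed bits were cleared. */
	printf("freed %lu\n", scan(e, NR_ENTRIES));
	return 0;
}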
@@ -570,18 +593,22 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
bch2_journal_preres_put(&c->journal, &ck->res);
kfree(ck->k);
- kfree(ck);
+ list_del(&ck->list);
+ kmem_cache_free(bch2_key_cache, ck);
bc->nr_keys--;
}
BUG_ON(bc->nr_dirty && !bch2_journal_error(&c->journal));
BUG_ON(bc->nr_keys);
- list_for_each_entry_safe(ck, n, &bc->freed, list)
- kfree(ck);
+ list_for_each_entry_safe(ck, n, &bc->freed, list) {
+ list_del(&ck->list);
+ kmem_cache_free(bch2_key_cache, ck);
+ }
mutex_unlock(&bc->lock);
- rhashtable_destroy(&bc->table);
+ if (bc->table_init_done)
+ rhashtable_destroy(&bc->table);
}
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
@@ -594,33 +621,42 @@ void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)
{
+ int ret;
+
+ c->shrink.seeks = 1;
c->shrink.count_objects = bch2_btree_key_cache_count;
c->shrink.scan_objects = bch2_btree_key_cache_scan;
- return register_shrinker(&c->shrink) ?:
- rhashtable_init(&c->table, &bch2_btree_key_cache_params);
+ ret = register_shrinker(&c->shrink);
+ if (ret)
+ return ret;
+
+ ret = rhashtable_init(&c->table, &bch2_btree_key_cache_params);
+ if (ret)
+ return ret;
+
+ c->table_init_done = true;
+ return 0;
}
void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
- struct bucket_table *tbl;
- struct bkey_cached *ck;
- struct rhash_head *pos;
- size_t i;
+ pr_buf(out, "nr_freed:\t%zu\n", c->nr_freed);
+ pr_buf(out, "nr_keys:\t%zu\n", c->nr_keys);
+ pr_buf(out, "nr_dirty:\t%zu\n", c->nr_dirty);
+}
- mutex_lock(&c->lock);
- tbl = rht_dereference_rcu(c->table.tbl, &c->table);
+void bch2_btree_key_cache_exit(void)
+{
+ if (bch2_key_cache)
+ kmem_cache_destroy(bch2_key_cache);
+}
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
- pr_buf(out, "%s:",
- bch2_btree_ids[ck->key.btree_id]);
- bch2_bpos_to_text(out, ck->key.pos);
+int __init bch2_btree_key_cache_init(void)
+{
+ bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
+ if (!bch2_key_cache)
+ return -ENOMEM;
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
- pr_buf(out, " journal seq %llu", ck->journal.seq);
- pr_buf(out, "\n");
- }
- }
- mutex_unlock(&c->lock);
+ return 0;
}
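
Finally, teardown is now guarded: bch2_fs_btree_key_cache_exit() only calls rhashtable_destroy() when table_init_done was set, and bch2_btree_key_cache_exit() only destroys the kmem_cache if it was actually created, so a failure partway through init cannot lead to tearing down state that was never set up. A minimal sketch of that record-what-you-initialized idea, with hypothetical names:

#include <stdbool.h>
#include <stdlib.h>

struct key_cache {
	void *table;                         /* stands in for the rhashtable       */
	bool  table_init_done;               /* set only after the table is live   */
};

static int cache_init(struct key_cache *c)
{
	c->table = malloc(64);               /* this step may fail                 */
	if (!c->table)
		return -1;                   /* table_init_done stays false        */
	c->table_init_done = true;
	return 0;
}

static void cache_exit(struct key_cache *c)
{
	if (c->table_init_done)              /* mirrors the table_init_done check  */
		free(c->table);
	c->table = NULL;
	c->table_init_done = false;
}

int main(void)
{
	struct key_cache c = { 0 };

	(void)cache_init(&c);                /* even if init fails partway...      */
	cache_exit(&c);                      /* ...exit tears down only what exists */
	return 0;
}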