author    Kent Overstreet <kent.overstreet@linux.dev>  2022-10-22 13:25:25 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2022-10-22 14:41:54 -0400
commit    188b6d0c8ef1c02462a744b176557c27220112c9 (patch)
tree      ab1f239b3bad0335e1d3fa62415bc8d845ba68be /libbcachefs/btree_key_cache.c
parent    494421ee6e85514f90bb316d77e1dd4f7dad3420 (diff)
Update bcachefs sources to cd779e0cc5 bcachefs: Skip inode unpack/pack in bch2_extent_update() (tag: v0.23)
Diffstat (limited to 'libbcachefs/btree_key_cache.c')
-rw-r--r--  libbcachefs/btree_key_cache.c  |  41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index b8ed25b9..cd52dd5a 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_cache.h"
@@ -103,6 +104,22 @@ static void bkey_cached_free(struct btree_key_cache *bc,
six_unlock_intent(&ck->c.lock);
}
+static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
+{
+ struct bkey_cached *pos;
+
+ list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
+ if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
+ pos->btree_trans_barrier_seq)) {
+ list_move(&ck->list, &pos->list);
+ return;
+ }
+ }
+
+ list_move(&ck->list, &bc->freed_nonpcpu);
+}
+
static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
struct bkey_cached *ck)
{
@@ -130,11 +147,11 @@ static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
while (f->nr > ARRAY_SIZE(f->objs) / 2) {
struct bkey_cached *ck2 = f->objs[--f->nr];
- list_move_tail(&ck2->list, &bc->freed_nonpcpu);
+ __bkey_cached_move_to_freelist_ordered(bc, ck2);
}
preempt_enable();
- list_move_tail(&ck->list, &bc->freed_nonpcpu);
+ __bkey_cached_move_to_freelist_ordered(bc, ck);
mutex_unlock(&bc->lock);
}
#else
@@ -295,7 +312,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
bool was_new = true;
ck = bkey_cached_alloc(trans, path);
- if (unlikely(IS_ERR(ck)))
+ if (IS_ERR(ck))
return ck;
if (unlikely(!ck)) {
@@ -416,7 +433,7 @@ err:
return ret;
}
-noinline static int
+static noinline int
bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
unsigned flags)
{
@@ -597,7 +614,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
* Since journal reclaim depends on us making progress here, and the
* allocator/copygc depend on journal reclaim making progress, we need
* to be using alloc reserves:
- * */
+ */
ret = bch2_btree_iter_traverse(&b_iter) ?:
bch2_trans_update(trans, &b_iter, ck->k,
BTREE_UPDATE_KEY_CACHE_RECLAIM|
@@ -982,7 +999,7 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
bc->table_init_done = true;
- bc->shrink.seeks = 1;
+ bc->shrink.seeks = 0;
bc->shrink.count_objects = bch2_btree_key_cache_count;
bc->shrink.scan_objects = bch2_btree_key_cache_scan;
bc->shrink.to_text = bch2_btree_key_cache_shrinker_to_text;
@@ -991,15 +1008,17 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
- prt_printf(out, "nr_freed:\t%zu\n", atomic_long_read(&c->nr_freed));
- prt_printf(out, "nr_keys:\t%lu\n", atomic_long_read(&c->nr_keys));
- prt_printf(out, "nr_dirty:\t%lu\n", atomic_long_read(&c->nr_dirty));
+ prt_printf(out, "nr_freed:\t%zu", atomic_long_read(&c->nr_freed));
+ prt_newline(out);
+ prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
+ prt_newline(out);
+ prt_printf(out, "nr_dirty:\t%lu", atomic_long_read(&c->nr_dirty));
+ prt_newline(out);
}
void bch2_btree_key_cache_exit(void)
{
- if (bch2_key_cache)
- kmem_cache_destroy(bch2_key_cache);
+ kmem_cache_destroy(bch2_key_cache);
}
int __init bch2_btree_key_cache_init(void)
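
For context on the new helper above: __bkey_cached_move_to_freelist_ordered() walks freed_nonpcpu from the tail and re-links the entry after the first element it is not older than, keeping the freelist sorted by btree_trans_barrier_seq with the oldest entries at the head, presumably so reclaim can stop scanning at the first entry whose SRCU grace period has not yet elapsed. Below is a minimal, self-contained sketch of that tail-first ordered insert; all names are hypothetical userspace stand-ins for the kernel list helpers, and unlike the kernel's list_move() the sketch inserts fresh nodes rather than unlinking existing ones first.

/*
 * Standalone sketch (not part of the patch) of the tail-first ordered
 * insert used by __bkey_cached_move_to_freelist_ordered().
 */
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* circular doubly-linked, like list_head */
	unsigned long seq;		/* stands in for btree_trans_barrier_seq */
};

/* Wraparound-safe "a >= b", the same idea as the kernel's ULONG_CMP_GE() */
static int seq_ge(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;
}

/* Link @n in immediately after @pos, like list_move() onto &pos->list */
static void insert_after(struct node *n, struct node *pos)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

/*
 * Walk from the tail (newest sequences) toward the head and insert @n
 * after the first entry it is not older than; if every entry is newer,
 * @n becomes the new head. The list stays sorted oldest-to-newest.
 */
static void insert_ordered(struct node *head, struct node *n)
{
	struct node *pos;

	for (pos = head->prev; pos != head; pos = pos->prev) {
		if (seq_ge(n->seq, pos->seq)) {
			insert_after(n, pos);
			return;
		}
	}
	insert_after(n, head);
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node a = { 0, 0, 5 }, b = { 0, 0, 2 }, c = { 0, 0, 9 };
	struct node *pos;

	insert_ordered(&head, &a);
	insert_ordered(&head, &b);
	insert_ordered(&head, &c);

	for (pos = head.next; pos != &head; pos = pos->next)
		printf("%lu ", pos->seq);	/* prints: 2 5 9 */
	printf("\n");
	return 0;
}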