Diffstat (limited to 'libbcachefs/btree_cache.c')
-rw-r--r--	libbcachefs/btree_cache.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 0c737f35..2c9c3c18 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -62,13 +62,13 @@ static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
 	const struct btree *b = obj;
 	const u64 *v = arg->key;
 
-	return PTR_HASH(&b->key) == *v ? 0 : 1;
+	return b->hash_val == *v ? 0 : 1;
 }
 
 static const struct rhashtable_params bch_btree_cache_params = {
 	.head_offset	= offsetof(struct btree, hash),
-	.key_offset	= offsetof(struct btree, key.v),
-	.key_len	= sizeof(struct bch_extent_ptr),
+	.key_offset	= offsetof(struct btree, hash_val),
+	.key_len	= sizeof(u64),
 	.obj_cmpfn	= bch2_btree_cache_cmp_fn,
 };
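
The params above key the btree node cache on a u64 cached in the node itself, with a trivial compare callback. As a minimal sketch of the same rhashtable pattern — using a hypothetical struct node and node_params invented for illustration, not bcachefs code:

	#include <linux/rhashtable.h>

	struct node {
		struct rhash_head	hash;		/* table linkage */
		u64			hash_val;	/* cached 64-bit key */
	};

	/* rhashtable compare callbacks return 0 on match */
	static int node_cmp_fn(struct rhashtable_compare_arg *arg,
			       const void *obj)
	{
		const struct node *n = obj;
		const u64 *v = arg->key;

		return n->hash_val == *v ? 0 : 1;
	}

	static const struct rhashtable_params node_params = {
		.head_offset	= offsetof(struct node, hash),
		.key_offset	= offsetof(struct node, hash_val),
		.key_len	= sizeof(u64),
		.obj_cmpfn	= node_cmp_fn,
	};

Keying on a fixed-offset u64 lets rhashtable hash the key bytes directly, instead of hashing into the middle of a variable-layout bkey as the old key.v/bch_extent_ptr scheme did.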
@@ -114,11 +114,14 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 	rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
 
 	/* Cause future lookups for this node to fail: */
-	PTR_HASH(&b->key) = 0;
+	b->hash_val = 0;
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 {
+	BUG_ON(b->hash_val);
+	b->hash_val = btree_ptr_hash_val(&b->key);
+
 	return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
 					     bch_btree_cache_params);
 }
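
With the key cached in the object, insert computes it exactly once up front, and rhashtable_lookup_insert_fast() atomically refuses duplicates. Continuing the hypothetical sketch above (node_hash_insert() is an invented helper; the caller supplies the precomputed key):

	#include <linux/bug.h>

	/* Returns 0 on success, -EEXIST if @key is already hashed. */
	static int node_hash_insert(struct rhashtable *table,
				    struct node *n, u64 key)
	{
		BUG_ON(n->hash_val);	/* must not already be hashed */
		n->hash_val = key;

		return rhashtable_lookup_insert_fast(table, &n->hash,
						     node_params);
	}

The BUG_ON mirrors the invariant the patch introduces: hash_val == 0 means "not in the table", which is also why bch2_btree_node_hash_remove() zeroes it to make future lookups fail.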
@@ -144,8 +147,9 @@ __flatten
 static inline struct btree *btree_cache_find(struct btree_cache *bc,
 					     const struct bkey_i *k)
 {
-	return rhashtable_lookup_fast(&bc->table, &PTR_HASH(k),
-				      bch_btree_cache_params);
+	u64 v = btree_ptr_hash_val(k);
+
+	return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
 }
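
Since the key is now a value computed from the bkey rather than a field inside it, lookups pass a pointer to a stack local. The same shape in the hypothetical sketch:

	static struct node *node_find(struct rhashtable *table, u64 key)
	{
		return rhashtable_lookup_fast(table, &key, node_params);
	}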
/*
@@ -199,7 +203,7 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 		btree_node_wait_on_io(b);
 	}
 out:
-	if (PTR_HASH(&b->key) && !ret)
+	if (b->hash_val && !ret)
 		trace_btree_node_reap(c, b);
 	return ret;
 out_unlock:
@@ -607,7 +611,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 		/* raced with another fill: */
 
 		/* mark as unhashed... */
-		PTR_HASH(&b->key) = 0;
+		b->hash_val = 0;
 
 		mutex_lock(&bc->lock);
 		list_add(&b->list, &bc->freeable);
@@ -710,7 +714,7 @@ retry:
 	 * free it:
 	 *
 	 * To guard against this, btree nodes are evicted from the cache
-	 * when they're freed - and PTR_HASH() is zeroed out, which we
+	 * when they're freed - and b->hash_val is zeroed out, which we
 	 * check for after we lock the node.
 	 *
 	 * Then, bch2_btree_node_relock() on the parent will fail - because
@@ -723,7 +727,7 @@ retry:
 		if (!btree_node_lock(b, k->k.p, level, iter, lock_type))
 			return ERR_PTR(-EINTR);
 
-		if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
+		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
 			     b->level != level ||
 			     race_fault())) {
 			six_unlock_type(&b->lock, lock_type);
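
This is the recheck-after-lock pattern the comment above describes: hash_val can only be trusted once the node lock is held, since the node may have been freed and reused while we slept on it. A self-contained sketch of the same pattern, using a hypothetical locked_node type with a plain mutex standing in for the btree node's six lock:

	#include <linux/mutex.h>

	struct locked_node {
		struct mutex	lock;
		u64		hash_val;	/* zeroed when evicted */
	};

	/*
	 * Validate identity only after the lock is held.
	 * Returns true if @n still matches @key; on false the
	 * caller should drop its reference and retry the lookup.
	 */
	static bool node_lock_and_check(struct locked_node *n, u64 key)
	{
		mutex_lock(&n->lock);

		if (n->hash_val != key) {
			mutex_unlock(&n->lock);
			return false;	/* raced with eviction/reuse */
		}
		return true;
	}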