Diffstat (limited to 'libbcachefs/btree_update_interior.c')
-rw-r--r--  libbcachefs/btree_update_interior.c  192
1 file changed, 110 insertions, 82 deletions
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index 1fe8fff8..04854532 100644
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -21,7 +21,7 @@
static void btree_node_will_make_reachable(struct btree_update *,
struct btree *);
static void btree_update_drop_new_node(struct bch_fs *, struct btree *);
-static void bch2_btree_set_root_ondisk(struct bch_fs *, struct btree *);
+static void bch2_btree_set_root_ondisk(struct bch_fs *, struct btree *, int);
/* Debug code: */
@@ -686,7 +686,7 @@ retry:
BUG_ON(c->btree_roots[b->btree_id].as != as);
c->btree_roots[b->btree_id].as = NULL;
- bch2_btree_set_root_ondisk(c, b);
+ bch2_btree_set_root_ondisk(c, b, WRITE);
/*
* We don't have to wait on anything here (before
@@ -914,6 +914,7 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
struct btree_write *w;
struct bset_tree *t;
+ set_btree_node_dying(b);
btree_interior_update_add_node_reference(as, b);
/*
@@ -925,7 +926,8 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
* in with keys that aren't in the journal anymore:
*/
for_each_bset(b, t)
- as->journal_seq = max(as->journal_seq, bset(b, t)->journal_seq);
+ as->journal_seq = max(as->journal_seq,
+ le64_to_cpu(bset(b, t)->journal_seq));
mutex_lock(&c->btree_interior_update_lock);
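
The hunk above wraps the on-disk journal_seq in le64_to_cpu() before taking the max: bset fields are stored little-endian, while as->journal_seq is kept in host byte order, so comparing the two without a conversion is wrong on big-endian machines. A minimal userspace illustration of the same pattern, with le64toh() standing in for the kernel's le64_to_cpu() and an invented struct:

/* Userspace illustration of the le64_to_cpu() fix: on-disk sequence numbers
 * are little-endian and must be converted to host byte order before being
 * compared.  le64toh() is glibc's analogue of the kernel's le64_to_cpu();
 * the struct and names are invented for this sketch. */
#include <stdint.h>
#include <endian.h>

struct toy_bset {
	uint64_t journal_seq;	/* stored little-endian, as on disk */
};

uint64_t toy_max_journal_seq(uint64_t cur_max, const struct toy_bset *t)
{
	uint64_t seq = le64toh(t->journal_seq);

	return seq > cur_max ? seq : cur_max;
}
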
@@ -1027,6 +1029,10 @@ static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
mutex_unlock(&c->btree_cache.lock);
mutex_lock(&c->btree_root_lock);
+ BUG_ON(btree_node_root(c, b) &&
+ (b->level < btree_node_root(c, b)->level ||
+ !btree_node_dying(btree_node_root(c, b))));
+
btree_node_root(c, b) = b;
mutex_unlock(&c->btree_root_lock);
@@ -1054,7 +1060,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
gc_pos_btree_root(b->btree_id));
}
-static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
+static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw)
{
struct btree_root *r = &c->btree_roots[b->btree_id];
@@ -1064,6 +1070,8 @@ static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
bkey_copy(&r->key, &b->key);
r->level = b->level;
r->alive = true;
+ if (rw == WRITE)
+ c->btree_roots_dirty = true;
mutex_unlock(&c->btree_root_lock);
}
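
bch2_btree_set_root_ondisk() now takes an rw argument so that a root recorded while reading in the filesystem (the READ caller further down) does not set btree_roots_dirty, while runtime updates (WRITE) do and get written back. A toy sketch of that distinction, with invented types standing in for the bcachefs ones:

/* Toy version of the rw argument added to bch2_btree_set_root_ondisk():
 * a root recorded while reading in the filesystem is already on disk, so only
 * WRITE-side callers mark the roots dirty.  struct toy_fs and the TOY_*
 * constants are invented for this sketch. */
#include <stdbool.h>

enum { TOY_READ, TOY_WRITE };	/* stand-ins for the kernel's READ/WRITE */

struct toy_fs {
	bool btree_roots_dirty;
};

void toy_set_root_ondisk(struct toy_fs *c, int rw)
{
	/* ... record the new root's key and level here ... */

	if (rw == TOY_WRITE)
		c->btree_roots_dirty = true;	/* needs to be written back */
}
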
@@ -1787,64 +1795,16 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
return ret;
}
-int bch2_btree_node_update_key(struct bch_fs *c, struct btree *b,
- struct bkey_i_extent *new_key)
+static void __bch2_btree_node_update_key(struct bch_fs *c,
+ struct btree_update *as,
+ struct btree_iter *iter,
+ struct btree *b, struct btree *new_hash,
+ struct bkey_i_extent *new_key)
{
- struct btree_update *as = NULL;
- struct btree *parent, *new_hash = NULL;
- struct btree_iter iter;
- struct closure cl;
+ struct btree *parent;
bool must_rewrite_parent = false;
int ret;
- __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
- BTREE_MAX_DEPTH,
- b->level, 0);
- closure_init_stack(&cl);
-
- ret = bch2_check_mark_super(c, extent_i_to_s_c(new_key), BCH_DATA_BTREE);
- if (ret)
- return ret;
-
-retry:
- down_read(&c->gc_lock);
- ret = bch2_btree_iter_traverse(&iter);
- if (ret)
- goto err;
-
- /* check PTR_HASH() after @b is locked by btree_iter_traverse(): */
- if (!new_hash &&
- PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
- /* bch2_btree_reserve_get will unlock */
- do {
- ret = bch2_btree_cache_cannibalize_lock(c, &cl);
- closure_sync(&cl);
- } while (ret == -EAGAIN);
-
- BUG_ON(ret);
-
- new_hash = bch2_btree_node_mem_alloc(c);
- }
-
- as = bch2_btree_update_start(c, iter.btree_id,
- btree_update_reserve_required(c, b),
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_USE_ALLOC_RESERVE,
- &cl);
- if (IS_ERR(as)) {
- ret = PTR_ERR(as);
- if (ret == -EAGAIN || ret == -EINTR) {
- bch2_btree_iter_unlock(&iter);
- up_read(&c->gc_lock);
- closure_sync(&cl);
- goto retry;
- }
- goto err;
- }
-
- mutex_lock(&c->btree_interior_update_lock);
-
/*
* Two corner cases that need to be thought about here:
*
@@ -1869,22 +1829,12 @@ retry:
if (b->will_make_reachable)
must_rewrite_parent = true;
- /* other case: btree node being freed */
- if (iter.nodes[b->level] != b) {
- /* node has been freed: */
- BUG_ON(btree_node_hashed(b));
- mutex_unlock(&c->btree_interior_update_lock);
- goto err;
- }
-
- mutex_unlock(&c->btree_interior_update_lock);
-
if (must_rewrite_parent)
as->flags |= BTREE_INTERIOR_UPDATE_MUST_REWRITE;
btree_interior_update_add_node_reference(as, b);
- parent = iter.nodes[b->level + 1];
+ parent = iter->nodes[b->level + 1];
if (parent) {
if (new_hash) {
bkey_copy(&new_hash->key, &new_key->k_i);
@@ -1893,8 +1843,8 @@ retry:
BUG_ON(ret);
}
- bch2_btree_insert_node(as, parent, &iter,
- &keylist_single(&new_key->k_i));
+ bch2_keylist_add(&as->parent_keys, &new_key->k_i);
+ bch2_btree_insert_node(as, parent, iter, &as->parent_keys);
if (new_hash) {
mutex_lock(&c->btree_cache.lock);
@@ -1914,7 +1864,7 @@ retry:
BUG_ON(btree_node_root(c, b) != b);
- bch2_btree_node_lock_write(b, &iter);
+ bch2_btree_node_lock_write(b, iter);
bch2_mark_key(c, bkey_i_to_s_c(&new_key->k_i),
c->opts.btree_node_size, true,
@@ -1925,14 +1875,94 @@ retry:
&stats);
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
- bkey_copy(&b->key, &new_key->k_i);
+
+ if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
+ mutex_lock(&c->btree_cache.lock);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, &new_key->k_i);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+ mutex_unlock(&c->btree_cache.lock);
+ } else {
+ bkey_copy(&b->key, &new_key->k_i);
+ }
btree_update_updated_root(as);
- bch2_btree_node_unlock_write(b, &iter);
+ bch2_btree_node_unlock_write(b, iter);
}
bch2_btree_update_done(as);
-out:
+}
+
+int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
+ struct btree *b, struct bkey_i_extent *new_key)
+{
+ struct btree_update *as = NULL;
+ struct btree *new_hash = NULL;
+ struct closure cl;
+ int ret;
+
+ closure_init_stack(&cl);
+
+ if (!down_read_trylock(&c->gc_lock)) {
+ bch2_btree_iter_unlock(iter);
+ down_read(&c->gc_lock);
+
+ if (!bch2_btree_iter_relock(iter)) {
+ ret = -EINTR;
+ goto err;
+ }
+ }
+
+ /* check PTR_HASH() after @b is locked by btree_iter_traverse(): */
+ if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
+ /* bch2_btree_reserve_get will unlock */
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+ if (ret) {
+ ret = -EINTR;
+
+ bch2_btree_iter_unlock(iter);
+ up_read(&c->gc_lock);
+ closure_sync(&cl);
+ down_read(&c->gc_lock);
+
+ if (!bch2_btree_iter_relock(iter))
+ goto err;
+ }
+
+ new_hash = bch2_btree_node_mem_alloc(c);
+ }
+
+ as = bch2_btree_update_start(c, iter->btree_id,
+ btree_update_reserve_required(c, b),
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_USE_ALLOC_RESERVE,
+ &cl);
+ if (IS_ERR(as)) {
+ ret = PTR_ERR(as);
+ if (ret == -EAGAIN)
+ ret = -EINTR;
+
+ if (ret != -EINTR)
+ goto err;
+
+ bch2_btree_iter_unlock(iter);
+ up_read(&c->gc_lock);
+ closure_sync(&cl);
+ down_read(&c->gc_lock);
+
+ if (!bch2_btree_iter_relock(iter))
+ goto err;
+ }
+
+ ret = bch2_check_mark_super(c, extent_i_to_s_c(new_key), BCH_DATA_BTREE);
+ if (ret)
+ goto err_free_update;
+
+ __bch2_btree_node_update_key(c, as, iter, b, new_hash, new_key);
+err:
if (new_hash) {
mutex_lock(&c->btree_cache.lock);
list_move(&new_hash->list, &c->btree_cache.freeable);
@@ -1941,14 +1971,12 @@ out:
six_unlock_write(&new_hash->lock);
six_unlock_intent(&new_hash->lock);
}
- bch2_btree_iter_unlock(&iter);
up_read(&c->gc_lock);
closure_sync(&cl);
return ret;
-err:
- if (as)
- bch2_btree_update_free(as);
- goto out;
+err_free_update:
+ bch2_btree_update_free(as);
+ goto err;
}
/* Init code: */
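
The rewritten bch2_btree_node_update_key() replaces the old retry loop with a drop-locks/relock-or-bail pattern: before anything that may sleep (taking gc_lock, cannibalizing the btree node cache, starting the interior update) it unlocks the iterator, and if the iterator cannot be relocked afterwards it returns -EINTR so the caller restarts the whole operation. A stripped-down, self-contained sketch of that control flow follows; the pthread rwlock and the iterator stubs are stand-ins, not the bcachefs API:

/* Self-contained sketch of the drop-locks/relock-or-bail control flow used by
 * the rewritten bch2_btree_node_update_key().  The pthread rwlock plays the
 * role of c->gc_lock; the iterator helpers are stubs, not the bcachefs API. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static void toy_iter_unlock(void)
{
	/* placeholder for bch2_btree_iter_unlock(): drop btree node locks */
}

static bool toy_iter_relock(void)
{
	/* placeholder for bch2_btree_iter_relock(): fails if the nodes were
	 * touched by someone else while we slept */
	return true;
}

int toy_take_gc_lock(pthread_rwlock_t *gc_lock)
{
	if (pthread_rwlock_tryrdlock(gc_lock)) {
		/* Can't take it without blocking: drop the node locks first so
		 * we never sleep while holding them, then retake and
		 * revalidate. */
		toy_iter_unlock();
		pthread_rwlock_rdlock(gc_lock);

		if (!toy_iter_relock()) {
			pthread_rwlock_unlock(gc_lock);
			return -EINTR;	/* caller restarts the operation */
		}
	}
	return 0;
}
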
@@ -1962,7 +1990,7 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
BUG_ON(btree_node_root(c, b));
__bch2_btree_set_root_inmem(c, b);
- bch2_btree_set_root_ondisk(c, b);
+ bch2_btree_set_root_ondisk(c, b, READ);
}
int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
@@ -1998,7 +2026,7 @@ int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
BUG_ON(btree_node_root(c, b));
bch2_btree_set_root_inmem(as, b);
- bch2_btree_set_root_ondisk(c, b);
+ bch2_btree_set_root_ondisk(c, b, WRITE);
bch2_btree_open_bucket_put(c, b);
six_unlock_intent(&b->lock);
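
One more detail from the hunk at +1875 above: when the new key changes PTR_HASH(), the node's position in the btree node hash table changes as well, so the code removes the node from the hash, copies the key, and re-inserts it under btree_cache.lock rather than copying the key in place. The following self-contained toy (none of it bcachefs code) shows why updating a key in place would leave the object filed under a stale bucket:

/* Toy showing why a key change forces a re-hash: the bucket an object lives in
 * is derived from its key, so updating the key in place would leave it filed
 * under the old bucket.  This mirrors the remove/copy/insert order used in
 * __bch2_btree_node_update_key(); all names here are invented. */
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 8

struct toy_node {
	uint64_t key;
	struct toy_node *next;
};

static struct toy_node *buckets[NBUCKETS];

static unsigned bucket(uint64_t key)
{
	return key % NBUCKETS;
}

static void hash_insert(struct toy_node *n)
{
	unsigned b = bucket(n->key);

	n->next = buckets[b];
	buckets[b] = n;
}

static void hash_remove(struct toy_node *n)
{
	struct toy_node **p = &buckets[bucket(n->key)];

	while (*p != n)
		p = &(*p)->next;
	*p = n->next;
}

static void update_key(struct toy_node *n, uint64_t new_key)
{
	if (bucket(new_key) != bucket(n->key)) {
		hash_remove(n);		/* old bucket is about to go stale */
		n->key = new_key;
		hash_insert(n);		/* re-file under the new bucket */
	} else {
		n->key = new_key;	/* bucket unchanged, no re-index needed */
	}
}

int main(void)
{
	struct toy_node n = { .key = 3 };

	hash_insert(&n);
	update_key(&n, 42);
	printf("node now lives in bucket %u\n", bucket(n.key));
	return 0;
}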