Diffstat (limited to 'libbcachefs/btree_update_interior.c')
-rw-r--r--  libbcachefs/btree_update_interior.c  39
1 file changed, 12 insertions(+), 27 deletions(-)
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index 4d34bdca..537b8da7 100644
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -159,7 +159,6 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
{
struct bch_fs *c = as->c;
struct pending_btree_node_free *d;
- unsigned replicas;
/*
* btree_update lock is only needed here to avoid racing with
@@ -178,15 +177,6 @@ found:
d->index_update_done = true;
/*
- * Btree nodes are accounted as freed in bch_alloc_stats when they're
- * freed from the index:
- */
- replicas = bch2_extent_nr_dirty_ptrs(k);
- if (replicas)
- stats->replicas[replicas - 1].data[BCH_DATA_BTREE] -=
- c->opts.btree_node_size * replicas;
-
- /*
* We're dropping @k from the btree, but it's still live until the
* index update is persistent so we need to keep a reference around for
* mark and sweep to find - that's primarily what the
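
The block removed above was the last hand-rolled accounting on this path: it charged btree_node_size sectors per dirty pointer against the BCH_DATA_BTREE counters when the node was freed from the index. A standalone sketch of that arithmetic, with illustrative values (the real inputs are bch2_extent_nr_dirty_ptrs(k) and c->opts.btree_node_size; after this patch the bookkeeping is presumably left to the bch2_mark_key() calls below):

	/* Illustrative values only: with a 512-sector btree node and
	 * 2 dirty pointers, the removed code subtracted 512 * 2 = 1024
	 * sectors from stats->replicas[2 - 1].data[BCH_DATA_BTREE]. */
	static long long freed_btree_sectors(unsigned replicas,
					     unsigned node_sectors)
	{
		return -((long long) node_sectors * replicas);
	}
	/* freed_btree_sectors(2, 512) == -1024 */
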
@@ -207,15 +197,16 @@ found:
* bch2_mark_key() compares the current gc pos to the pos we're
* moving this reference from, hence one comparison here:
*/
- if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
- struct bch_fs_usage tmp = { 0 };
+ if (gc_pos_cmp(c->gc_pos, b
+ ? gc_pos_btree_node(b)
+ : gc_pos_btree_root(as->btree_id)) >= 0 &&
+ gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
+ struct gc_pos pos = { 0 };
bch2_mark_key(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&d->key),
- false, 0, b
- ? gc_pos_btree_node(b)
- : gc_pos_btree_root(as->btree_id),
- &tmp, 0, 0);
+ false, 0, pos,
+ NULL, 0, BCH_BUCKET_MARK_GC);
/*
* Don't apply tmp - pending deletes aren't tracked in
* bch_alloc_stats:
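
The rewritten guard is the core of this hunk: instead of unconditionally marking into a throwaway bch_fs_usage whenever GC hadn't yet reached the PENDING_DELETE phase, the update now goes to the in-flight GC's own counters (BCH_BUCKET_MARK_GC, NULL fs_usage), and only in the window where that GC has already walked past the node being freed. A self-contained sketch of the comparison, with simplified stand-in types (gc_pos_cmp() and the phase ordering here are illustrative, not the real definitions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-ins: GC walks positions in a total order,
	 * with PENDING_DELETE as a late phase. */
	enum gc_phase { GC_PHASE_BTREE, GC_PHASE_PENDING_DELETE };

	struct gc_pos { enum gc_phase phase; unsigned pos; };

	static int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
	{
		if (l.phase != r.phase)
			return l.phase < r.phase ? -1 : 1;
		return l.pos < r.pos ? -1 : l.pos > r.pos;
	}

	/* The new condition, restated: mark against the in-flight GC's
	 * counters only when GC has already walked past the node we're
	 * freeing -- so it saw and counted @k -- but hasn't yet reached
	 * PENDING_DELETE, where it handles pending deletes itself. */
	static bool mark_for_inflight_gc(struct gc_pos gc, struct gc_pos node)
	{
		struct gc_pos pending = { GC_PHASE_PENDING_DELETE, 0 };

		return gc_pos_cmp(gc, node) >= 0 &&
		       gc_pos_cmp(gc, pending) < 0;
	}

	int main(void)
	{
		struct gc_pos gc   = { GC_PHASE_BTREE, 10 };
		struct gc_pos node = { GC_PHASE_BTREE, 5 };

		printf("%d\n", mark_for_inflight_gc(gc, node));	/* 1 */
		return 0;
	}

The two bounds presumably avoid double-counting: a GC that hasn't reached the node yet will see the new btree state when it gets there, and one that has reached PENDING_DELETE accounts pending deletes on its own.
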
@@ -286,19 +277,13 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
static void bch2_btree_node_free_ondisk(struct bch_fs *c,
struct pending_btree_node_free *pending)
{
- struct bch_fs_usage stats = { 0 };
-
BUG_ON(!pending->index_update_done);
bch2_mark_key(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&pending->key),
false, 0,
gc_phase(GC_PHASE_PENDING_DELETE),
- &stats, 0, 0);
- /*
- * Don't apply stats - pending deletes aren't tracked in
- * bch_alloc_stats:
- */
+ NULL, 0, 0);
}
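
With that, both bch2_mark_key() call sites in this patch pass a NULL fs_usage instead of marking into a stack-local struct and discarding it, which is what the deleted comments described. A minimal standalone sketch of the convention (mark_key() here is a hypothetical stand-in, not the real bch2_mark_key() signature):

	#include <stddef.h>
	#include <stdio.h>

	struct fs_usage { long long sectors; };

	/* Stand-in for the fs_usage handling: when the caller passes
	 * NULL it simply gets no usage delta back. */
	static void mark_key(struct fs_usage *fs_usage, long long delta)
	{
		if (fs_usage)
			fs_usage->sectors += delta;
		/* ... bucket marks would be updated either way ... */
	}

	int main(void)
	{
		/* Before the patch: mark into a throwaway struct,
		 * then deliberately never apply it. */
		struct fs_usage tmp = { 0 };
		mark_key(&tmp, -512);		/* tmp discarded */

		/* After the patch: say so directly. */
		mark_key(NULL, -512);

		printf("discarded delta: %lld\n", tmp.sectors);
		return 0;
	}
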
static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
@@ -339,7 +324,7 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
mutex_unlock(&c->btree_reserve_cache_lock);
retry:
- wp = bch2_alloc_sectors_start(c, c->opts.foreground_target,
+ wp = bch2_alloc_sectors_start(c, c->opts.foreground_target, 0,
writepoint_ptr(&c->btree_write_point),
&devs_have,
res->nr_replicas,
@@ -637,12 +622,12 @@ static void btree_update_wait_on_journal(struct closure *cl)
int ret;
ret = bch2_journal_open_seq_async(&c->journal, as->journal_seq, cl);
- if (ret < 0)
- goto err;
- if (!ret) {
+ if (ret == -EAGAIN) {
continue_at(cl, btree_update_wait_on_journal, system_wq);
return;
}
+ if (ret < 0)
+ goto err;
bch2_journal_flush_seq_async(&c->journal, as->journal_seq, cl);
err:
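
The final hunk tracks a changed return convention: bch2_journal_open_seq_async() evidently now signals "entry not open yet, retry" with -EAGAIN instead of 0, so the retry test has to come before the generic ret < 0 bail-out. A standalone sketch of why the ordering matters (open_seq() is a hypothetical stand-in):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-in: -EAGAIN means "not open yet, retry",
	 * any other negative value is a hard error, 0 means ready. */
	static int open_seq(int attempt)
	{
		return attempt < 2 ? -EAGAIN : 0;
	}

	int main(void)
	{
		for (int attempt = 0; ; attempt++) {
			int ret = open_seq(attempt);

			/* -EAGAIN must be checked before the generic
			 * ret < 0 test; bailing on all negatives first
			 * would turn every retryable result into a
			 * fatal error. */
			if (ret == -EAGAIN)
				continue;	/* re-arm and retry */
			if (ret < 0) {
				fprintf(stderr, "hard error: %d\n", ret);
				return 1;
			}
			break;
		}
		printf("journal entry open\n");
		return 0;
	}
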