author	Kent Overstreet <kent.overstreet@gmail.com>	2022-01-09 20:48:31 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:09:29 -0400
commit	f25d8215f499418c17dfde0b3158a66e03c758dc (patch)
tree	2239ac974b58f41e6687a840c21cf2156795fded /fs/bcachefs/btree_update_interior.c
parent	c6b2826cd14c5421bc50a768e923d078a71139c1 (diff)
bcachefs: Kill allocator threads & freelists
Now that we have new persistent data structures for the allocator, this patch converts the allocator to use them.

Now, foreground bucket allocation uses the freespace btree to find buckets to allocate, instead of popping buckets off the freelist.

The background allocator threads are no longer needed and are deleted, as well as the allocator freelists. Now we only need background tasks for invalidating buckets containing cached data (when we are low on empty buckets), and for issuing discards.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
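To illustrate the shift this commit describes, here is a minimal userspace C sketch of the two allocation styles: the old path pops a prefilled freelist that background threads had to keep topped up, while the new path searches a sorted free-space structure on demand. Everything here (struct toy_freespace, alloc_bucket, the bitmap) is a hypothetical stand-in, not bcachefs code; in the real tree the freespace btree is keyed by (device, bucket) and is walked with btree iterators via bch2_alloc_sectors_start_trans().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the freespace btree: a small set of free buckets.
 * bcachefs proper uses a persistent btree, not an in-memory bitmap. */
struct toy_freespace {
	bool	free[16];	/* free[i]: bucket i is allocatable */
	size_t	cursor;		/* rotor: where the last search left off */
};

/* New-style foreground allocation: search the freespace structure for
 * the next free bucket, instead of popping a freelist that background
 * allocator threads had to refill. */
static long alloc_bucket(struct toy_freespace *fs)
{
	for (size_t i = 0; i < 16; i++) {
		size_t b = (fs->cursor + i) % 16;

		if (fs->free[b]) {
			fs->free[b] = false;	/* mark allocated */
			fs->cursor = b + 1;
			return (long) b;
		}
	}
	return -1;	/* empty: caller must invalidate cached buckets */
}

int main(void)
{
	struct toy_freespace fs = { .free = { [3] = true, [7] = true } };

	printf("got bucket %ld\n", alloc_bucket(&fs));	/* 3 */
	printf("got bucket %ld\n", alloc_bucket(&fs));	/* 7 */
	printf("got bucket %ld\n", alloc_bucket(&fs));	/* -1 */
	return 0;
}

When the sketch's alloc_bucket() returns -1, the commit message's remaining background tasks come into play: invalidating buckets that hold only cached data, and discarding them, so future searches find free buckets again.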
Diffstat (limited to 'fs/bcachefs/btree_update_interior.c')
-rw-r--r--	fs/bcachefs/btree_update_interior.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index d1e3e2c76e30..2e958f88777b 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -178,12 +178,13 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
 	six_unlock_intent(&b->c.lock);
 }
 
-static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
+static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 					     struct disk_reservation *res,
 					     struct closure *cl,
 					     bool interior_node,
 					     unsigned flags)
 {
+	struct bch_fs *c = trans->c;
 	struct write_point *wp;
 	struct btree *b;
 	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
@@ -214,7 +215,7 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
 	mutex_unlock(&c->btree_reserve_cache_lock);
 
 retry:
-	ret = bch2_alloc_sectors_start(c,
+	ret = bch2_alloc_sectors_start_trans(trans,
 				      c->opts.metadata_target ?:
 				      c->opts.foreground_target,
 				      0,
@@ -414,7 +415,8 @@ static void bch2_btree_reserve_put(struct btree_update *as)
 	mutex_unlock(&c->btree_reserve_cache_lock);
 }
 
-static int bch2_btree_reserve_get(struct btree_update *as,
+static int bch2_btree_reserve_get(struct btree_trans *trans,
+				  struct btree_update *as,
 				  unsigned nr_nodes[2],
 				  unsigned flags,
 				  struct closure *cl)
@@ -441,7 +443,7 @@ static int bch2_btree_reserve_get(struct btree_update *as,
 		struct prealloc_nodes *p = as->prealloc_nodes + interior;
 
 		while (p->nr < nr_nodes[interior]) {
-			b = __bch2_btree_node_alloc(c, &as->disk_res,
+			b = __bch2_btree_node_alloc(trans, &as->disk_res,
 					flags & BTREE_INSERT_NOWAIT ? NULL : cl,
 					interior, flags);
 			if (IS_ERR(b)) {
@@ -1066,8 +1068,9 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 	if (ret)
 		goto err;
 
-	ret = bch2_btree_reserve_get(as, nr_nodes, flags, NULL);
-	if (ret) {
+	ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL);
+	if (ret == -EAGAIN ||
+	    ret == -ENOMEM) {
 		struct closure cl;
 
 		closure_init_stack(&cl);
@@ -1075,7 +1078,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 		bch2_trans_unlock(trans);
 
 		do {
-			ret = bch2_btree_reserve_get(as, nr_nodes, flags, &cl);
+			ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl);
 			closure_sync(&cl);
 		} while (ret == -EAGAIN);
 	}
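The error-handling change in bch2_btree_update_start() is worth spelling out: the first bch2_btree_reserve_get() call passes a NULL closure, so it must not block while transaction locks are held; only on -EAGAIN or -ENOMEM does the caller unlock the transaction and retry in a loop with a stack closure it can actually wait on (closure_sync()). Below is a minimal sketch of that two-phase pattern, assuming hypothetical stand-ins (toy_reserve_get, the avail counter) rather than the real closure machinery:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for bch2_btree_reserve_get(): succeeds once
 * enough "buckets" are available; when blocking is allowed, it waits
 * for progress (modelled here as one bucket freed per call). */
static int toy_reserve_get(int *avail, int need, bool may_block)
{
	if (*avail >= need)
		return 0;
	if (!may_block)
		return -EAGAIN;	/* fast path must not sleep with locks held */
	(*avail)++;		/* pretend the discard/invalidate worker ran */
	return *avail >= need ? 0 : -EAGAIN;
}

int main(void)
{
	int avail = 0, ret;

	/* Phase 1: nonblocking attempt while "locks" are held. */
	ret = toy_reserve_get(&avail, 3, false);

	if (ret == -EAGAIN || ret == -ENOMEM) {
		/* Phase 2: "unlock", then retry where blocking is
		 * allowed, as the patch does after bch2_trans_unlock()
		 * with a stack closure and closure_sync(). */
		do {
			ret = toy_reserve_get(&avail, 3, true);
		} while (ret == -EAGAIN);
	}

	printf("reserve %s with %d buckets\n",
	       ret ? "failed" : "succeeded", avail);
	return 0;
}

Checking only for -EAGAIN/-ENOMEM before entering the blocking retry, instead of the old catch-all if (ret), matters because other errors (e.g. from disk reservation) should fail the update immediately rather than spin waiting for buckets that will never appear.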