author	Kent Overstreet <kent.overstreet@gmail.com>	2022-03-18 00:42:09 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:09:51 -0400
commit	8e3f913e2ab6ac2cb9e75a0a8635d0b44f838c33 (patch)
tree	935cb6aa90e9ef93b1508be5f0c81f2f06ad8c3f /fs/bcachefs/movinggc.c
parent	a8c752bb1d93a24a0de753e209d4f4d58d65c878 (diff)
bcachefs: Copygc now uses backpointers
Previously, copygc needed to walk the entire extents & reflink btrees
to find extents that needed to be moved.

Now that we have backpointers, this patch implements
bch2_evacuate_bucket() in the move code, which copygc now uses for
evacuating mostly empty buckets.

Also, thanks to the new backpointers code, copygc can now move btree
nodes.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
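Conceptually, the evacuation path this enables looks like the sketch
below: for each mostly-empty bucket, walk its backpointers and rewrite
whatever still points into it, data extent or btree node, instead of
scanning the whole extents & reflink btrees. This is an illustrative
sketch only; for_each_bucket_backpointer(), rewrite_btree_node() and
rewrite_extent() are hypothetical stand-ins, not the actual bcachefs
API (the real entry point is __bch2_evacuate_bucket() in move.c,
called from the loop at the bottom of the new bch2_copygc() below).

	/*
	 * Illustrative sketch, not the real API: evacuate one bucket by
	 * walking its backpointers. The iterator and rewrite helpers are
	 * hypothetical; a backpointer's level distinguishes btree nodes
	 * (level nonzero) from data extents (level zero).
	 */
	static int evacuate_bucket_sketch(struct bch_fs *c, struct bpos bucket, u8 gen)
	{
		struct bch_backpointer bp;
		int ret = 0;

		for_each_bucket_backpointer(c, bucket, gen, bp) {
			ret = bp.level
				? rewrite_btree_node(c, &bp)	/* copygc can now move btree nodes */
				: rewrite_extent(c, &bp);	/* move user data out of the bucket */
			if (ret)
				break;
		}
		return ret;
	}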
Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r--	fs/bcachefs/movinggc.c	236
1 file changed, 30 insertions(+), 206 deletions(-)
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index a04e2330d0e6..b420b79edb36 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -31,79 +31,6 @@
#include <linux/sort.h>
#include <linux/wait.h>
-static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
-{
- const struct copygc_heap_entry *l = _l;
- const struct copygc_heap_entry *r = _r;
-
- return cmp_int(l->dev, r->dev) ?:
- cmp_int(l->offset, r->offset);
-}
-
-static bool copygc_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- copygc_heap *h = &c->copygc_heap;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p = { 0 };
- unsigned i = 0;
-
- /*
- * We need to use the journal reserve here, because
- * - journal reclaim depends on btree key cache
- * flushing to make forward progress,
- * - which has to make forward progress when the
- * journal is pre-reservation full,
- * - and depends on allocation - meaning allocator and
- * copygc
- */
-
- data_opts->rewrite_ptrs = 0;
- data_opts->target = io_opts->background_target;
- data_opts->extra_replicas = 0;
- data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE|
- JOURNAL_WATERMARK_copygc;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct copygc_heap_entry search = {
- .dev = p.ptr.dev,
- .offset = p.ptr.offset,
- };
- ssize_t eytz;
-
- if (p.ptr.cached)
- continue;
-
- eytz = eytzinger0_find_le(h->data, h->used,
- sizeof(h->data[0]),
- bucket_offset_cmp, &search);
-#if 0
- /* eytzinger search verify code: */
- ssize_t j = -1, k;
-
- for (k = 0; k < h->used; k++)
- if (h->data[k].offset <= ptr->offset &&
- (j < 0 || h->data[k].offset > h->data[j].offset))
- j = k;
-
- BUG_ON(i != j);
-#endif
- if (eytz >= 0 &&
- p.ptr.dev == h->data[eytz].dev &&
- p.ptr.offset < h->data[eytz].offset + ca->mi.bucket_size &&
- p.ptr.gen == h->data[eytz].gen)
- data_opts->rewrite_ptrs |= 1U << i;
-
- i++;
- }
-
- return data_opts->rewrite_ptrs != 0;
-}
-
static inline int fragmentation_cmp(copygc_heap *heap,
struct copygc_heap_entry l,
struct copygc_heap_entry r)
@@ -111,7 +38,7 @@ static inline int fragmentation_cmp(copygc_heap *heap,
return cmp_int(l.fragmentation, r.fragmentation);
}
-static int walk_buckets_to_copygc(struct bch_fs *c)
+static int find_buckets_to_copygc(struct bch_fs *c)
{
copygc_heap *h = &c->copygc_heap;
struct btree_trans trans;
@@ -121,6 +48,14 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
bch2_trans_init(&trans, c, 0, 0);
+ /*
+ * Find buckets with lowest sector counts, skipping completely
+ * empty buckets, by building a maxheap sorted by sector count,
+ * and repeatedly replacing the maximum element until all
+ * buckets have been visited.
+ */
+ h->used = 0;
+
for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
@@ -130,7 +65,8 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
a = bch2_alloc_to_v4(k, &a_convert);
- if (a->data_type != BCH_DATA_user ||
+ if ((a->data_type != BCH_DATA_btree &&
+ a->data_type != BCH_DATA_user) ||
a->dirty_sectors >= ca->mi.bucket_size ||
bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
continue;
@@ -142,7 +78,7 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
.fragmentation = div_u64((u64) a->dirty_sectors * (1ULL << 31),
ca->mi.bucket_size),
.sectors = a->dirty_sectors,
- .offset = bucket_to_sector(ca, iter.pos.offset),
+ .bucket = iter.pos.offset,
};
heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
@@ -153,77 +89,22 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
return ret;
}
-static int bucket_inorder_cmp(const void *_l, const void *_r)
-{
- const struct copygc_heap_entry *l = _l;
- const struct copygc_heap_entry *r = _r;
-
- return cmp_int(l->dev, r->dev) ?: cmp_int(l->offset, r->offset);
-}
-
-static int check_copygc_was_done(struct bch_fs *c,
- u64 *sectors_not_moved,
- u64 *buckets_not_moved)
-{
- copygc_heap *h = &c->copygc_heap;
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 a;
- struct copygc_heap_entry *i;
- int ret = 0;
-
- sort(h->data, h->used, sizeof(h->data[0]), bucket_inorder_cmp, NULL);
-
- bch2_trans_init(&trans, c, 0, 0);
- bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN, 0);
-
- for (i = h->data; i < h->data + h->used; i++) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);
-
- bch2_btree_iter_set_pos(&iter, POS(i->dev, sector_to_bucket(ca, i->offset)));
-
- ret = lockrestart_do(&trans,
- bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- if (ret)
- break;
-
- bch2_alloc_to_v4(k, &a);
-
- if (a.gen == i->gen && a.dirty_sectors) {
- *sectors_not_moved += a.dirty_sectors;
- *buckets_not_moved += 1;
- }
- }
- bch2_trans_iter_exit(&trans, &iter);
-
- bch2_trans_exit(&trans);
- return ret;
-}
-
static int bch2_copygc(struct bch_fs *c)
{
copygc_heap *h = &c->copygc_heap;
- struct copygc_heap_entry e, *i;
+ struct copygc_heap_entry e;
struct bch_move_stats move_stats;
- u64 sectors_to_move = 0, sectors_to_write = 0, sectors_not_moved = 0;
- u64 sectors_reserved = 0;
- u64 buckets_to_move, buckets_not_moved = 0;
struct bch_dev *ca;
unsigned dev_idx;
size_t heap_size = 0;
- int ret;
+ struct moving_context ctxt;
+ struct data_update_opts data_opts = {
+ .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
+ };
+ int ret = 0;
bch2_move_stats_init(&move_stats, "copygc");
- /*
- * Find buckets with lowest sector counts, skipping completely
- * empty buckets, by building a maxheap sorted by sector count,
- * and repeatedly replacing the maximum element until all
- * buckets have been visited.
- */
- h->used = 0;
-
for_each_rw_member(ca, c, dev_idx)
heap_size += ca->mi.nbuckets >> 7;
@@ -235,21 +116,7 @@ static int bch2_copygc(struct bch_fs *c)
}
}
- for_each_rw_member(ca, c, dev_idx) {
- struct bch_dev_usage usage = bch2_dev_usage_read(ca);
-
- u64 avail = max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets +
- usage.d[BCH_DATA_need_discard].buckets -
- ca->nr_open_buckets -
- bch2_dev_buckets_reserved(ca, RESERVE_movinggc));
-
- avail = min(avail, ca->mi.nbuckets >> 6);
-
- sectors_reserved += avail * ca->mi.bucket_size;
- }
-
- ret = walk_buckets_to_copygc(c);
+ ret = find_buckets_to_copygc(c);
if (ret) {
bch2_fs_fatal_error(c, "error walking buckets to copygc!");
return ret;
@@ -281,69 +148,26 @@ static int bch2_copygc(struct bch_fs *c)
return 0;
}
- /*
- * Our btree node allocations also come out of RESERVE_movinggc:
- */
- sectors_reserved = (sectors_reserved * 3) / 4;
- if (!sectors_reserved) {
- bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
- return -1;
- }
+ heap_resort(h, fragmentation_cmp, NULL);
- for (i = h->data; i < h->data + h->used; i++) {
- sectors_to_move += i->sectors;
- sectors_to_write += i->sectors * i->replicas;
- }
+ bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+ writepoint_ptr(&c->copygc_write_point),
+ false);
- while (sectors_to_write > sectors_reserved) {
+ /* not correct w.r.t. device removal */
+ while (h->used && !ret) {
BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
- sectors_to_write -= e.sectors * e.replicas;
+ ret = __bch2_evacuate_bucket(&ctxt, POS(e.dev, e.bucket), e.gen,
+ data_opts);
}
- buckets_to_move = h->used;
+ bch2_moving_ctxt_exit(&ctxt);
- if (!buckets_to_move) {
- bch_err_ratelimited(c, "copygc cannot run - sectors_reserved %llu!",
- sectors_reserved);
- return 0;
- }
-
- eytzinger0_sort(h->data, h->used,
- sizeof(h->data[0]),
- bucket_offset_cmp, NULL);
-
- ret = bch2_move_data(c,
- 0, POS_MIN,
- BTREE_ID_NR, POS_MAX,
- NULL,
- &move_stats,
- writepoint_ptr(&c->copygc_write_point),
- false,
- copygc_pred, NULL);
if (ret < 0 && !bch2_err_matches(ret, EROFS))
bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
- if (ret)
- return ret;
-
- ret = check_copygc_was_done(c, &sectors_not_moved, &buckets_not_moved);
- if (ret) {
- bch_err(c, "error %i from check_copygc_was_done()", ret);
- return ret;
- }
- if (sectors_not_moved)
- bch_warn_ratelimited(c,
- "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
- sectors_not_moved, sectors_to_move,
- buckets_not_moved, buckets_to_move,
- atomic64_read(&move_stats.sectors_moved),
- atomic64_read(&move_stats.keys_raced),
- atomic64_read(&move_stats.sectors_raced));
-
- trace_and_count(c, copygc, c,
- atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
- buckets_to_move, buckets_not_moved);
- return 0;
+ trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+ return ret;
}
/*