Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r--  fs/bcachefs/movinggc.c | 510
1 file changed, 276 insertions, 234 deletions
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 4a8c4e3a15e0..d13a120da267 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -6,22 +6,27 @@
  */
 
 #include "bcachefs.h"
+#include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "btree_iter.h"
 #include "btree_update.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
 #include "clock.h"
 #include "disk_groups.h"
+#include "errcode.h"
 #include "error.h"
 #include "extents.h"
 #include "eytzinger.h"
 #include "io.h"
 #include "keylist.h"
+#include "lru.h"
 #include "move.h"
 #include "movinggc.h"
 #include "super-io.h"
 
 #include <trace/events/bcachefs.h>
+#include <linux/bsearch.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/math64.h>
@@ -29,261 +34,233 @@
 #include <linux/sort.h>
 #include <linux/wait.h>
 
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca)					\
-	((ca)->free[RESERVE_MOVINGGC].size / 2)
-
-static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
+struct buckets_in_flight {
+	struct rhashtable		table;
+	struct move_bucket_in_flight	*first;
+	struct move_bucket_in_flight	*last;
+	size_t				nr;
+	size_t				sectors;
+};
+
+static const struct rhashtable_params bch_move_bucket_params = {
+	.head_offset	= offsetof(struct move_bucket_in_flight, hash),
+	.key_offset	= offsetof(struct move_bucket_in_flight, bucket.k),
+	.key_len	= sizeof(struct move_bucket_key),
+};
+
+static struct move_bucket_in_flight *
+move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
 {
-	const struct copygc_heap_entry *l = _l;
-	const struct copygc_heap_entry *r = _r;
+	struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
+	int ret;
 
-	return cmp_int(l->dev, r->dev) ?:
-		cmp_int(l->offset, r->offset);
-}
+	if (!new)
+		return ERR_PTR(-ENOMEM);
 
-static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
-				 struct bkey_s_c k,
-				 struct bch_io_opts *io_opts,
-				 struct data_opts *data_opts)
-{
-	copygc_heap *h = &c->copygc_heap;
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const union bch_extent_entry *entry;
-	struct extent_ptr_decoded p = { 0 };
-
-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
-		struct copygc_heap_entry search = {
-			.dev	= p.ptr.dev,
-			.offset	= p.ptr.offset,
-		};
-
-		ssize_t i = eytzinger0_find_le(h->data, h->used,
-					       sizeof(h->data[0]),
-					       bucket_offset_cmp, &search);
-#if 0
-		/* eytzinger search verify code: */
-		ssize_t j = -1, k;
-
-		for (k = 0; k < h->used; k++)
-			if (h->data[k].offset <= ptr->offset &&
-			    (j < 0 || h->data[k].offset > h->data[j].offset))
-				j = k;
-
-		BUG_ON(i != j);
-#endif
-		if (i >= 0 &&
-		    p.ptr.dev == h->data[i].dev &&
-		    p.ptr.offset < h->data[i].offset + ca->mi.bucket_size &&
-		    p.ptr.gen == h->data[i].gen) {
-			/*
-			 * We need to use the journal reserve here, because
-			 *  - journal reclaim depends on btree key cache
-			 *    flushing to make forward progress,
-			 *  - which has to make forward progress when the
-			 *    journal is pre-reservation full,
-			 *  - and depends on allocation - meaning allocator and
-			 *    copygc
-			 */
-
-			data_opts->target		= io_opts->background_target;
-			data_opts->nr_replicas		= 1;
-			data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE|
-				BTREE_INSERT_JOURNAL_RESERVED;
-			data_opts->rewrite_dev		= p.ptr.dev;
-
-			if (p.has_ec)
-				data_opts->nr_replicas += p.ec.redundancy;
-
-			return DATA_REWRITE;
-		}
+	new->bucket = b;
+
+	ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
+					    bch_move_bucket_params);
+	if (ret) {
+		kfree(new);
+		return ERR_PTR(ret);
 	}
 
-	return DATA_SKIP;
+	if (!list->first)
+		list->first = new;
+	else
+		list->last->next = new;
+
+	list->last = new;
+	list->nr++;
+	list->sectors += b.sectors;
+	return new;
 }
 
-static bool have_copygc_reserve(struct bch_dev *ca)
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+				  struct move_bucket *b, u64 time)
 {
-	bool ret;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct bch_alloc_v4 _a;
+	const struct bch_alloc_v4 *a;
+	int ret;
 
-	spin_lock(&ca->fs->freelist_lock);
-	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
-		ca->allocator_state != ALLOCATOR_running;
-	spin_unlock(&ca->fs->freelist_lock);
+	if (bch2_bucket_is_open(trans->c,
+				b->k.bucket.inode,
+				b->k.bucket.offset))
+		return 0;
 
-	return ret;
-}
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+			     b->k.bucket, BTREE_ITER_CACHED);
+	k = bch2_btree_iter_peek_slot(&iter);
+	ret = bkey_err(k);
+	bch2_trans_iter_exit(trans, &iter);
 
-static inline int fragmentation_cmp(copygc_heap *heap,
-				   struct copygc_heap_entry l,
-				   struct copygc_heap_entry r)
-{
-	return cmp_int(l.fragmentation, r.fragmentation);
+	if (ret)
+		return ret;
+
+	a = bch2_alloc_to_v4(k, &_a);
+	b->k.gen	= a->gen;
+	b->sectors	= a->dirty_sectors;
+
+	ret = data_type_movable(a->data_type) &&
+		a->fragmentation_lru &&
+		a->fragmentation_lru <= time;
+
+	if (!ret) {
+		struct printbuf buf = PRINTBUF;
+
+		bch2_bkey_val_to_text(&buf, trans->c, k);
+		pr_debug("%s", buf.buf);
+		printbuf_exit(&buf);
+	}
+
+	return ret;
 }
 
-static int bch2_copygc(struct bch_fs *c)
+static void move_buckets_wait(struct btree_trans *trans,
+			      struct moving_context *ctxt,
+			      struct buckets_in_flight *list,
+			      bool flush)
 {
-	copygc_heap *h = &c->copygc_heap;
-	struct copygc_heap_entry e, *i;
-	struct bucket_array *buckets;
-	struct bch_move_stats move_stats;
-	u64 sectors_to_move = 0, sectors_to_write = 0, sectors_not_moved = 0;
-	u64 sectors_reserved = 0;
-	u64 buckets_to_move, buckets_not_moved = 0;
-	struct bch_dev *ca;
-	unsigned dev_idx;
-	size_t b, heap_size = 0;
+	struct move_bucket_in_flight *i;
 	int ret;
 
-	bch_move_stats_init(&move_stats, "copygc");
+	while ((i = list->first)) {
+		if (flush)
+			move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
 
-	/*
-	 * Find buckets with lowest sector counts, skipping completely
-	 * empty buckets, by building a maxheap sorted by sector count,
-	 * and repeatedly replacing the maximum element until all
-	 * buckets have been visited.
-	 */
-	h->used = 0;
+		if (atomic_read(&i->count))
+			break;
 
-	for_each_rw_member(ca, c, dev_idx)
-		heap_size += ca->mi.nbuckets >> 7;
+		/*
+		 * moving_ctxt_exit calls bch2_write as it flushes pending
+		 * reads, which inits another btree_trans; this one must be
+		 * unlocked:
+		 */
+		bch2_verify_bucket_evacuated(trans, i->bucket.k.bucket, i->bucket.k.gen);
 
-	if (h->size < heap_size) {
-		free_heap(&c->copygc_heap);
-		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
-			bch_err(c, "error allocating copygc heap");
-			return 0;
-		}
-	}
+		list->first = i->next;
+		if (!list->first)
+			list->last = NULL;
 
-	for_each_rw_member(ca, c, dev_idx) {
-		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
-
-		spin_lock(&ca->fs->freelist_lock);
-		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
-		spin_unlock(&ca->fs->freelist_lock);
-
-		down_read(&ca->bucket_lock);
-		buckets = bucket_array(ca);
-
-		for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
-			struct bucket *g = buckets->b + b;
-			struct bucket_mark m = READ_ONCE(g->mark);
-			struct copygc_heap_entry e;
-
-			if (m.owned_by_allocator ||
-			    m.data_type != BCH_DATA_user ||
-			    !bucket_sectors_used(m) ||
-			    bucket_sectors_used(m) >= ca->mi.bucket_size)
-				continue;
-
-			WARN_ON(m.stripe && !g->stripe_redundancy);
-
-			e = (struct copygc_heap_entry) {
-				.dev		= dev_idx,
-				.gen		= m.gen,
-				.replicas	= 1 + g->stripe_redundancy,
-				.fragmentation	= bucket_sectors_used(m) * (1U << 15)
-					/ ca->mi.bucket_size,
-				.sectors	= bucket_sectors_used(m),
-				.offset		= bucket_to_sector(ca, b),
-			};
-			heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
-		}
-		up_read(&ca->bucket_lock);
-	}
+		list->nr--;
+		list->sectors -= i->bucket.sectors;
 
-	if (!h->used) {
-		bch_err_ratelimited(c, "copygc requested to run but found no buckets to move!");
-		return 0;
+		ret = rhashtable_remove_fast(&list->table, &i->hash,
+					     bch_move_bucket_params);
+		BUG_ON(ret);
+		kfree(i);
 	}
 
-	/*
-	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
-	 */
-	sectors_reserved = (sectors_reserved * 3) / 4;
-	if (!sectors_reserved) {
-		bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
-		return -1;
-	}
+	bch2_trans_unlock(trans);
+}
 
-	for (i = h->data; i < h->data + h->used; i++) {
-		sectors_to_move += i->sectors;
-		sectors_to_write += i->sectors * i->replicas;
-	}
+static bool bucket_in_flight(struct buckets_in_flight *list,
+			     struct move_bucket_key k)
+{
+	return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
+}
 
-	while (sectors_to_write > sectors_reserved) {
-		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
-		sectors_to_write -= e.sectors * e.replicas;
-	}
+typedef DARRAY(struct move_bucket) move_buckets;
 
-	buckets_to_move = h->used;
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+			struct moving_context *ctxt,
+			struct buckets_in_flight *buckets_in_flight,
+			move_buckets *buckets)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	size_t nr_to_get = max(16UL, buckets_in_flight->nr / 4);
+	size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
 	int ret;
 
-	if (!buckets_to_move) {
-		bch_err_ratelimited(c, "copygc cannot run - sectors_reserved %llu!",
-				    sectors_reserved);
-		return 0;
-	}
+	move_buckets_wait(trans, ctxt, buckets_in_flight, false);
+
+	ret = bch2_btree_write_buffer_flush(trans);
+	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+				 __func__, bch2_err_str(ret)))
+		return ret;
+
+	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+				  0, k, ({
+		struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
+		int ret = 0;
+
+		saw++;
+
+		if (!bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p)))
+			not_movable++;
+		else if (bucket_in_flight(buckets_in_flight, b.k))
+			in_flight++;
+		else {
+			ret = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
+			if (ret >= 0)
+				sectors += b.sectors;
+		}
+		ret;
+	}));
 
-	eytzinger0_sort(h->data, h->used,
-			sizeof(h->data[0]),
-			bucket_offset_cmp, NULL);
+	pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
+		 buckets_in_flight->nr, buckets_in_flight->sectors,
+		 saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);
 
-	ret = bch2_move_data(c,
-			     0,			POS_MIN,
-			     BTREE_ID_NR,	POS_MAX,
-			     NULL,
-			     writepoint_ptr(&c->copygc_write_point),
-			     copygc_pred, NULL,
-			     &move_stats);
+	return ret < 0 ? ret : 0;
+}
 
-	for_each_rw_member(ca, c, dev_idx) {
-		down_read(&ca->bucket_lock);
-		buckets = bucket_array(ca);
-		for (i = h->data; i < h->data + h->used; i++) {
-			struct bucket_mark m;
-			size_t b;
-
-			if (i->dev != dev_idx)
-				continue;
-
-			b = sector_to_bucket(ca, i->offset);
-			m = READ_ONCE(buckets->b[b].mark);
-
-			if (i->gen == m.gen &&
-			    bucket_sectors_used(m)) {
-				sectors_not_moved += bucket_sectors_used(m);
-				buckets_not_moved++;
-			}
+static int bch2_copygc(struct btree_trans *trans,
+		       struct moving_context *ctxt,
+		       struct buckets_in_flight *buckets_in_flight)
+{
+	struct bch_fs *c = trans->c;
+	struct data_update_opts data_opts = {
+		.btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
+	};
+	move_buckets buckets = { 0 };
+	struct move_bucket_in_flight *f;
+	struct move_bucket *i;
+	u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+	int ret = 0;
+
+	ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
+	if (ret)
+		goto err;
+
+	darray_for_each(buckets, i) {
+		if (unlikely(freezing(current)))
+			break;
+
+		f = move_bucket_in_flight_add(buckets_in_flight, *i);
+		ret = PTR_ERR_OR_ZERO(f);
+		if (ret == -EEXIST) /* rare race: copygc_get_buckets returned same bucket more than once */
+			continue;
+		if (ret == -ENOMEM) { /* flush IO, continue later */
+			ret = 0;
+			break;
 		}
-		up_read(&ca->bucket_lock);
+
+		ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
+					     f->bucket.k.gen, data_opts);
+		if (ret)
+			goto err;
 	}
+err:
+	darray_exit(&buckets);
 
-	if (sectors_not_moved && !ret)
-		bch_warn_ratelimited(c,
-			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
-			sectors_not_moved, sectors_to_move,
-			buckets_not_moved, buckets_to_move,
-			atomic64_read(&move_stats.sectors_moved),
-			atomic64_read(&move_stats.keys_raced),
-			atomic64_read(&move_stats.sectors_raced));
-
-	trace_copygc(c,
-		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
-		     buckets_to_move, buckets_not_moved);
-	return 0;
+	/* no entries in LRU btree found, or got to end: */
+	if (ret == -ENOENT)
+		ret = 0;
+
+	if (ret < 0 && !bch2_err_matches(ret, EROFS))
+		bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
+
+	moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
+	trace_and_count(c, copygc, c, moved, 0, 0, 0);
+	return ret;
 }
 
 /*
@@ -305,13 +282,18 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	struct bch_dev *ca;
 	unsigned dev_idx;
 	s64 wait = S64_MAX, fragmented_allowed, fragmented;
+	unsigned i;
 
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-		fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) *
-				       ca->mi.bucket_size) >> 1);
-		fragmented = usage.d[BCH_DATA_user].fragmented;
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
+				       ca->mi.bucket_size) >> 1);
+		fragmented = 0;
+
+		for (i = 0; i < BCH_DATA_NR; i++)
+			if (data_type_movable(i))
+				fragmented += usage.d[i].fragmented;
 
 		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
 	}
@@ -319,26 +301,75 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	return wait;
 }
 
+void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	prt_printf(out, "Currently waiting for:     ");
+	prt_human_readable_u64(out, max(0LL, c->copygc_wait -
+					atomic64_read(&c->io_clock[WRITE].now)) << 9);
+	prt_newline(out);
+
+	prt_printf(out, "Currently waiting since:   ");
+	prt_human_readable_u64(out, max(0LL,
+					atomic64_read(&c->io_clock[WRITE].now) -
+					c->copygc_wait_at) << 9);
+	prt_newline(out);
+
+	prt_printf(out, "Currently calculated wait: ");
+	prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
+	prt_newline(out);
+}
+
 static int bch2_copygc_thread(void *arg)
 {
 	struct bch_fs *c = arg;
+	struct btree_trans trans;
+	struct moving_context ctxt;
+	struct bch_move_stats move_stats;
 	struct io_clock *clock = &c->io_clock[WRITE];
+	struct buckets_in_flight move_buckets;
 	u64 last, wait;
+	int ret = 0;
+
+	memset(&move_buckets, 0, sizeof(move_buckets));
+
+	ret = rhashtable_init(&move_buckets.table, &bch_move_bucket_params);
+	if (ret) {
+		bch_err(c, "error allocating copygc buckets in flight: %s",
+			bch2_err_str(ret));
+		return ret;
+	}
 
 	set_freezable();
+	bch2_trans_init(&trans, c, 0, 0);
+
+	bch2_move_stats_init(&move_stats, "copygc");
+	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+			      writepoint_ptr(&c->copygc_write_point),
+			      false);
 
-	while (!kthread_should_stop()) {
+	while (!ret && !kthread_should_stop()) {
+		bch2_trans_unlock(&trans);
 		cond_resched();
 
-		if (kthread_wait_freezable(c->copy_gc_enabled))
-			break;
+		if (!c->copy_gc_enabled) {
+			move_buckets_wait(&trans, &ctxt, &move_buckets, true);
+			kthread_wait_freezable(c->copy_gc_enabled);
+		}
+
+		if (unlikely(freezing(current))) {
+			move_buckets_wait(&trans, &ctxt, &move_buckets, true);
+			__refrigerator(false);
+			continue;
+		}
 
 		last = atomic64_read(&clock->now);
 		wait = bch2_copygc_wait_amount(c);
 
 		if (wait > clock->max_slop) {
-			trace_copygc_wait(c, wait, last + wait);
+			c->copygc_wait_at = last;
 			c->copygc_wait = last + wait;
+			move_buckets_wait(&trans, &ctxt, &move_buckets, true);
+			trace_and_count(c, copygc_wait, c, wait, last + wait);
 			bch2_kthread_io_clock_wait(clock, last + wait,
 					MAX_SCHEDULE_TIMEOUT);
 			continue;
@@ -346,10 +377,17 @@ static int bch2_copygc_thread(void *arg)
 
 		c->copygc_wait = 0;
 
-		if (bch2_copygc(c))
-			break;
+		c->copygc_running = true;
+		ret = bch2_copygc(&trans, &ctxt, &move_buckets);
+		c->copygc_running = false;
+
+		wake_up(&c->copygc_running_wq);
 	}
 
+	move_buckets_wait(&trans, &ctxt, &move_buckets, true);
+	bch2_trans_exit(&trans);
+	bch2_moving_ctxt_exit(&ctxt);
+
 	return 0;
 }
 
@@ -365,6 +403,7 @@ void bch2_copygc_stop(struct bch_fs *c)
 int bch2_copygc_start(struct bch_fs *c)
 {
 	struct task_struct *t;
+	int ret;
 
 	if (c->copygc_thread)
 		return 0;
@@ -376,9 +415,10 @@ int bch2_copygc_start(struct bch_fs *c)
 		return -ENOMEM;
 
 	t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
-	if (IS_ERR(t)) {
-		bch_err(c, "error creating copygc thread: %li", PTR_ERR(t));
-		return PTR_ERR(t);
+	ret = PTR_ERR_OR_ZERO(t);
+	if (ret) {
+		bch_err(c, "error creating copygc thread: %s", bch2_err_str(ret));
+		return ret;
 	}
 
 	get_task_struct(t);
@@ -391,4 +431,6 @@ int bch2_copygc_start(struct bch_fs *c)
 
 void bch2_fs_copygc_init(struct bch_fs *c)
 {
+	init_waitqueue_head(&c->copygc_running_wq);
+	c->copygc_running = false;
 }