Diffstat (limited to 'libbcachefs/move.c')
-rw-r--r--   libbcachefs/move.c | 466
1 file changed, 266 insertions, 200 deletions
diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 0c5b924c..5eaf0cf8 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -9,41 +9,38 @@
 #include "keylist.h"
 
 #include <linux/ioprio.h>
+#include <linux/kthread.h>
 
 #include <trace/events/bcachefs.h>
 
-static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c,
-					    struct bkey_s_extent e,
-					    struct bch_extent_ptr ptr)
-{
-	struct bch_extent_ptr *ptr2;
-	struct bch_dev *ca = c->devs[ptr.dev];
+struct moving_io {
+	struct list_head list;
+	struct closure cl;
+	bool read_completed;
+	unsigned sectors;
 
-	extent_for_each_ptr(e, ptr2)
-		if (ptr2->dev == ptr.dev &&
-		    ptr2->gen == ptr.gen &&
-		    PTR_BUCKET_NR(ca, ptr2) ==
-		    PTR_BUCKET_NR(ca, &ptr))
-			return ptr2;
+	struct bch_read_bio rbio;
 
-	return NULL;
-}
+	struct migrate_write write;
+	/* Must be last since it is variable size */
+	struct bio_vec bi_inline_vecs[0];
+};
 
-static struct bch_extent_ptr *bch2_migrate_matching_ptr(struct migrate_write *m,
-							struct bkey_s_extent e)
-{
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_ptr *ret;
+struct moving_context {
+	/* Closure for waiting on all reads and writes to complete */
+	struct closure cl;
 
-	if (m->move)
-		ret = bkey_find_ptr(m->op.c, e, m->move_ptr);
-	else
-		extent_for_each_ptr(bkey_i_to_s_c_extent(&m->key), ptr)
-			if ((ret = bkey_find_ptr(m->op.c, e, *ptr)))
-				break;
+	/* Key and sector moves issued, updated from submission context */
+	u64 keys_moved;
+	u64 sectors_moved;
+	atomic64_t sectors_raced;
 
-	return ret;
-}
+	struct list_head reads;
+
+	atomic_t sectors_in_flight;
+
+	wait_queue_head_t wait;
+};
 
 static int bch2_migrate_index_update(struct bch_write_op *op)
 {
@@ -59,71 +56,78 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 			     BTREE_ITER_INTENT);
 
 	while (1) {
-		struct bkey_s_extent insert =
-			bkey_i_to_s_extent(bch2_keylist_front(keys));
 		struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
+		struct bkey_i_extent *insert, *new =
+			bkey_i_to_extent(bch2_keylist_front(keys));
+		BKEY_PADDED(k) _new, _insert;
 		struct bch_extent_ptr *ptr;
-		struct bkey_s_extent e;
-		BKEY_PADDED(k) new;
+		struct bch_extent_crc_unpacked crc;
+		bool did_work = false;
 
-		if (!k.k) {
+		if (btree_iter_err(k)) {
 			ret = bch2_btree_iter_unlock(&iter);
 			break;
 		}
 
-		if (!bkey_extent_is_data(k.k))
+		if (bversion_cmp(k.k->version, new->k.version) ||
+		    !bkey_extent_is_data(k.k) ||
+		    !bch2_extent_matches_ptr(c, bkey_s_c_to_extent(k),
+					     m->ptr, m->offset))
 			goto nomatch;
 
-		bkey_reassemble(&new.k, k);
-		bch2_cut_front(iter.pos, &new.k);
-		bch2_cut_back(insert.k->p, &new.k.k);
-		e = bkey_i_to_s_extent(&new.k);
-
-		/* hack - promotes can race: */
-		if (m->promote)
-			extent_for_each_ptr(insert, ptr)
-				if (bch2_extent_has_device(e.c, ptr->dev))
-					goto nomatch;
-
-		ptr = bch2_migrate_matching_ptr(m, e);
-		if (ptr) {
-			int nr_new_dirty = bch2_extent_nr_dirty_ptrs(insert.s_c);
-			unsigned insert_flags =
-				BTREE_INSERT_ATOMIC|
-				BTREE_INSERT_NOFAIL;
+		bkey_reassemble(&_insert.k, k);
+		insert = bkey_i_to_extent(&_insert.k);
+
+		bkey_copy(&_new.k, bch2_keylist_front(keys));
+		new = bkey_i_to_extent(&_new.k);
+
+		bch2_cut_front(iter.pos, &insert->k_i);
+		bch2_cut_back(new->k.p, &insert->k);
+		bch2_cut_back(insert->k.p, &new->k);
+
+		if (m->move_dev >= 0 &&
+		    (ptr = (struct bch_extent_ptr *)
+		     bch2_extent_has_device(extent_i_to_s_c(insert),
					    m->move_dev)))
+			bch2_extent_drop_ptr(extent_i_to_s(insert), ptr);
 
-			/* copygc uses btree node reserve: */
-			if (m->move)
-				insert_flags |= BTREE_INSERT_USE_RESERVE;
-
-			if (m->move) {
-				nr_new_dirty -= !ptr->cached;
-				__bch2_extent_drop_ptr(e, ptr);
+		extent_for_each_ptr_crc(extent_i_to_s(new), ptr, crc) {
+			if (bch2_extent_has_device(extent_i_to_s_c(insert), ptr->dev)) {
+				/*
+				 * raced with another move op? extent already
+				 * has a pointer to the device we just wrote
+				 * data to
+				 */
+				continue;
 			}
 
-			BUG_ON(nr_new_dirty < 0);
-
-			memcpy_u64s(extent_entry_last(e),
-				    insert.v,
-				    bkey_val_u64s(insert.k));
-			e.k->u64s += bkey_val_u64s(insert.k);
-
-			bch2_extent_narrow_crcs(e);
-			bch2_extent_drop_redundant_crcs(e);
-			bch2_extent_normalize(c, e.s);
-			bch2_extent_mark_replicas_cached(c, e, nr_new_dirty);
-
-			ret = bch2_btree_insert_at(c, &op->res,
-					NULL, op_journal_seq(op),
-					insert_flags,
-					BTREE_INSERT_ENTRY(&iter, &new.k));
-			if (ret && ret != -EINTR)
-				break;
-		} else {
-nomatch:
-			bch2_btree_iter_advance_pos(&iter);
+			bch2_extent_crc_append(insert, crc);
+			extent_ptr_append(insert, *ptr);
+			did_work = true;
 		}
 
+		if (!did_work)
+			goto nomatch;
+
+		bch2_extent_narrow_crcs(insert,
+				(struct bch_extent_crc_unpacked) { 0 });
+		bch2_extent_normalize(c, extent_i_to_s(insert).s);
+		bch2_extent_mark_replicas_cached(c, extent_i_to_s(insert));
+
+		ret = bch2_btree_insert_at(c, &op->res,
+				NULL, op_journal_seq(op),
+				BTREE_INSERT_ATOMIC|
+				BTREE_INSERT_NOFAIL|
+				m->btree_insert_flags,
+				BTREE_INSERT_ENTRY(&iter, &insert->k_i));
+		if (!ret)
+			atomic_long_inc(&c->extent_migrate_done);
+		if (ret == -EINTR)
+			ret = 0;
+		if (ret)
+			break;
+next:
 		while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
 			bch2_keylist_pop_front(keys);
 			if (bch2_keylist_empty(keys))
@@ -131,96 +135,83 @@ nomatch:
 		}
 
 		bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+		continue;
+nomatch:
+		if (m->ctxt)
+			atomic64_add(k.k->p.offset - iter.pos.offset,
+				     &m->ctxt->sectors_raced);
+		atomic_long_inc(&c->extent_migrate_raced);
+		trace_move_race(&new->k);
+		bch2_btree_iter_advance_pos(&iter);
+		goto next;
 	}
 out:
 	bch2_btree_iter_unlock(&iter);
 	return ret;
 }
 
-void bch2_migrate_write_init(struct bch_fs *c,
-			     struct migrate_write *m,
-			     struct bch_devs_mask *devs,
-			     struct bkey_s_c k,
-			     const struct bch_extent_ptr *move_ptr,
-			     unsigned flags)
+void bch2_migrate_write_init(struct migrate_write *m,
+			     struct bch_read_bio *rbio)
 {
-	bkey_reassemble(&m->key, k);
-
-	m->promote = false;
-	m->move = move_ptr != NULL;
-	if (move_ptr)
-		m->move_ptr = *move_ptr;
-
-	if (bkey_extent_is_cached(k.k) ||
-	    (move_ptr && move_ptr->cached))
-		flags |= BCH_WRITE_CACHED;
+	/* write bio must own pages: */
+	BUG_ON(!m->op.wbio.bio.bi_vcnt);
+
+	m->ptr = rbio->pick.ptr;
+	m->offset = rbio->pos.offset - rbio->pick.crc.offset;
+	m->op.devs_have = rbio->devs_have;
+	m->op.pos = rbio->pos;
+	m->op.version = rbio->version;
+	m->op.crc = rbio->pick.crc;
+
+	if (bch2_csum_type_is_encryption(m->op.crc.csum_type)) {
+		m->op.nonce = m->op.crc.nonce + m->op.crc.offset;
+		m->op.csum_type = m->op.crc.csum_type;
+	}
 
-	bch2_write_op_init(&m->op, c, (struct disk_reservation) { 0 },
-			   devs, (unsigned long) current,
-			   bkey_start_pos(k.k), NULL,
-			   flags|BCH_WRITE_ONLY_SPECIFIED_DEVS);
+	if (m->move_dev >= 0)
+		bch2_dev_list_drop_dev(&m->op.devs_have, m->move_dev);
 
-	if (m->move)
+	if (m->btree_insert_flags & BTREE_INSERT_USE_RESERVE)
 		m->op.alloc_reserve = RESERVE_MOVINGGC;
 
-	m->op.nonce = extent_current_nonce(bkey_s_c_to_extent(k));
+	m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
+		BCH_WRITE_PAGES_STABLE|
+		BCH_WRITE_PAGES_OWNED|
+		BCH_WRITE_DATA_ENCODED;
+
+	m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;
 	m->op.nr_replicas = 1;
+	m->op.nr_replicas_required = 1;
 	m->op.index_update_fn = bch2_migrate_index_update;
 }
 
-static void migrate_bio_init(struct moving_io *io, struct bio *bio,
-			     unsigned sectors)
+static void move_free(struct closure *cl)
 {
-	bio_init(bio, io->bi_inline_vecs,
-		 DIV_ROUND_UP(sectors, PAGE_SECTORS));
-	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
-	bio->bi_iter.bi_size = sectors << 9;
-	bio->bi_private = &io->cl;
-	bch2_bio_map(bio, NULL);
-}
-
-static void moving_io_free(struct moving_io *io)
-{
-	struct moving_context *ctxt = io->ctxt;
+	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	struct moving_context *ctxt = io->write.ctxt;
 	struct bio_vec *bv;
 	int i;
 
-	atomic_sub(io->write.key.k.size, &ctxt->sectors_in_flight);
-	wake_up(&ctxt->wait);
-
 	bio_for_each_segment_all(bv, &io->write.op.wbio.bio, i)
 		if (bv->bv_page)
 			__free_page(bv->bv_page);
-	kfree(io);
-}
-
-static void moving_error(struct moving_context *ctxt, unsigned flag)
-{
-	atomic_inc(&ctxt->error_count);
-	//atomic_or(flag, &ctxt->error_flags);
-}
 
-static void moving_write_done(struct closure *cl)
-{
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
-
-	if (io->write.op.error)
-		moving_error(io->ctxt, MOVING_FLAG_WRITE);
-
-	//if (io->replace.failures)
-	//	trace_copy_collision(q, &io->key.k);
+	atomic_sub(io->sectors, &ctxt->sectors_in_flight);
+	wake_up(&ctxt->wait);
 
-	moving_io_free(io);
+	kfree(io);
 }
 
-static void write_moving(struct closure *cl)
+static void move_write(struct closure *cl)
 {
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
-	struct bch_write_op *op = &io->write.op;
 
-	closure_call(&op->cl, bch2_write, NULL, &io->cl);
-	closure_return_with_destructor(&io->cl, moving_write_done);
+	if (likely(!io->rbio.bio.bi_error)) {
+		bch2_migrate_write_init(&io->write, &io->rbio);
+		closure_call(&io->write.op.cl, bch2_write, NULL, cl);
+	}
+
+	closure_return_with_destructor(cl, move_free);
 }
 
 static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
@@ -231,16 +222,10 @@ static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
 	return io && io->read_completed ? io : NULL;
 }
 
-static void read_moving_endio(struct bio *bio)
+static void move_read_endio(struct bio *bio)
 {
-	struct closure *cl = bio->bi_private;
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
-	struct moving_context *ctxt = io->ctxt;
-
-	trace_move_read_done(&io->write.key.k);
-
-	if (bio->bi_error)
-		moving_error(io->ctxt, MOVING_FLAG_READ);
+	struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
+	struct moving_context *ctxt = io->write.ctxt;
 
 	io->read_completed = true;
 	if (next_pending_write(ctxt))
@@ -249,58 +234,81 @@ static void read_moving_endio(struct bio *bio)
 	closure_put(&ctxt->cl);
 }
 
-int bch2_data_move(struct bch_fs *c,
-		   struct moving_context *ctxt,
-		   struct bch_devs_mask *devs,
-		   struct bkey_s_c k,
-		   const struct bch_extent_ptr *move_ptr)
+static int bch2_move_extent(struct bch_fs *c,
+			    struct moving_context *ctxt,
+			    struct bch_devs_mask *devs,
+			    struct write_point_specifier wp,
+			    int btree_insert_flags,
+			    int move_device,
+			    struct bkey_s_c k)
 {
 	struct extent_pick_ptr pick;
 	struct moving_io *io;
+	const struct bch_extent_ptr *ptr;
+	struct bch_extent_crc_unpacked crc;
+	unsigned sectors = k.k->size, pages;
 
-	bch2_extent_pick_ptr(c, k, &ctxt->avoid, &pick);
+	bch2_extent_pick_ptr(c, k, NULL, &pick);
 	if (IS_ERR_OR_NULL(pick.ca))
 		return pick.ca ? PTR_ERR(pick.ca) : 0;
 
-	io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) *
-		     DIV_ROUND_UP(k.k->size, PAGE_SECTORS), GFP_KERNEL);
-	if (!io)
-		return -ENOMEM;
+	/* write path might have to decompress data: */
+	extent_for_each_ptr_crc(bkey_s_c_to_extent(k), ptr, crc)
+		sectors = max_t(unsigned, sectors, crc.uncompressed_size);
 
-	io->ctxt = ctxt;
+	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
+	io = kzalloc(sizeof(struct moving_io) +
+		     sizeof(struct bio_vec) * pages, GFP_KERNEL);
+	if (!io)
+		goto err;
 
-	migrate_bio_init(io, &io->rbio.bio, k.k->size);
+	io->write.ctxt = ctxt;
+	io->sectors = k.k->size;
 
-	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
-	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
-	io->rbio.bio.bi_end_io = read_moving_endio;
+	bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
+	bio_set_prio(&io->write.op.wbio.bio,
+		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+	io->write.op.wbio.bio.bi_iter.bi_size = sectors << 9;
 
-	if (bio_alloc_pages(&io->rbio.bio, GFP_KERNEL)) {
+	bch2_bio_map(&io->write.op.wbio.bio, NULL);
+	if (bio_alloc_pages(&io->write.op.wbio.bio, GFP_KERNEL)) {
 		kfree(io);
-		return -ENOMEM;
+		goto err;
 	}
 
-	migrate_bio_init(io, &io->write.op.wbio.bio, k.k->size);
+	bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
+	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+	io->rbio.bio.bi_iter.bi_size = sectors << 9;
 
-	bch2_migrate_write_init(c, &io->write, devs, k, move_ptr, 0);
+	bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
+	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
+	io->rbio.bio.bi_end_io = move_read_endio;
 
-	trace_move_read(&io->write.key.k);
+	__bch2_write_op_init(&io->write.op, c);
+	io->write.btree_insert_flags = btree_insert_flags;
+	io->write.move_dev = move_device;
+	io->write.op.devs = devs;
+	io->write.op.write_point = wp;
 
 	ctxt->keys_moved++;
 	ctxt->sectors_moved += k.k->size;
-	if (ctxt->rate)
-		bch2_ratelimit_increment(ctxt->rate, k.k->size);
-	atomic_add(k.k->size, &ctxt->sectors_in_flight);
+	trace_move_extent(k.k);
+
+	atomic_add(io->sectors, &ctxt->sectors_in_flight);
 	list_add_tail(&io->list, &ctxt->reads);
 
 	/*
-	 * dropped by read_moving_endio() - guards against use after free of
+	 * dropped by move_read_endio() - guards against use after free of
	 * ctxt when doing wakeup
 	 */
-	closure_get(&io->ctxt->cl);
-	bch2_read_extent(c, &io->rbio, k, &pick, 0);
+	closure_get(&ctxt->cl);
+	bch2_read_extent(c, &io->rbio, bkey_s_c_to_extent(k),
+			 &pick, BCH_READ_NODECODE);
 
 	return 0;
+err:
+	trace_move_alloc_fail(k.k);
+	return -ENOMEM;
 }
 
 static void do_pending_writes(struct moving_context *ctxt)
@@ -309,14 +317,7 @@ static void do_pending_writes(struct moving_context *ctxt)
 
 	while ((io = next_pending_write(ctxt))) {
 		list_del(&io->list);
-
-		if (io->rbio.bio.bi_error) {
-			moving_io_free(io);
-			continue;
-		}
-
-		trace_move_write(&io->write.key.k);
-		closure_call(&io->cl, write_moving, NULL, &ctxt->cl);
+		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
 	}
 }
 
@@ -330,18 +331,7 @@ do { \
 		     next_pending_write(_ctxt) || (_cond)); \
 } while (1)
 
-int bch2_move_ctxt_wait(struct moving_context *ctxt)
-{
-	move_ctxt_wait_event(ctxt,
-			     atomic_read(&ctxt->sectors_in_flight) <
-			     ctxt->max_sectors_in_flight);
-
-	return ctxt->rate
-		? bch2_ratelimit_wait_freezable_stoppable(ctxt->rate)
-		: 0;
-}
-
-void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
+static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
 {
 	unsigned sectors_pending = atomic_read(&ctxt->sectors_in_flight);
 
@@ -350,7 +340,7 @@ void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
 			     atomic_read(&ctxt->sectors_in_flight) != sectors_pending);
 }
 
-void bch2_move_ctxt_exit(struct moving_context *ctxt)
+static void bch2_move_ctxt_exit(struct moving_context *ctxt)
 {
 	move_ctxt_wait_event(ctxt, !atomic_read(&ctxt->sectors_in_flight));
 	closure_sync(&ctxt->cl);
@@ -359,16 +349,92 @@ void bch2_move_ctxt_exit(struct moving_context *ctxt)
 	EBUG_ON(atomic_read(&ctxt->sectors_in_flight));
 }
 
-void bch2_move_ctxt_init(struct moving_context *ctxt,
-			 struct bch_ratelimit *rate,
-			 unsigned max_sectors_in_flight)
+static void bch2_move_ctxt_init(struct moving_context *ctxt)
 {
 	memset(ctxt, 0, sizeof(*ctxt));
 	closure_init_stack(&ctxt->cl);
 
-	ctxt->rate = rate;
-	ctxt->max_sectors_in_flight = max_sectors_in_flight;
-
 	INIT_LIST_HEAD(&ctxt->reads);
 	init_waitqueue_head(&ctxt->wait);
 }
+
+int bch2_move_data(struct bch_fs *c,
+		   struct bch_ratelimit *rate,
+		   unsigned sectors_in_flight,
+		   struct bch_devs_mask *devs,
+		   struct write_point_specifier wp,
+		   int btree_insert_flags,
+		   int move_device,
+		   move_pred_fn pred, void *arg,
+		   u64 *keys_moved,
+		   u64 *sectors_moved)
+{
+	bool kthread = (current->flags & PF_KTHREAD) != 0;
+	struct moving_context ctxt;
+	struct btree_iter iter;
+	BKEY_PADDED(k) tmp;
+	struct bkey_s_c k;
+	int ret = 0;
+
+	bch2_move_ctxt_init(&ctxt);
+	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
+			     BTREE_ITER_PREFETCH);
+
+	if (rate)
+		bch2_ratelimit_reset(rate);
+
+	while (!kthread || !(ret = kthread_should_stop())) {
+		if (atomic_read(&ctxt.sectors_in_flight) >= sectors_in_flight) {
+			bch2_btree_iter_unlock(&iter);
+			move_ctxt_wait_event(&ctxt,
+				atomic_read(&ctxt.sectors_in_flight) <
+				sectors_in_flight);
+		}
+
+		if (rate &&
+		    bch2_ratelimit_delay(rate) &&
+		    (bch2_btree_iter_unlock(&iter),
+		     (ret = bch2_ratelimit_wait_freezable_stoppable(rate))))
+			break;
+
+		k = bch2_btree_iter_peek(&iter);
+		if (!k.k)
+			break;
+		ret = btree_iter_err(k);
+		if (ret)
+			break;
+
+		if (!bkey_extent_is_data(k.k) ||
+		    !pred(arg, bkey_s_c_to_extent(k)))
+			goto next;
+
+		/* unlock before doing IO: */
+		bkey_reassemble(&tmp.k, k);
+		k = bkey_i_to_s_c(&tmp.k);
+		bch2_btree_iter_unlock(&iter);
+
+		if (bch2_move_extent(c, &ctxt, devs, wp,
+				     btree_insert_flags,
+				     move_device, k)) {
+			/* memory allocation failure, wait for some IO to finish */
+			bch2_move_ctxt_wait_for_io(&ctxt);
+			continue;
+		}
+
+		if (rate)
+			bch2_ratelimit_increment(rate, k.k->size);
+next:
+		bch2_btree_iter_advance_pos(&iter);
+		bch2_btree_iter_cond_resched(&iter);
+	}
+
+	bch2_btree_iter_unlock(&iter);
+	bch2_move_ctxt_exit(&ctxt);
+
+	trace_move_data(c, ctxt.sectors_moved, ctxt.keys_moved);
+
+	*keys_moved = ctxt.keys_moved;
+	*sectors_moved = ctxt.sectors_moved;
+
+	return ret;
+}
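
Example (not part of the patch): a minimal sketch of how a caller might drive the new bch2_move_data() entry point, based only on the signature added above and on the pred(arg, bkey_s_c_to_extent(k)) call site in this diff. The names example_migrate_pred and example_evacuate_device, the sectors-in-flight figure, the NULL device mask, and the assumption that move_pred_fn returns bool are all illustrative guesses, not code from this commit; the real callers are bcachefs's migrate and copygc paths.

/* Hypothetical caller sketch -- not part of this diff. */
static bool example_migrate_pred(void *arg, struct bkey_s_c_extent e)
{
	struct bch_dev *ca = arg;

	/* move only extents that still point at the device being evacuated */
	return bch2_extent_has_device(e, ca->dev_idx) != NULL;
}

static int example_evacuate_device(struct bch_fs *c, struct bch_dev *ca,
				   struct write_point_specifier wp)
{
	u64 keys_moved, sectors_moved;

	return bch2_move_data(c,
			      NULL,		/* no rate limit */
			      1U << 11,		/* sectors-in-flight cap (arbitrary) */
			      NULL,		/* no explicit target device mask */
			      wp,
			      0,		/* no extra btree_insert_flags */
			      ca->dev_idx,	/* drop this device's pointer on rewrite */
			      example_migrate_pred, ca,
			      &keys_moved, &sectors_moved);
}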