author    Kent Overstreet <kent.overstreet@gmail.com>    2017-03-19 15:56:34 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>    2017-03-19 17:31:47 -0800
commit    5ec39af8eaba49aee7bafa44c661da39e2f40dc3 (patch)
tree      1fb1a981602cbf22c7d2b2dba1168c715d7cecb5 /libbcache/move.c
parent    bb1941de5378a7b8122d3575dcbc7d0aeb6326f0 (diff)
Rename from bcache-tools to bcachefs-tools
Diffstat (limited to 'libbcache/move.c')
-rw-r--r--  libbcache/move.c  392
1 file changed, 0 insertions(+), 392 deletions(-)
diff --git a/libbcache/move.c b/libbcache/move.c
deleted file mode 100644
index edee726c..00000000
--- a/libbcache/move.c
+++ /dev/null
@@ -1,392 +0,0 @@
-
-#include "bcache.h"
-#include "btree_gc.h"
-#include "btree_update.h"
-#include "buckets.h"
-#include "io.h"
-#include "move.h"
-#include "super-io.h"
-#include "keylist.h"
-
-#include <linux/ioprio.h>
-
-#include <trace/events/bcache.h>
-
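-/*
- * Find the pointer in @e that matches @ptr by device, generation and
- * bucket (offsets are compared at bucket granularity); returns NULL if
- * the extent no longer has such a pointer.
- */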
-static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c,
- struct bkey_s_extent e,
- struct bch_extent_ptr ptr)
-{
- struct bch_extent_ptr *ptr2;
- unsigned bucket_bits = c->devs[ptr.dev]->bucket_bits;
-
- extent_for_each_ptr(e, ptr2)
- if (ptr2->dev == ptr.dev &&
- ptr2->gen == ptr.gen &&
- (ptr2->offset >> bucket_bits) ==
- (ptr.offset >> bucket_bits))
- return ptr2;
-
- return NULL;
-}
-
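-/*
- * For a move, look for the specific pointer being migrated; for other
- * migrate writes, any pointer of the original key counts as a match.
- */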
-static struct bch_extent_ptr *bch_migrate_matching_ptr(struct migrate_write *m,
- struct bkey_s_extent e)
-{
- const struct bch_extent_ptr *ptr;
- struct bch_extent_ptr *ret = NULL;
-
- if (m->move)
- ret = bkey_find_ptr(m->op.c, e, m->move_ptr);
- else
- extent_for_each_ptr(bkey_i_to_s_c_extent(&m->key), ptr)
- if ((ret = bkey_find_ptr(m->op.c, e, *ptr)))
- break;
-
- return ret;
-}
-
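-/*
- * Index update hook for migrate writes: walk the extents btree across the
- * range covered by the newly written keys, and for each existing extent
- * that still has a matching pointer, append the new pointers (dropping the
- * old pointer on a move) and insert the result. Extents that were modified
- * since the read no longer match and are skipped.
- */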
-static int bch_migrate_index_update(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct migrate_write *m =
- container_of(op, struct migrate_write, op);
- struct keylist *keys = &op->insert_keys;
- struct btree_iter iter;
- int ret = 0;
-
- bch_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS,
- bkey_start_pos(&bch_keylist_front(keys)->k));
-
- while (1) {
- struct bkey_s_extent insert =
- bkey_i_to_s_extent(bch_keylist_front(keys));
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter);
- struct bch_extent_ptr *ptr;
- struct bkey_s_extent e;
- BKEY_PADDED(k) new;
-
- if (!k.k) {
- ret = bch_btree_iter_unlock(&iter);
- break;
- }
-
- if (!bkey_extent_is_data(k.k))
- goto nomatch;
-
- bkey_reassemble(&new.k, k);
- bch_cut_front(iter.pos, &new.k);
- bch_cut_back(insert.k->p, &new.k.k);
- e = bkey_i_to_s_extent(&new.k);
-
- /* hack - promotes can race: */
- if (m->promote)
- extent_for_each_ptr(insert, ptr)
- if (bch_extent_has_device(e.c, ptr->dev))
- goto nomatch;
-
- ptr = bch_migrate_matching_ptr(m, e);
- if (ptr) {
- int nr_new_dirty = bch_extent_nr_dirty_ptrs(insert.s_c);
- unsigned insert_flags =
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOFAIL;
-
- /* copygc uses btree node reserve: */
- if (m->move)
- insert_flags |= BTREE_INSERT_USE_RESERVE;
-
- if (m->move) {
- nr_new_dirty -= !ptr->cached;
- __bch_extent_drop_ptr(e, ptr);
- }
-
- BUG_ON(nr_new_dirty < 0);
-
- memcpy_u64s(extent_entry_last(e),
- insert.v,
- bkey_val_u64s(insert.k));
- e.k->u64s += bkey_val_u64s(insert.k);
-
- bch_extent_narrow_crcs(e);
- bch_extent_drop_redundant_crcs(e);
- bch_extent_normalize(c, e.s);
- bch_extent_mark_replicas_cached(c, e, nr_new_dirty);
-
- ret = bch_btree_insert_at(c, &op->res,
- NULL, op_journal_seq(op),
- insert_flags,
- BTREE_INSERT_ENTRY(&iter, &new.k));
- if (ret && ret != -EINTR)
- break;
- } else {
-nomatch:
- bch_btree_iter_advance_pos(&iter);
- }
-
- while (bkey_cmp(iter.pos, bch_keylist_front(keys)->k.p) >= 0) {
- bch_keylist_pop_front(keys);
- if (bch_keylist_empty(keys))
- goto out;
- }
-
- bch_cut_front(iter.pos, bch_keylist_front(keys));
- }
-out:
- bch_btree_iter_unlock(&iter);
- return ret;
-}
-
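-/*
- * Initialize a migrate write: stash the original key, note whether this is
- * a move of a particular pointer, and set up the write op so completed
- * writes go through bch_migrate_index_update() instead of a plain insert.
- */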
-void bch_migrate_write_init(struct bch_fs *c,
- struct migrate_write *m,
- struct write_point *wp,
- struct bkey_s_c k,
- const struct bch_extent_ptr *move_ptr,
- unsigned flags)
-{
- bkey_reassemble(&m->key, k);
-
- m->promote = false;
- m->move = move_ptr != NULL;
- if (move_ptr)
- m->move_ptr = *move_ptr;
-
- if (bkey_extent_is_cached(k.k) ||
- (move_ptr && move_ptr->cached))
- flags |= BCH_WRITE_CACHED;
-
- bch_write_op_init(&m->op, c, &m->wbio,
- (struct disk_reservation) { 0 },
- wp,
- bkey_start_pos(k.k),
- NULL, flags);
-
- if (m->move)
- m->op.alloc_reserve = RESERVE_MOVINGGC;
-
- m->op.nonce = extent_current_nonce(bkey_s_c_to_extent(k));
- m->op.nr_replicas = 1;
- m->op.index_update_fn = bch_migrate_index_update;
-}
-
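-/* Shared setup for the read and write bios of a move, at idle priority: */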
-static void migrate_bio_init(struct moving_io *io, struct bio *bio,
- unsigned sectors)
-{
- bio_init(bio);
- bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
- bio->bi_iter.bi_size = sectors << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(sectors, PAGE_SECTORS);
- bio->bi_private = &io->cl;
- bio->bi_io_vec = io->bi_inline_vecs;
- bch_bio_map(bio, NULL);
-}
-
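-/*
- * Final cleanup for a moving_io: drop its sectors from the in-flight
- * count, wake up anyone throttled on that count, and free the bounce
- * pages and the io itself.
- */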
-static void moving_io_destructor(struct closure *cl)
-{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
- struct moving_context *ctxt = io->ctxt;
- struct bio_vec *bv;
- int i;
-
- //if (io->replace.failures)
- // trace_bcache_copy_collision(q, &io->key.k);
-
- atomic_sub(io->write.key.k.size, &ctxt->sectors_in_flight);
- wake_up(&ctxt->wait);
-
- bio_for_each_segment_all(bv, &io->write.wbio.bio, i)
- if (bv->bv_page)
- __free_page(bv->bv_page);
-
- kfree(io);
-}
-
-static void moving_error(struct moving_context *ctxt, unsigned flag)
-{
- atomic_inc(&ctxt->error_count);
- //atomic_or(flag, &ctxt->error_flags);
-}
-
-static void moving_io_after_write(struct closure *cl)
-{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
- struct moving_context *ctxt = io->ctxt;
-
- if (io->write.op.error)
- moving_error(ctxt, MOVING_FLAG_WRITE);
-
- moving_io_destructor(cl);
-}
-
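-/*
- * Issue the write half of a move; if the read failed, skip straight to
- * cleanup.
- */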
-static void write_moving(struct moving_io *io)
-{
- struct bch_write_op *op = &io->write.op;
-
- if (op->error) {
- closure_return_with_destructor(&io->cl, moving_io_destructor);
- } else {
- closure_call(&op->cl, bch_write, NULL, &io->cl);
- closure_return_with_destructor(&io->cl, moving_io_after_write);
- }
-}
-
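-/*
- * Writes are issued in the order the reads were started: return the oldest
- * read iff its data has arrived.
- */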
-static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
-{
- struct moving_io *io =
- list_first_entry_or_null(&ctxt->reads, struct moving_io, list);
-
- return io && io->read_completed ? io : NULL;
-}
-
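-/*
- * Read completion: record any error, mark the io ready for its write, and
- * wake the mover if the oldest outstanding read has now completed.
- */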
-static void read_moving_endio(struct bio *bio)
-{
- struct closure *cl = bio->bi_private;
- struct moving_io *io = container_of(cl, struct moving_io, cl);
- struct moving_context *ctxt = io->ctxt;
-
- trace_bcache_move_read_done(&io->write.key.k);
-
- if (bio->bi_error) {
- io->write.op.error = bio->bi_error;
- moving_error(io->ctxt, MOVING_FLAG_READ);
- }
-
- io->read_completed = true;
- if (next_pending_write(ctxt))
- wake_up(&ctxt->wait);
-
- closure_put(&ctxt->cl);
-}
-
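-/*
- * Pick a pointer to read from (avoiding ctxt->avoid, e.g. a device being
- * evacuated) and issue the read; if no usable pointer remains, abandon
- * the move.
- */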
-static void __bch_data_move(struct closure *cl)
-{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
- struct bch_fs *c = io->write.op.c;
- struct extent_pick_ptr pick;
-
- bch_extent_pick_ptr_avoiding(c, bkey_i_to_s_c(&io->write.key),
- io->ctxt->avoid, &pick);
- if (IS_ERR_OR_NULL(pick.ca))
- closure_return_with_destructor(cl, moving_io_destructor);
-
- bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
- io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(&io->write.key.k);
- io->rbio.bio.bi_end_io = read_moving_endio;
-
- /*
- * dropped by read_moving_endio() - guards against use after free of
- * ctxt when doing wakeup
- */
- closure_get(&io->ctxt->cl);
-
- bch_read_extent(c, &io->rbio,
- bkey_i_to_s_c(&io->write.key),
- &pick, BCH_READ_IS_LAST);
-}
-
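-/*
- * Queue a move of extent @k: allocate a moving_io with enough bio vecs to
- * bounce the extent, account it against the context's in-flight limit,
- * and kick off the read.
- */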
-int bch_data_move(struct bch_fs *c,
- struct moving_context *ctxt,
- struct write_point *wp,
- struct bkey_s_c k,
- const struct bch_extent_ptr *move_ptr)
-{
- struct moving_io *io;
-
- io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) *
- DIV_ROUND_UP(k.k->size, PAGE_SECTORS),
- GFP_KERNEL);
- if (!io)
- return -ENOMEM;
-
- io->ctxt = ctxt;
-
- migrate_bio_init(io, &io->rbio.bio, k.k->size);
-
- if (bio_alloc_pages(&io->rbio.bio, GFP_KERNEL)) {
- kfree(io);
- return -ENOMEM;
- }
-
- migrate_bio_init(io, &io->write.wbio.bio, k.k->size);
- bio_get(&io->write.wbio.bio);
- io->write.wbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
-
- bch_migrate_write_init(c, &io->write, wp, k, move_ptr, 0);
-
- trace_bcache_move_read(&io->write.key.k);
-
- ctxt->keys_moved++;
- ctxt->sectors_moved += k.k->size;
- if (ctxt->rate)
- bch_ratelimit_increment(ctxt->rate, k.k->size);
-
- atomic_add(k.k->size, &ctxt->sectors_in_flight);
- list_add_tail(&io->list, &ctxt->reads);
-
- closure_call(&io->cl, __bch_data_move, NULL, &ctxt->cl);
- return 0;
-}
-
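-/*
- * Issue writes for completed reads, oldest first, stopping at the first
- * read still in flight:
- */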
-static void do_pending_writes(struct moving_context *ctxt)
-{
- struct moving_io *io;
-
- while ((io = next_pending_write(ctxt))) {
- list_del(&io->list);
- trace_bcache_move_write(&io->write.key.k);
- write_moving(io);
- }
-}
-
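-/*
- * Flush any completed reads, then wait until either the condition is true
- * or another read completes (and so has a write that needs issuing):
- */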
-#define move_ctxt_wait_event(_ctxt, _cond) \
-do { \
- do_pending_writes(_ctxt); \
- \
- if (_cond) \
- break; \
- __wait_event((_ctxt)->wait, \
- next_pending_write(_ctxt) || (_cond)); \
-} while (1)
-
-int bch_move_ctxt_wait(struct moving_context *ctxt)
-{
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->sectors_in_flight) <
- ctxt->max_sectors_in_flight);
-
- return ctxt->rate
- ? bch_ratelimit_wait_freezable_stoppable(ctxt->rate)
- : 0;
-}
-
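-/* Wait for at least one in-flight io to complete (or for none to remain): */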
-void bch_move_ctxt_wait_for_io(struct moving_context *ctxt)
-{
- unsigned sectors_pending = atomic_read(&ctxt->sectors_in_flight);
-
- move_ctxt_wait_event(ctxt,
- !atomic_read(&ctxt->sectors_in_flight) ||
- atomic_read(&ctxt->sectors_in_flight) != sectors_pending);
-}
-
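-/* Flush all in-flight io and wait for it to complete before returning: */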
-void bch_move_ctxt_exit(struct moving_context *ctxt)
-{
- move_ctxt_wait_event(ctxt, !atomic_read(&ctxt->sectors_in_flight));
- closure_sync(&ctxt->cl);
-
- EBUG_ON(!list_empty(&ctxt->reads));
- EBUG_ON(atomic_read(&ctxt->sectors_in_flight));
-}
-
-void bch_move_ctxt_init(struct moving_context *ctxt,
- struct bch_ratelimit *rate,
- unsigned max_sectors_in_flight)
-{
- memset(ctxt, 0, sizeof(*ctxt));
- closure_init_stack(&ctxt->cl);
-
- ctxt->rate = rate;
- ctxt->max_sectors_in_flight = max_sectors_in_flight;
-
- INIT_LIST_HEAD(&ctxt->reads);
- init_waitqueue_head(&ctxt->wait);
-}