author    Kent Overstreet <kent.overstreet@gmail.com>    2016-09-03 21:31:05 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>    2016-09-08 19:07:20 -0800
commit    6c4b58995eccba3dba5843ed13a1e284d8be2625 (patch)
tree      592d52558616df0ff32cf24a06b1d5ab4ce63db0
parent    a35f8e611b7f3bdf3173751de98e122db76fb693 (diff)
bcache: convert promote to new migrate path
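
The promote path used to carry its own bch_replace_info and
bch_write_op and wire them up by hand with bch_replace_init() and
bch_write_op_init(). Convert it to the migrate machinery instead:
the new struct migrate_write bundles the key, an optional pointer to
drop, the write op and the write bio, and bch_migrate_write_init()
does all the setup, including installing bch_migrate_index_update()
as the op's index update function.

Condensed from the promote path below (error handling and the
uncompressed/partial-read cases omitted), the new calling convention
looks roughly like this:

	struct migrate_write *m = &promote_op->write;

	/* NULL move_ptr: a promote copies, it doesn't move */
	bch_migrate_write_init(c, m,
			       &c->promote_write_point,
			       k, NULL,
			       BCH_WRITE_ALLOC_NOWAIT);

	/* compressed extents are promoted as-is: */
	if (rbio->crc.compression_type) {
		m->op.flags |= BCH_WRITE_DATA_COMPRESSED;
		m->op.crc = rbio->crc;
	}

	closure_call(&m->op.cl, bch_write, c->wq, cl);

This lets us delete bch_replace_init(), whose only caller was the
promote path, and struct moving_io now embeds a migrate_write instead
of open coding the same fields.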
-rw-r--r--  drivers/md/bcache/io.c     45
-rw-r--r--  drivers/md/bcache/io.h      2
-rw-r--r--  drivers/md/bcache/move.c  117
-rw-r--r--  drivers/md/bcache/move.h   21
4 files changed, 98 insertions(+), 87 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 250de2586a90..40f51b7340bd 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -18,6 +18,7 @@
#include "io.h"
#include "journal.h"
#include "keylist.h"
+#include "move.h"
#include "notify.h"
#include "stats.h"
#include "super.h"
@@ -1268,13 +1269,6 @@ void bch_write_op_init(struct bch_write_op *op, struct cache_set *c,
op->insert_key.k.version = bch_rand_range(UINT_MAX);
}
-void bch_replace_init(struct bch_replace_info *r, struct bkey_s_c old)
-{
- memset(r, 0, sizeof(*r));
- r->hook.fn = bch_extent_cmpxchg;
- bkey_reassemble(&r->key, old);
-}
-
/* Discard */
/* bch_discard - discard a range of keys from start_key to end_key.
@@ -1306,9 +1300,8 @@ int bch_discard(struct cache_set *c, struct bpos start,
struct cache_promote_op {
struct closure cl;
- struct bch_replace_info replace;
- struct bch_write_op iop;
- struct bch_write_bio bio; /* must be last */
+ struct migrate_write write;
+ struct bio_vec bi_inline_vecs[0]; /* must be last */
};
/* Read */
@@ -1427,7 +1420,7 @@ static void cache_promote_done(struct closure *cl)
struct cache_promote_op *op =
container_of(cl, struct cache_promote_op, cl);
- bch_bio_free_pages_pool(op->iop.c, &op->bio.bio.bio);
+ bch_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio.bio);
kfree(op);
}
@@ -1451,13 +1444,13 @@ static void __bch_read_endio(struct cache_set *c, struct bch_read_bio *rbio)
BUG_ON(!rbio->split || !rbio->bounce);
/* we now own pages: */
- swap(promote->bio.bio.bio.bi_vcnt, rbio->bio.bi_vcnt);
+ swap(promote->write.wbio.bio.bio.bi_vcnt, rbio->bio.bi_vcnt);
rbio->promote = NULL;
bch_rbio_done(c, rbio);
closure_init(cl, &c->cl);
- closure_call(&promote->iop.cl, bch_write, c->wq, cl);
+ closure_call(&promote->write.op.cl, bch_write, c->wq, cl);
closure_return_with_destructor(cl, cache_promote_done);
} else {
bch_rbio_done(c, rbio);
@@ -1539,7 +1532,7 @@ void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
promote_op = kmalloc(sizeof(*promote_op) +
sizeof(struct bio_vec) * pages, GFP_NOIO);
if (promote_op) {
- struct bio *promote_bio = &promote_op->bio.bio.bio;
+ struct bio *promote_bio = &promote_op->write.wbio.bio.bio;
bio_init(promote_bio);
promote_bio->bi_max_vecs = pages;
@@ -1626,24 +1619,20 @@ void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
rbio->bio.bi_end_io = bch_read_endio;
if (promote_op) {
- struct bio *promote_bio = &promote_op->bio.bio.bio;
+ struct bio *promote_bio = &promote_op->write.wbio.bio.bio;
promote_bio->bi_iter = rbio->bio.bi_iter;
memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec,
sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
- bch_replace_init(&promote_op->replace, k);
- bch_write_op_init(&promote_op->iop, c,
- &promote_op->bio,
- (struct disk_reservation) { 0 },
- &c->promote_write_point, k,
- &promote_op->replace.hook, NULL,
- BCH_WRITE_ALLOC_NOWAIT);
- promote_op->iop.nr_replicas = 1;
+ bch_migrate_write_init(c, &promote_op->write,
+ &c->promote_write_point,
+ k, NULL,
+ BCH_WRITE_ALLOC_NOWAIT);
if (rbio->crc.compression_type) {
- promote_op->iop.flags |= BCH_WRITE_DATA_COMPRESSED;
- promote_op->iop.crc = rbio->crc;
+ promote_op->write.op.flags |= BCH_WRITE_DATA_COMPRESSED;
+ promote_op->write.op.crc = rbio->crc;
} else if (read_full) {
/*
* Adjust bio to correspond to _live_ portion of @k -
@@ -1658,13 +1647,13 @@ void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
* actually reading:
*/
bch_cut_front(POS(k.k->p.inode, iter.bi_sector),
- &promote_op->iop.insert_key);
- bch_key_resize(&promote_op->iop.insert_key.k,
+ &promote_op->write.op.insert_key);
+ bch_key_resize(&promote_op->write.op.insert_key.k,
bvec_iter_sectors(iter));
}
promote_bio->bi_iter.bi_sector =
- bkey_start_offset(&promote_op->iop.insert_key.k);
+ bkey_start_offset(&promote_op->write.op.insert_key.k);
}
/* _after_ promote stuff has looked at rbio->crc.offset */
diff --git a/drivers/md/bcache/io.h b/drivers/md/bcache/io.h
index 50172477fcb8..c6c4089003c7 100644
--- a/drivers/md/bcache/io.h
+++ b/drivers/md/bcache/io.h
@@ -46,8 +46,6 @@ void bch_write_op_init(struct bch_write_op *, struct cache_set *,
struct extent_insert_hook *, u64 *, unsigned);
void bch_write(struct closure *);
-void bch_replace_init(struct bch_replace_info *, struct bkey_s_c);
-
struct cache_promote_op;
struct extent_pick_ptr;
diff --git a/drivers/md/bcache/move.c b/drivers/md/bcache/move.c
index 1099cc395e05..bb42e8dca9dd 100644
--- a/drivers/md/bcache/move.c
+++ b/drivers/md/bcache/move.c
@@ -32,17 +32,17 @@ static struct bch_extent_ptr *bkey_find_ptr(struct cache_set *c,
return NULL;
}
-static struct bch_extent_ptr *bch_migrate_matching_ptr(struct moving_io *io,
+static struct bch_extent_ptr *bch_migrate_matching_ptr(struct migrate_write *m,
struct bkey_s_extent e)
{
const struct bch_extent_ptr *ptr;
struct bch_extent_ptr *ret;
- if (io->move)
- ret = bkey_find_ptr(io->op.c, e, io->move_ptr);
+ if (m->move)
+ ret = bkey_find_ptr(m->op.c, e, m->move_ptr);
else
- extent_for_each_ptr(bkey_i_to_s_c_extent(&io->key), ptr)
- if ((ret = bkey_find_ptr(io->op.c, e, *ptr)))
+ extent_for_each_ptr(bkey_i_to_s_c_extent(&m->key), ptr)
+ if ((ret = bkey_find_ptr(m->op.c, e, *ptr)))
break;
return ret;
@@ -51,7 +51,8 @@ static struct bch_extent_ptr *bch_migrate_matching_ptr(struct moving_io *io,
static int bch_migrate_index_update(struct bch_write_op *op)
{
struct cache_set *c = op->c;
- struct moving_io *io = container_of(op, struct moving_io, op);
+ struct migrate_write *m =
+ container_of(op, struct migrate_write, op);
struct keylist *keys = &op->insert_keys;
struct btree_iter iter;
int ret = 0;
@@ -79,9 +80,9 @@ static int bch_migrate_index_update(struct bch_write_op *op)
bch_cut_back(insert->k.p, &new.k.k);
e = bkey_i_to_s_extent(&new.k);
- ptr = bch_migrate_matching_ptr(io, e);
+ ptr = bch_migrate_matching_ptr(m, e);
if (ptr) {
- if (io->move)
+ if (m->move)
__bch_extent_drop_ptr(e, ptr);
memcpy(extent_entry_last(e),
@@ -117,6 +118,34 @@ out:
return ret;
}
+void bch_migrate_write_init(struct cache_set *c,
+ struct migrate_write *m,
+ struct write_point *wp,
+ struct bkey_s_c k,
+ const struct bch_extent_ptr *move_ptr,
+ unsigned flags)
+{
+ bkey_reassemble(&m->key, k);
+
+ m->move = move_ptr != NULL;
+ if (move_ptr)
+ m->move_ptr = *move_ptr;
+
+ if (bkey_extent_is_cached(k.k))
+ flags |= BCH_WRITE_CACHED;
+
+ bch_write_op_init(&m->op, c, &m->wbio,
+ (struct disk_reservation) { 0 },
+ wp,
+ bkey_to_s_c(&KEY(k.k->p.inode,
+ k.k->p.offset,
+ k.k->size)),
+ NULL, NULL, flags);
+
+ m->op.nr_replicas = 1;
+ m->op.index_update_fn = bch_migrate_index_update;
+}
+
static void moving_error(struct moving_context *ctxt, unsigned flag)
{
atomic_inc(&ctxt->error_count);
@@ -180,15 +209,15 @@ static void bch_queue_write(struct moving_queue *q)
queue_work(q->wq, &q->work);
}
-static void moving_init(struct moving_io *io, struct bio *bio)
+static void migrate_bio_init(struct moving_io *io, struct bio *bio,
+ unsigned sectors)
{
bio_init(bio);
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_iter.bi_size = io->key.k.size << 9;
- bio->bi_max_vecs = DIV_ROUND_UP(io->key.k.size,
- PAGE_SECTORS);
+ bio->bi_iter.bi_size = sectors << 9;
+ bio->bi_max_vecs = DIV_ROUND_UP(sectors, PAGE_SECTORS);
bio->bi_private = &io->cl;
bio->bi_io_vec = io->bi_inline_vecs;
bch_bio_map(bio, NULL);
@@ -208,41 +237,27 @@ struct moving_io *moving_io_alloc(struct cache_set *c,
if (!io)
return NULL;
- bkey_reassemble(&io->key, k);
-
- moving_init(io, &io->rbio.bio);
+ migrate_bio_init(io, &io->rbio.bio, k.k->size);
if (bio_alloc_pages(&io->rbio.bio, GFP_KERNEL)) {
kfree(io);
return NULL;
}
- bch_write_op_init(&io->op, c, &io->wbio,
- (struct disk_reservation) { 0 },
- wp,
- bkey_to_s_c(&KEY(k.k->p.inode,
- k.k->p.offset,
- k.k->size)),
- NULL, NULL,
- bkey_extent_is_cached(k.k)
- ? BCH_WRITE_CACHED : 0);
-
- io->op.io_wq = q->wq;
- io->op.nr_replicas = 1;
- io->op.index_update_fn = bch_migrate_index_update;
-
- if (move_ptr) {
- io->move_ptr = *move_ptr;
- io->move = true;
- io->sort_key = move_ptr->offset;
- }
+ migrate_bio_init(io, &io->write.wbio.bio.bio, k.k->size);
+ io->write.wbio.bio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
+
+ bch_migrate_write_init(c, &io->write, wp, k, move_ptr, 0);
+
+ if (move_ptr)
+ io->sort_key = move_ptr->offset;
return io;
}
void moving_io_free(struct moving_io *io)
{
- bch_bio_free_pages(&io->wbio.bio.bio);
+ bch_bio_free_pages(&io->write.wbio.bio.bio);
kfree(io);
}
@@ -270,7 +285,7 @@ static void moving_io_destructor(struct closure *cl)
if (io->write_issued) {
BUG_ON(!q->write_count);
q->write_count--;
- trace_bcache_move_write_done(q, &io->key.k);
+ trace_bcache_move_write_done(q, &io->write.key.k);
}
list_del_init(&io->list);
@@ -301,7 +316,7 @@ static void moving_io_after_write(struct closure *cl)
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_context *ctxt = io->context;
- if (io->op.error)
+ if (io->write.op.error)
moving_error(ctxt, MOVING_FLAG_WRITE);
moving_io_destructor(cl);
@@ -311,7 +326,7 @@ static void write_moving(struct moving_io *io)
{
bool stopped;
unsigned long flags;
- struct bch_write_op *op = &io->op;
+ struct bch_write_op *op = &io->write.op;
spin_lock_irqsave(&io->q->lock, flags);
BUG_ON(io->q->count == 0);
@@ -327,10 +342,6 @@ static void write_moving(struct moving_io *io)
if (op->error || stopped)
closure_return_with_destructor(&io->cl, moving_io_destructor);
else {
- moving_init(io, &io->wbio.bio.bio);
-
- op->bio->bio.bio.bi_iter.bi_sector = bkey_start_offset(&io->key.k);
-
closure_call(&op->cl, bch_write, NULL, &io->cl);
closure_return_with_destructor(&io->cl, moving_io_after_write);
}
@@ -370,7 +381,7 @@ static void bch_queue_write_work(struct work_struct *work)
io->write_issued = 1;
list_del(&io->list);
list_add_tail(&io->list, &q->write_pending);
- trace_bcache_move_write(q, &io->key.k);
+ trace_bcache_move_write(q, &io->write.key.k);
spin_unlock_irqrestore(&q->lock, flags);
write_moving(io);
spin_lock_irqsave(&q->lock, flags);
@@ -518,7 +529,8 @@ static void pending_recalc_oldest_gens(struct cache_set *c, struct list_head *l)
* don't need to be marked because they are pointing
* to open buckets until the write completes
*/
- bch_btree_key_recalc_oldest_gen(c, bkey_i_to_s_c(&io->key));
+ bch_btree_key_recalc_oldest_gen(c,
+ bkey_i_to_s_c(&io->write.key));
}
}
@@ -549,7 +561,7 @@ static void read_moving_endio(struct bio *bio)
unsigned long flags;
if (bio->bi_error) {
- io->op.error = bio->bi_error;
+ io->write.op.error = bio->bi_error;
moving_error(io->context, MOVING_FLAG_READ);
}
@@ -557,7 +569,7 @@ static void read_moving_endio(struct bio *bio)
spin_lock_irqsave(&q->lock, flags);
- trace_bcache_move_read_done(q, &io->key.k);
+ trace_bcache_move_read_done(q, &io->write.key.k);
BUG_ON(!io->read_issued);
BUG_ON(io->read_completed);
@@ -582,10 +594,11 @@ static void read_moving_endio(struct bio *bio)
static void __bch_data_move(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct cache_set *c = io->write.op.c;
struct extent_pick_ptr pick;
- u64 size = io->key.k.size;
+ u64 size = io->write.key.k.size;
- bch_extent_pick_ptr_avoiding(io->op.c, bkey_i_to_s_c(&io->key),
+ bch_extent_pick_ptr_avoiding(c, bkey_i_to_s_c(&io->write.key),
io->context->avoid, &pick);
if (IS_ERR_OR_NULL(pick.ca))
closure_return_with_destructor(cl, moving_io_destructor);
@@ -596,11 +609,11 @@ static void __bch_data_move(struct closure *cl)
bch_ratelimit_increment(io->context->rate, size);
io->rbio.bio.bi_rw = READ;
- io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(&io->key.k);
+ io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(&io->write.key.k);
io->rbio.bio.bi_end_io = read_moving_endio;
- bch_read_extent(io->op.c, &io->rbio,
- bkey_i_to_s_c(&io->key),
+ bch_read_extent(c, &io->rbio,
+ bkey_i_to_s_c(&io->write.key),
&pick, BCH_READ_IS_LAST);
}
@@ -662,7 +675,7 @@ void bch_data_move(struct moving_queue *q,
q->count++;
list_add_tail(&io->list, &q->pending);
- trace_bcache_move_read(q, &io->key.k);
+ trace_bcache_move_read(q, &io->write.key.k);
if (q->rotational)
BUG_ON(RB_INSERT(&q->tree, io, node, moving_io_cmp));
diff --git a/drivers/md/bcache/move.h b/drivers/md/bcache/move.h
index bdf5aaec8a50..ab7829d1cfd1 100644
--- a/drivers/md/bcache/move.h
+++ b/drivers/md/bcache/move.h
@@ -61,16 +61,28 @@ static inline int bch_moving_context_wait(struct moving_context *ctxt)
void bch_moving_wait(struct moving_context *);
+struct migrate_write {
+ BKEY_PADDED(key);
+ bool move;
+ struct bch_extent_ptr move_ptr;
+ struct bch_write_op op;
+ struct bch_write_bio wbio;
+};
+
+void bch_migrate_write_init(struct cache_set *,
+ struct migrate_write *,
+ struct write_point *,
+ struct bkey_s_c,
+ const struct bch_extent_ptr *,
+ unsigned);
+
struct moving_io {
struct list_head list;
struct rb_node node;
struct closure cl;
struct moving_queue *q;
struct moving_context *context;
- struct bch_write_op op;
- struct bch_extent_ptr move_ptr;
- bool move;
- BKEY_PADDED(key);
+ struct migrate_write write;
/* Sort key for moving_queue->tree */
u64 sort_key;
/* Protected by q->lock */
@@ -95,7 +107,6 @@ struct moving_io {
unsigned write_issued:1;
struct bch_read_bio rbio;
- struct bch_write_bio wbio;
/* Must be last since it is variable size */
struct bio_vec bi_inline_vecs[0];
};