author	Stephen Rothwell <sfr@canb.auug.org.au>	2018-11-21 12:00:18 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2018-11-21 12:00:18 +1100
commit	f5c523f53caa103f199ba3f868f8dd7adea93153 (patch)
tree	d4cb63ec664c947088321c12140a22e9d2345b1e /drivers
parent	144e8fd8da1b507255deb6709910c14d3d3d421e (diff)
parent	c49f3571b597abd9d041fa6a0611f3869a070d0a (diff)
Merge remote-tracking branch 'device-mapper/for-next'
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-core.h	1
-rw-r--r--	drivers/md/dm-mpath.c	6
-rw-r--r--	drivers/md/dm-rq.c	23
-rw-r--r--	drivers/md/dm.c	63
4 files changed, 35 insertions(+), 58 deletions(-)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 224d44503a06..0a93f2c83d8a 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -65,7 +65,6 @@ struct mapped_device {
*/
struct work_struct work;
wait_queue_head_t wait;
- atomic_t pending[2];
spinlock_t deferred_lock;
struct bio_list deferred;
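Note: this drops dm's private in-flight counters; the rest of the series switches to the accounting the block layer already keeps. A rough before/after sketch, reconstructed from the dm.c hunks further down (illustrative, not the literal upstream code):

        /* before: dm maintained its own counter per data direction */
        static int md_in_flight_old(struct mapped_device *md)
        {
                return atomic_read(&md->pending[READ]) +
                       atomic_read(&md->pending[WRITE]);
        }

        /* after: read the gendisk's part0 counters, which the generic
         * I/O accounting helpers maintain, so dm keeps no parallel count */
        static int md_in_flight_new(struct mapped_device *md)
        {
                return atomic_read(&dm_disk(md)->part0.in_flight[READ]) +
                       atomic_read(&dm_disk(md)->part0.in_flight[WRITE]);
        }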
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d6a66921daf4..2ee5e357a0a7 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1211,14 +1211,16 @@ static void flush_multipath_work(struct multipath *m)
set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
smp_mb__after_atomic();
- flush_workqueue(kmpath_handlerd);
+ if (atomic_read(&m->pg_init_in_progress))
+ flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
smp_mb__after_atomic();
}
- flush_workqueue(kmultipathd);
+ if (m->queue_mode == DM_TYPE_BIO_BASED)
+ flush_work(&m->process_queued_bios);
flush_work(&m->trigger_event);
}
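Note: the multipath change avoids two heavyweight flushes: kmpath_handlerd is only drained when pg_init work is actually outstanding, and bio-based multipath now flushes just its own process_queued_bios item instead of the whole shared kmultipathd workqueue. A minimal sketch of that pattern with hypothetical names (my_dev, wq, item are illustrative, not dm symbols):

        struct my_dev {
                atomic_t work_outstanding;      /* items we queued on wq */
                struct workqueue_struct *wq;    /* shared with other devices */
                struct work_struct item;        /* the one item this device owns */
        };

        static void my_dev_quiesce(struct my_dev *d)
        {
                /* flush_workqueue() waits for everything queued by anyone,
                 * so skip it when our own counter says nothing is pending */
                if (atomic_read(&d->work_outstanding))
                        flush_workqueue(d->wq);

                /* when only our own item matters, flush_work() waits for just
                 * that item rather than draining the shared queue */
                flush_work(&d->item);
        }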
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1f1fe9a618ea..5af22e165fb7 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -128,13 +128,13 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
* the md may be freed in dm_put() at the end of this function.
* Or do dm_get() before calling this function and dm_put() later.
*/
-static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+static void rq_completed(struct mapped_device *md)
{
- atomic_dec(&md->pending[rw]);
-
/* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
- wake_up(&md->wait);
+ if (unlikely(waitqueue_active(&md->wait))) {
+ if (!blk_mq_queue_busy(md->queue))
+ wake_up(&md->wait);
+ }
/*
* dm_put() must be at the end of this function. See the comment above
@@ -149,7 +149,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
*/
static void dm_end_request(struct request *clone, blk_status_t error)
{
- int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
@@ -159,7 +158,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
rq_end_stats(md, rq);
blk_mq_end_request(rq, error);
- rq_completed(md, rw, true);
+ rq_completed(md);
}
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
@@ -183,7 +182,6 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
- int rw = rq_data_dir(rq);
unsigned long delay_ms = delay_requeue ? 100 : 0;
rq_end_stats(md, rq);
@@ -193,7 +191,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
}
dm_mq_delay_requeue_request(rq, delay_ms);
- rq_completed(md, rw, false);
+ rq_completed(md);
}
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
@@ -248,15 +246,13 @@ static void dm_softirq_done(struct request *rq)
bool mapped = true;
struct dm_rq_target_io *tio = tio_from_request(rq);
struct request *clone = tio->clone;
- int rw;
if (!clone) {
struct mapped_device *md = tio->md;
rq_end_stats(md, rq);
- rw = rq_data_dir(rq);
blk_mq_end_request(rq, tio->error);
- rq_completed(md, rw, false);
+ rq_completed(md);
return;
}
@@ -436,7 +432,6 @@ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
blk_mq_start_request(orig);
- atomic_inc(&md->pending[rq_data_dir(orig)]);
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
@@ -510,7 +505,7 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
if (map_request(tio) == DM_MAPIO_REQUEUE) {
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
- rq_completed(md, rq_data_dir(rq), false);
+ rq_completed(md);
return BLK_STS_RESOURCE;
}
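Note: on the request-based side the completion path now asks blk-mq whether the queue still has requests in flight (blk_mq_queue_busy()) rather than decrementing a dm-private counter, and it only calls wake_up() when waitqueue_active() reports a sleeper on md->wait (the suspend path). That optimization is safe only because the sleeper re-checks its condition after prepare_to_wait(); a simplified sketch of that waiter pattern (md_is_idle() is a stand-in for whichever idle check the suspend path uses, not a dm symbol):

        static void wait_until_idle(struct mapped_device *md)
        {
                DEFINE_WAIT(wait);

                for (;;) {
                        /* get onto md->wait before re-checking, so a completion
                         * racing with us still sees an active waitqueue */
                        prepare_to_wait(&md->wait, &wait, TASK_UNINTERRUPTIBLE);
                        if (md_is_idle(md))     /* stand-in for the real check */
                                break;
                        io_schedule();
                }
                finish_wait(&md->wait, &wait);
        }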
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a733e4c920af..24609a07ef50 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -648,24 +648,20 @@ static void free_tio(struct dm_target_io *tio)
int md_in_flight(struct mapped_device *md)
{
- return atomic_read(&md->pending[READ]) +
- atomic_read(&md->pending[WRITE]);
+ return atomic_read(&dm_disk(md)->part0.in_flight[READ]) +
+ atomic_read(&dm_disk(md)->part0.in_flight[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- int rw = bio_data_dir(bio);
io->start_time = jiffies;
generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
&dm_disk(md)->part0);
- atomic_set(&dm_disk(md)->part0.in_flight[rw],
- atomic_inc_return(&md->pending[rw]));
-
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
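Note: the atomic_set() removed here only mirrored md->pending[rw] into part0.in_flight[rw]; that is unnecessary because generic_start_io_acct()/generic_end_io_acct() already maintain part0.in_flight on a bio-based queue. A minimal sketch of a driver relying on those helpers alone (the my_* wrappers are illustrative; the helper signatures match the calls in this hunk):

        static void my_start_io(struct gendisk *disk, struct bio *bio,
                                unsigned long *start_time)
        {
                *start_time = jiffies;
                /* bumps disk->part0.in_flight[] and the iostat counters */
                generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
                                      &disk->part0);
        }

        static void my_end_io(struct gendisk *disk, struct bio *bio,
                              unsigned long start_time)
        {
                /* drops disk->part0.in_flight[] and accounts the I/O duration */
                generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0,
                                    start_time);
        }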
@@ -677,28 +673,30 @@ static void end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
- int pending;
- int rw = bio_data_dir(bio);
+ /*
+ * make sure that atomic_dec in generic_end_io_acct is not reordered
+ * with previous writes
+ */
+ smp_mb__before_atomic();
generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
io->start_time);
+ /*
+ * generic_end_io_acct does atomic_dec, this barrier makes sure that
+ * atomic_dec is not reordered with waitqueue_active
+ */
+ smp_mb__after_atomic();
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
true, duration, &io->stats_aux);
- /*
- * After this is decremented the bio must not be touched if it is
- * a flush.
- */
- pending = atomic_dec_return(&md->pending[rw]);
- atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
- pending += atomic_read(&md->pending[rw^0x1]);
-
/* nudge anyone waiting on suspend queue */
- if (!pending)
- wake_up(&md->wait);
+ if (unlikely(waitqueue_active(&md->wait))) {
+ if (!md_in_flight(md))
+ wake_up(&md->wait);
+ }
}
/*
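Note: the barrier pair around generic_end_io_acct() follows the documented waitqueue_active() protocol (see the comment above waitqueue_active() in include/linux/wait.h): the in_flight decrement done inside the helper must be ordered before the waitqueue check, otherwise a suspend thread that has just queued itself on md->wait could still observe a stale non-zero count while this CPU observes an empty waitqueue, and the wake-up would be lost. Bare-bones shape of the waker side, with the decrement written out (in_flight and wq are illustrative globals):

        static atomic_t in_flight;
        static wait_queue_head_t wq;

        static void complete_one(void)
        {
                smp_mb__before_atomic();  /* order earlier writes before the dec */
                atomic_dec(&in_flight);
                smp_mb__after_atomic();   /* order the dec before the check below */

                if (waitqueue_active(&wq) && !atomic_read(&in_flight))
                        wake_up(&wq);
        }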
@@ -1685,8 +1683,7 @@ out:
typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
-static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
- process_bio_fn process_bio)
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
blk_qc_t ret = BLK_QC_T_NONE;
@@ -1706,26 +1703,15 @@ static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
return ret;
}
- ret = process_bio(md, map, bio);
+ if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+ ret = __process_bio(md, map, bio);
+ else
+ ret = __split_and_process_bio(md, map, bio);
dm_put_live_table(md, srcu_idx);
return ret;
}
-/*
- * The request function that remaps the bio to one target and
- * splits off any remainder.
- */
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
-{
- return __dm_make_request(q, bio, __split_and_process_bio);
-}
-
-static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
-{
- return __dm_make_request(q, bio, __process_bio);
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -1906,8 +1892,6 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->disk)
goto bad;
- atomic_set(&md->pending[0], 0);
- atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
@@ -2219,12 +2203,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- dm_init_normal_md_queue(md);
- blk_queue_make_request(md->queue, dm_make_request);
- break;
case DM_TYPE_NVME_BIO_BASED:
dm_init_normal_md_queue(md);
- blk_queue_make_request(md->queue, dm_make_request_nvme);
+ blk_queue_make_request(md->queue, dm_make_request);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
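Note: with a single dm_make_request() that branches on the md type internally, the three bio-based queue types collapse into one case group; reconstructed from this hunk, the switch body now reads roughly as follows (surrounding context elided):

        case DM_TYPE_BIO_BASED:
        case DM_TYPE_DAX_BIO_BASED:
        case DM_TYPE_NVME_BIO_BASED:
                dm_init_normal_md_queue(md);
                blk_queue_make_request(md->queue, dm_make_request);
                break;
        case DM_TYPE_NONE:
                WARN_ON_ONCE(true);
                break;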