Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c    2
-rw-r--r--  block/bio-integrity.c  2
-rw-r--r--  block/blk-core.c      14
-rw-r--r--  block/blk-map.c        2
-rw-r--r--  block/blk-mq.c        39
-rw-r--r--  block/elevator.c       2
-rw-r--r--  block/fops.c          35
-rw-r--r--  block/genhd.c         14
8 files changed, 72 insertions, 38 deletions
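
The central API change in this series is the removal of blk_set_queue_dying() (see the blk-core.c hunk below) in favour of the new, exported blk_mark_disk_dead() (genhd.c hunk). The following is a rough sketch of how a caller would migrate; the driver name and function are hypothetical and not part of this diff, and the header carrying the declaration is assumed.

/*
 * Hypothetical surprise-removal path in a block driver, sketching the
 * migration this series implies: instead of the removed
 * blk_set_queue_dying(disk->queue), callers mark the gendisk itself dead.
 */
#include <linux/blkdev.h>

static void exampledrv_surprise_remove(struct gendisk *disk)
{
	blk_mark_disk_dead(disk);	/* sets GD_DEAD and starts draining the queue */
	del_gendisk(disk);		/* normal teardown; no new I/O is accepted */
}
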
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3376ad03823e..29d2635a2a63 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7029,6 +7029,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
spin_unlock_irq(&bfqd->lock);
#endif
+ wbt_enable_default(bfqd->queue);
+
kfree(bfqd);
}
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index bd5453220065..6996e7bd66e9 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
- bip->bip_iter.bi_sector += bytes_done >> 9;
+ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
diff --git a/block/blk-core.c b/block/blk-core.c
index a97918d107a0..9c14deab3af4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -50,6 +50,7 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
+#include "blk-rq-qos.h"
struct dentry *blk_debugfs_root;
@@ -285,13 +286,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
-void blk_set_queue_dying(struct request_queue *q)
-{
- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- blk_queue_start_drain(q);
-}
-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -309,7 +303,8 @@ void blk_cleanup_queue(struct request_queue *q)
WARN_ON_ONCE(blk_queue_registered(q));
/* mark @q DYING, no new request or merges will be allowed afterwards */
- blk_set_queue_dying(q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+ blk_queue_start_drain(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
@@ -321,6 +316,9 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_freeze_queue(q);
+ /* cleanup rq qos structures for queue without disk */
+ rq_qos_exit(q);
+
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
blk_sync_queue(q);
diff --git a/block/blk-map.c b/block/blk-map.c
index 4526adde0156..c7f71d83eff1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;
- page = alloc_page(GFP_NOIO | gfp_mask);
+ page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
if (!page)
goto cleanup;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 213bb5979bed..8e659dc5fcf3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -737,6 +737,10 @@ static void blk_complete_request(struct request *req)
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ bio->bi_iter.bi_sector = req->__sector;
+
if (!is_flush)
bio_endio(bio);
bio = next;
@@ -2727,7 +2731,8 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
- struct bio *bio)
+ struct bio *bio,
+ unsigned int nsegs)
{
struct blk_mq_alloc_data data = {
.q = q,
@@ -2739,6 +2744,11 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
if (unlikely(bio_queue_enter(bio)))
return NULL;
+ if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+ goto queue_exit;
+
+ rq_qos_throttle(q, bio);
+
if (plug) {
data.nr_tags = plug->nr_ios;
plug->nr_ios = 1;
@@ -2751,12 +2761,13 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
+queue_exit:
blk_queue_exit(q);
return NULL;
}
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
- struct blk_plug *plug, struct bio *bio)
+ struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
{
struct request *rq;
@@ -2766,12 +2777,19 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
if (!rq || rq->q != q)
return NULL;
- if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+ if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
+ *bio = NULL;
return NULL;
- if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+ }
+
+ rq_qos_throttle(q, *bio);
+
+ if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+ return NULL;
+ if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
return NULL;
- rq->cmd_flags = bio->bi_opf;
+ rq->cmd_flags = (*bio)->bi_opf;
plug->cached_rq = rq_list_next(rq);
INIT_LIST_HEAD(&rq->queuelist);
return rq;
@@ -2806,14 +2824,11 @@ void blk_mq_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return;
- if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
- return;
-
- rq_qos_throttle(q, bio);
-
- rq = blk_mq_get_cached_request(q, plug, bio);
+ rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
if (!rq) {
- rq = blk_mq_get_new_requests(q, plug, bio);
+ if (!bio)
+ return;
+ rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq))
return;
}
diff --git a/block/elevator.c b/block/elevator.c
index 20a4e7c7c774..c319765892bb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -530,8 +530,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_del(&e->kobj);
e->registered = 0;
- /* Re-enable throttling in case elevator disabled it */
- wbt_enable_default(q);
}
}
diff --git a/block/fops.c b/block/fops.c
index 3696665e586a..7ccc4ff109ce 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -287,6 +287,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
struct kiocb *iocb = dio->iocb;
ssize_t ret;
+ WRITE_ONCE(iocb->private, NULL);
+
if (likely(!bio->bi_status)) {
ret = dio->size;
iocb->ki_pos += ret;
@@ -563,34 +565,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct block_device *bdev = iocb->ki_filp->private_data;
loff_t size = bdev_nr_bytes(bdev);
- size_t count = iov_iter_count(to);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
ssize_t ret = 0;
+ size_t count;
- if (unlikely(pos + count > size)) {
+ if (unlikely(pos + iov_iter_count(to) > size)) {
if (pos >= size)
return 0;
size -= pos;
- if (count > size) {
- shorted = count - size;
- iov_iter_truncate(to, size);
- }
+ shorted = iov_iter_count(to) - size;
+ iov_iter_truncate(to, size);
}
+ count = iov_iter_count(to);
+ if (!count)
+ goto reexpand; /* skip atime */
+
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
- iocb->ki_pos + count - 1))
- return -EAGAIN;
+ if (filemap_range_needs_writeback(mapping, pos,
+ pos + count - 1)) {
+ ret = -EAGAIN;
+ goto reexpand;
+ }
} else {
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
+ ret = filemap_write_and_wait_range(mapping, pos,
+ pos + count - 1);
if (ret < 0)
- return ret;
+ goto reexpand;
}
file_accessed(iocb->ki_filp);
@@ -600,12 +605,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
iocb->ki_pos += ret;
count -= ret;
}
+ iov_iter_revert(to, count - iov_iter_count(to));
if (ret < 0 || !count)
- return ret;
+ goto reexpand;
}
ret = filemap_read(iocb, to, ret);
+reexpand:
if (unlikely(shorted))
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
diff --git a/block/genhd.c b/block/genhd.c
index 8244f09e058d..37eb41ee4086 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -555,6 +555,20 @@ out_free_ext_minor:
EXPORT_SYMBOL(device_add_disk);
/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+ set_bit(GD_DEAD, &disk->state);
+ blk_queue_start_drain(disk->queue);
+}
+EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
+
+/**
* del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove
*