Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	| 28
1 file changed, 28 insertions(+), 0 deletions(-)
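
This patch threads I/O cancellation through the block core in three places: generic_make_request_checks() fails already-cancelled bios at submission time, blk_peek_request() aborts fully cancelled requests at dispatch time, and blk_update_request() treats -ECANCELED as a quiet (non-logged) completion status. The bio_cancelled() helper that the hunks below rely on is not part of this diff; it belongs to the <linux/aio.h> side of the cancellation series. A minimal sketch of what it plausibly looks like, assuming a BIO_CANCELLED bit in bio->bi_flags (the flag name and the header it lives in are assumptions, not shown here):

/*
 * Hypothetical helper, assumed to come with <linux/aio.h>: reports
 * whether cancellation has been requested for this bio.
 * BIO_CANCELLED is an assumed bi_flags bit, not defined in this diff.
 */
static inline bool bio_cancelled(struct bio *bio)
{
	return bio_flagged(bio, BIO_CANCELLED);
}
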
diff --git a/block/blk-core.c b/block/blk-core.c
index cfbde698cc81..83fdf39de548 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
+#include <linux/aio.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -1742,6 +1743,11 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	if (bio_cancelled(bio)) {
+		err = -ECANCELED;
+		goto end_io;
+	}
+
 	/*
 	 * Various block parts want %current->io_context and lazy ioc
 	 * allocation ends up trading a lot of pain for a small amount of
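
On the submission path, the new check fails an already-cancelled bio with -ECANCELED before it is ever queued, reusing the same end_io exit taken by the other validity checks in generic_make_request_checks().
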
@@ -2055,6 +2061,20 @@ static void blk_account_io_done(struct request *req)
 	}
 }
 
+static bool request_cancelled(struct request *rq)
+{
+	struct bio *bio;
+
+	if (!rq->bio)
+		return false;
+
+	for (bio = rq->bio; bio; bio = bio->bi_next)
+		if (!bio_cancelled(bio))
+			return false;
+
+	return true;
+}
+
 #ifdef CONFIG_PM_RUNTIME
 /*
  * Don't process normal requests when queue is suspended
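
Note the polarity of the loop in request_cancelled(): it bails out as soon as it sees one bio that is not cancelled, so a request counts as cancelled only when every bio attached to it has been cancelled. A merged request that still carries a single live bio is dispatched normally.
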
@@ -2122,6 +2142,11 @@ struct request *blk_peek_request(struct request_queue *q)
 			trace_block_rq_issue(q, rq);
 		}
 
+		if (request_cancelled(rq)) {
+			blk_start_abort_request(rq, -ECANCELED);
+			continue;
+		}
+
 		if (!q->boundary_rq || q->boundary_rq == rq) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = NULL;
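
At dispatch time, blk_peek_request() now drops fully cancelled requests before a driver ever sees them. blk_start_abort_request() is likewise not defined in this diff; judging by its name and call site (queue lock held, inside the peek loop), it plausibly dequeues the request and completes it immediately with the given error. A sketch under those assumptions, not the series' actual body:

/*
 * Sketch of what blk_start_abort_request() might do; its real body is
 * not part of this diff. Assumes the queue lock is held, as it is at
 * the call site in blk_peek_request().
 */
static void blk_start_abort_request(struct request *rq, int error)
{
	blk_start_request(rq);			/* dequeue and mark started */
	__blk_end_request_all(rq, error);	/* complete all bios with 'error' */
}
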
@@ -2316,6 +2341,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
 		char *error_type;
 
 		switch (error) {
+		case -ECANCELED:
+			goto noerr;
 		case -ENOLINK:
			error_type = "recoverable transport";
			break;
@@ -2345,6 +2372,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
 			       (unsigned long long)blk_rq_pos(req));
 	}
 
+noerr:
 	blk_account_io_completion(req, nr_bytes);
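
On the completion side, -ECANCELED is deliberately routed around the error reporting: the new case jumps straight to the noerr label, skipping the error_type-based printk entirely, so a cancelled request is accounted and completed without being logged as an I/O error.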