author    Kent Overstreet <kmo@daterainc.com>  2013-10-10 20:50:11 -0700
committer Kent Overstreet <kmo@daterainc.com>  2013-10-10 20:50:46 -0700
commit    87f98e774776d0f3e1214b61123b34de53e63e94
tree      7ab1b4f81329487991cb519bc3182bd72cdc6b06
parent    8613f0c75e242dd806ae7b84c67e1761a0ed6a9d
block: Bio cancellation
If a bio is associated with a kiocb, allow it to be cancelled. This is accomplished by adding a pointer to a kiocb in struct bio, and when we go to dequeue a request we check if its bio has been cancelled - if so, we end the request with -ECANCELED.

We don't currently try to cancel bios if IO has already been started - that'd require a per bio callback function, and a way to find all the outstanding bios for a given kiocb. Such a mechanism may or may not be added in the future, but this patch tries to start simple.

Currently this can only be triggered with aio and io_cancel(), but the mechanism can be used for sync IO too. It can also be used for bios created by stacking drivers, and bio clones in general - when cloning a bio, if the bi_iocb pointer is copied as well, the clone will then be cancellable. bio_clone() could be modified to do this, but hasn't been in this patch, because all the bio_clone() users would need to be audited to make sure that it's safe. We can't blindly make e.g. raid5 writes cancellable without the knowledge of the md code.

Initial patch by Anatol Pomazau (anatol@google.com).

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
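The bio_cancelled() helper tested in the hunks below is added elsewhere in this patch; blk-core.c only consumes it. As a rough sketch of the mechanism the message describes - a kiocb pointer carried in struct bio and checked at dequeue time - it plausibly reduces to something like the following (bi_iocb comes from the commit message; kiocb_cancelled() is a hypothetical stand-in for however the aio side exposes the kiocb's cancelled state):

	/*
	 * Sketch only, not part of this diff: a bio is cancelled iff it
	 * is tied to a kiocb and that kiocb has been cancelled. Bios with
	 * no associated kiocb (bi_iocb == NULL) are never cancellable,
	 * which is why ordinary sync IO is unaffected.
	 */
	static inline bool bio_cancelled(struct bio *bio)
	{
		return bio->bi_iocb && kiocb_cancelled(bio->bi_iocb);
	}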
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index cfbde698cc81..83fdf39de548 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
+#include <linux/aio.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -1742,6 +1743,11 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	if (bio_cancelled(bio)) {
+		err = -ECANCELED;
+		goto end_io;
+	}
+
 	/*
 	 * Various block parts want %current->io_context and lazy ioc
 	 * allocation ends up trading a lot of pain for a small amount of
@@ -2055,6 +2061,20 @@ static void blk_account_io_done(struct request *req)
 	}
 }
 
+static bool request_cancelled(struct request *rq)
+{
+	struct bio *bio;
+
+	if (!rq->bio)
+		return false;
+
+	for (bio = rq->bio; bio; bio = bio->bi_next)
+		if (!bio_cancelled(bio))
+			return false;
+
+	return true;
+}
+
 #ifdef CONFIG_PM_RUNTIME
 /*
  * Don't process normal requests when queue is suspended
@@ -2122,6 +2142,11 @@ struct request *blk_peek_request(struct request_queue *q)
 			trace_block_rq_issue(q, rq);
 		}
 
+		if (request_cancelled(rq)) {
+			blk_start_abort_request(rq, -ECANCELED);
+			continue;
+		}
+
 		if (!q->boundary_rq || q->boundary_rq == rq) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = NULL;
@@ -2316,6 +2341,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
 		char *error_type;
 
 		switch (error) {
+		case -ECANCELED:
+			goto noerr;
 		case -ENOLINK:
 			error_type = "recoverable transport";
 			break;
@@ -2345,6 +2372,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
 				   (unsigned long long)blk_rq_pos(req));
 	}
 
+noerr:
 	blk_account_io_completion(req, nr_bytes);
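blk_start_abort_request(), called from the blk_peek_request() hunk above, is likewise defined outside this file (none of the 28 insertions here contain it). Given the call site - the request has been peeked but not yet started - a plausible sketch, assuming the blk_start_request()/__blk_end_request_all() primitives of this era, is:

	/*
	 * Hypothetical sketch, not shown in this diff: dequeue the request
	 * normally, then complete it immediately with the given error so
	 * that accounting and the bios' endio callbacks still run.
	 */
	static void blk_start_abort_request(struct request *rq, int error)
	{
		blk_start_request(rq);
		__blk_end_request_all(rq, error);
	}

The -ECANCELED case added to blk_update_request() above then keeps this abort path from being logged as an IO error.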