Diffstat (limited to 'block/blk-core.c')
 block/blk-core.c | 119 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 107 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435c6eaf..0214837befa6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -501,6 +501,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ q->backing_dev_info.name = "block";
err = bdi_init(&q->backing_dev_info);
if (err) {
@@ -1118,17 +1119,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_type = REQ_TYPE_FS;
/*
- * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+ * Inherit FAILFAST from bio (for read-ahead, and explicit
+ * FAILFAST). FAILFAST flags are identical for req and bio.
*/
if (bio_rw_ahead(bio))
- req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER);
- if (bio_failfast_dev(bio))
- req->cmd_flags |= REQ_FAILFAST_DEV;
- if (bio_failfast_transport(bio))
- req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
- if (bio_failfast_driver(bio))
- req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ req->cmd_flags |= REQ_FAILFAST_MASK;
+ else
+ req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
if (unlikely(bio_discard(bio))) {
req->cmd_flags |= REQ_DISCARD;
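
The hunk above collapses three per-flag checks into a single mask operation; the patch can do this because, as its new comment notes, the FAILFAST flags are identical for req and bio, and read-ahead is simply treated as "fail fast on every layer". A minimal sketch of the equivalent logic, assuming REQ_FAILFAST_MASK is the OR of REQ_FAILFAST_DEV, REQ_FAILFAST_TRANSPORT and REQ_FAILFAST_DRIVER (the helper name below is made up for this note, it is not part of the patch):

	/* illustrative only, not part of the patch */
	static inline unsigned int bio_failfast_flags(const struct bio *bio)
	{
		/* read-ahead is best-effort: set every failfast bit */
		if (bio_rw_ahead(bio))
			return REQ_FAILFAST_MASK;

		/* otherwise propagate exactly the failfast bits the bio carries */
		return bio->bi_rw & REQ_FAILFAST_MASK;
	}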
@@ -1168,6 +1165,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
const int unplug = bio_unplug(bio);
+ const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
if (bio_barrier(bio) && bio_has_data(bio) &&
@@ -1197,6 +1195,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
trace_block_bio_backmerge(q, bio);
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ blk_rq_set_mixed_merge(req);
+
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bytes;
@@ -1216,6 +1217,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
trace_block_bio_frontmerge(q, bio);
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+ blk_rq_set_mixed_merge(req);
+ req->cmd_flags &= ~REQ_FAILFAST_MASK;
+ req->cmd_flags |= ff;
+ }
+
bio->bi_next = req->bio;
req->bio = bio;
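
The two merge hunks above mark a request as a mixed merge whenever the incoming bio's failfast bits differ from the request's, and on a front merge the request also takes over the new head bio's bits: the attributes of a mixed request follow its first bio, which is the same rule the blk_update_request() hunk further down re-applies as leading bios complete. A minimal sketch of that rule, assuming blk_rq_set_mixed_merge() sets REQ_MIXED_MERGE on the request (the helper below is hypothetical):

	/* hypothetical helper restating the "follow the first bio" rule */
	static void rq_refresh_failfast_from_head(struct request *req)
	{
		if (!(req->cmd_flags & REQ_MIXED_MERGE))
			return;

		/* drop the old failfast bits and adopt the head bio's */
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
	}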
@@ -1660,6 +1667,50 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+/**
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request could be a merge of IOs which require different failure
+ * handling. This function determines the number of bytes which
+ * can be failed from the beginning of the request without
+ * crossing into an area which needs to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_rw & ff) != ff)
+ break;
+ bytes += bio->bi_size;
+ }
+
+ /* this could lead to infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
+
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
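
As a concrete illustration of the boundary blk_rq_err_bytes() computes: if a mixed request starts with a bio carrying REQ_FAILFAST_DEV and REQ_FAILFAST_TRANSPORT while a later bio carries only REQ_FAILFAST_DEV, the walk stops at that later bio, since it is less eager to fail than the head. A caller holding the queue_lock could then fail just that prefix; this is a sketch (the function name is made up), and it is essentially what the __blk_end_request_err() helper added further down wraps:

	/* illustrative caller; queue_lock must already be held */
	static bool fail_eager_prefix(struct request *rq, int error)
	{
		/* bytes in front of the first bio with weaker failfast bits */
		unsigned int nr_bytes = blk_rq_err_bytes(rq);

		/* complete only the eager-to-fail prefix with @error */
		return __blk_end_request(rq, error, nr_bytes);
	}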
@@ -2006,6 +2057,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (blk_fs_request(req) || blk_discard_rq(req))
req->__sector += total_bytes >> 9;
+ /* mixed attributes always follow the first bio */
+ if (req->cmd_flags & REQ_MIXED_MERGE) {
+ req->cmd_flags &= ~REQ_FAILFAST_MASK;
+ req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+ }
+
/*
* If total number of sectors is less than the first segment
* size, something has gone terribly wrong.
@@ -2185,6 +2242,25 @@ bool blk_end_request_cur(struct request *rq, int error)
EXPORT_SYMBOL_GPL(blk_end_request_cur);
/**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ * Complete @rq till the next failure boundary.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+ WARN_ON(error >= 0);
+ return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: %0 for success, < %0 for error
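
A hedged sketch of how a driver's error path might use the new helper (the driver, its requeue routine, and the overall flow here are hypothetical, not taken from this patch); the return value says whether buffers are still pending, so whatever is left after the failfast prefix has been failed can be requeued for an ordinary retry:

	/* hypothetical completion path for a driver that honours failfast */
	static void mydrv_complete_rq(struct request *rq, int error)
	{
		if (error) {
			/* fail only the bios that asked to fail fast ... */
			if (blk_end_request_err(rq, error))
				mydrv_requeue(rq);	/* ... and retry the rest */
			return;
		}

		/* success: complete the whole request */
		blk_end_request_all(rq, 0);
	}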
@@ -2243,12 +2319,31 @@ bool __blk_end_request_cur(struct request *rq, int error)
}
EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ * Complete @rq till the next failure boundary. Must be called
+ * with queue lock held.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+ WARN_ON(error >= 0);
+ return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
+
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
- we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
- rq->cmd_flags |= (bio->bi_rw & 3);
+ /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+ rq->cmd_flags |= bio->bi_rw & REQ_RW;
if (bio_has_data(bio)) {
rq->nr_phys_segments = bio_phys_segments(q, bio);