diff options
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 42 |
1 files changed, 24 insertions, 18 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 66d31687cf6b..186603b36c44 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -151,7 +151,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
-			  unsigned int nbytes, int error)
+			  unsigned int nbytes, int error,
+			  struct batch_complete *batch)
 {
 	if (error)
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -175,7 +176,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 	/* don't actually finish bio if it's part of flush sequence */
 	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
+		bio_endio_batch(bio, error, batch);
 }
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1473,6 +1474,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+		bio_endio(bio, -EIO);
+		return;
+	}
+
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
@@ -1706,9 +1712,6 @@ generic_make_request_checks(struct bio *bio)
 	 */
 	blk_partition_remap(bio);
 
-	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-		goto end_io;
-
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
@@ -2248,7 +2251,8 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes,
+			struct batch_complete *batch)
 {
 	int total_bytes, bio_nbytes, next_idx = 0;
 	struct bio *bio;
@@ -2304,7 +2308,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;
-			req_bio_endio(req, bio, nbytes, error);
+			req_bio_endio(req, bio, nbytes, error, batch);
 			next_idx = 0;
 			bio_nbytes = 0;
 		} else {
@@ -2366,7 +2370,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * if the request wasn't completed, update state
 	 */
 	if (bio_nbytes) {
-		req_bio_endio(req, bio, bio_nbytes, error);
+		req_bio_endio(req, bio, bio_nbytes, error, batch);
 		bio->bi_idx += next_idx;
 		bio_iovec(bio)->bv_offset += nr_bytes;
 		bio_iovec(bio)->bv_len -= nr_bytes;
@@ -2403,14 +2407,15 @@ EXPORT_SYMBOL_GPL(blk_update_request);
 
 static bool blk_update_bidi_request(struct request *rq, int error,
 				    unsigned int nr_bytes,
-				    unsigned int bidi_bytes)
+				    unsigned int bidi_bytes,
+				    struct batch_complete *batch)
 {
-	if (blk_update_request(rq, error, nr_bytes))
+	if (blk_update_request(rq, error, nr_bytes, batch))
 		return true;
 
 	/* Bidi request must be completed as a whole */
 	if (unlikely(blk_bidi_rq(rq)) &&
-	    blk_update_request(rq->next_rq, error, bidi_bytes))
+	    blk_update_request(rq->next_rq, error, bidi_bytes, batch))
 		return true;
 
 	if (blk_queue_add_random(rq->q))
@@ -2493,7 +2498,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
-	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes, NULL))
 		return true;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -2519,9 +2524,10 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %true  - still buffers pending for this request
  **/
 bool __blk_end_bidi_request(struct request *rq, int error,
-			    unsigned int nr_bytes, unsigned int bidi_bytes)
+			    unsigned int nr_bytes, unsigned int bidi_bytes,
+			    struct batch_complete *batch)
 {
-	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes, batch))
 		return true;
 
 	blk_finish_request(rq, error);
@@ -2622,7 +2628,7 @@ EXPORT_SYMBOL_GPL(blk_end_request_err);
  **/
 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
-	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
+	return __blk_end_bidi_request(rq, error, nr_bytes, 0, NULL);
 }
 EXPORT_SYMBOL(__blk_end_request);
 
@@ -2634,7 +2640,7 @@ EXPORT_SYMBOL(__blk_end_request);
  * Description:
  *     Completely finish @rq.  Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all_batch(struct request *rq, int error, struct batch_complete *batch)
 {
 	bool pending;
 	unsigned int bidi_bytes = 0;
@@ -2642,10 +2648,10 @@ void __blk_end_request_all(struct request *rq, int error)
 	if (unlikely(blk_bidi_rq(rq)))
 		bidi_bytes = blk_rq_bytes(rq->next_rq);
 
-	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes, batch);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL(__blk_end_request_all);
+EXPORT_SYMBOL(blk_end_request_all_batch);
 
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.