author	Kent Overstreet <kmo@daterainc.com>	2013-11-23 21:36:29 -0800
committer	Kent Overstreet <kmo@daterainc.com>	2014-06-17 18:25:56 -0700
commit	43a36dbbcf84a32649b0e91c36951efc9c74b91b (patch)
tree	2829ffbcef00557031ac408770805bcf36e8782a
parent	7455abd4f2f037796127a8f92f2c5dd4dcc6770e (diff)
block: Make blk_queue_bounce() handle bios larger than BIO_MAX_PAGES
We'd like to eventually be able to handle bios with more than BIO_MAX_PAGES segments; this shouldn't be too hard, and it'll simplify other code in the kernel. The issue is that code which clones the bio and must also clone the biovec (i.e. code that can't use bio_clone_fast()) can't allocate a bio with more than BIO_MAX_PAGES segments - bio_alloc_bioset() always fails in that case.

Fortunately, it's easy to make blk_queue_bounce() process only part of the bio when necessary, using bi_remaining to count the splits and punting the rest back to generic_make_request().

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
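To make the bi_remaining accounting concrete, here is a minimal userspace sketch (not kernel code) of the split-and-resubmit pattern the message describes: each extra split bumps a shared remaining count, and the parent completes only when the count drops to zero. The names fake_bio, fake_endio, fake_queue_bounce, and MAX_SEGS are illustrative stand-ins, not kernel APIs.

	#include <stdio.h>

	#define MAX_SEGS 4                      /* stand-in for BIO_MAX_PAGES */

	struct fake_bio {
		int remaining;                  /* models bi_remaining */
		int segs_left;                  /* segments not yet bounced */
	};

	/* Models bio_endio(): the parent completes only once every split has. */
	static void fake_endio(struct fake_bio *b)
	{
		if (--b->remaining == 0)
			printf("parent bio complete\n");
	}

	/* Bounce at most MAX_SEGS segments; if anything is left, bump the
	 * remaining count and resubmit, the way the patched
	 * __blk_queue_bounce() punts to generic_make_request(). */
	static void fake_queue_bounce(struct fake_bio *b)
	{
		int chunk = b->segs_left < MAX_SEGS ? b->segs_left : MAX_SEGS;

		b->segs_left -= chunk;
		printf("bounced %d segments, %d left\n", chunk, b->segs_left);

		if (b->segs_left) {
			b->remaining++;         /* one more completion to wait for */
			fake_queue_bounce(b);   /* models generic_make_request() */
		}

		fake_endio(b);                  /* this split's completion */
	}

	int main(void)
	{
		struct fake_bio b = { .remaining = 1, .segs_left = 10 };

		fake_queue_bounce(&b);
		return 0;
	}

Run on a 10-segment bio this splits into 4 + 4 + 2, and "parent bio complete" is printed exactly once, after the last split's completion decrements the count to zero.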
-rw-r--r--	block/bounce.c	60
1 file changed, 52 insertions(+), 8 deletions(-)
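The other half of the trick is the iterator hand-off in bio_clone_segments() in the diff below: the clone copies segments while walking the source's bvec_iter, then writes the advanced iterator back (the bio_src->bi_iter = iter step), so the source bio afterwards describes only the unprocessed tail. A rough userspace sketch of that cursor hand-off, with the same caveat that seg_iter and clone_front are invented stand-ins:

	#include <stdio.h>

	#define MAX_SEGS 4                      /* stand-in for BIO_MAX_PAGES */

	/* Models struct bvec_iter: a cursor into the source bio's segments. */
	struct seg_iter {
		int idx;
		int count;
	};

	/* Copy up to nsegs segments from the front and leave the cursor
	 * advanced, so the caller's iterator now covers only the remainder. */
	static int clone_front(struct seg_iter *iter, int nsegs)
	{
		int copied = 0;

		while (iter->idx < iter->count && copied < nsegs) {
			iter->idx++;            /* bio_for_each_segment() advance */
			copied++;
		}
		return copied;
	}

	int main(void)
	{
		struct seg_iter it = { .idx = 0, .count = 10 };
		int n;

		while ((n = clone_front(&it, MAX_SEGS)) > 0)
			printf("cloned %d segment(s), source resumes at %d\n",
			       n, it.idx);
		return 0;
	}

Because the source bio's iterator is left pointing at the tail, resubmitting it via generic_make_request() naturally processes the next chunk with no extra bookkeeping.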
diff --git a/block/bounce.c b/block/bounce.c
index ab21ba203d5c..689ea894c0e7 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -196,6 +196,43 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
}
#endif /* CONFIG_NEED_BOUNCE_POOL */
+static struct bio *bio_clone_segments(struct bio *bio_src, gfp_t gfp_mask,
+ struct bio_set *bs, unsigned nsegs)
+{
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(gfp_mask, nsegs, bs);
+ if (!bio)
+ return NULL;
+
+ bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_rw = bio_src->bi_rw;
+ bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
+
+ bio_for_each_segment(bv, bio_src, iter) {
+ bio->bi_io_vec[bio->bi_vcnt++] = bv;
+ bio->bi_iter.bi_size += bv.bv_len;
+ if (!--nsegs)
+ break;
+ }
+
+ if (bio_integrity(bio_src)) {
+ int ret;
+
+ ret = bio_integrity_clone(bio, bio_src, gfp_mask);
+ if (ret < 0) {
+ bio_put(bio);
+ return NULL;
+ }
+ }
+
+ bio_src->bi_iter = iter;
+
+ return bio;
+}
+
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool, int force)
{
@@ -203,17 +240,24 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
int rw = bio_data_dir(*bio_orig);
struct bio_vec *to, from;
struct bvec_iter iter;
- unsigned i;
+ int i, nsegs = 0, bounce = force;
- if (force)
- goto bounce;
- bio_for_each_segment(from, *bio_orig, iter)
+ bio_for_each_segment(from, *bio_orig, iter) {
+ nsegs++;
if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
- goto bounce;
+ bounce = 1;
+ }
+
+ if (!bounce)
+ return;
- return;
-bounce:
- bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+ bio = bio_clone_segments(*bio_orig, GFP_NOIO, fs_bio_set,
+ min(nsegs, BIO_MAX_PAGES));
+
+ if ((*bio_orig)->bi_iter.bi_size) {
+ atomic_inc(&(*bio_orig)->bi_remaining);
+ generic_make_request(*bio_orig);
+ }
bio_for_each_segment_all(to, bio, i) {
struct page *page = to->bv_page;