author     Al Viro <viro@zeniv.linux.org.uk>  2017-09-24 10:21:15 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-10-18 09:38:31 +0200
commit     9f3bb57753d26ef4c491cd17d0670c3dcbb5528c (patch)
tree       a79d7a67133227b409677ea8eae426b1c89d10f1 /block
parent     02fb540228593ed28ac19040d403996f907ff6a7 (diff)
bio_copy_user_iov(): don't ignore ->iov_offset
commit 1cfd0ddd82232804e03f3023f6a58b50dfef0574 upstream.

Since "block: support large requests in blk_rq_map_user_iov" we started
to call it with a partially drained iter; that works fine on the write
side, but reads create a copy of the iter for completion time. That copy
needs to take the possibility of ->iov_offset != 0 into account...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
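To make the failure mode concrete, here is a hypothetical, self-contained userspace sketch; toy_iter, toy_advance and their fields are illustrative stand-ins for the kernel's iov_iter, not its real API. Rebuilding the iterator from the iovec array and the remaining byte count, the way the old iov_iter_init() call did, silently drops the offset into the current segment, while a plain struct copy preserves it.

/* Hypothetical model of the bug, not kernel code. */
#include <stdio.h>
#include <sys/uio.h>

struct toy_iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments remaining */
	size_t iov_offset;		/* bytes already consumed in iov[0] */
	size_t count;			/* total bytes remaining */
};

/* Advance by 'bytes', like a caller partially draining the iter. */
static void toy_advance(struct toy_iter *i, size_t bytes)
{
	while (bytes) {
		size_t left = i->iov->iov_len - i->iov_offset;
		size_t step = bytes < left ? bytes : left;

		i->iov_offset += step;
		i->count -= step;
		bytes -= step;
		if (i->iov_offset == i->iov->iov_len) {
			i->iov++;
			i->nr_segs--;
			i->iov_offset = 0;
		}
	}
}

int main(void)
{
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct toy_iter it = { .iov = &iov, .nr_segs = 1,
			       .iov_offset = 0, .count = sizeof(buf) };
	struct iovec saved_iov;
	struct toy_iter reinit, copy;

	toy_advance(&it, 16);		/* partially drained iter */
	saved_iov = *it.iov;		/* stash the remaining segment(s) */

	/* Old approach: rebuild from segments + count; offset is lost. */
	reinit = (struct toy_iter){ .iov = &saved_iov, .nr_segs = it.nr_segs,
				    .iov_offset = 0, .count = it.count };

	/* Fixed approach: copy the whole iterator, then repoint ->iov. */
	copy = it;
	copy.iov = &saved_iov;

	printf("reinit offset: %zu (wrong)\n", reinit.iov_offset);
	printf("copy   offset: %zu (right)\n", copy.iov_offset);
	return 0;
}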
Diffstat (limited to 'block')
-rw-r--r--  block/bio.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/block/bio.c b/block/bio.c
index a49d8e77d750..30f56b8b1fb2 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1235,8 +1235,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
bmd->is_our_pages = map_data ? 0 : 1;
memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
- iov_iter_init(&bmd->iter, iter->type, bmd->iov,
- iter->nr_segs, iter->count);
+ bmd->iter = *iter;
+ bmd->iter.iov = bmd->iov;
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
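The saved iterator matters because, for reads, the data is only copied back at completion time: in this version of block/bio.c, bio_uncopy_user() walks bmd->iter (via bio_copy_to_iter()) to move the bounce pages back into the caller's iovecs. The old iov_iter_init() call rebuilt that iterator with ->iov_offset forced to 0, so a partially drained iter would land the data at the start of the current segment rather than where the caller left off; copying the whole iter and only repointing ->iov at the stashed bmd->iov array keeps ->iov_offset and ->count intact.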