author	Kent Overstreet <kmo@daterainc.com>	2014-06-17 18:25:33 -0700
committer	Kent Overstreet <kmo@daterainc.com>	2014-06-17 18:25:33 -0700
commit	94bea102bae9172364eb7bdf9e7a9785bcddfae0 (patch)
tree	d82137140a4092d60cf847ff63dd1c6f23d1bcf4
parent	26355db40ab8aacf5da235ee6ccf3b874678f369 (diff)
refactor __bio_copy_iov(), per Al Viro's suggestions
-rw-r--r--	block/bio.c	62
1 file changed, 25 insertions, 37 deletions
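The change below replaces __bio_copy_iov()'s hand-rolled walk over individual iovecs (page_address() plus copy_to_user()/copy_from_user() per segment) with the iov_iter helpers copy_page_to_iter() and copy_page_from_iter(), which handle the segment bookkeeping and iterator advancement themselves. A minimal sketch of that pattern, assuming kernel context; copy_bvec_to_from_iov() is a hypothetical name for illustration, not part of the patch:

#include <linux/bio.h>
#include <linux/uio.h>

/*
 * Hypothetical illustration: copy one bio_vec's page to or from an
 * iov_iter. Both helpers return the number of bytes copied and advance
 * the iterator; a short return means a fault or an exhausted iterator.
 */
static size_t copy_bvec_to_from_iov(struct bio_vec *bvec,
				    struct iov_iter *i, int to_iov)
{
	if (to_iov == WRITE)	/* bio page -> user iovec */
		return copy_page_to_iter(bvec->bv_page, bvec->bv_offset,
					 bvec->bv_len, i);
	else			/* user iovec -> bio page */
		return copy_page_from_iter(bvec->bv_page, bvec->bv_offset,
					   bvec->bv_len, i);
}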
diff --git a/block/bio.c b/block/bio.c
index 12b67f619194..8a5d8239ecb8 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1000,45 +1000,34 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
 }
 
 static int __bio_copy_iov(struct bio *bio, const struct iov_iter *iter,
-			  int to_user, int from_user, int do_free_page)
+			  int to_iov)
 {
-	int ret = 0, i;
+	int i;
 	struct bio_vec *bvec;
 	struct iov_iter iov_iter = *iter;
 
 	bio_for_each_segment_all(bvec, bio, i) {
-		char *bv_addr = page_address(bvec->bv_page);
-		unsigned int bv_len = bvec->bv_len;
-
-		while (bv_len && iov_iter.count) {
-			struct iovec iov = iov_iter_iovec(&iov_iter);
-			unsigned int bytes = min_t(unsigned int, bv_len,
-						   iov.iov_len);
-
-			if (!ret) {
-				if (to_user)
-					ret = copy_to_user(iov.iov_base,
-							   bv_addr, bytes);
-
-				if (from_user)
-					ret = copy_from_user(bv_addr,
-							     iov.iov_base,
-							     bytes);
-
-				if (ret)
-					ret = -EFAULT;
-			}
-
-			bv_len -= bytes;
-			bv_addr += bytes;
-			iov_iter_advance(&iov_iter, bytes);
-		}
+		ssize_t ret;
+
+		if (to_iov == WRITE)
+			ret = copy_page_to_iter(bvec->bv_page,
+						bvec->bv_offset,
+						bvec->bv_len,
+						&iov_iter);
+		else
+			ret = copy_page_from_iter(bvec->bv_page,
+						  bvec->bv_offset,
+						  bvec->bv_len,
+						  &iov_iter);
+
+		if (!iov_iter_count(&iov_iter))
+			break;
 
-		if (do_free_page)
-			__free_page(bvec->bv_page);
+		if (ret < bvec->bv_len)
+			return -EFAULT;
 	}
 
-	return ret;
+	return 0;
 }
 
 /**
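For reference, __bio_copy_iov() as it reads with this hunk applied, assembled from the context and '+' lines above:

static int __bio_copy_iov(struct bio *bio, const struct iov_iter *iter,
			  int to_iov)
{
	int i;
	struct bio_vec *bvec;
	struct iov_iter iov_iter = *iter;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		if (to_iov == WRITE)
			ret = copy_page_to_iter(bvec->bv_page,
						bvec->bv_offset,
						bvec->bv_len, &iov_iter);
		else
			ret = copy_page_from_iter(bvec->bv_page,
						  bvec->bv_offset,
						  bvec->bv_len, &iov_iter);

		if (!iov_iter_count(&iov_iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

Note the ordering of the two checks: an exhausted iterator breaks out of the loop before the short-copy test, so a bio carrying more data than the iovec can hold completes with 0 rather than -EFAULT; only a short copy with iterator space still remaining is treated as a fault.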
@@ -1059,11 +1048,10 @@ int bio_uncopy_user(struct bio *bio)
 		 * if we're in a workqueue, the request is orphaned, so
 		 * don't copy into a random user address space, just free.
 		 */
-		if (current->mm)
-			ret = __bio_copy_iov(bio, &bmd->iter,
-					     bio_data_dir(bio) == READ,
-					     0, bmd->is_our_pages);
-		else if (bmd->is_our_pages)
+		if (current->mm && bio_data_dir(bio) == READ)
+			ret = __bio_copy_iov(bio, &bmd->iter, WRITE);
+
+		if (bmd->is_our_pages)
 			bio_for_each_segment_all(bvec, bio, i)
 				__free_page(bvec->bv_page);
 	}
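The old code folded page freeing into __bio_copy_iov() via do_free_page; with that parameter gone, copy-back and freeing become independent steps. Copy-back now happens only for a completed READ request with a live mm, and the direction passed is WRITE because completion writes the bio's pages out to the user iovec. The resulting logic, assembled from the lines above (declarations and surrounding code elided):

	/*
	 * if we're in a workqueue, the request is orphaned, so
	 * don't copy into a random user address space, just free.
	 */
	if (current->mm && bio_data_dir(bio) == READ)
		ret = __bio_copy_iov(bio, &bmd->iter, WRITE);

	if (bmd->is_our_pages)
		bio_for_each_segment_all(bvec, bio, i)
			__free_page(bvec->bv_page);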
@@ -1185,7 +1173,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	 */
 	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
 	    (map_data && map_data->from_user)) {
-		ret = __bio_copy_iov(bio, iter, 0, 1, 0);
+		ret = __bio_copy_iov(bio, iter, READ);
 		if (ret)
 			goto cleanup;
 	}
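Taken together, the three flag parameters of the old interface (to_user, from_user, do_free_page) collapse into a single direction argument, named from the iovec's point of view. The old and new call sites side by side; the comments are mine, not part of the patch:

	/* old: from_user=1 pulls user data into the bio's pages */
	ret = __bio_copy_iov(bio, iter, 0, 1, 0);

	/* new: READ means "read from the iovec into the bio" */
	ret = __bio_copy_iov(bio, iter, READ);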