author    Stephen Rothwell <sfr@canb.auug.org.au>  2009-04-21 11:41:49 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2009-04-21 11:41:49 +1000
commit    839684015dd5c046a5bf5cd21930ff675fe3e4e1 (patch)
tree      c7b67bed5fbed1bf879f8e6e1f2b7723dfc4a3f5
parent    a162dbdd0b8759a6d6c3250c798153d332d8eef8 (diff)
parent    a95320f3ede3f028837a621715cd352e83ffe7c2 (diff)

Merge commit 'block/for-next'
-rw-r--r--  block/blk-settings.c   |  20
-rw-r--r--  block/blk-timeout.c    |   6
-rw-r--r--  block/genhd.c          |  12
-rw-r--r--  block/scsi_ioctl.c     |  13
-rw-r--r--  drivers/block/loop.c   |  26
-rw-r--r--  fs/bio.c               | 124
-rw-r--r--  include/linux/bio.h    |   3
-rw-r--r--  include/linux/genhd.h  |   1
-rw-r--r--  include/linux/loop.h   |   3
-rw-r--r--  lib/scatterlist.c      |   9
10 files changed, 111 insertions(+), 106 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 69c42adde52b..57af728d94bb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -156,26 +156,28 @@ EXPORT_SYMBOL(blk_queue_make_request);
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @dma_addr: bus address limit
+ * @q: the request queue for the device
+ * @dma_mask: the maximum address the device can handle
*
* Description:
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @dma_addr.
+ * buffers for doing I/O to pages residing above @dma_mask.
**/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
- unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
+ unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
int dma = 0;
q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
- /* Assume anything <= 4GB can be handled by IOMMU.
- Actually some IOMMUs can handle everything, but I don't
- know of a way to test this here. */
- if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ /*
+ * Assume anything <= 4GB can be handled by IOMMU. Actually
+ * some IOMMUs can handle everything, but I don't know of a
+ * way to test this here.
+ */
+ if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
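
As context for the renamed @dma_mask parameter: a minimal sketch of how a low-level driver might set its bounce limit from its probe path. The example_set_bounce() helper and its dma32_only parameter are illustrative, not part of this patch; blk_queue_bounce_limit(), DMA_BIT_MASK() and BLK_BOUNCE_ANY are the real interfaces of this era.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe-path helper: restrict direct I/O to the low 4GB
 * when the device can only address 32 bits, so the block layer bounces
 * any page above that mask. */
static void example_set_bounce(struct request_queue *q, bool dma32_only)
{
	if (dma32_only)
		blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
	else
		blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
}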
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bbbdc4b8ccf2..8f570c4c80ee 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -211,6 +211,12 @@ void blk_abort_queue(struct request_queue *q)
struct request *rq, *tmp;
LIST_HEAD(list);
+ /*
+ * Not a request based block device, nothing to abort
+ */
+ if (!q->request_fn)
+ return;
+
spin_lock_irqsave(q->queue_lock, flags);
elv_abort_queue(q);
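
The new request_fn check matters because bio-based drivers (DM, loop, ...) never install one. Roughly, the two queue setup paths look like the sketch below; my_request_fn, my_make_request and my_lock are placeholders, while the blk_* calls are the real 2009-era API.

#include <linux/blkdev.h>

static void my_request_fn(struct request_queue *q);
static int my_make_request(struct request_queue *q, struct bio *bio);
static DEFINE_SPINLOCK(my_lock);

static void example_setup(void)
{
	/* Request-based: request_fn is set, blk_abort_queue() applies. */
	struct request_queue *rq_q = blk_init_queue(my_request_fn, &my_lock);

	/* Bio-based: no request_fn, so with the guard above
	 * blk_abort_queue() now returns early for such queues. */
	struct request_queue *bio_q = blk_alloc_queue(GFP_KERNEL);
	blk_queue_make_request(bio_q, my_make_request);
}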
diff --git a/block/genhd.c b/block/genhd.c
index a9ec910974c1..1a4916e01732 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -98,7 +98,7 @@ void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
if (flags & DISK_PITER_REVERSE)
piter->idx = ptbl->len - 1;
- else if (flags & DISK_PITER_INCL_PART0)
+ else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
piter->idx = 0;
else
piter->idx = 1;
@@ -134,7 +134,8 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
/* determine iteration parameters */
if (piter->flags & DISK_PITER_REVERSE) {
inc = -1;
- if (piter->flags & DISK_PITER_INCL_PART0)
+ if (piter->flags & (DISK_PITER_INCL_PART0 |
+ DISK_PITER_INCL_EMPTY_PART0))
end = -1;
else
end = 0;
@@ -150,7 +151,10 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
- if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
+ if (!part->nr_sects &&
+ !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+ !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+ piter->idx == 0))
continue;
get_device(part_to_dev(part));
@@ -1011,7 +1015,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
"\n\n");
*/
- disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
+ disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
while ((hd = disk_part_iter_next(&piter))) {
cpu = part_stat_lock();
part_round_stats(cpu, hd);
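
With the new flag, a walker that wants every partition plus the whole-disk part0 regardless of its size looks like this sketch; example_walk_parts() is illustrative, while the disk_part_iter_* API and the hd_struct fields are real.

#include <linux/genhd.h>
#include <linux/kernel.h>

/* Iterate all partitions of @disk, part0 included even when empty,
 * matching what diskstats_show() does after this change. */
static void example_walk_parts(struct gendisk *disk)
{
	struct disk_part_iter piter;
	struct hd_struct *part;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
	while ((part = disk_part_iter_next(&piter)))
		pr_info("part%d: %llu sectors\n", part->partno,
			(unsigned long long)part->nr_sects);
	disk_part_iter_exit(&piter);
}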
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 84b7f8709f41..82a0ca2f6729 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -290,6 +290,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->iovec_count) {
const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+ size_t iov_data_len;
struct sg_iovec *iov;
iov = kmalloc(size, GFP_KERNEL);
@@ -304,8 +305,18 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
goto out;
}
+ /* SG_IO howto says that the shorter of the two wins */
+ iov_data_len = iov_length((struct iovec *)iov,
+ hdr->iovec_count);
+ if (hdr->dxfer_len < iov_data_len) {
+ hdr->iovec_count = iov_shorten((struct iovec *)iov,
+ hdr->iovec_count,
+ hdr->dxfer_len);
+ iov_data_len = hdr->dxfer_len;
+ }
+
ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
- hdr->dxfer_len, GFP_KERNEL);
+ iov_data_len, GFP_KERNEL);
kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
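
The SG_IO rule being implemented is that the effective transfer length is the shorter of hdr->dxfer_len and the total iovec length. A hedged sketch of that clamping step in isolation: example_clamp_iov() is illustrative, while iov_length() and iov_shorten() are the kernel helpers the hunk above actually uses.

#include <linux/uio.h>	/* iov_length() */
#include <linux/fs.h>	/* iov_shorten() */

/* Clamp @iov so it describes at most @dxfer_len bytes; returns the
 * length that should actually be mapped into the request. */
static size_t example_clamp_iov(struct iovec *iov, unsigned long *nr_segs,
				size_t dxfer_len)
{
	size_t total = iov_length(iov, *nr_segs);

	if (dxfer_len < total) {
		*nr_segs = iov_shorten(iov, *nr_segs, dxfer_len);
		total = dxfer_len;
	}
	return total;
}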
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae80825899..9ca4bb014657 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
+ bio_list_add(&lo->lo_bio_list, bio);
}
/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
- struct bio *bio;
-
- if ((bio = lo->lo_bio)) {
- if (bio == lo->lo_biotail)
- lo->lo_biotail = NULL;
- lo->lo_bio = bio->bi_next;
- bio->bi_next = NULL;
- }
-
- return bio;
+ return bio_list_pop(&lo->lo_bio_list);
}
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
set_user_nice(current, -20);
- while (!kthread_should_stop() || lo->lo_bio) {
+ while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
wait_event_interruptible(lo->lo_event,
- lo->lo_bio || kthread_should_stop());
+ !bio_list_empty(&lo->lo_bio_list) ||
+ kthread_should_stop());
- if (!lo->lo_bio)
+ if (bio_list_empty(&lo->lo_bio_list))
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
@@ -841,7 +829,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- lo->lo_bio = lo->lo_biotail = NULL;
+ bio_list_init(&lo->lo_bio_list);
/*
* set queue make_request_fn, and add limits based on lower level
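
The conversion above replaces loop's open-coded head/tail pair with the generic bio_list helpers. A minimal sketch of the same producer/consumer pattern; the example_* names and the file-scope list are illustrative.

#include <linux/bio.h>

static struct bio_list example_list;

static void example_init(void)
{
	bio_list_init(&example_list);		/* head = tail = NULL */
}

static void example_queue(struct bio *bio)
{
	bio_list_add(&example_list, bio);	/* O(1) append at tail */
}

static struct bio *example_dequeue(void)
{
	if (bio_list_empty(&example_list))
		return NULL;
	return bio_list_pop(&example_list);	/* detach from head */
}

bio_list does no locking of its own; in loop.c these calls stay serialized by lo->lo_lock, exactly as the old head/tail manipulation was.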
diff --git a/fs/bio.c b/fs/bio.c
index cd42bb882f30..7bbc98f0eda1 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -175,14 +175,6 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
struct bio_vec *bvl;
/*
- * If 'bs' is given, lookup the pool and do the mempool alloc.
- * If not, this is a bio_kmalloc() allocation and just do a
- * kzalloc() for the exact number of vecs right away.
- */
- if (!bs)
- bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
-
- /*
* see comment near bvec_array define!
*/
switch (nr) {
@@ -260,21 +252,6 @@ void bio_free(struct bio *bio, struct bio_set *bs)
mempool_free(p, bs->bio_pool);
}
-/*
- * default destructor for a bio allocated with bio_alloc_bioset()
- */
-static void bio_fs_destructor(struct bio *bio)
-{
- bio_free(bio, fs_bio_set);
-}
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
- if (bio_has_allocated_vec(bio))
- kfree(bio->bi_io_vec);
- kfree(bio);
-}
-
void bio_init(struct bio *bio)
{
memset(bio, 0, sizeof(*bio));
@@ -301,21 +278,15 @@ void bio_init(struct bio *bio)
**/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
+ unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
- struct bio *bio = NULL;
- unsigned long idx = 0;
- void *p = NULL;
-
- if (bs) {
- p = mempool_alloc(bs->bio_pool, gfp_mask);
- if (!p)
- goto err;
- bio = p + bs->front_pad;
- } else {
- bio = kmalloc(sizeof(*bio), gfp_mask);
- if (!bio)
- goto err;
- }
+ struct bio *bio;
+ void *p;
+
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+ if (unlikely(!p))
+ return NULL;
+ bio = p + bs->front_pad;
bio_init(bio);
@@ -332,22 +303,50 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
nr_iovecs = bvec_nr_vecs(idx);
}
+out_set:
bio->bi_flags |= idx << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
-out_set:
bio->bi_io_vec = bvl;
-
return bio;
err_free:
- if (bs)
- mempool_free(p, bs->bio_pool);
- else
- kfree(bio);
-err:
+ mempool_free(p, bs->bio_pool);
return NULL;
}
+static void bio_fs_destructor(struct bio *bio)
+{
+ bio_free(bio, fs_bio_set);
+}
+
+/**
+ * bio_alloc - allocate a new bio, memory pool backed
+ * @gfp_mask: allocation mask to use
+ * @nr_iovecs: number of iovecs
+ *
+ * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
+ * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+{
+ struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+
+ if (bio)
+ bio->bi_destructor = bio_fs_destructor;
+
+ return bio;
+}
+
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+ if (bio_integrity(bio))
+ bio_integrity_free(bio);
+ kfree(bio);
+}
+
/**
* bio_alloc - allocate a bio for I/O
* @gfp_mask: the GFP_ mask given to the slab allocator
@@ -366,29 +365,20 @@ err:
* do so can cause livelocks under memory pressure.
*
**/
-struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
-{
- struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
- if (bio)
- bio->bi_destructor = bio_fs_destructor;
-
- return bio;
-}
-
-/*
- * Like bio_alloc(), but doesn't use a mempool backing. This means that
- * it CAN fail, but while bio_alloc() can only be used for allocations
- * that have a short (finite) life span, bio_kmalloc() should be used
- * for more permanent bio allocations (like allocating some bio's for
- * initalization or setup purposes).
- */
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
- struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+ struct bio *bio;
- if (bio)
- bio->bi_destructor = bio_kmalloc_destructor;
+ bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
+ gfp_mask);
+ if (unlikely(!bio))
+ return NULL;
+
+ bio_init(bio);
+ bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
+ bio->bi_max_vecs = nr_iovecs;
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_destructor = bio_kmalloc_destructor;
return bio;
}
@@ -832,7 +822,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
- bio = bio_alloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
goto out_bmd;
@@ -956,7 +946,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
if (!nr_pages)
return ERR_PTR(-EINVAL);
- bio = bio_alloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);
@@ -1140,7 +1130,7 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
int offset, i;
struct bio *bio;
- bio = bio_alloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);
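
After this rework the two allocators split cleanly: bio_alloc() stays mempool backed (it cannot fail when __GFP_WAIT is set, so callers must keep such bios short-lived), while bio_kmalloc() allocates the bio and its inline vecs in one kmalloc() and can return NULL, which is why the map/copy paths above now use it and check for failure. A rough usage sketch; example_get_bio() is illustrative.

#include <linux/bio.h>

static struct bio *example_get_bio(gfp_t gfp, int nr_vecs, bool short_lived)
{
	/* Short-lived I/O: mempool guarantees forward progress. */
	if (short_lived)
		return bio_alloc(gfp, nr_vecs);

	/* Longer-lived or setup-time allocations: plain kmalloc,
	 * may fail, never ties up the shared mempool. */
	return bio_kmalloc(gfp, nr_vecs);
}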
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b89cf2d82898..f37ca8c726ba 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -132,6 +132,7 @@ struct bio {
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
+#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
@@ -505,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
}
/*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
* A bio_list anchors a singly-linked list of bios chained through the bi_next
* member of the bio. The bio_list also caches the last list member to allow
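
For reference, the anchor this comment describes is essentially just a head pointer plus a cached tail, along these lines:

/* Bios chain through bio->bi_next; the cached tail makes appends O(1). */
struct bio_list {
	struct bio *head;	/* first bio, NULL when the list is empty */
	struct bio *tail;	/* last bio, only valid when head != NULL */
};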
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 634c53028fb8..a1a28caed23d 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -214,6 +214,7 @@ static inline void disk_put_part(struct hd_struct *part)
#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
+#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
struct disk_part_iter {
struct gendisk *disk;
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40725447f5e0..66c194e2d9b9 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
gfp_t old_gfp_mask;
spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
+ struct bio_list lo_bio_list;
int lo_state;
struct mutex lo_ctl_mutex;
struct task_struct *lo_thread;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b7b449dafbe5..a295e404e908 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -347,9 +347,12 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
sg_miter_stop(miter);
/* get to the next sg if necessary. __offset is adjusted by stop */
- if (miter->__offset == miter->__sg->length && --miter->__nents) {
- miter->__sg = sg_next(miter->__sg);
- miter->__offset = 0;
+ while (miter->__offset == miter->__sg->length) {
+ if (--miter->__nents) {
+ miter->__sg = sg_next(miter->__sg);
+ miter->__offset = 0;
+ } else
+ return false;
}
/* map the next page */
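
The while loop above lets sg_miter_next() advance past several fully-consumed (e.g. zero-length) entries in a row and return false cleanly once the table is exhausted, where the old single if could only step over one. Typical iterator usage, for context; example_copy_out() is illustrative, while sg_miter_start/next/stop and SG_MITER_ATOMIC are the real API.

#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Copy up to @len bytes out of an sg table into @buf. */
static size_t example_copy_out(struct scatterlist *sgl, unsigned int nents,
			       void *buf, size_t len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (copied < len && sg_miter_next(&miter)) {
		size_t n = min(miter.length, len - copied);

		memcpy(buf + copied, miter.addr, n);
		copied += n;
	}
	sg_miter_stop(&miter);	/* unmaps the page, adjusts __offset */
	return copied;
}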