diff options
author | Jens Axboe <jens.axboe@oracle.com> | 2008-12-05 13:58:19 +0100 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-12-05 13:58:19 +0100 |
commit | 30e62ce1ce96ff01d4826b0e476941169c94041d (patch) | |
tree | 032fc462debbdf9084879bf0d3f56063de8f3b6d /fs/bio.c | |
parent | dc23b3285aaaa0dbd013bcfacad26f752e23927e (diff) |
bio: add experimental support for inlining a number of bio_vecs inside the bio
When we go and allocate a bio for IO, we actually do two allocations.
One for the bio itself, and one for the bi_io_vec that holds the
actual pages we are interested in.
This feature inlines a definable number of io vecs inside the bio
itself, so we eliminate the bio_vec array allocation for IOs up
to a certain size. It defaults to 4 vecs, which is typically 16k
of IO.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs/bio.c')
-rw-r--r-- | fs/bio.c | 33 |
1 files changed, 28 insertions, 5 deletions
@@ -28,6 +28,12 @@ #include <linux/blktrace_api.h> #include <scsi/sg.h> /* for struct sg_iovec */ +/* + * Test patch to inline a certain number of bi_io_vec's inside the bio + * itself, to shrink a bio data allocation from two mempool calls to one + */ +#define BIO_INLINE_VECS 4 + static mempool_t *bio_split_pool __read_mostly; /* @@ -231,7 +237,7 @@ void bio_free(struct bio *bio, struct bio_set *bs) { void *p; - if (bio->bi_io_vec) + if (bio_has_allocated_vec(bio)) bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); if (bio_integrity(bio)) @@ -257,7 +263,8 @@ static void bio_fs_destructor(struct bio *bio) static void bio_kmalloc_destructor(struct bio *bio) { - kfree(bio->bi_io_vec); + if (bio_has_allocated_vec(bio)) + kfree(bio->bi_io_vec); kfree(bio); } @@ -304,7 +311,15 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) if (likely(nr_iovecs)) { unsigned long uninitialized_var(idx); - bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); + if (nr_iovecs <= BIO_INLINE_VECS) { + idx = 0; + bvl = bio->bi_inline_vecs; + nr_iovecs = BIO_INLINE_VECS; + } else { + bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, + bs); + nr_iovecs = bvec_nr_vecs(idx); + } if (unlikely(!bvl)) { if (bs) mempool_free(bio, bs->bio_pool); @@ -314,7 +329,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) goto out; } bio->bi_flags |= idx << BIO_POOL_OFFSET; - bio->bi_max_vecs = bvec_nr_vecs(idx); + bio->bi_max_vecs = nr_iovecs; } bio->bi_io_vec = bvl; } @@ -1562,6 +1577,8 @@ static void __init biovec_init_slabs(void) static int __init init_bio(void) { + unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); + bio_slab_max = 2; bio_slab_nr = 0; bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); @@ -1571,7 +1588,13 @@ static int __init init_bio(void) bio_integrity_init_slab(); biovec_init_slabs(); - fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0); + if (back_pad) { + printk(KERN_INFO "bio: Using %d inlines, %u -> %u bytes\n", + BIO_INLINE_VECS, (int) sizeof(struct bio), + (int) sizeof(struct bio) + back_pad); + } + + fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, back_pad); if (!fs_bio_set) panic("bio: can't allocate bios\n");