author    | Jens Axboe <jens.axboe@oracle.com> | 2008-12-23 12:42:54 +0100
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-12-29 08:29:50 +0100
commit    | 392ddc32982a5c661dd90dd49a3cb37f1c68b782 (patch)
tree      | 614b8e857a70ce479bcbbf24af66a56b7723efc8 /fs/bio.c
parent    | bb799ca0202a360fa74d5f17039b9100caebdde7 (diff)
download  | linux-392ddc32982a5c661dd90dd49a3cb37f1c68b782.tar.bz2
bio: add support for inlining a number of bio_vecs inside the bio
When we go and allocate a bio for IO, we actually do two allocations:
one for the bio itself, and one for the bi_io_vec array that holds the
actual pages we are interested in.
This feature inlines a definable number of io vecs inside the bio
itself, so we eliminate the bio_vec array allocation for IOs up
to a certain size. It defaults to 4 vecs, which is typically 16k
of IO.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
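
To illustrate the shape of the change, here is a minimal userspace sketch of the same idea, independent of the kernel internals: a small, fixed number of vecs is stored directly behind the request header, the io_vec pointer targets either that inline storage or a separately allocated array, and the free path checks which case applies. All names below (toy_bio, toy_vec, TOY_INLINE_VECS) are illustrative stand-ins, not the kernel's own identifiers; the real patch uses struct bio, bi_inline_vecs, bio_has_allocated_vec() and the bio_set mempools shown in the diff.

#include <stdio.h>
#include <stdlib.h>

#define TOY_INLINE_VECS 4	/* mirrors the BIO_INLINE_VECS default of 4 */

struct toy_vec {
	void	*page;
	size_t	len;
};

struct toy_bio {
	unsigned int	max_vecs;
	struct toy_vec	*io_vec;	/* points at inline_vecs or a separate array */
	struct toy_vec	inline_vecs[TOY_INLINE_VECS];	/* trailing inline storage */
};

static struct toy_bio *toy_bio_alloc(unsigned int nr_vecs)
{
	struct toy_bio *bio = calloc(1, sizeof(*bio));

	if (!bio)
		return NULL;

	if (nr_vecs <= TOY_INLINE_VECS) {
		/* small IO: reuse the storage inside the bio, one allocation total */
		bio->io_vec = bio->inline_vecs;
		bio->max_vecs = TOY_INLINE_VECS;
	} else {
		/* large IO: fall back to a second allocation for the vec array */
		bio->io_vec = calloc(nr_vecs, sizeof(*bio->io_vec));
		if (!bio->io_vec) {
			free(bio);
			return NULL;
		}
		bio->max_vecs = nr_vecs;
	}
	return bio;
}

static void toy_bio_free(struct toy_bio *bio)
{
	/* only free the vec array if it was allocated separately */
	if (bio->io_vec != bio->inline_vecs)
		free(bio->io_vec);
	free(bio);
}

int main(void)
{
	struct toy_bio *small = toy_bio_alloc(3);	/* fits in the inline vecs */
	struct toy_bio *large = toy_bio_alloc(32);	/* needs a separate array */

	if (!small || !large)
		return 1;

	printf("small: %u vecs, inline=%d\n", small->max_vecs,
	       small->io_vec == small->inline_vecs);
	printf("large: %u vecs, inline=%d\n", large->max_vecs,
	       large->io_vec == large->inline_vecs);

	toy_bio_free(small);
	toy_bio_free(large);
	return 0;
}

The free-path check mirrors what bio_has_allocated_vec() does in the patch: comparing the vec pointer against the inline storage tells you whether a second allocation ever happened.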
Diffstat (limited to 'fs/bio.c')
-rw-r--r-- | fs/bio.c | 27
1 file changed, 22 insertions(+), 5 deletions(-)
@@ -31,6 +31,12 @@ DEFINE_TRACE(block_split);
 
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS		4
+
 static mempool_t *bio_split_pool __read_mostly;
 
 /*
@@ -241,7 +247,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 {
 	void *p;
 
-	if (bio->bi_io_vec)
+	if (bio_has_allocated_vec(bio))
 		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 	if (bio_integrity(bio))
@@ -267,7 +273,8 @@ static void bio_fs_destructor(struct bio *bio)
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
-	kfree(bio->bi_io_vec);
+	if (bio_has_allocated_vec(bio))
+		kfree(bio->bi_io_vec);
 	kfree(bio);
 }
 
@@ -314,7 +321,16 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	if (likely(nr_iovecs)) {
 		unsigned long uninitialized_var(idx);
 
-		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+		if (nr_iovecs <= BIO_INLINE_VECS) {
+			idx = 0;
+			bvl = bio->bi_inline_vecs;
+			nr_iovecs = BIO_INLINE_VECS;
+			memset(bvl, 0, BIO_INLINE_VECS * sizeof(*bvl));
+		} else {
+			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
+						bs);
+			nr_iovecs = bvec_nr_vecs(idx);
+		}
 		if (unlikely(!bvl)) {
 			if (bs)
 				mempool_free(bio, bs->bio_pool);
@@ -324,7 +340,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 			goto out;
 		}
 		bio->bi_flags |= idx << BIO_POOL_OFFSET;
-		bio->bi_max_vecs = bvec_nr_vecs(idx);
+		bio->bi_max_vecs = nr_iovecs;
 	}
 	bio->bi_io_vec = bvl;
 }
@@ -1525,6 +1541,7 @@ void bioset_free(struct bio_set *bs)
  */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
+	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
 	struct bio_set *bs;
 
 	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
@@ -1533,7 +1550,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 
 	bs->front_pad = front_pad;
 
-	bs->bio_slab = bio_find_or_create_slab(front_pad);
+	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
 	if (!bs->bio_slab) {
 		kfree(bs);
 		return NULL;
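
As a rough sanity check on the sizes involved, here is a sketch of the arithmetic behind the back_pad addition and the "4 vecs is typically 16k of IO" claim, assuming 4 KiB pages and a 16-byte struct bio_vec (typical on 64-bit, but architecture- and config-dependent):

#include <stdio.h>

int main(void)
{
	/* assumed, not measured, values */
	unsigned long page_size   = 4096;	/* typical 4 KiB page */
	unsigned long vec_size    = 16;		/* struct bio_vec: page pointer + len + offset */
	unsigned long inline_vecs = 4;		/* the BIO_INLINE_VECS default */

	/* extra per-bio slab space that bioset_create() now requests as back_pad */
	printf("back_pad: %lu bytes\n", inline_vecs * vec_size);

	/* largest IO that still avoids the second (bvec) allocation */
	printf("inline-covered IO: %lu KiB\n", inline_vecs * page_size / 1024);
	return 0;
}

Under those assumptions the inline vecs cost 64 bytes of slab space per bio and cover IOs up to 16 KiB, which is where the 16k figure in the commit message comes from.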