[BLOCK] Don't pin lots of memory in mempools

Currently we scale the mempool sizes depending on memory installed
in the machine, except for the bio pool itself which sits at a fixed
256 entry pre-allocation.

There's really no point in "optimizing" this OOM path, we just need
enough preallocated to make progress. A single unit is enough, let's
scale it down to 2 just to be on the safe side.

This patch saves ~150kb of pinned kernel memory on a 32-bit box.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
Jens Axboe 2007-04-02 10:06:42 +02:00 committed by Jens Axboe
parent b9099ff63c
commit 5972511b77
6 changed files with 11 additions and 40 deletions

View file

@@ -867,7 +867,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad4; goto bad4;
} }
cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4); cc->bs = bioset_create(MIN_IOS, MIN_IOS);
if (!cc->bs) { if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset"; ti->error = "Cannot allocate crypt bioset";
goto bad_bs; goto bad_bs;

View file

@@ -60,7 +60,7 @@ static int resize_pool(unsigned int new_ios)
if (!_io_pool) if (!_io_pool)
return -ENOMEM; return -ENOMEM;
_bios = bioset_create(16, 16, 4); _bios = bioset_create(16, 16);
if (!_bios) { if (!_bios) {
mempool_destroy(_io_pool); mempool_destroy(_io_pool);
_io_pool = NULL; _io_pool = NULL;

View file

@@ -1012,7 +1012,7 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->tio_pool) if (!md->tio_pool)
goto bad3; goto bad3;
md->bs = bioset_create(16, 16, 4); md->bs = bioset_create(16, 16);
if (!md->bs) if (!md->bs)
goto bad_no_bioset; goto bad_no_bioset;

View file

@@ -31,7 +31,7 @@
#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE 32 #define SG_MEMPOOL_SIZE 2
struct scsi_host_sg_pool { struct scsi_host_sg_pool {
size_t size; size_t size;

View file

@@ -28,7 +28,7 @@
#include <linux/blktrace_api.h> #include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */ #include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 256 #define BIO_POOL_SIZE 2
static struct kmem_cache *bio_slab __read_mostly; static struct kmem_cache *bio_slab __read_mostly;
@@ -38,7 +38,7 @@ static struct kmem_cache *bio_slab __read_mostly;
* a small number of entries is fine, not going to be performance critical. * a small number of entries is fine, not going to be performance critical.
* basically we just need to survive * basically we just need to survive
*/ */
#define BIO_SPLIT_ENTRIES 8 #define BIO_SPLIT_ENTRIES 2
mempool_t *bio_split_pool __read_mostly; mempool_t *bio_split_pool __read_mostly;
struct biovec_slab { struct biovec_slab {
@@ -1120,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
* create memory pools for biovec's in a bio_set. * create memory pools for biovec's in a bio_set.
* use the global biovec slabs created for general use. * use the global biovec slabs created for general use.
*/ */
static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{ {
int i; int i;
@@ -1128,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
struct biovec_slab *bp = bvec_slabs + i; struct biovec_slab *bp = bvec_slabs + i;
mempool_t **bvp = bs->bvec_pools + i; mempool_t **bvp = bs->bvec_pools + i;
if (pool_entries > 1 && i >= scale)
pool_entries >>= 1;
*bvp = mempool_create_slab_pool(pool_entries, bp->slab); *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
if (!*bvp) if (!*bvp)
return -ENOMEM; return -ENOMEM;
@@ -1161,7 +1158,7 @@ void bioset_free(struct bio_set *bs)
kfree(bs); kfree(bs);
} }
struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{ {
struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL); struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
@@ -1172,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
if (!bs->bio_pool) if (!bs->bio_pool)
goto bad; goto bad;
if (!biovec_create_pools(bs, bvec_pool_size, scale)) if (!biovec_create_pools(bs, bvec_pool_size))
return bs; return bs;
bad: bad:
@@ -1196,38 +1193,12 @@ static void __init biovec_init_slabs(void)
static int __init init_bio(void) static int __init init_bio(void)
{ {
int megabytes, bvec_pool_entries;
int scale = BIOVEC_NR_POOLS;
bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
biovec_init_slabs(); biovec_init_slabs();
megabytes = nr_free_pages() >> (20 - PAGE_SHIFT); fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
/*
* find out where to start scaling
*/
if (megabytes <= 16)
scale = 0;
else if (megabytes <= 32)
scale = 1;
else if (megabytes <= 64)
scale = 2;
else if (megabytes <= 96)
scale = 3;
else if (megabytes <= 128)
scale = 4;
/*
* Limit number of entries reserved -- mempools are only used when
* the system is completely unable to allocate memory, so we only
* need enough to make progress.
*/
bvec_pool_entries = 1 + scale;
fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
if (!fs_bio_set) if (!fs_bio_set)
panic("bio: can't allocate bios\n"); panic("bio: can't allocate bios\n");

View file

@@ -276,7 +276,7 @@ extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
extern mempool_t *bio_split_pool; extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio); extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(int, int, int); extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *); extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int); extern struct bio *bio_alloc(gfp_t, int);