dm snapshot: use dm-bufio prefetch
This patch modifies dm-snapshot so that it prefetches the buffers when
loading the exceptions.

The number of buffers read ahead is specified in the DM_PREFETCH_CHUNKS
macro. The current value for DM_PREFETCH_CHUNKS (12) was found to provide
the best performance on a single 15k SCSI spindle. In the future we may
modify this default or make it configurable.

Also, introduce the function dm_bufio_set_minimum_buffers to setup bufio's
number of internal buffers before freeing happens. dm-bufio may hold more
buffers if enough memory is available. There is no guarantee that the
specified number of buffers will be available - if you need a guarantee,
use the argument reserved_buffers for dm_bufio_client_create.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit 55b082e614 (parent 55494bf294)
3 changed files with 41 additions and 3 deletions
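The message above contrasts the new soft minimum with the hard reserved_buffers guarantee. As a rough illustration (not taken from the patch), a dm-bufio client might wire up both knobs as in the sketch below; the open_metadata_client() helper, the bdev and chunk_bytes parameters, and the trimmed error handling are assumptions, while the dm-bufio calls and DM_PREFETCH_CHUNKS come from the patch itself.

#include <linux/err.h>
#include "dm-bufio.h"	/* drivers/md local header, as used by dm-snap-persistent.c */

/*
 * Hypothetical helper, for illustration only.  reserved_buffers = 1 is a
 * hard guarantee carved out at create time; dm_bufio_set_minimum_buffers()
 * is merely a hint telling bufio not to shrink the cache below that count
 * when memory gets tight.  DM_PREFETCH_CHUNKS (12) is the macro this patch
 * adds to dm-snap-persistent.c and is assumed to be visible here.
 */
static struct dm_bufio_client *open_metadata_client(struct block_device *bdev,
						    unsigned chunk_bytes)
{
	struct dm_bufio_client *client;

	client = dm_bufio_client_create(bdev, chunk_bytes,
					1 /* reserved_buffers */,
					0 /* aux_size */, NULL, NULL);
	if (IS_ERR(client))
		return client;

	/* one buffer for the area being parsed plus the readahead window */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	return client;
}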
drivers/md/dm-bufio.c

@@ -104,6 +104,8 @@ struct dm_bufio_client {
 	struct list_head reserved_buffers;
 	unsigned need_reserved_buffers;
 
+	unsigned minimum_buffers;
+
 	struct hlist_head *cache_hash;
 	wait_queue_head_t free_buffer_wait;
 
@@ -861,8 +863,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 		buffers = dm_bufio_cache_size_per_client >>
 			  (c->sectors_per_block_bits + SECTOR_SHIFT);
 
-	if (buffers < DM_BUFIO_MIN_BUFFERS)
-		buffers = DM_BUFIO_MIN_BUFFERS;
+	if (buffers < c->minimum_buffers)
+		buffers = c->minimum_buffers;
 
 	*limit_buffers = buffers;
 	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
@@ -1372,6 +1374,12 @@ void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
 }
 EXPORT_SYMBOL(dm_bufio_forget);
 
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
+{
+	c->minimum_buffers = n;
+}
+EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
+
 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
 {
 	return c->block_size;
@@ -1568,6 +1576,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 	INIT_LIST_HEAD(&c->reserved_buffers);
 	c->need_reserved_buffers = reserved_buffers;
 
+	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
+
 	init_waitqueue_head(&c->free_buffer_wait);
 	c->async_write_error = 0;
 
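Before the header change, a quick numeric check of the __get_memory_limit() arithmetic above may help. The user-space sketch below assumes a 64 MiB per-client cache, 4 KiB blocks (sectors_per_block_bits = 3, SECTOR_SHIFT = 9) and a writeback percentage of 75; those constants are assumptions to be checked against dm-bufio.c, and only the shift plus the new per-client minimum_buffers floor mirror the patch.

#include <stdio.h>

/* Assumed defaults; verify against dm-bufio.c in this tree. */
#define SECTOR_SHIFT               9
#define DM_BUFIO_WRITEBACK_PERCENT 75

/* Mirrors the shift in __get_memory_limit(): cache bytes -> block-sized buffers. */
static unsigned long buffers_for(unsigned long long cache_bytes,
                                 unsigned sectors_per_block_bits,
                                 unsigned minimum_buffers)
{
	unsigned long long buffers =
		cache_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < minimum_buffers)	/* the patch: a per-client floor, not a global one */
		buffers = minimum_buffers;
	return (unsigned long)buffers;
}

int main(void)
{
	/* 64 MiB per-client cache, 4 KiB blocks, snapshot load asking for 1 + 12 buffers */
	unsigned long limit = buffers_for(64ULL << 20, 3, 1 + 12);
	unsigned long threshold = limit * DM_BUFIO_WRITEBACK_PERCENT / 100;

	/* prints: limit=16384 buffers, writeback threshold=12288 buffers */
	printf("limit=%lu buffers, writeback threshold=%lu buffers\n", limit, threshold);
	return 0;
}

The effect of the change shows up when memory is scarce: with a tiny per-client cache the old code would floor at the global DM_BUFIO_MIN_BUFFERS default, while the patched code keeps at least the 13 buffers the snapshot loader asks for below.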
drivers/md/dm-bufio.h

@@ -115,6 +115,11 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
  */
 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
 
+/*
+ * Set the minimum number of buffers before cleanup happens.
+ */
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+
 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
 sector_t dm_bufio_get_block_number(struct dm_buffer *b);
drivers/md/dm-snap-persistent.c

@@ -18,6 +18,8 @@
 #define DM_MSG_PREFIX "persistent snapshot"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
 
+#define DM_PREFETCH_CHUNKS		12
+
 /*-----------------------------------------------------------------
  * Persistent snapshots, by persistent we mean that the snapshot
  * will survive a reboot.
@@ -497,6 +499,7 @@ static int read_exceptions(struct pstore *ps,
 {
 	int r, full = 1;
 	struct dm_bufio_client *client;
+	chunk_t prefetch_area = 0;
 
 	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
 					ps->store->chunk_size << SECTOR_SHIFT,
@@ -505,6 +508,11 @@ static int read_exceptions(struct pstore *ps,
 	if (IS_ERR(client))
 		return PTR_ERR(client);
 
+	/*
+	 * Setup for one current buffer + desired readahead buffers.
+	 */
+	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);
+
 	/*
 	 * Keeping reading chunks and inserting exceptions until
 	 * we find a partially full area.
@@ -512,7 +520,22 @@ static int read_exceptions(struct pstore *ps,
 	for (ps->current_area = 0; full; ps->current_area++) {
 		struct dm_buffer *bp;
 		void *area;
-		chunk_t chunk = area_location(ps, ps->current_area);
+		chunk_t chunk;
+
+		if (unlikely(prefetch_area < ps->current_area))
+			prefetch_area = ps->current_area;
+
+		if (DM_PREFETCH_CHUNKS) do {
+			chunk_t pf_chunk = area_location(ps, prefetch_area);
+			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
+				break;
+			dm_bufio_prefetch(client, pf_chunk, 1);
+			prefetch_area++;
+			if (unlikely(!prefetch_area))
+				break;
+		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+
+		chunk = area_location(ps, ps->current_area);
 
 		area = dm_bufio_read(client, chunk, &bp);
 		if (unlikely(IS_ERR(area))) {
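To see what the new loop in read_exceptions() does over time, the following user-space model replays the prefetch_area bookkeeping with a toy area_location() mapping and a 20-chunk device; both of those are assumptions for illustration, and dm_bufio_prefetch() is replaced by a printf.

#include <stdio.h>

#define DM_PREFETCH_CHUNKS 12

typedef unsigned long long chunk_t;

/* Toy stand-in for area_location(): pretend metadata areas live at chunks 1, 2, 3, ... */
static chunk_t area_location(chunk_t area)
{
	return 1 + area;
}

int main(void)
{
	const chunk_t device_chunks = 20;	/* assumed COW device size, in chunks */
	chunk_t current_area, prefetch_area = 0;

	for (current_area = 0; current_area < 8; current_area++) {
		if (prefetch_area < current_area)
			prefetch_area = current_area;

		/* same window logic as the patch, with dm_bufio_prefetch() mocked out */
		do {
			chunk_t pf_chunk = area_location(prefetch_area);
			if (pf_chunk >= device_chunks)
				break;			/* never prefetch past the device */
			printf("area %llu: prefetch chunk %llu\n", current_area, pf_chunk);
			prefetch_area++;
			if (!prefetch_area)
				break;			/* guards against chunk_t wraparound */
		} while (prefetch_area <= current_area + DM_PREFETCH_CHUNKS);

		printf("area %llu: read chunk %llu\n", current_area, area_location(current_area));
	}
	return 0;
}

The first iteration fills the whole 13-chunk window; each later iteration tops it up by one chunk and stops at the end of the device, so the synchronous dm_bufio_read() that follows normally finds its data already in flight.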