mmc: block: Add write packing control

The write packing control ensures that the latency of read requests is
not increased by long packed write commands.

The trigger for enabling the write packing is managing to pack several
write requests. The number of potential packed requests that will trigger
the packing can be configured via sysfs by writing the required value to:
/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
The trigger for disabling the write packing is fetching a read request.

Change-Id: I22e8ab04cd9aca220678784c39306068a0996790
Signed-off-by: Maya Erez <merez@codeaurora.org>
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
This commit is contained in:
Tatyana Brokhman 2012-10-07 09:52:16 +02:00 committed by Stephen Boyd
parent 312655328a
commit 91e1411ca9
5 changed files with 126 additions and 0 deletions

View File

@ -8,6 +8,23 @@ The following attributes are read/write.
force_ro Enforce read-only access even if write protect switch is off.
num_wr_reqs_to_start_packing This attribute is used to determine
the trigger for activating the write packing, in case the write
packing control feature is enabled.
When the MMC manages to reach a point where num_wr_reqs_to_start_packing
write requests could be packed, it enables the write packing feature.
This allows us to start the write packing only when it is beneficial
and has minimal effect on the read latency.
The number of potential packed requests that will trigger the packing
can be configured via sysfs by writing the required value to:
/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
The default value of num_wr_reqs_to_start_packing was determined by
running parallel lmdd write and lmdd read operations and calculating
the maximum number of packed write requests.
SD and MMC Device Attributes
============================

View File

@ -118,6 +118,7 @@ struct mmc_blk_data {
unsigned int part_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
struct device_attribute num_wr_reqs_to_start_packing;
int area_type;
};
@ -284,6 +285,38 @@ out:
return ret;
}
/*
 * sysfs 'show' handler for num_wr_reqs_to_start_packing: prints the
 * current write-packing trigger threshold of the block device's queue.
 * Returns the number of bytes written to @buf.
 */
static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	ssize_t len;

	len = snprintf(buf, PAGE_SIZE, "%d\n",
		       md->queue.num_wr_reqs_to_start_packing);
	mmc_blk_put(md);

	return len;
}
/*
 * sysfs 'store' handler for num_wr_reqs_to_start_packing: updates the
 * write-packing trigger threshold of the block device's queue.
 *
 * Negative values are silently ignored (threshold left unchanged);
 * non-numeric input is rejected with -EINVAL.
 * Returns @count on success.
 */
static ssize_t
num_wr_reqs_to_start_packing_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int value;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	/*
	 * Check the sscanf result: without this, a non-numeric input
	 * left 'value' uninitialized and it was then compared/stored,
	 * which is undefined behavior.
	 */
	if (sscanf(buf, "%d", &value) != 1) {
		mmc_blk_put(md);
		return -EINVAL;
	}

	if (value >= 0)
		md->queue.num_wr_reqs_to_start_packing = value;

	mmc_blk_put(md);
	return count;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@ -1413,6 +1446,47 @@ static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
return nr_segs;
}
/*
 * Decide whether write packing should currently be enabled for @mq.
 *
 * Policy: count consecutive write requests; once more than
 * mq->num_wr_reqs_to_start_packing potential packed writes have been
 * seen, enable packing. Fetching a read request resets the counter and
 * disables packing, keeping read latency low.
 *
 * @mq:  the MMC queue whose packing state is updated
 * @req: the request just fetched, or NULL when the queue is drained
 */
static void mmc_blk_write_packing_control(struct mmc_queue *mq,
					  struct request *req)
{
	struct mmc_host *host = mq->card->host;
	int data_dir;

	if (!(host->caps2 & MMC_CAP2_PACKED_WR))
		return;

	/*
	 * In case the packing control is not supported by the host, it should
	 * not have an effect on the write packing. Therefore we have to enable
	 * the write packing
	 */
	if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
		mq->wr_packing_enabled = true;
		return;
	}

	/*
	 * No request (queue drained) or a flush: decide based on the
	 * writes accumulated so far. The original '(!req || (req && ...))'
	 * carried a redundant second NULL test; '||' already guarantees
	 * req is non-NULL in the right-hand operand.
	 */
	if (!req || (req->cmd_flags & REQ_FLUSH)) {
		if (mq->num_of_potential_packed_wr_reqs >
				mq->num_wr_reqs_to_start_packing)
			mq->wr_packing_enabled = true;
		return;
	}

	data_dir = rq_data_dir(req);

	if (data_dir == READ) {
		/* A read arrived: reset the count and stop packing. */
		mq->num_of_potential_packed_wr_reqs = 0;
		mq->wr_packing_enabled = false;
		return;
	} else if (data_dir == WRITE) {
		mq->num_of_potential_packed_wr_reqs++;
	}

	if (mq->num_of_potential_packed_wr_reqs >
			mq->num_wr_reqs_to_start_packing)
		mq->wr_packing_enabled = true;
}
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
@ -1430,6 +1504,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
if (!(md->flags & MMC_BLK_PACKED_CMD))
goto no_packed;
if (!mq->wr_packing_enabled)
goto no_packed;
if ((rq_data_dir(cur) == WRITE) &&
mmc_host_packed_wr(card->host))
max_packed_rw = card->ext_csd.max_packed_writes;
@ -1498,6 +1575,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
if (phys_segments > max_phys_segs)
break;
if (rq_data_dir(next) == WRITE)
mq->num_of_potential_packed_wr_reqs++;
list_add_tail(&next->queuelist, &mqrq->packed->list);
cur = next;
reqs++;
@ -1922,6 +2001,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}
mmc_blk_write_packing_control(mq, req);
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
@ -2176,6 +2257,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
if (md) {
card = md->queue.card;
device_remove_file(disk_to_dev(md->disk),
&md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@ -2244,6 +2327,20 @@ static int mmc_add_disk(struct mmc_blk_data *md)
if (ret)
goto power_ro_lock_fail;
}
md->num_wr_reqs_to_start_packing.show =
num_wr_reqs_to_start_packing_show;
md->num_wr_reqs_to_start_packing.store =
num_wr_reqs_to_start_packing_store;
sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
md->num_wr_reqs_to_start_packing.attr.name =
"num_wr_reqs_to_start_packing";
md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk),
&md->num_wr_reqs_to_start_packing);
if (ret)
goto power_ro_lock_fail;
return ret;
power_ro_lock_fail:

View File

@ -22,6 +22,13 @@
#define MMC_QUEUE_BOUNCESZ 65536
/*
* Based on benchmark tests the default num of requests to trigger the write
* packing was determined, to keep the read latency as low as possible and
* manage to keep the high write throughput.
*/
#define DEFAULT_NUM_REQS_TO_START_PACK 17
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
@ -206,6 +213,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
mq->num_wr_reqs_to_start_packing = DEFAULT_NUM_REQS_TO_START_PACK;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

View File

@ -57,6 +57,9 @@ struct mmc_queue {
struct mmc_queue_req mqrq[2];
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
bool wr_packing_enabled;
int num_of_potential_packed_wr_reqs;
int num_wr_reqs_to_start_packing;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,

View File

@ -283,6 +283,7 @@ struct mmc_host {
MMC_CAP2_PACKED_WR)
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
#define MMC_CAP2_INIT_BKOPS (1 << 15) /* Need to set BKOPS_EN */
#define MMC_CAP2_PACKED_WR_CONTROL (1 << 16) /* Allow write packing control */
mmc_pm_flag_t pm_caps; /* supported pm features */