mmc: block: workaround for timeout issue with some vendor devices

Commit 66a7393a3ba9685d1eddfbce72e3ef8f4848f19f ("mmc: block: ensure CMDQ
is empty before queuing cache flush") added a workaround for a particular
vendor's eMMC devices: wait for all outstanding requests to finish before
queuing the flush request. A detailed root cause analysis from the vendor
now shows that the original issue can occur only if a DCMD command is sent
to the device too quickly (within less than 6 microseconds) after completion
of a previous small-sector (less than 8 sectors) read operation. Hence this
change fine-tunes the previous workaround so that it has almost no impact on
storage benchmark performance numbers: the wait and a 6 microsecond delay
are now applied only when such small-sector reads are outstanding before a
DCMD request.

Change-Id: I1df1c5d7bbcd7b526236651077b7dade2626cb30
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
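
For context, the following is a minimal, self-contained user-space sketch of
the reworked quirk logic, not kernel code: the helpers note_rw_request() and
before_dcmd() are invented for illustration, the wait and delay primitives are
stand-ins for wait_event_interruptible() and udelay(), and only the quirk flag,
the delay constant, and the counter field mirror the diff below.

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#define MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD	(1 << 15)
#define MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD	6	/* microseconds */

struct cmdq_ctx {
	int active_reqs;			/* requests still in flight */
	int active_small_sector_read_reqs;	/* reads of fewer than 8 sectors */
};

/* Issue path: count small-sector reads while the quirk is set. */
static void note_rw_request(struct cmdq_ctx *ctx, unsigned int quirks,
			    unsigned int sectors, bool is_read)
{
	ctx->active_reqs++;
	if ((quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
	    sectors > 0 && sectors < 8 && is_read)
		ctx->active_small_sector_read_reqs++;
}

/* Before queuing a DCMD (flush/discard): drain the queue, then delay. */
static void before_dcmd(struct cmdq_ctx *ctx, unsigned int quirks)
{
	if (!(quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) ||
	    !ctx->active_small_sector_read_reqs)
		return;				/* fast path: no wait, no delay */

	while (ctx->active_reqs)		/* stand-in for the queue_empty_wq wait */
		ctx->active_reqs--;		/* pretend outstanding requests complete */

	ctx->active_small_sector_read_reqs = 0;	/* clear the counter now */
	usleep(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);	/* >= 6 us gap before the DCMD */
}

int main(void)
{
	struct cmdq_ctx ctx = { 0, 0 };
	unsigned int quirks = MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD;

	note_rw_request(&ctx, quirks, 4, true);	/* 4-sector read: counted */
	before_dcmd(&ctx, quirks);		/* waits, clears counter, delays 6 us */
	before_dcmd(&ctx, quirks);		/* counter is zero: no-op */
	printf("small-sector reads pending: %d\n",
	       ctx.active_small_sector_read_reqs);
	return 0;
}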

@@ -2963,6 +2963,15 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
	mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
	ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
	if (!ret && (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD)) {
		unsigned int sectors = blk_rq_sectors(req);
		if (((sectors > 0) && (sectors < 8))
		    && (rq_data_dir(req) == READ))
			host->cmdq_ctx.active_small_sector_read_reqs++;
	}
	return ret;
}
@@ -3541,6 +3550,8 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;
	struct mmc_host *host = card->host;
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	mmc_rpm_hold(card->host, &card->dev);
	mmc_claim_host(card->host);
@@ -3554,6 +3565,29 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
		goto switch_failure;
	}

	if ((cmd_flags & (REQ_FLUSH | REQ_DISCARD)) &&
	    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
	    ctx->active_small_sector_read_reqs) {
		ret = wait_event_interruptible(ctx->queue_empty_wq,
					       !ctx->active_reqs);
		if (ret) {
			pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n",
				mmc_hostname(host),
				__func__, ret);
			BUG_ON(1);
		}
		/* clear the counter now */
		ctx->active_small_sector_read_reqs = 0;
		/*
		 * If there were small sector (less than 8 sectors) read
		 * operations in progress then we have to wait for the
		 * outstanding requests to finish and should also have
		 * at least 6 microseconds delay before queuing the DCMD
		 * request.
		 */
		udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);
	}

	if (cmd_flags & REQ_DISCARD) {
		if (cmd_flags & REQ_SECURE &&
		    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
@@ -3561,19 +3595,6 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
		else
			ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		if (card->quirks &
				MMC_QUIRK_CMDQ_EMPTY_BEFORE_FLUSH) {
			ret = wait_event_interruptible(
				card->host->cmdq_ctx.queue_empty_wq,
				(!card->host->cmdq_ctx.active_reqs));
			if (ret) {
				pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n",
					mmc_hostname(card->host),
					__func__, ret);
				BUG_ON(1);
			}
		}
		ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
	} else {
		ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
@@ -4051,7 +4072,7 @@ static const struct mmc_fixup blk_fixups[] =
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_FLUSH),
		  add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD),
	/*
	 * Some Micron MMC cards needs longer data read timeout than

@@ -321,6 +321,9 @@ enum mmc_pon_type {
	MMC_LONG_PON = 1,
	MMC_SHRT_PON,
};

#define MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD	6	/* microseconds */

/*
 * MMC device
 */
@@ -370,8 +373,8 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_DATA_TIMEOUT	(1<<13)
#define MMC_QUIRK_CACHE_DISABLE		(1 << 14)	/* prevent cache enable */
/* Make sure CMDQ is empty before queuing cache flush */
#define MMC_QUIRK_CMDQ_EMPTY_BEFORE_FLUSH	(1 << 15)
/* Make sure CMDQ is empty before queuing DCMD */
#define MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD	(1 << 15)
	unsigned int		erase_size;	/* erase size in sectors */
	unsigned int		erase_shift;	/* if erase unit is power 2 */

@@ -261,6 +261,7 @@ struct mmc_cmdq_context_info {
	/* no free tag available */
	unsigned long		req_starved;
	wait_queue_head_t	queue_empty_wq;
	int active_small_sector_read_reqs;
};
/**