diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 7dde34fdcd9f..6b712dd435ed 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -25,14 +25,14 @@ The following attributes are read/write.
 	running parallel lmdd write and lmdd read operations and calculating
 	the max number of packed writes requests.
 
-	min_sectors_to_check_bkops_status	This attribute is used to
-	determine whether the status bit that indicates the need for BKOPS
-	should be checked. The value is stored in this attribute represents
-	the minimum number of sectors that needs to be changed in the device
-	(written or discarded) in order to require the status-bit of BKOPS
-	to be checked. The value can modified via sysfs by writing the
-	required value to:
-	/sys/block/<block_dev_name>/min_sectors_to_check_bkops_status
+	bkops_check_threshold	This attribute is used to determine whether
+	the status bit that indicates the need for BKOPS should be checked.
+	The value should be given as a percentage of the card size.
+	This value is used to calculate the minimum number of sectors that
+	need to be changed in the device (written or discarded) in order to
+	require the status-bit of BKOPS to be checked.
+	The value can be modified via sysfs by writing the required value to:
+	/sys/block/<block_dev_name>/bkops_check_threshold
 
 SD and MMC Device Attributes
 ============================
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f398a7855be9..039c8ae2a4ab 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -121,7 +121,7 @@ struct mmc_blk_data {
 	struct device_attribute force_ro;
 	struct device_attribute power_ro_lock;
 	struct device_attribute num_wr_reqs_to_start_packing;
-	struct device_attribute min_sectors_to_check_bkops_status;
+	struct device_attribute bkops_check_threshold;
 	int	area_type;
 };
 
@@ -308,43 +308,60 @@ num_wr_reqs_to_start_packing_store(struct device *dev,
 }
 
 static ssize_t
-min_sectors_to_check_bkops_status_show(struct device *dev,
+bkops_check_threshold_show(struct device *dev,
 				       struct device_attribute *attr, char *buf)
 {
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	unsigned int min_sectors_to_check_bkops_status;
 	struct mmc_card *card = md->queue.card;
 	int ret;
 
 	if (!card)
-		return -EINVAL;
-
-	min_sectors_to_check_bkops_status =
-		card->bkops_info.min_sectors_to_queue_delayed_work;
-
-	ret = snprintf(buf, PAGE_SIZE, "%d\n",
-		       min_sectors_to_check_bkops_status);
+		ret = -EINVAL;
+	else
+		ret = snprintf(buf, PAGE_SIZE, "%d\n",
+			card->bkops_info.size_percentage_to_queue_delayed_work);
 
 	mmc_blk_put(md);
 	return ret;
 }
 
 static ssize_t
-min_sectors_to_check_bkops_status_store(struct device *dev,
+bkops_check_threshold_store(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
 {
 	int value;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 	struct mmc_card *card = md->queue.card;
+	unsigned int card_size;
+	int ret = count;
 
-	if (!card)
-		return -EINVAL;
+	if (!card) {
+		ret = -EINVAL;
+		goto exit;
+	}
 
 	sscanf(buf, "%d", &value);
-	if (value >= 0)
-		card->bkops_info.min_sectors_to_queue_delayed_work = value;
+	if ((value <= 0) || (value >= 100)) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	card_size = (unsigned int)get_capacity(md->disk);
+	if (card_size <= 0) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	card->bkops_info.size_percentage_to_queue_delayed_work = value;
+	card->bkops_info.min_sectors_to_queue_delayed_work =
+		(card_size * value) / 100;
+
+	pr_debug("%s: size_percentage = %d, min_sectors = %d\n",
+		 mmc_hostname(card->host),
+		 card->bkops_info.size_percentage_to_queue_delayed_work,
+		 card->bkops_info.min_sectors_to_queue_delayed_work);
+
+exit:
 	mmc_blk_put(md);
-	return count;
+	return ret;
 }
@@ -2093,6 +2110,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 {
 	struct mmc_blk_data *md;
 	int devidx, ret;
+	unsigned int percentage =
+		BKOPS_SIZE_PERCENTAGE_TO_QUEUE_DELAYED_WORK;
 
 	devidx = find_first_zero_bit(dev_use, max_devices);
 	if (devidx >= max_devices)
@@ -2170,6 +2189,10 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 	blk_queue_logical_block_size(md->queue.queue, 512);
 	set_capacity(md->disk, size);
 
+	card->bkops_info.size_percentage_to_queue_delayed_work = percentage;
+	card->bkops_info.min_sectors_to_queue_delayed_work =
+		((unsigned int)size * percentage) / 100;
+
 	if (mmc_host_cmd23(card->host)) {
 		if (mmc_card_mmc(card) ||
 		    (mmc_card_sd(card) &&
@@ -2362,22 +2385,19 @@ static int mmc_add_disk(struct mmc_blk_data *md)
 	if (ret)
 		goto num_wr_reqs_to_start_packing_fail;
 
-	md->min_sectors_to_check_bkops_status.show =
-		min_sectors_to_check_bkops_status_show;
-	md->min_sectors_to_check_bkops_status.store =
-		min_sectors_to_check_bkops_status_store;
-	sysfs_attr_init(&md->min_sectors_to_check_bkops_status.attr);
-	md->min_sectors_to_check_bkops_status.attr.name =
-		"min_sectors_to_check_bkops_status";
-	md->min_sectors_to_check_bkops_status.attr.mode = S_IRUGO | S_IWUSR;
+	md->bkops_check_threshold.show = bkops_check_threshold_show;
+	md->bkops_check_threshold.store = bkops_check_threshold_store;
+	sysfs_attr_init(&md->bkops_check_threshold.attr);
+	md->bkops_check_threshold.attr.name = "bkops_check_threshold";
+	md->bkops_check_threshold.attr.mode = S_IRUGO | S_IWUSR;
 	ret = device_create_file(disk_to_dev(md->disk),
-				 &md->min_sectors_to_check_bkops_status);
+				 &md->bkops_check_threshold);
 	if (ret)
-		goto min_sectors_to_check_bkops_status_fails;
+		goto bkops_check_threshold_fails;
 
 	return ret;
 
-min_sectors_to_check_bkops_status_fails:
+bkops_check_threshold_fails:
 	device_remove_file(disk_to_dev(md->disk),
 		&md->num_wr_reqs_to_start_packing);
 num_wr_reqs_to_start_packing_fail:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index e53ad3d4fdad..d8d18cefae2b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -297,8 +297,6 @@ void mmc_start_delayed_bkops(struct mmc_card *card)
 	pr_debug("%s: %s: queueing delayed_bkops_work\n",
 		 mmc_hostname(card->host), __func__);
 
-	card->bkops_info.sectors_changed = 0;
-
 	/*
 	 * cancel_delayed_bkops_work will prevent a race condition between
 	 * fetching a request by the mmcqd and the delayed work, in case
@@ -448,6 +446,7 @@ void mmc_bkops_completion_polling(struct work_struct *work)
 			pr_debug("%s: %s: completed BKOPs, exit polling\n",
 				 mmc_hostname(card->host), __func__);
 			mmc_card_clr_doing_bkops(card);
+			card->bkops_info.sectors_changed = 0;
 			goto out;
 		}
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 84e90feeae7e..066bbd88dc33 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1334,9 +1334,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 			if (card->bkops_info.host_delay_ms)
 				card->bkops_info.delay_ms =
 					card->bkops_info.host_delay_ms;
-
-			card->bkops_info.min_sectors_to_queue_delayed_work =
-				BKOPS_MIN_SECTORS_TO_QUEUE_DELAYED_WORK;
 		}
 	}
 
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 8807ef1e099f..e5927095dd8c 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -236,6 +236,12 @@ struct mmc_part {
  * @host_delay_ms:	The host controller time to start bkops
  * @delay_ms:	The time to start the BKOPS
  *        delayed work once MMC thread is idle
+ * @min_sectors_to_queue_delayed_work: the number of changed
+ *        sectors (written or discarded) that should trigger a
+ *        check for BKOPS need
+ * @size_percentage_to_queue_delayed_work: the same threshold,
+ *        expressed as a percentage of the card size, used to
+ *        derive @min_sectors_to_queue_delayed_work
  * @poll_for_completion:	Poll on BKOPS completion
  * @cancel_delayed_work: A flag to indicate if the delayed work
  *        should be cancelled
@@ -247,6 +253,7 @@ struct mmc_bkops_info {
 	unsigned int		host_delay_ms;
 	unsigned int		delay_ms;
 	unsigned int		min_sectors_to_queue_delayed_work;
+	unsigned int		size_percentage_to_queue_delayed_work;
 	/*
 	 * A default time for checking the need for non urgent BKOPS once mmcqd
 	 * is idle.
@@ -264,9 +271,8 @@ struct mmc_bkops_info {
 	 * mmcqd thread is idle.
	 * The delayed work for idle BKOPS will be scheduled only after a significant
	 * amount of write or discard data.
-	 * 100MB is chosen based on benchmark tests.
	 */
-#define BKOPS_MIN_SECTORS_TO_QUEUE_DELAYED_WORK 204800 /* 100MB */
+#define BKOPS_SIZE_PERCENTAGE_TO_QUEUE_DELAYED_WORK 1 /* 1% */
 };
 
 /*
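For reference, the conversion performed when bkops_check_threshold is written (and at
mmc_blk_alloc_req() time with the default of 1%) is plain integer arithmetic on the card
capacity in 512-byte sectors, as returned by get_capacity(). The snippet below is a minimal
standalone sketch of that calculation, not part of the patch; the card size is an arbitrary
example value.

	/*
	 * Illustration only: mirrors (card_size * value) / 100 from
	 * bkops_check_threshold_store(). The card size is a made-up
	 * example (~7.3 GiB expressed in 512-byte sectors).
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int card_size = 15269888;	/* sectors, hypothetical */
		unsigned int percentage = 1;		/* default threshold: 1% */
		unsigned int min_sectors = (card_size * percentage) / 100;

		/*
		 * Prints 152698 sectors, i.e. roughly 74 MiB must be written
		 * or discarded before the BKOPS status bit is checked.
		 */
		printf("min_sectors_to_queue_delayed_work = %u\n", min_sectors);
		return 0;
	}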