cpufreq: ondemand: add input_boost interface

Currently, the ondemand governor handles any input event
(such as touch) by scaling the CPU frequency to the maximum
available on the target.

This change adds a new sysfs interface, "input_boost",
specifying the frequency to which the CPU is scaled on input
events. Its value is user-defined, so input events can now be
handled by scaling the CPU to frequencies lower than the
target's maximum.

Change-Id: I5428fd8797c9984b17a66b01a44557f2160e8b68
Signed-off-by: Dilip Gudlur <dgudlur@codeaurora.org>
This commit is contained in:
Dilip Gudlur 2013-06-17 13:04:31 -07:00 committed by Zhao Wei Liew
parent b5608a5fc5
commit db31d58766

View file

@@ -144,6 +144,7 @@ static struct dbs_tuners {
unsigned int sampling_down_factor; unsigned int sampling_down_factor;
int powersave_bias; int powersave_bias;
unsigned int io_is_busy; unsigned int io_is_busy;
unsigned int input_boost;
} dbs_tuners_ins = { } dbs_tuners_ins = {
.up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD, .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
@@ -155,6 +156,7 @@ static struct dbs_tuners {
.powersave_bias = 0, .powersave_bias = 0,
.sync_freq = 0, .sync_freq = 0,
.optimal_freq = 0, .optimal_freq = 0,
.input_boost = 0,
}; };
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
@@ -319,6 +321,7 @@ show_one(ignore_nice_load, ignore_nice);
show_one(optimal_freq, optimal_freq); show_one(optimal_freq, optimal_freq);
show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load); show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
show_one(sync_freq, sync_freq); show_one(sync_freq, sync_freq);
show_one(input_boost, input_boost);
static ssize_t show_powersave_bias static ssize_t show_powersave_bias
(struct kobject *kobj, struct attribute *attr, char *buf) (struct kobject *kobj, struct attribute *attr, char *buf)
@@ -394,6 +397,18 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
return count; return count;
} }
/*
 * store_input_boost - sysfs write handler for the "input_boost" tunable.
 *
 * Parses one unsigned integer from @buf and stores it in
 * dbs_tuners_ins.input_boost. A value of 0 disables the override, in
 * which case input events boost to policy->max instead (see
 * dbs_refresh_callback).
 *
 * Returns @count on success, or -EINVAL if @buf does not contain a
 * parsable unsigned integer.
 */
static ssize_t store_input_boost(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int val;

	if (sscanf(buf, "%u", &val) != 1)
		return -EINVAL;

	dbs_tuners_ins.input_boost = val;
	return count;
}
static ssize_t store_sync_freq(struct kobject *a, struct attribute *b, static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
const char *buf, size_t count) const char *buf, size_t count)
{ {
@@ -671,6 +686,7 @@ define_one_global_rw(up_threshold_multi_core);
define_one_global_rw(optimal_freq); define_one_global_rw(optimal_freq);
define_one_global_rw(up_threshold_any_cpu_load); define_one_global_rw(up_threshold_any_cpu_load);
define_one_global_rw(sync_freq); define_one_global_rw(sync_freq);
define_one_global_rw(input_boost);
static struct attribute *dbs_attributes[] = { static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr, &sampling_rate_min.attr,
@@ -685,6 +701,7 @@ static struct attribute *dbs_attributes[] = {
&optimal_freq.attr, &optimal_freq.attr,
&up_threshold_any_cpu_load.attr, &up_threshold_any_cpu_load.attr,
&sync_freq.attr, &sync_freq.attr,
&input_boost.attr,
NULL NULL
}; };
@@ -997,6 +1014,7 @@ static void dbs_refresh_callback(struct work_struct *work)
struct cpu_dbs_info_s *this_dbs_info; struct cpu_dbs_info_s *this_dbs_info;
struct dbs_work_struct *dbs_work; struct dbs_work_struct *dbs_work;
unsigned int cpu; unsigned int cpu;
unsigned int target_freq;
dbs_work = container_of(work, struct dbs_work_struct, work); dbs_work = container_of(work, struct dbs_work_struct, work);
cpu = dbs_work->cpu; cpu = dbs_work->cpu;
@@ -1013,14 +1031,19 @@ static void dbs_refresh_callback(struct work_struct *work)
goto bail_incorrect_governor; goto bail_incorrect_governor;
} }
if (policy->cur < policy->max) { if (dbs_tuners_ins.input_boost)
target_freq = dbs_tuners_ins.input_boost;
else
target_freq = policy->max;
if (policy->cur < target_freq) {
/* /*
* Arch specific cpufreq driver may fail. * Arch specific cpufreq driver may fail.
* Don't update governor frequency upon failure. * Don't update governor frequency upon failure.
*/ */
if (__cpufreq_driver_target(policy, policy->max, if (__cpufreq_driver_target(policy, target_freq,
CPUFREQ_RELATION_L) >= 0) CPUFREQ_RELATION_L) >= 0)
policy->cur = policy->max; policy->cur = target_freq;
this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu, this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu,
&this_dbs_info->prev_cpu_wall); &this_dbs_info->prev_cpu_wall);