lpm-levels: Use residency instead of power and energy overhead

The driver currently walks every enabled mode and performs an energy
calculation at runtime before it selects a low power mode to enter.
With pre-computed residency values, the cpu/cluster can instead choose
a low power mode as soon as it reaches one whose residency constraint
is satisfied by the expected sleep time.

Change-Id: I593810a9cf95ead9031bb9b8608b1da78a7b066f
Signed-off-by: Archana Sathyakumar <asathyak@codeaurora.org>
Signed-off-by: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
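
For context on the numbers this patch precomputes: sleeping in a mode for
t microseconds costs roughly energy_overhead + ss_power * (t -
time_overhead_us), so the residency produced by the patch's
calculate_residency() is the t at which a deeper mode's cost line crosses
a shallower one's. Below is a minimal userspace sketch of that break-even
arithmetic with made-up numbers; the struct, the values, and the
breakeven() helper are illustrative only, not part of the patch.

/* Break-even sketch (not kernel code): mirrors the arithmetic of
 * calculate_residency() in this patch. All numbers are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct pwr {
	uint32_t ss_power;		/* steady state power */
	uint32_t energy_overhead;	/* enter + exit energy */
	uint32_t time_overhead_us;	/* enter + exit time */
};

/* t where E_next(t) = E_base(t), with E(t) = energy_overhead +
 * ss_power * (t - time_overhead_us); clamped the way the driver does. */
static int32_t breakeven(const struct pwr *base, const struct pwr *next)
{
	int32_t r = (int32_t)(next->energy_overhead - base->energy_overhead) -
		((int32_t)(next->ss_power * next->time_overhead_us) -
		 (int32_t)(base->ss_power * base->time_overhead_us));

	r /= (int32_t)(base->ss_power - next->ss_power);
	if (r < 0)
		return 0;	/* inconsistent power attributes */
	return r < (int32_t)base->time_overhead_us ?
			(int32_t)base->time_overhead_us : r;
}

int main(void)
{
	struct pwr wfi = { 400, 10000, 50 };	/* shallow mode, hypothetical */
	struct pwr pc  = { 100, 76000, 550 };	/* deep mode, hypothetical */

	/* ((76000-10000) - (100*550 - 400*50)) / (400-100) = 103 us */
	printf("break-even: %d us\n", (int)breakeven(&wfi, &pc));
	return 0;
}

With these example numbers the crossover is ~103 us: any expected sleep
longer than that and the deeper mode wins, which is exactly the comparison
cpu_power_select() now makes against the precomputed residency table
instead of re-running the power math on every idle entry.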
commit 285cdfb61b (parent cd96324074)
Author:    Archana Sathyakumar <asathyak@codeaurora.org>
Date:      2015-08-11 12:38:41 -06:00
Committer: Srinivas Rao L

 3 files changed, 169 insertions(+), 62 deletions(-)


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,34 +38,96 @@ static const struct lpm_type_str lpm_types[] = {
 	{SUSPEND, "suspend_enabled"},
 };
 
+static DEFINE_PER_CPU(uint32_t *, max_residency);
 static struct lpm_level_avail *cpu_level_available[NR_CPUS];
 static struct platform_device *lpm_pdev;
 
-static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr)
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+				struct lpm_level_avail *avail)
 {
 	void *arg = NULL;
-	struct lpm_level_avail *avail = NULL;
 
-	if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) {
-		avail = container_of(attr, struct lpm_level_avail,
-					idle_enabled_attr);
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
 		arg = (void *) &avail->idle_enabled;
-	} else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) {
-		avail = container_of(attr, struct lpm_level_avail,
-					suspend_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
 		arg = (void *) &avail->suspend_enabled;
-	}
 
 	return arg;
 }
 
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+					struct kobj_attribute *attr)
+{
+	struct lpm_level_avail *avail = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					idle_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					suspend_enabled_attr);
+
+	return avail;
+}
+
+static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+	uint32_t *residency = per_cpu(max_residency, cpu_id);
+
+	for (i = 0; i < cpu->nlevels; i++) {
+		struct power_params *pwr = &cpu->levels[i].pwr;
+
+		residency[i] = ~0;
+		for (j = i + 1; j < cpu->nlevels; j++) {
+			mode_avail = probe_time ||
+				lpm_cpu_mode_allow(cpu_id, j, true);
+
+			if (mode_avail &&
+				(residency[i] > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				residency[i] = pwr->residencies[j];
+		}
+	}
+}
+
+static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
+		bool probe_time)
+{
+	int i, j;
+	bool mode_avail;
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct power_params *pwr = &cluster->levels[i].pwr;
+
+		pwr->max_residency = ~0;
+		for (j = 0; j < cluster->nlevels; j++) {
+			if (i >= j)
+				mode_avail = probe_time ||
+					lpm_cluster_mode_allow(cluster, i,
+							true);
+
+			if (mode_avail &&
+				(pwr->max_residency > pwr->residencies[j]) &&
+				(pwr->residencies[j] != 0))
+				pwr->max_residency = pwr->residencies[j];
+		}
+	}
+}
+
+uint32_t *get_per_cpu_max_residency(int cpu)
+{
+	return per_cpu(max_residency, cpu);
+}
+
 ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
 		char *buf)
 {
 	int ret = 0;
 	struct kernel_param kp;
 
-	kp.arg = get_avail_val(kobj, attr);
+	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
 	ret = param_get_bool(buf, &kp);
 	if (ret > 0) {
 		strlcat(buf, "\n", PAGE_SIZE);
@@ -80,15 +142,23 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
 {
 	int ret = 0;
 	struct kernel_param kp;
+	struct lpm_level_avail *avail;
 
-	kp.arg = get_avail_val(kobj, attr);
+	avail = get_avail_ptr(kobj, attr);
+	kp.arg = get_enabled_ptr(attr, avail);
 	ret = param_set_bool(buf, &kp);
 
+	if (avail->cpu_node)
+		set_optimum_cpu_residency(avail->data, avail->idx, false);
+	else
+		set_optimum_cluster_residency(avail->data, false);
+
 	return ret ? ret : len;
 }
 
 static int create_lvl_avail_nodes(const char *name,
-		struct kobject *parent, struct lpm_level_avail *avail)
+		struct kobject *parent, struct lpm_level_avail *avail,
+		void *data, int index, bool cpu_node)
 {
 	struct attribute_group *attr_group = NULL;
 	struct attribute **attr = NULL;
@@ -137,6 +207,9 @@ static int create_lvl_avail_nodes(const char *name,
 	avail->idle_enabled = true;
 	avail->suspend_enabled = true;
 	avail->kobj = kobj;
+	avail->data = data;
+	avail->idx = index;
+	avail->cpu_node = cpu_node;
 
 	return ret;
@@ -179,7 +252,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
 
 	for (i = 0; i < p->cpu->nlevels; i++) {
 		ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
-				cpu_kobj[cpu_idx], &level_list[i]);
+				cpu_kobj[cpu_idx], &level_list[i],
+				(void *)p->cpu, cpu, true);
 		if (ret)
 			goto release_kobj;
 	}
@@ -213,7 +287,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
 	for (i = 0; i < p->nlevels; i++) {
 		ret = create_lvl_avail_nodes(p->levels[i].level_name,
-				cluster_kobj, &p->levels[i].available);
+				cluster_kobj, &p->levels[i].available,
+				(void *)p, 0, false);
 		if (ret)
 			return ret;
 	}
@@ -417,6 +492,9 @@ static int parse_power_params(struct device_node *node,
 	key = "qcom,time-overhead";
 	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
 	if (ret)
 		goto fail;
 
 fail:
 	if (ret)
 		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
@@ -499,6 +577,7 @@ static int parse_cluster_level(struct device_node *node,
 	key = "parse_power_params";
 	ret = parse_power_params(node, &level->pwr);
 	if (ret)
 		goto failed;
@@ -598,11 +677,31 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
 	return 0;
 }
 
+static int calculate_residency(struct power_params *base_pwr,
+					struct power_params *next_pwr)
+{
+	int32_t residency = (int32_t)(next_pwr->energy_overhead -
+			base_pwr->energy_overhead) -
+		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
+		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));
+
+	residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
+
+	if (residency < 0) {
+		__WARN_printf("%s: Incorrect power attributes for LPM\n",
+				__func__);
+		return 0;
+	}
+
+	return residency < base_pwr->time_overhead_us ?
+				base_pwr->time_overhead_us : residency;
+}
+
 static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
 {
 	struct device_node *n;
 	int ret = -ENOMEM;
-	int i;
+	int i, j;
 	char *key;
 
 	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
@@ -649,6 +748,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
 		l->is_reset = of_property_read_bool(n, "qcom,is-reset");
 	}
 
+	for (i = 0; i < c->cpu->nlevels; i++) {
+		for (j = 0; j < c->cpu->nlevels; j++) {
+			if (i >= j) {
+				c->cpu->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+
+			c->cpu->levels[i].pwr.residencies[j] =
+				calculate_residency(&c->cpu->levels[i].pwr,
+					&c->cpu->levels[j].pwr);
+
+			pr_err("%s: idx %d %u\n", __func__, j,
+				c->cpu->levels[i].pwr.residencies[j]);
+		}
+	}
+
 	return 0;
 failed:
 	for (i = 0; i < c->cpu->nlevels; i++) {
@@ -705,6 +820,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
 	struct device_node *n;
 	char *key;
 	int ret = 0;
+	int i, j;
 
 	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -762,6 +878,16 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
 				goto failed_parse_cluster;
 
 			c->aff_level = 1;
+
+			for_each_cpu(i, &c->child_cpus) {
+				per_cpu(max_residency, i) = devm_kzalloc(
+						&lpm_pdev->dev,
+						sizeof(uint32_t) * c->cpu->nlevels,
+						GFP_KERNEL);
+				if (!per_cpu(max_residency, i))
+					return ERR_PTR(-ENOMEM);
+				set_optimum_cpu_residency(c->cpu, i, true);
+			}
 		}
 	}
@@ -770,6 +896,17 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
 	else
 		c->last_level = c->nlevels-1;
 
+	for (i = 0; i < c->nlevels; i++) {
+		for (j = 0; j < c->nlevels; j++) {
+			if (i >= j) {
+				c->levels[i].pwr.residencies[j] = 0;
+				continue;
+			}
+
+			c->levels[i].pwr.residencies[j] = calculate_residency(
+				&c->levels[i].pwr, &c->levels[j].pwr);
+		}
+	}
+	set_optimum_cluster_residency(c, true);
 	return c;
 failed_parse_cluster:


@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -303,18 +303,15 @@ static int cpu_power_select(struct cpuidle_device *dev,
 		struct lpm_cpu *cpu, int *index)
 {
 	int best_level = -1;
-	uint32_t best_level_pwr = ~0U;
 	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
 							dev->cpu);
 	uint32_t sleep_us =
 		(uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
 	uint32_t modified_time_us = 0;
 	uint32_t next_event_us = 0;
-	uint32_t pwr;
 	int i;
 	uint32_t lvl_latency_us = 0;
-	uint32_t lvl_overhead_us = 0;
-	uint32_t lvl_overhead_energy = 0;
+	uint32_t *residency = get_per_cpu_max_residency(dev->cpu);
 
 	if (!cpu)
 		return -EINVAL;
@@ -336,17 +333,13 @@ static int cpu_power_select(struct cpuidle_device *dev,
 		if (!allow)
 			continue;
 
-		lvl_latency_us = pwr_params->latency_us;
-
-		lvl_overhead_us = pwr_params->time_overhead_us;
-
-		lvl_overhead_energy = pwr_params->energy_overhead;
-
 		if (i > 0 && suspend_in_progress)
 			continue;
 
+		lvl_latency_us = pwr_params->latency_us;
+
 		if (latency_us < lvl_latency_us)
-			continue;
+			break;
 
 		if (next_event_us) {
 			if (next_event_us < lvl_latency_us)
@@ -357,32 +350,15 @@ static int cpu_power_select(struct cpuidle_device *dev,
 			next_wakeup_us = next_event_us - lvl_latency_us;
 		}
 
-		if (next_wakeup_us <= pwr_params->time_overhead_us)
-			continue;
-
-		/*
-		 * If wakeup time greater than overhead by a factor of 1000
-		 * assume that core steady state power dominates the power
-		 * equation
-		 */
-		if ((next_wakeup_us >> 10) > lvl_overhead_us) {
-			pwr = pwr_params->ss_power;
-		} else {
-			pwr = pwr_params->ss_power;
-			pwr -= (lvl_overhead_us * pwr_params->ss_power) /
-						next_wakeup_us;
-			pwr += pwr_params->energy_overhead / next_wakeup_us;
-		}
-
-		if (best_level_pwr >= pwr) {
+		if (next_wakeup_us <= residency[i]) {
 			best_level = i;
-			best_level_pwr = pwr;
 			if (next_event_us && next_event_us < sleep_us &&
 				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
 				modified_time_us
 					= next_event_us - lvl_latency_us;
 			else
 				modified_time_us = 0;
+			break;
 		}
 	}
@@ -440,8 +416,6 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
 {
 	int best_level = -1;
 	int i;
-	uint32_t best_level_pwr = ~0U;
-	uint32_t pwr;
 	struct cpumask mask;
 	uint32_t latency_us = ~0U;
 	uint32_t sleep_us;
@@ -497,18 +471,9 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
 		if (level->notify_rpm && msm_rpm_waiting_for_ack())
 			continue;
 
-		if ((sleep_us >> 10) > pwr_params->time_overhead_us) {
-			pwr = pwr_params->ss_power;
-		} else {
-			pwr = pwr_params->ss_power;
-			pwr -= (pwr_params->time_overhead_us *
-					pwr_params->ss_power) / sleep_us;
-			pwr += pwr_params->energy_overhead / sleep_us;
-		}
-
-		if (best_level_pwr >= pwr) {
+		if (sleep_us <= pwr_params->max_residency) {
 			best_level = i;
-			best_level_pwr = pwr;
+			break;
 		}
 	}


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,8 @@ struct power_params {
 	uint32_t ss_power;		/* Steady state power */
 	uint32_t energy_overhead;	/* Enter + exit overhead */
 	uint32_t time_overhead_us;	/* Enter + exit overhead */
+	uint32_t residencies[NR_LPM_LEVELS];
+	uint32_t max_residency;
 };
 
 struct lpm_cpu_level {
@@ -52,6 +54,9 @@ struct lpm_level_avail {
 	struct kobject *kobj;
 	struct kobj_attribute idle_enabled_attr;
 	struct kobj_attribute suspend_enabled_attr;
+	void *data;
+	int idx;
+	bool cpu_node;
 };
 
 struct lpm_cluster_level {
@@ -115,7 +120,7 @@ bool lpm_cpu_mode_allow(unsigned int cpu,
 		unsigned int mode, bool from_idle);
 bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
 		unsigned int mode, bool from_idle);
+uint32_t *get_per_cpu_max_residency(int cpu);
 extern struct lpm_cluster *lpm_root_node;
 
 #ifdef CONFIG_SMP