cpuidle: lpm-levels: Add support for PSCI

Add support to terminate all low power modes in PSCI. The lpm-levels driver
works with version 1.0 of the PSCI specification using the OS-initiated
scheme: it determines the last man standing and votes into TZ accordingly.

Change-Id: I7cbe2ded36ab320334dcf3f971fd6d4a36a881bf
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
Signed-off-by: Archana Sathyakumar <asathyak@codeaurora.org>
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>
Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
Author: Mahesh Sivasubramanian, 2015-04-07 17:08:39 -06:00 (committed by Maulik Shah)
parent 20bdf764bf
commit b9c0e7b60f
4 changed files with 269 additions and 64 deletions
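As background for the OS-initiated scheme mentioned above, the sketch below models the "last man standing" decision in plain userspace C: a cluster's low power mode is only voted for once every CPU in the cluster has gone idle. Names and values here are illustrative, not the driver's actual API; the driver itself compares num_children_in_sync against child_cpus with cpumask_equal() under the cluster's sync_lock.

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace model of the "last man standing" check used by the
 * OS-initiated scheme: a cluster contributes its low power mode to the
 * composite state ID only when every CPU in the cluster has voted.
 */
struct cluster_model {
	unsigned int child_cpus;    /* bitmask of CPUs in this cluster        */
	unsigned int cpus_in_sync;  /* CPUs that have already entered idle    */
	unsigned int mode_bits;     /* (psci-mode & mode-mask) << mode-shift  */
};

/* Returns the cluster's contribution to the state ID, or 0 if some CPU
 * in the cluster is still running and the cluster must stay active. */
static unsigned int cluster_vote(const struct cluster_model *c)
{
	bool last_man = (c->cpus_in_sync == c->child_cpus);

	return last_man ? c->mode_bits : 0;
}

int main(void)
{
	struct cluster_model c = {
		.child_cpus   = 0xf,  /* four CPUs in the cluster */
		.cpus_in_sync = 0x7,  /* CPU3 is still running    */
		.mode_bits    = 0x40,
	};

	printf("vote = 0x%x\n", cluster_vote(&c)); /* 0x0: not the last man */

	c.cpus_in_sync = 0xf;                      /* CPU3 goes idle        */
	printf("vote = 0x%x\n", cluster_vote(&c)); /* 0x40: last man, vote  */
	return 0;
}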


@ -24,7 +24,7 @@ Required properties:
- qcom,spm-device-names: List of SPM device names which control the
low power modes for this driver. The lpm driver uses the device name
to obtain a handle to the SPM driver that controls the cluster's low
power mode.
power mode. This is only required if "qcom,use-psci" is not defined.
- qcom,default-level: The default low power level that a cluster is
programmed. The SPM of the corresponding device is configured at this
low power mode by default.
@ -73,6 +73,18 @@ Required properties:
there is 1 online core.
- qcom,disable-dynamic-int-routing: When set disables the dynamic
routing of rpm-smd and mpm interrupts to next wake up core.
- qcom,use-psci: This boolean property indicates that the LPM driver
terminates low power modes in PSCI, which in turn configures the SPM for
the requested low power mode.
- qcom,psci-mode-shift: The property is used to determine the bit
position of the cluster mode within the composite state ID used to define
cluster low power modes in PSCI v1.0. Required only if qcom,use-psci
is defined at the lpm-levels root node.
- qcom,psci-mode-mask: The property is used to determine the bit
mask of the cluster mode within the composite state ID used to define
cluster low power modes in PSCI v1.0. Required only if qcom,use-psci
is defined at the lpm-levels root node.
- qcom,psci-mode: ID to be passed into the PSCI firmware. Required
only if qcom,use-psci is defined at the lpm-levels root node.
[Node bindings for qcom,pm-cpu]
qcom,pm-cpu contains the low power modes that a cpu could enter. Currently it
@ -100,6 +112,15 @@ qcom,pm-cpu-levels.
power collapse and the cpu relies on Broadcast timer for scheduled
wakeups. Required only for states where the CPUs internal timer state
is lost.
- qcom,cpu-is-reset: This boolean property maps to the "power state" bit in
the PSCI state_id configuration. It indicates whether the CPU is reset for
a particular LPM or not.
Optional properties:
- qcom,psci-mode-shift: Same as cluster level fields.
- qcom,psci-mode-mask: Same as cluster level fields.
- qcom,psci-cpu-mode: ID to be passed into the PSCI firmware.
- qcom,cpu-is-reset: Whether the CPU is reset in this level or not.
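To show how the PSCI-related properties above combine, the following standalone C sketch computes a composite state ID. The DT values used (mode-shift 4, mode-mask 0xf, psci-mode 4, psci-cpu-mode 3) are hypothetical; only the bit layout follows the PSCI_POWER_STATE and PSCI_AFFINITY_LEVEL macros added by this patch.

#include <stdio.h>

/* Bit layout taken from the macros added by this patch. */
#define PSCI_POWER_STATE(reset)   ((reset) << 30)        /* qcom,cpu-is-reset */
#define PSCI_AFFINITY_LEVEL(lvl)  (((lvl) & 0x3) << 24)

int main(void)
{
	/* Hypothetical DT values, for illustration only. */
	unsigned int psci_mode_shift = 4;    /* qcom,psci-mode-shift */
	unsigned int psci_mode_mask  = 0xf;  /* qcom,psci-mode-mask  */
	unsigned int cluster_mode    = 4;    /* qcom,psci-mode       */
	unsigned int cpu_mode        = 3;    /* qcom,psci-cpu-mode   */

	unsigned int state_id = PSCI_POWER_STATE(1)     /* CPU is reset in this level */
			      | PSCI_AFFINITY_LEVEL(1)  /* vote reached cluster level */
			      | ((cluster_mode & psci_mode_mask) << psci_mode_shift)
			      | cpu_mode;

	printf("state_id = 0x%08x\n", state_id);        /* prints 0x41000043 */
	return 0;
}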
[Example dts]


@ -21,6 +21,7 @@
#include <linux/moduleparam.h>
#include "lpm-levels.h"
bool use_psci;
enum lpm_type {
IDLE = 0,
SUSPEND,
@ -147,7 +148,7 @@ failed:
static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
{
int cpu;
int i, j, cpu_idx;
int i, cpu_idx;
struct kobject **cpu_kobj = NULL;
struct lpm_level_avail *level_list = NULL;
char cpu_name[20] = {0};
@ -168,25 +169,16 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
}
level_list = devm_kzalloc(&lpm_pdev->dev,
MSM_PM_SLEEP_MODE_NR * sizeof(*level_list),
p->cpu->nlevels * sizeof(*level_list),
GFP_KERNEL);
if (!level_list) {
ret = -ENOMEM;
goto release_kobj;
}
for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
for (j = 0; j < p->cpu->nlevels; j++)
if (p->cpu->levels[j].mode == i)
break;
if (j == p->cpu->nlevels) {
/* Level not defined in DT */
level_list[i].idle_enabled = false;
level_list[i].suspend_enabled = false;
continue;
}
for (i = 0; i < p->cpu->nlevels; i++) {
ret = create_lvl_avail_nodes(p->cpu->levels[j].name,
ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
cpu_kobj[cpu_idx], &level_list[i]);
if (ret)
goto release_kobj;
@ -199,8 +191,7 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
return ret;
release_kobj:
j = cpumask_weight(&p->child_cpus);
for (i = 0; i < j; i++)
for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
kobject_put(cpu_kobj[i]);
return ret;
@ -243,15 +234,15 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
}
bool lpm_cpu_mode_allow(unsigned int cpu,
unsigned int mode, bool from_idle)
unsigned int index, bool from_idle)
{
struct lpm_level_avail *avail = cpu_level_available[cpu];
if (!lpm_pdev || !avail)
return !from_idle;
return !!(from_idle ? avail[mode].idle_enabled :
avail[mode].suspend_enabled);
return !!(from_idle ? avail[index].idle_enabled :
avail[index].suspend_enabled);
}
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
@ -266,7 +257,8 @@ bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
avail->suspend_enabled);
}
static int parse_cluster_params(struct device_node *node, struct lpm_cluster *c)
static int parse_legacy_cluster_params(struct device_node *node,
struct lpm_cluster *c)
{
int i;
char *key;
@ -282,12 +274,6 @@ static int parse_cluster_params(struct device_node *node, struct lpm_cluster *c)
{"cbf", set_system_mode},
};
key = "label";
ret = of_property_read_string(node, key, &c->cluster_name);
if (ret) {
pr_err("%s(): Cannot read required param %s\n", __func__, key);
return ret;
}
key = "qcom,spm-device-names";
c->ndevices = of_property_count_strings(node, key);
@ -351,6 +337,40 @@ failed:
return ret;
}
static int parse_cluster_params(struct device_node *node,
struct lpm_cluster *c)
{
char *key;
int ret;
key = "label";
ret = of_property_read_string(node, key, &c->cluster_name);
if (ret) {
pr_err("%s(): Cannot read required param %s\n", __func__, key);
return ret;
}
if (use_psci) {
key = "qcom,psci-mode-shift";
ret = of_property_read_u32(node, key,
&c->psci_mode_shift);
if (ret) {
pr_err("%s(): Failed to read param: %s\n",
__func__, key);
return ret;
}
key = "qcom,psci-mode-mask";
ret = of_property_read_u32(node, key,
&c->psci_mode_mask);
if (ret)
pr_err("%s(): Failed to read param: %s\n",
__func__, key);
return ret;
} else
return parse_legacy_cluster_params(node, c);
}
static int parse_lpm_mode(const char *str)
{
int i;
@ -404,18 +424,34 @@ static int parse_cluster_level(struct device_node *node,
int i = 0;
struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
int ret = -ENOMEM;
char key[25] = {0};
char *key;
if (!cluster->no_saw_devices) {
key = "label";
ret = of_property_read_string(node, key, &level->level_name);
if (ret)
goto failed;
if (use_psci) {
char *k = "qcom,psci-mode";
ret = of_property_read_u32(node, k, &level->psci_id);
if (ret)
goto failed;
} else if (!cluster->no_saw_devices) {
key = "no saw-devices";
level->mode = devm_kzalloc(&lpm_pdev->dev,
cluster->ndevices * sizeof(*level->mode),
GFP_KERNEL);
if (!level->mode)
if (!level->mode) {
pr_err("Memory allocation failed\n");
goto failed;
}
for (i = 0; i < cluster->ndevices; i++) {
const char *spm_mode;
char key[25] = {0};
snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
ret = of_property_read_string(node, key, &spm_mode);
@ -428,11 +464,13 @@ static int parse_cluster_level(struct device_node *node,
}
}
ret = of_property_read_string(node, "label", &level->level_name);
key = "label";
ret = of_property_read_string(node, key, &level->level_name);
if (ret)
goto failed;
if (cluster->nlevels != cluster->default_level) {
key = "min child idx";
ret = of_property_read_u32(node, "qcom,min-child-idx",
&level->min_child_level);
if (ret)
@ -448,6 +486,7 @@ static int parse_cluster_level(struct device_node *node,
level->last_core_only = of_property_read_bool(node,
"qcom,last-core-only");
key = "parse_power_params";
ret = parse_power_params(node, &level->pwr);
if (ret)
goto failed;
@ -455,13 +494,13 @@ static int parse_cluster_level(struct device_node *node,
cluster->nlevels++;
return 0;
failed:
pr_err("Failed %s() ret = %d\n", __func__, ret);
pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
kfree(level->mode);
level->mode = NULL;
return ret;
}
static int parse_cpu_mode(const char *mode_name)
static int parse_cpu_spm_mode(const char *mode_name)
{
struct lpm_lookup_table pm_sm_lookup[] = {
{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
@ -485,6 +524,37 @@ static int parse_cpu_mode(const char *mode_name)
return ret;
}
static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
char *key;
int ret;
key = "qcom,spm-cpu-mode";
ret = of_property_read_string(n, key, &l->name);
if (ret) {
pr_err("Failed %s %d\n", n->name, __LINE__);
return ret;
}
if (use_psci) {
key = "qcom,psci-cpu-mode";
ret = of_property_read_u32(n, key, &l->psci_id);
if (ret) {
pr_err("Failed reading %s on device %s\n", key,
n->name);
return ret;
}
} else {
l->mode = parse_cpu_spm_mode(l->name);
if (l->mode < 0)
return l->mode;
}
return 0;
}
static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
struct device_node *cpu_node;
@ -531,27 +601,39 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
struct device_node *n;
int ret = -ENOMEM;
int i;
char *key;
c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
if (!c->cpu)
return ret;
c->cpu->parent = c;
if (use_psci) {
key = "qcom,psci-mode-shift";
ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
if (ret) {
pr_err("Failed reading %s on device %s\n", key,
node->name);
return ret;
}
key = "qcom,psci-mode-mask";
ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
if (ret) {
pr_err("Failed reading %s on device %s\n", key,
node->name);
return ret;
}
}
for_each_child_of_node(node, n) {
struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
char *key = "qcom,spm-cpu-mode";
c->cpu->nlevels++;
ret = of_property_read_string(n, key, &l->name);
if (ret) {
pr_info("Failed %s %d\n", n->name, __LINE__);
goto failed;
}
l->mode = parse_cpu_mode(l->name);
if (l->mode < 0) {
ret = parse_cpu_mode(n, l);
if (ret < 0) {
pr_info("Failed %s\n", l->name);
goto failed;
}
@ -563,6 +645,8 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
key = "qcom,use-broadcast-timer";
l->use_bc_timer = of_property_read_bool(n, key);
key = "qcom,cpu-is-reset";
l->is_reset = of_property_read_bool(n, key);
}
return 0;
failed:
@ -641,7 +725,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
continue;
key = "qcom,pm-cluster-level";
if (!of_node_cmp(n->name, key)) {
WARN_ON(c->no_saw_devices);
WARN_ON(!use_psci && c->no_saw_devices);
if (parse_cluster_level(n, c))
goto failed_parse_cluster;
continue;
@ -651,7 +735,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
if (!of_node_cmp(n->name, key)) {
struct lpm_cluster *child;
WARN_ON(c->no_saw_devices);
WARN_ON(!use_psci && c->no_saw_devices);
child = parse_cluster(n, c);
if (!child)
goto failed_parse_cluster;
@ -680,7 +764,10 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
}
}
c->last_level = c->nlevels-1;
if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
c->last_level = c->default_level;
else
c->last_level = c->nlevels-1;
return c;
@ -695,14 +782,17 @@ failed_parse_params:
kfree(c);
return NULL;
}
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
struct device_node *top = NULL;
use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
if (!top)
if (!top) {
pr_err("Failed to find root node\n");
return ERR_PTR(-ENODEV);
}
lpm_pdev = pdev;
return parse_cluster(top, NULL);


@ -42,6 +42,7 @@
#include <asm/cputype.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
#include <asm/suspend.h>
#include "lpm-levels.h"
#include "lpm-workarounds.h"
#include <trace/events/power.h>
@ -50,6 +51,8 @@
#define SCLK_HZ (32768)
#define SCM_HANDOFF_LOCK_ID "S:7"
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
static remote_spinlock_t scm_handoff_lock;
enum {
@ -83,12 +86,13 @@ static struct hrtimer lpm_hrtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
static int lpm_cpu_callback(struct notifier_block *cpu_nb,
unsigned long action, void *hcpu);
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle);
static void cluster_prepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle);
static struct notifier_block __refdata lpm_cpu_nblk = {
.notifier_call = lpm_cpu_callback,
@ -176,13 +180,17 @@ static int lpm_cpu_callback(struct notifier_block *cpu_nb,
struct lpm_cluster *cluster = per_cpu(cpu_cluster, (unsigned int) cpu);
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DYING:
cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu),
NR_LPM_LEVELS, false);
break;
case CPU_STARTING:
cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu),
NR_LPM_LEVELS, false);
NR_LPM_LEVELS, false);
break;
case CPU_ONLINE:
smp_call_function_single(cpu, setup_broadcast_timer,
(void *)true, 1);
(void *)true, 1);
break;
default:
break;
@ -271,6 +279,16 @@ int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
}
static int set_device_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
{
if (use_psci)
return 0;
else if (ops && ops->set_mode)
return ops->set_mode(ops, mode, notify_rpm);
else
return -EINVAL;
}
static int cpu_power_select(struct cpuidle_device *dev,
struct lpm_cpu *cpu, int *index)
{
@ -303,7 +321,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
enum msm_pm_sleep_mode mode = level->mode;
bool allow;
allow = lpm_cpu_mode_allow(dev->cpu, mode, true);
allow = lpm_cpu_mode_allow(dev->cpu, i, true);
if (!allow)
continue;
@ -508,7 +526,7 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
}
for (i = 0; i < cluster->ndevices; i++) {
ret = cluster->lpm_dev[i].set_mode(&cluster->lpm_dev[i],
ret = set_device_mode(&cluster->lpm_dev[i],
level->mode[i],
level->notify_rpm);
@ -518,8 +536,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
/*
* Notify that the cluster is entering a low power mode
*/
if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE)
if ((level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE) ||
level->is_reset) {
cpu_cluster_pm_enter(cluster->aff_level);
}
}
if (level->notify_rpm) {
struct cpumask nextcpu, *cpumask;
@ -545,7 +565,7 @@ failed_set_mode:
for (i = 0; i < cluster->ndevices; i++) {
int rc = 0;
level = &cluster->levels[cluster->default_level];
rc = cluster->lpm_dev[i].set_mode(&cluster->lpm_dev[i],
rc = set_device_mode(&cluster->lpm_dev[i],
level->mode[i],
level->notify_rpm);
BUG_ON(rc);
@ -654,15 +674,17 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
for (i = 0; i < cluster->ndevices; i++) {
level = &cluster->levels[cluster->default_level];
ret = cluster->lpm_dev[i].set_mode(&cluster->lpm_dev[i],
ret = set_device_mode(&cluster->lpm_dev[i],
level->mode[i],
level->notify_rpm);
BUG_ON(ret);
if (cluster->levels[last_level].mode[i] ==
MSM_SPM_MODE_POWER_COLLAPSE)
if ((cluster->levels[last_level].mode[i] ==
MSM_SPM_MODE_POWER_COLLAPSE) ||
cluster->levels[last_level].is_reset) {
cpu_cluster_pm_exit(cluster->aff_level);
}
}
unlock_return:
spin_unlock(&cluster->sync_lock);
@ -692,7 +714,8 @@ static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
|| (cpu_level->mode ==
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)))
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
|| (cpu_level->is_reset)))
cpu_pm_enter();
}
@ -708,10 +731,65 @@ static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
|| (cpu_level->mode ==
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)))
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
|| cpu_level->is_reset))
cpu_pm_exit();
}
int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
{
int state_id = 0;
if (!cluster)
return 0;
spin_lock(&cluster->sync_lock);
if (!cpumask_equal(&cluster->num_children_in_sync,
&cluster->child_cpus))
goto unlock_and_return;
state_id |= get_cluster_id(cluster->parent, aff_lvl);
if (cluster->last_level != cluster->default_level) {
struct lpm_cluster_level *level
= &cluster->levels[cluster->last_level];
state_id |= (level->psci_id & cluster->psci_mode_mask)
<< cluster->psci_mode_shift;
(*aff_lvl)++;
}
unlock_and_return:
spin_unlock(&cluster->sync_lock);
return state_id;
}
#if !defined(CONFIG_CPU_V7)
bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
{
int affinity_level = 0;
int state_id = get_cluster_id(cluster, &affinity_level);
int power_state = PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
if (!idx) {
wfi();
return 1;
}
state_id |= (power_state | affinity_level
| cluster->cpu->levels[idx].psci_id);
return !cpu_suspend(state_id);
}
#else
bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
{
WARN_ONCE(true, "PSCI cpu_suspend ops not supported on V7\n");
return false;
}
#endif
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
@ -746,8 +824,12 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
if (idx > 0)
update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, 0xdeaffeed,
true);
success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
true);
if (!use_psci)
success = msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode,
true);
else
success = psci_enter_sleep(cluster, idx, true);
if (idx > 0)
update_debug_pc_event(CPU_EXIT, idx, success, 0xdeaffeed,
true);
@ -952,9 +1034,8 @@ static int lpm_suspend_enter(suspend_state_t state)
int idx;
for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
struct lpm_cpu_level *level = &lpm_cpu->levels[idx];
if (lpm_cpu_mode_allow(cpu, level->mode, false))
if (lpm_cpu_mode_allow(cpu, idx, false))
break;
}
if (idx < 0) {
@ -966,7 +1047,11 @@ static int lpm_suspend_enter(suspend_state_t state)
if (idx > 0)
update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
0xdeaffeed, false);
msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
if (!use_psci)
msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
else
psci_enter_sleep(cluster, idx, true);
if (idx > 0)
update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
false);
@ -1127,7 +1212,6 @@ unlock_and_return:
trace_pre_pc_cb(retflag);
remote_spin_lock_rlock_id(&scm_handoff_lock,
REMOTE_SPINLOCK_TID_START + cpu);
spin_unlock(&cluster->sync_lock);
return retflag;
}


@ -15,6 +15,8 @@
#define NR_LPM_LEVELS 8
extern bool use_psci;
struct lpm_lookup_table {
uint32_t modes;
const char *mode_name;
@ -32,11 +34,15 @@ struct lpm_cpu_level {
enum msm_pm_sleep_mode mode;
bool use_bc_timer;
struct power_params pwr;
unsigned int psci_id;
bool is_reset;
};
struct lpm_cpu {
struct lpm_cpu_level levels[NR_LPM_LEVELS];
int nlevels;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
struct lpm_cluster *parent;
};
@ -59,6 +65,8 @@ struct lpm_cluster_level {
bool sync_level;
bool last_core_only;
struct lpm_level_avail available;
unsigned int psci_id;
bool is_reset;
};
struct low_power_ops {
@ -88,6 +96,8 @@ struct lpm_cluster {
struct cpumask num_children_in_sync;
struct lpm_cluster *parent;
struct lpm_stats *stats;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
bool no_saw_devices;
};