diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 04eddb4f4380..e194f4f34f8b 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -42,12 +42,12 @@ static int highbank_suspend_finish(unsigned long val)
 static int highbank_pm_enter(suspend_state_t state)
 {
 	cpu_pm_enter();
-	cpu_cluster_pm_enter();
+	cpu_cluster_pm_enter(0);
 
 	highbank_set_cpu_jump(0, cpu_resume);
 	cpu_suspend(0, highbank_suspend_finish);
 
-	cpu_cluster_pm_exit();
+	cpu_cluster_pm_exit(0);
 	cpu_pm_exit();
 
 	highbank_smc1(0x102, 0x1);
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index f98410a257e3..a3e554384dc4 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -124,7 +124,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 		 */
 		if ((cx->mpu_state == PWRDM_POWER_RET) &&
 		    (cx->mpu_logic_state == PWRDM_POWER_OFF))
-			cpu_cluster_pm_enter();
+			cpu_cluster_pm_enter(0);
 	}
 
 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
@@ -149,7 +149,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 		 */
 		if ((cx->mpu_state == PWRDM_POWER_RET) &&
 		    (cx->mpu_logic_state == PWRDM_POWER_OFF))
-			cpu_cluster_pm_exit();
+			cpu_cluster_pm_exit(0);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
 
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 45cf52c7e528..e7594fa85066 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -143,13 +143,13 @@ void tegra_idle_lp2_last(void)
 {
 	tegra_pmc_pm_set(TEGRA_SUSPEND_LP2);
 
-	cpu_cluster_pm_enter();
+	cpu_cluster_pm_enter(0);
 	suspend_cpu_complex();
 
 	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);
 
 	restore_cpu_complex();
-	cpu_cluster_pm_exit();
+	cpu_cluster_pm_exit(0);
 }
 
 enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index b737381f95c9..5926916fce5f 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -655,6 +655,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
 			list_add(&child->list, &c->child);
 			cpumask_or(&c->child_cpus, &c->child_cpus,
 					&child->child_cpus);
+			c->aff_level = child->aff_level + 1;
 
 			continue;
 		}
@@ -670,6 +671,8 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
 
 			if (parse_cpu_levels(n, c))
 				goto failed_parse_cluster;
+
+			c->aff_level = 1;
 		}
 	}
 
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 5eac8bd60a19..c32327198421 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -473,9 +473,8 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
 		/*
 		 * Notify that the cluster is entering a low power mode
 		 */
-		if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE) {
-			cpu_cluster_pm_enter();
-		}
+		if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE)
+			cpu_cluster_pm_enter(cluster->aff_level);
 	}
 	if (level->notify_rpm) {
 		struct cpumask nextcpu;
@@ -623,9 +622,8 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
 		BUG_ON(ret);
 
 		if (cluster->levels[last_level].mode[i] ==
-				MSM_SPM_MODE_POWER_COLLAPSE) {
-			cpu_cluster_pm_exit();
-		}
+				MSM_SPM_MODE_POWER_COLLAPSE)
+			cpu_cluster_pm_exit(cluster->aff_level);
 	}
 unlock_return:
 	spin_unlock(&cluster->sync_lock);
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 7daf0ebb7305..04e46c7cebab 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,7 @@ struct lpm_cluster {
 	struct list_head child;
 	const char *cluster_name;
 	const char **name;
+	unsigned long aff_level; /* Affinity level of the node */
 	struct low_power_ops *lpm_dev;
 	int ndevices;
 	struct lpm_cluster_level levels[NR_LPM_LEVELS];
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ac7b6947b6bf..cc8bc0721b08 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -812,7 +812,8 @@ static void gic_cpu_restore(unsigned int gic_nr)
 	writel_relaxed_no_log(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
 }
 
-static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+static int gic_notifier(struct notifier_block *self, unsigned long cmd,
+			void *aff_level)
 {
 	int i;
 
@@ -831,11 +832,20 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 			gic_cpu_restore(i);
 			break;
 		case CPU_CLUSTER_PM_ENTER:
-			gic_dist_save(i);
+			/*
+			 * Affinity level of the node
+			 * eg:
+			 * cpu level = 0
+			 * l2 level = 1
+			 * cci level = 2
+			 */
+			if (!(unsigned long)aff_level)
+				gic_dist_save(i);
 			break;
 		case CPU_CLUSTER_PM_ENTER_FAILED:
 		case CPU_CLUSTER_PM_EXIT:
-			gic_dist_restore(i);
+			if (!(unsigned long)aff_level)
+				gic_dist_restore(i);
 			break;
 		}
 	}
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
index 455b233dd3b1..91117bcc665a 100644
--- a/include/linux/cpu_pm.h
+++ b/include/linux/cpu_pm.h
@@ -71,8 +71,8 @@ int cpu_pm_register_notifier(struct notifier_block *nb);
 int cpu_pm_unregister_notifier(struct notifier_block *nb);
 int cpu_pm_enter(void);
 int cpu_pm_exit(void);
-int cpu_cluster_pm_enter(void);
-int cpu_cluster_pm_exit(void);
+int cpu_cluster_pm_enter(unsigned long aff_level);
+int cpu_cluster_pm_exit(unsigned long aff_level);
 
 #else
 
@@ -96,12 +96,12 @@ static inline int cpu_pm_exit(void)
 	return 0;
 }
 
-static inline int cpu_cluster_pm_enter(void)
+static inline int cpu_cluster_pm_enter(unsigned long aff_level)
 {
 	return 0;
 }
 
-static inline int cpu_cluster_pm_exit(void)
+static inline int cpu_cluster_pm_exit(unsigned long aff_level)
 {
 	return 0;
 }
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 9656a3c36503..22211e754eee 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -25,11 +25,12 @@
 static DEFINE_RWLOCK(cpu_pm_notifier_lock);
 static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls,
+		void *data)
 {
 	int ret;
 
-	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, data,
 		nr_to_call, nr_calls);
 
 	return notifier_to_errno(ret);
@@ -101,13 +102,13 @@ int cpu_pm_enter(void)
 	int ret = 0;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls, NULL);
 	if (ret)
 		/*
 		 * Inform listeners (nr_calls - 1) about failure of CPU PM
 		 * PM entry who are notified earlier to prepare for it.
 		 */
-		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL, NULL);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -131,7 +132,7 @@ int cpu_pm_exit(void)
 	int ret;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL, NULL);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -154,19 +155,21 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
  *
  * Return conditions are same as __raw_notifier_call_chain.
  */
-int cpu_cluster_pm_enter(void)
+int cpu_cluster_pm_enter(unsigned long aff_level)
 {
 	int nr_calls;
 	int ret = 0;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls,
+			(void *) aff_level);
 	if (ret)
 		/*
 		 * Inform listeners (nr_calls - 1) about failure of CPU cluster
 		 * PM entry who are notified earlier to prepare for it.
 		 */
-		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL,
+			(void *) aff_level);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -188,12 +191,12 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  *
  * Return conditions are same as __raw_notifier_call_chain.
  */
-int cpu_cluster_pm_exit(void)
+int cpu_cluster_pm_exit(unsigned long aff_level)
 {
 	int ret;
 
 	read_lock(&cpu_pm_notifier_lock);
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL, (void *) aff_level);
 	read_unlock(&cpu_pm_notifier_lock);
 
 	return ret;
@@ -209,13 +212,13 @@ static int cpu_pm_suspend(void)
 	if (ret)
 		return ret;
 
-	ret = cpu_cluster_pm_enter();
+	ret = cpu_cluster_pm_enter(0);
 
 	return ret;
 }
 
 static void cpu_pm_resume(void)
 {
-	cpu_cluster_pm_exit();
+	cpu_cluster_pm_exit(0);
 	cpu_pm_exit();
 }
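
Not part of the patch: a minimal sketch of how a driver-side CPU PM notifier could consume the new affinity-level argument, which is now passed as the notifier's void * data (see the gic_notifier change above). The names example_pm_notifier and example_nb are illustrative only, and the level checks assume the cpu = 0 / l2 = 1 / cci = 2 numbering described in the comment added to irq-gic.c.

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long cmd, void *data)
{
	/* The affinity level is passed as the notifier data pointer. */
	unsigned long aff_level = (unsigned long)data;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		/* Act only when the L2/cluster level (1) is powering down. */
		if (aff_level == 1)
			; /* save per-cluster context here */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		if (aff_level == 1)
			; /* restore per-cluster context here */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_pm_notifier,
};

/* Registration, e.g. from a driver probe path: */
/* cpu_pm_register_notifier(&example_nb); */

Legacy callers that pass 0 (cpu level) keep the old behaviour, since listeners such as the GIC only save and restore distributor state for that level.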