Perf: Re-enable counters after power collapse
Counters need to be individually re-enabled after the CPU comes out of power collapse. Without this, the counters are simply set to their MAX period and starting the PMU has no effect.

Change-Id: I3988a45277057eb80cf580b90ce697d0e6a00c43
Signed-off-by: Ashwin Chaugule <ashwinc@codeaurora.org>
parent 5ec4f5b115
commit fe77161b14
2 changed files with 27 additions and 5 deletions
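
In short, the fix is a flag handshake: the CPU-PM notifier sets from_idle when the core leaves power collapse, and the next call into the PMU enable path re-programs every active counter before clearing the flag. A minimal userspace model of that pattern (all names here are illustrative stand-ins, not the kernel's API):

	#include <stdio.h>

	#define NUM_COUNTERS 4

	struct fake_event { int config; };		/* stand-in for struct perf_event */

	static struct fake_event *events[NUM_COUNTERS];	/* stand-in for hw_events->events[] */
	static int from_idle;				/* set on exit from power collapse */

	/* Stand-in for armpmu->enable(): re-programs one hardware counter. */
	static void counter_enable(int idx)
	{
		printf("re-programming counter %d\n", idx);
	}

	/* Stand-in for the CPU_PM_EXIT branch of the notifier. */
	static void pm_exit_notifier(void)
	{
		from_idle = 1;	/* hardware state was lost; flag the enable path */
	}

	/* Stand-in for armpmu_enable(): re-programs active counters once after idle. */
	static void pmu_enable(void)
	{
		int idx;

		if (from_idle) {
			for (idx = 0; idx < NUM_COUNTERS; ++idx) {
				if (!events[idx])
					continue;
				counter_enable(idx);
			}
			from_idle = 0;	/* don't needlessly re-enable next time */
		}
		/* ...start the PMU here... */
	}

	int main(void)
	{
		struct fake_event ev = { 0 };

		events[2] = &ev;	/* one counter in use */
		pm_exit_notifier();	/* CPU comes out of power collapse */
		pmu_enable();		/* prints: re-programming counter 2 */
		return 0;
	}
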
arch/arm/include/asm/pmu.h

@@ -108,6 +108,12 @@ struct arm_pmu {
 	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
 	const char	*name;
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	u32		from_idle;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	int		(*request_pmu_irq)(int irq, irq_handler_t *irq_h);
 	void		(*free_pmu_irq)(int irq);
@@ -123,11 +129,6 @@ struct arm_pmu {
 	void		(*stop)(void);
 	void		(*reset)(void *);
 	int		(*map_event)(struct perf_event *event);
-	int		num_events;
-	atomic_t	active_events;
-	struct mutex	reserve_mutex;
-	u64		max_period;
-	struct platform_device	*plat_device;
 	struct pmu_hw_events	*(*get_hw_events)(void);
 	int		(*test_set_event_constraints)(struct perf_event *event);
 	int		(*clear_event_constraints)(struct perf_event *event);
arch/arm/kernel/perf_event.c

@@ -600,6 +600,21 @@ static void armpmu_enable(struct pmu *pmu)
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+	int idx;
+
+	if (armpmu->from_idle) {
+		for (idx = 0; idx <= cpu_pmu->num_events; ++idx) {
+			struct perf_event *event = hw_events->events[idx];
+
+			if (!event)
+				continue;
+
+			armpmu->enable(&event->hw, idx, event->cpu);
+		}
+
+		/* Reset bit so we don't needlessly re-enable counters.*/
+		armpmu->from_idle = 0;
+	}
 
 	if (enabled)
 		armpmu->start();
@@ -716,6 +731,7 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
+
 static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 				    unsigned long action, void *hcpu)
 {
@@ -785,6 +801,11 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 	case CPU_PM_ENTER_FAILED:
 	case CPU_PM_EXIT:
 		if (cpu_has_active_perf() && cpu_pmu->reset) {
+			/*
+			 * Flip this bit so armpmu_enable knows it needs
+			 * to re-enable active counters.
+			 */
+			cpu_pmu->from_idle = 1;
 			cpu_pmu->reset(NULL);
 			perf_pmu_enable(&cpu_pmu->pmu);
 		}
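
perf_cpu_pm_notifier only receives CPU_PM_EXIT events if it has been registered with the CPU-PM framework; that registration predates this patch and is not shown in the diff. For context, hooking into the framework looks roughly like this (a sketch using the kernel's cpu_pm_register_notifier(); the notifier-block and init-function names are illustrative):

	#include <linux/cpu_pm.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* Illustrative name; routes CPU_PM_ENTER/EXIT events to the handler above. */
	static struct notifier_block perf_cpu_pm_nb = {
		.notifier_call = perf_cpu_pm_notifier,
	};

	static int __init perf_cpu_pm_init(void)
	{
		return cpu_pm_register_notifier(&perf_cpu_pm_nb);
	}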