Perf: arm64: fix disable of pmu irq during hotplug

The PMU irq is disabled when a cpu is hotplugged off while perf is
running. Using cpu_pmu->active_events to determine whether the pmu is
running leaves a window in which the count has already been decremented
to 0 in hw_perf_event_destroy but armpmu_release_hardware has not yet
run. If a cpu is hotplugged off in this window it may not disable its
irq. Use a separate flag which is not cleared until after the irq has
been released by all online cpus.

The flag needs to be tristate because a cpu may also be hotplugged in
during this window, in which case it must not enable its irq while the
pmu is being shut down. The intermediate GOING_DOWN state gives correct
behavior for cpus both going down and coming up.
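
To make the window concrete, the following timeline illustrates the
race as described above. It is a sketch reconstructed from this commit
message, not code from the patch; the exact interleaving is an
assumption.

/*
 * Illustrative interleaving (assumed, not taken from the patch):
 *
 *   CPU A: last event torn down         CPU B: being hotplugged off
 *   ---------------------------         ---------------------------
 *   hw_perf_event_destroy()
 *     active_events drops to 0
 *                                       hotplug notifier runs,
 *                                       sees active_events == 0,
 *                                       skips disabling CPU B's
 *                                       PMU irq              <-- bug
 *   armpmu_release_hardware()
 *     releases irqs, but only on
 *     the cpus still online
 */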

Change-Id: I934ba5dec34e681ce8defd7fa7e311b4a2a92c1a
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
Author: Neil Leeder <nleeder@codeaurora.org>
Date:   2014-08-29 14:30:57 -04:00
Commit: fd64280eb5 (parent 3311f65844)
3 changed files with 20 additions and 4 deletions


@@ -21,6 +21,12 @@
 #ifdef CONFIG_HW_PERF_EVENTS
+enum arm_pmu_state {
+	ARM_PMU_STATE_OFF = 0,
+	ARM_PMU_STATE_GOING_DOWN,
+	ARM_PMU_STATE_RUNNING,
+};
+
 /* The events for a given PMU register set. */
 struct pmu_hw_events {
 	/*
@@ -64,6 +70,7 @@ struct arm_pmu {
 	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
+	int		pmu_state;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;


@@ -34,6 +34,7 @@ static char *descriptions =
 	" 9 Perf: arm64: make request irq pmu-dependent\n"
 	"10 Perf: arm64: tracectr: initialize counts after hotplug\n"
 	"11 Perf: arm64: Refine disable/enable in tracecounters\n"
+	"12 Perf: arm64: fix disable of pmu irq during hotplug\n"
 ;
 
 static ssize_t desc_read(struct file *fp, char __user *buf,


@@ -440,6 +440,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		armpmu_release_hardware(armpmu);
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
@@ -1146,6 +1149,12 @@ static void armv8pmu_free_irq(struct arm_pmu *cpu_pmu)
 	if (irq <= 0)
 		return;
 
+	/*
+	 * If a cpu comes online during this function, do not enable its irq.
+	 * If a cpu goes offline, it should disable its irq.
+	 */
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	if (irq_is_percpu(irq)) {
 		if (msm_pmu_use_irq) {
 			on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
@@ -1161,6 +1170,7 @@ static void armv8pmu_free_irq(struct arm_pmu *cpu_pmu)
 				free_irq(irq, cpu_pmu);
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
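
The ordering above is what protects cpus changing state mid-teardown:
the flag leaves RUNNING before any irq is torn down, and only becomes
OFF once the irq has been released everywhere. A minimal sketch of that
ordering, assuming a hypothetical disable_pmu_irq_all_cpus() helper in
place of the irq_is_percpu()/msm_pmu_use_irq branches shown in the
diff:

/* Sketch only; disable_pmu_irq_all_cpus() is a stand-in, not a real API. */
static void pmu_teardown_irqs(struct arm_pmu *cpu_pmu, int irq)
{
	/* From here on, a cpu coming online must not arm its irq. */
	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;

	disable_pmu_irq_all_cpus(irq);
	free_irq(irq, cpu_pmu);

	/* Only now may a cpu going offline skip the (freed) irq. */
	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}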
@@ -1465,7 +1475,6 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 	struct pmu *pmu;
 	u64 lcpu = (u64)hcpu;
 	int cpu = (int)lcpu;
-	int perf_running;
 	unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
 	int ret = NOTIFY_DONE;
@@ -1479,13 +1488,12 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 	if (!cpu_pmu)
 		return ret;
 
-	perf_running = atomic_read(&cpu_pmu->active_events);
 	switch (masked_action) {
 	case CPU_DOWN_PREPARE:
 		if (cpu_pmu->save_pm_registers)
 			smp_call_function_single(cpu,
 				cpu_pmu->save_pm_registers, hcpu, 1);
-		if (perf_running) {
+		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
 			if (cpu_has_active_perf(cpu))
 				smp_call_function_single(cpu,
 					armpmu_update_counters, NULL, 1);
@@ -1504,7 +1512,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 			cpu_pmu->reset(NULL);
 		if (cpu_pmu->restore_pm_registers)
 			cpu_pmu->restore_pm_registers(hcpu);
-		if (perf_running) {
+		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
 			/* Arm the PMU IRQ before appearing. */
 			if (msm_pmu_use_irq && cpu_pmu->plat_device) {
 				irq = platform_get_irq(cpu_pmu->plat_device, 0);
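
Read together, the two notifier checks realize the tristate from the
commit message: a cpu going down treats GOING_DOWN like RUNNING (its
irq may still be armed and must be disabled), while a cpu coming up
treats GOING_DOWN like OFF (it must never arm an irq mid-teardown). A
condensed illustration of just that decision logic follows; the hotplug
action used for the coming-up path and the enable/disable helpers are
assumptions, not taken from the patch.

/* Illustration only: pm-register save/restore and the msm irq plumbing
 * of the real cpu_pmu_notify() are elided; helpers are hypothetical. */
static int pmu_hotplug_notify(unsigned long masked_action, int cpu,
			      struct arm_pmu *cpu_pmu)
{
	switch (masked_action) {
	case CPU_DOWN_PREPARE:
		/* != OFF: even mid-teardown this cpu's irq may be armed. */
		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF)
			disable_pmu_irq_on(cpu);
		break;
	case CPU_STARTING:	/* coming-up path; exact action assumed */
		/* == RUNNING only: never arm an irq while GOING_DOWN. */
		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING)
			enable_pmu_irq_on(cpu);
		break;
	}
	return NOTIFY_OK;
}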