tick: Dynamically set broadcast irq affinity
When a CPU goes to a deep idle state where its local timer is shut down, it notifies the time framework to use the broadcast timer instead. Unfortunately, the broadcast device could wake up any CPU, including an idle one which is not concerned by the wake up at all. So in the worst case an idle CPU will wake up just to send an IPI to the CPU whose timer expired.

Provide an opt-in feature CLOCK_EVT_FEAT_DYNIRQ which tells the core that it should set the interrupt affinity of the broadcast interrupt to the CPU which has the earliest expiry time. This avoids unnecessary spurious wakeups and IPIs.

[ tglx: Adopted to cpumask rework, silenced an uninitialized warning, massaged changelog ]

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: viresh.kumar@linaro.org
Cc: jacob.jun.pan@linux.intel.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: santosh.shilimkar@ti.com
Cc: linaro-kernel@lists.linaro.org
Cc: patches@linaro.org
Cc: rickard.andersson@stericsson.com
Cc: vincent.guittot@linaro.org
Cc: linus.walleij@stericsson.com
Cc: john.stultz@linaro.org
Link: http://lkml.kernel.org/r/1362219013-18173-3-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Change-Id: I6a880a39dd595526b80a6d72b88be74163513da9
Signed-off-by: Karthik Parsha <kparsha@codeaurora.org>
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
parent 0b5c47ff0b
commit 062f9b42cd

3 changed files with 37 additions and 9 deletions
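For context, the DYNIRQ optimization only applies to broadcast clockevent devices that advertise the new feature bit and expose a valid ->irq; the core then retargets that interrupt to the CPU with the earliest expiry. Below is a minimal, hypothetical driver sketch of what opting in looks like with the 3.x-era clockevents API. The device name, rating, IRQ number and register-programming stubs are illustrative assumptions, not part of this commit.

/*
 * Hypothetical broadcast timer driver sketch: the opt-in is simply
 * setting CLOCK_EVT_FEAT_DYNIRQ and filling in a valid ->irq so the
 * core may call irq_set_affinity() on the broadcast interrupt.
 */
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>

static int example_bc_set_next_event(unsigned long cycles,
                                     struct clock_event_device *evt)
{
        /* program the hardware comparator here (stub) */
        return 0;
}

static void example_bc_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
{
        /* enable or shut down the timer block as requested by the core (stub) */
}

static struct clock_event_device example_bc_timer = {
        .name           = "example-bc-timer",
        .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ,
        .rating         = 400,
        .set_next_event = example_bc_set_next_event,
        .set_mode       = example_bc_set_mode,
        .irq            = 42,   /* placeholder for the real broadcast IRQ */
};

static void __init example_bc_timer_init(void)
{
        example_bc_timer.cpumask = cpu_possible_mask;
        clockevents_config_and_register(&example_bc_timer, 32768, 2, 0xffffffff);
}

Without CLOCK_EVT_FEAT_DYNIRQ the behaviour is unchanged; with it, tick_broadcast_set_event() calls tick_broadcast_set_affinity() after successfully programming the device, as the diff below shows.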
include/linux/clockchips.h

@@ -55,6 +55,11 @@ enum clock_event_nofitiers {
 #define CLOCK_EVT_FEAT_C3STOP          0x000008
 #define CLOCK_EVT_FEAT_DUMMY           0x000010
 
+/*
+ * Core shall set the interrupt affinity dynamically in broadcast mode
+ */
+#define CLOCK_EVT_FEAT_DYNIRQ          0x000020
+
 /**
  * struct clock_event_device - clock event device descriptor
  * @event_handler:     Assigned by the framework to be called by the low
include/linux/tick.h

@@ -8,6 +8,7 @@
 
 #include <linux/clockchips.h>
 #include <linux/irqflags.h>
+#include <linux/hrtimer.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
kernel/time/tick-broadcast.c

@@ -370,14 +370,34 @@ struct cpumask *tick_get_broadcast_oneshot_mask(void)
         return to_cpumask(tick_broadcast_oneshot_mask);
 }
 
-static int tick_broadcast_set_event(ktime_t expires, int force)
+/*
+ * Set broadcast interrupt affinity
+ */
+static void tick_broadcast_set_affinity(struct clock_event_device *bc,
+                                        const struct cpumask *cpumask)
 {
-        struct clock_event_device *bc = tick_broadcast_device.evtdev;
+        if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
+                return;
+
+        if (cpumask_equal(bc->cpumask, cpumask))
+                return;
+
+        bc->cpumask = cpumask;
+        irq_set_affinity(bc->irq, bc->cpumask);
+}
+
+static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+                                    ktime_t expires, int force)
+{
+        int ret;
 
         if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
                 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
-        return clockevents_program_event(bc, expires, force);
+        ret = clockevents_program_event(bc, expires, force);
+        if (!ret)
+                tick_broadcast_set_affinity(bc, cpumask_of(cpu));
+        return ret;
 }
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)

@@ -406,7 +426,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 {
         struct tick_device *td;
         ktime_t now, next_event;
-        int cpu;
+        int cpu, next_cpu = 0;
 
         raw_spin_lock(&tick_broadcast_lock);
 again:

@@ -417,10 +437,12 @@ again:
         /* Find all expired events */
         for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
                 td = &per_cpu(tick_cpu_device, cpu);
-                if (td->evtdev->next_event.tv64 <= now.tv64)
+                if (td->evtdev->next_event.tv64 <= now.tv64) {
                         cpumask_set_cpu(cpu, to_cpumask(tmpmask));
-                else if (td->evtdev->next_event.tv64 < next_event.tv64)
+                } else if (td->evtdev->next_event.tv64 < next_event.tv64) {
                         next_event.tv64 = td->evtdev->next_event.tv64;
+                        next_cpu = cpu;
+                }
         }
 
         /*

@@ -443,7 +465,7 @@ again:
                  * Rearm the broadcast device. If event expired,
                  * repeat the above
                  */
-                if (tick_broadcast_set_event(next_event, 0))
+                if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
                         goto again;
         }
         raw_spin_unlock(&tick_broadcast_lock);

@@ -486,7 +508,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
                         cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
                         clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
                         if (dev->next_event.tv64 < bc->next_event.tv64)
-                                tick_broadcast_set_event(dev->next_event, 1);
+                                tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
                 }
         } else {
                 if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {

@@ -555,7 +577,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
                         clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
                         tick_broadcast_init_next_event(to_cpumask(tmpmask),
                                                        tick_next_period);
-                        tick_broadcast_set_event(tick_next_period, 1);
+                        tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
                 } else
                         bc->next_event.tv64 = KTIME_MAX;
         } else {