timer/hrtimer: reprogram remote CPU's timer hardware

It's possible for timers to be added to a remote cpu's timer list
without reprogramming that cpu's timer hardware to take note of the
new timer. This can cause unnecessary latency before the timer is
serviced. Fix the problem by sending an IPI to the remote cpu whenever
a timer enqueued on it expires before any other existing timer on
that cpu.

Change-Id: I69f6a267bb65e558ec35f802d678a67ba1ee1c29
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
This commit is contained in:
Srivatsa Vaddagiri 2015-03-04 17:59:41 +05:30 committed by Pavankumar Kondeti
parent 241c4c9ed5
commit 1c68c8be81
3 changed files with 74 additions and 9 deletions

View File

@ -165,6 +165,7 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
* @cpu: this cpu (to which the base is associated with)
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set: Indicates that clock was set from irq context.
* @expires_next: absolute time of the next event which was scheduled
@ -180,6 +181,7 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
int cpu;
unsigned int active_bases;
unsigned int clock_was_set;
#ifdef CONFIG_HIGH_RES_TIMERS

View File

@ -49,6 +49,7 @@
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/irq_work.h>
#include <asm/uaccess.h>
@ -729,6 +730,20 @@ static void retrigger_next_event(void *arg)
raw_spin_unlock(&base->lock);
}
#ifdef CONFIG_SMP
/*
 * irq_work callback, executed on the target CPU after a remote enqueue.
 *
 * A timer enqueued from another CPU does not reprogram this CPU's timer
 * hardware; raising HRTIMER_SOFTIRQ here makes this CPU re-evaluate its
 * next expiry. Reprogramming only applies in high-resolution mode, hence
 * the hrtimer_hres_active() check.
 */
static void raise_hrtimer_softirq(struct irq_work *arg)
{
	if (!hrtimer_hres_active())
		return;

	raise_softirq(HRTIMER_SOFTIRQ);
}

/* Per-CPU irq_work used to kick a remote CPU into reprogramming its timer. */
static DEFINE_PER_CPU(struct irq_work, hrtimer_kick_work) = {
	.func = raise_hrtimer_softirq,
};
#endif
/*
* Switch to high resolution mode
*/
@ -793,6 +808,22 @@ static inline void retrigger_next_event(void *arg) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
#ifdef CONFIG_SMP
/*
 * Queue hrtimer_kick_work on @cpu so that CPU reprograms its timer
 * hardware after a timer expiring earlier than its current earliest
 * timer was enqueued on it remotely.
 *
 * get_cpu()/put_cpu() disable preemption around the cpu_online()
 * check and the irq_work queueing.
 */
static void kick_remote_cpu(int cpu)
{
	get_cpu();

	if (cpu_online(cpu))
		irq_work_queue_on(&per_cpu(hrtimer_kick_work, cpu), cpu);

	put_cpu();
}
#else
/* UP: there is no remote CPU to kick. */
static inline void kick_remote_cpu(int cpu) { }
#endif
/*
* Clock realtime was set
*
@ -1009,7 +1040,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
int ret, leftmost;
int ret, leftmost, kick = 0, cpu;
base = lock_hrtimer_base(timer, &flags);
@ -1039,11 +1070,13 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
leftmost = enqueue_hrtimer(timer, new_base);
cpu = new_base->cpu_base->cpu;
kick = (leftmost && (cpu != smp_processor_id()));
/*
* Only allow reprogramming if the new base is on this CPU.
* (it might still be on another CPU if the timer was pending)
*
* XXX send_remote_softirq() ?
*/
if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
&& hrtimer_enqueue_reprogram(timer, new_base)) {
@ -1063,6 +1096,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unlock_hrtimer_base(timer, &flags);
if (kick)
kick_remote_cpu(cpu);
return ret;
}
@ -1677,6 +1713,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
cpu_base->cpu = cpu;
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);

View File

@ -386,17 +386,23 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
list_add_tail(&timer->entry, vec);
}
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
/*
 * Add @timer to @base's timer wheel and report whether it became the
 * earliest pending timer.
 *
 * Returns 1 when the timer is non-deferrable and expires before
 * base->next_timer (it is now the leftmost timer, so the caller may
 * need to wake/kick the base's CPU), 0 otherwise.
 *
 * Caller must hold base->lock (NOTE(review): implied by the __mod_timer/
 * add_timer_on call sites in this patch — confirm against full file).
 */
static int internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	int leftmost = 0;

	__internal_add_timer(base, timer);

	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer)) {
			/* New soonest expiry on this base. */
			leftmost = 1;
			base->next_timer = timer->expires;
		}
		base->active_timers++;
	}

	return leftmost;
}
#ifdef CONFIG_TIMER_STATS
@ -739,7 +745,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
{
struct tvec_base *base, *new_base;
unsigned long flags;
int ret = 0 , cpu;
int ret = 0, cpu, leftmost;
timer_stats_timer_set_start_info(timer);
BUG_ON(!timer->function);
@ -752,10 +758,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
debug_activate(timer, expires);
cpu = smp_processor_id();
#ifdef CONFIG_SMP
if (base != tvec_base_deferral) {
#endif
cpu = smp_processor_id();
#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
if (!pinned && get_sysctl_timer_migration())
@ -785,7 +792,23 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
#endif
timer->expires = expires;
internal_add_timer(base, timer);
leftmost = internal_add_timer(base, timer);
#ifdef CONFIG_SCHED_HMP
/*
* Check whether the other CPU is in dynticks mode and needs
* to be triggered to reevaluate the timer wheel.
* We are protected against the other CPU fiddling
* with the timer by holding the timer base lock. This also
* makes sure that a CPU on the way to stop its tick can not
* evaluate the timer wheel.
*
* This test is needed only for CONFIG_SCHED_HMP, since !CONFIG_SCHED_HMP
* selects a non-idle cpu as the target of timer migration.
*/
if (cpu != smp_processor_id() && leftmost)
wake_up_nohz_cpu(cpu);
#endif
out_unlock:
spin_unlock_irqrestore(&base->lock, flags);
@ -944,13 +967,15 @@ void add_timer_on(struct timer_list *timer, int cpu)
{
struct tvec_base *base = per_cpu(tvec_bases, cpu);
unsigned long flags;
int leftmost;
timer_stats_timer_set_start_info(timer);
BUG_ON(timer_pending(timer) || !timer->function);
spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base);
debug_activate(timer, timer->expires);
internal_add_timer(base, timer);
leftmost = internal_add_timer(base, timer);
/*
* Check whether the other CPU is in dynticks mode and needs
* to be triggered to reevaluate the timer wheel.
@ -959,7 +984,8 @@ void add_timer_on(struct timer_list *timer, int cpu)
* makes sure that a CPU on the way to stop its tick can not
* evaluate the timer wheel.
*/
wake_up_nohz_cpu(cpu);
if (leftmost)
wake_up_nohz_cpu(cpu);
spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);