mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-09-21 12:10:52 +00:00
sched, rt: Update rq clock when unthrottling of an otherwise idle CPU
If an RT task is awakened while it's rt_rq is throttled, the time between wakeup/enqueue and unthrottle/selection may be accounted as rt_time if the CPU is idle. Set rq->skip_clock_update negative upon throttle release to tell put_prev_task() that we need a clock update. Reported-by: Thomas Giesel <skoe@directbox.com> Signed-off-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/1304059010.7472.1.camel@marge.simson.net Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
3e51e3edfd
commit
61eadef6a9
|
@@ -466,7 +466,7 @@ struct rq {
|
||||||
u64 nohz_stamp;
|
u64 nohz_stamp;
|
||||||
unsigned char nohz_balance_kick;
|
unsigned char nohz_balance_kick;
|
||||||
#endif
|
#endif
|
||||||
unsigned int skip_clock_update;
|
int skip_clock_update;
|
||||||
|
|
||||||
/* capture load from *all* tasks on this cpu: */
|
/* capture load from *all* tasks on this cpu: */
|
||||||
struct load_weight load;
|
struct load_weight load;
|
||||||
|
@@ -652,7 +652,7 @@ static void update_rq_clock(struct rq *rq)
|
||||||
{
|
{
|
||||||
s64 delta;
|
s64 delta;
|
||||||
|
|
||||||
if (rq->skip_clock_update)
|
if (rq->skip_clock_update > 0)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
|
delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
|
||||||
|
@@ -4127,7 +4127,7 @@ static inline void schedule_debug(struct task_struct *prev)
|
||||||
|
|
||||||
static void put_prev_task(struct rq *rq, struct task_struct *prev)
|
static void put_prev_task(struct rq *rq, struct task_struct *prev)
|
||||||
{
|
{
|
||||||
if (prev->on_rq)
|
if (prev->on_rq || rq->skip_clock_update < 0)
|
||||||
update_rq_clock(rq);
|
update_rq_clock(rq);
|
||||||
prev->sched_class->put_prev_task(rq, prev);
|
prev->sched_class->put_prev_task(rq, prev);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -562,6 +562,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
|
||||||
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
|
||||||
rt_rq->rt_throttled = 0;
|
rt_rq->rt_throttled = 0;
|
||||||
enqueue = 1;
|
enqueue = 1;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Force a clock update if the CPU was idle,
|
||||||
|
* lest wakeup -> unthrottle time accumulate.
|
||||||
|
*/
|
||||||
|
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
|
||||||
|
rq->skip_clock_update = -1;
|
||||||
}
|
}
|
||||||
if (rt_rq->rt_time || rt_rq->rt_nr_running)
|
if (rt_rq->rt_time || rt_rq->rt_nr_running)
|
||||||
idle = 0;
|
idle = 0;
|
||||||
|
|
Loading…
Reference in a new issue