Mirror of https://github.com/S3NEO/android_kernel_samsung_msm8226.git (synced 2024-11-07 03:47:13 +00:00)
sched: rt: fix SMP bandwidth balancing for throttled groups
Now we exceed the runtime and get throttled - the period rollover tick will subtract the cpu quota from the runtime and check if we're below quota. However with this cpu having a very small portion of the runtime it will not refresh as fast as it should.

Therefore, also rebalance the runtime when we're throttled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Daniel K." <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ada18de2eb
commit b79f3833d8
1 changed file with 29 additions and 12 deletions
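To see why the old behaviour keeps a CPU throttled for a long time, look at the refresh step in do_sched_rt_period_timer() in the diff below: each period only refunds this CPU's own runtime, which may have been balanced away to sibling CPUs. The following stand-alone user-space program is an illustrative simulation with made-up numbers, not kernel code; it only mirrors the rt_time -= min(rt_time, overrun*runtime) step and the effect of pulling quota back before the refresh, as the patch does.

/*
 * Illustrative user-space simulation, NOT kernel code.  The numbers are
 * made up; only the refresh step mirrors the kernel line
 *     rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 * from do_sched_rt_period_timer() in the diff below.
 */
#include <stdio.h>

static unsigned long long min_ull(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* hypothetical values, in microseconds */
        unsigned long long rt_time  = 900;  /* accumulated RT execution time */
        unsigned long long runtime  = 50;   /* this CPU's quota after its runtime was balanced away */
        unsigned long long restored = 950;  /* quota after pulling runtime back from sibling CPUs */
        int periods = 0;

        /* old behaviour: each period refunds only the tiny local quota,
         * so the group stays throttled (rt_time >= runtime) for many periods */
        while (rt_time >= runtime) {
                rt_time -= min_ull(rt_time, 1 * runtime);  /* assume overrun == 1 */
                periods++;
        }
        printf("without rebalance: unthrottled after %d periods\n", periods);

        /* patched behaviour: balance_runtime() first pulls quota back,
         * so a single period refresh clears the backlog */
        rt_time = 900;
        rt_time -= min_ull(rt_time, 1 * restored);
        printf("with rebalance:    unthrottled after 1 period (rt_time = %llu)\n", rt_time);

        return 0;
}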
@@ -228,6 +228,28 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif
 
+#ifdef CONFIG_SMP
+static int do_balance_runtime(struct rt_rq *rt_rq);
+
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+        int more = 0;
+
+        if (rt_rq->rt_time > rt_rq->rt_runtime) {
+                spin_unlock(&rt_rq->rt_runtime_lock);
+                more = do_balance_runtime(rt_rq);
+                spin_lock(&rt_rq->rt_runtime_lock);
+        }
+
+        return more;
+}
+#else
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+        return 0;
+}
+#endif
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
         int i, idle = 1;
@@ -247,6 +269,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                         u64 runtime;
 
                         spin_lock(&rt_rq->rt_runtime_lock);
+                        if (rt_rq->rt_throttled)
+                                balance_runtime(rt_rq);
                         runtime = rt_rq->rt_runtime;
                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
@@ -267,7 +291,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 }
 
 #ifdef CONFIG_SMP
-static int balance_runtime(struct rt_rq *rt_rq)
+static int do_balance_runtime(struct rt_rq *rt_rq)
 {
         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -428,17 +452,10 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
                 return 0;
 
-#ifdef CONFIG_SMP
-        if (rt_rq->rt_time > runtime) {
-                spin_unlock(&rt_rq->rt_runtime_lock);
-                balance_runtime(rt_rq);
-                spin_lock(&rt_rq->rt_runtime_lock);
-
-                runtime = sched_rt_runtime(rt_rq);
-                if (runtime == RUNTIME_INF)
-                        return 0;
-        }
-#endif
+        balance_runtime(rt_rq);
+        runtime = sched_rt_runtime(rt_rq);
+        if (runtime == RUNTIME_INF)
+                return 0;
 
         if (rt_rq->rt_time > runtime) {
                 rt_rq->rt_throttled = 1;