Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
sched: remove migration notification from RT class
Commit 88a7e37d265 (sched: provide per cpu-cgroup option to notify on migrations) added a notifier call when a task is moved to a different CPU. Unfortunately, the two call sites in the RT sched class where this occurs run with a runqueue lock held. This can result in a deadlock if the notifier call attempts to do something like wake up a task.

Fortunately, the benefit of 88a7e37d265 comes mainly from notifying on migration of non-RT tasks, so we can simply ignore the movements of RT tasks.

CRs-Fixed: 491370
Change-Id: I8849d826bf1eeaf85a6f6ad872acb475247c5926
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
Signed-off-by: Mekala Natarajan <mekalan@codeaurora.org>
Parent: 2dcb52455c
Commit: 14e8dbd63a
1 changed file with 1 addition and 19 deletions
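The deadlock described in the commit message is lock recursion on the runqueue lock: the RT-class call sites invoked the migration notifier chain while holding rq->lock, and a callback that wakes a task re-enters the scheduler, which takes a runqueue lock again. Below is a minimal, hypothetical sketch of the kind of callback that would trigger this; the function, the `stats_task` kthread, and the wakeup are assumptions for illustration, and only the locking context comes from the patch.

#include <linux/notifier.h>
#include <linux/sched.h>

/* Hypothetical notifier callback, not part of this patch. */
static struct task_struct *stats_task;	/* assumed helper kthread */

static int on_task_migrate(struct notifier_block *nb,
			   unsigned long dest_cpu, void *src_cpu)
{
	/*
	 * wake_up_process() reaches try_to_wake_up(), which acquires a
	 * runqueue lock.  If this chain is invoked from push_rt_task()
	 * or pull_rt_task() with rq->lock already held, that acquisition
	 * can recurse on the same lock and deadlock.
	 */
	if (stats_task)
		wake_up_process(stats_task);
	return NOTIFY_OK;
}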
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1604,7 +1604,6 @@ static int push_rt_task(struct rq *rq)
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
 	int ret = 0;
-	bool moved = false;
 
 	if (!rq->rt.overloaded)
 		return 0;
@@ -1674,7 +1673,6 @@ retry:
 
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	moved = true;
 	activate_task(lowest_rq, next_task, 0);
 	ret = 1;
 
@@ -1685,11 +1683,6 @@ retry:
 out:
 	put_task_struct(next_task);
 
-	if (moved && task_notify_on_migrate(next_task))
-		atomic_notifier_call_chain(&migration_notifier_head,
-					   cpu_of(lowest_rq),
-					   (void *)cpu_of(rq));
-
 	return ret;
 }
 
@@ -1703,10 +1696,8 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p = NULL;
+	struct task_struct *p;
 	struct rq *src_rq;
-	bool moved = false;
-	int src_cpu = 0;
 
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
@@ -1767,10 +1758,6 @@ static int pull_rt_task(struct rq *this_rq)
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
 			activate_task(this_rq, p, 0);
-
-			moved = true;
-			src_cpu = cpu_of(src_rq);
-
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
@@ -1782,11 +1769,6 @@ skip:
 			double_unlock_balance(this_rq, src_rq);
 	}
 
-	if (moved && task_notify_on_migrate(p))
-		atomic_notifier_call_chain(&migration_notifier_head,
-				this_cpu,
-				(void *)src_cpu);
-
 	return ret;
 }
 
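For reference, the removed call sites passed the destination CPU as the notifier `action` argument and the source CPU cast into the `data` pointer. The sketch below shows how a consumer of the per-cpu-cgroup notification might register against `migration_notifier_head`; the callback name, the extern placement, and the init hook are assumptions, and only the chain itself and the argument convention come from the code removed above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Defined in the scheduler core of this tree (declaration assumed here). */
extern struct atomic_notifier_head migration_notifier_head;

static int migration_notice(struct notifier_block *nb,
			     unsigned long dest_cpu, void *data)
{
	int src_cpu = (int)(long)data;	/* source CPU, as cast by the callers */

	pr_debug("task migrated: cpu %d -> cpu %lu\n", src_cpu, dest_cpu);
	return NOTIFY_OK;
}

static struct notifier_block migration_nb = {
	.notifier_call = migration_notice,
};

static int __init migration_listener_init(void)
{
	return atomic_notifier_chain_register(&migration_notifier_head,
					      &migration_nb);
}
early_initcall(migration_listener_init);

Note that with this patch such a listener only fires for non-RT tasks, which is the trade-off the commit message describes.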