diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f8317dfed605..8f32475d0eef 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1604,7 +1604,6 @@ static int push_rt_task(struct rq *rq)
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
 	int ret = 0;
-	bool moved = false;
 
 	if (!rq->rt.overloaded)
 		return 0;
@@ -1674,7 +1673,6 @@ retry:
 
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	moved = true;
 	activate_task(lowest_rq, next_task, 0);
 	ret = 1;
 
@@ -1685,11 +1683,6 @@ retry:
 out:
 	put_task_struct(next_task);
 
-	if (moved && task_notify_on_migrate(next_task))
-		atomic_notifier_call_chain(&migration_notifier_head,
-					   cpu_of(lowest_rq),
-					   (void *)cpu_of(rq));
-
 	return ret;
 }
 
@@ -1703,10 +1696,8 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p = NULL;
+	struct task_struct *p;
 	struct rq *src_rq;
-	bool moved = false;
-	int src_cpu = 0;
 
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
@@ -1767,10 +1758,6 @@ static int pull_rt_task(struct rq *this_rq)
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
 			activate_task(this_rq, p, 0);
-
-			moved = true;
-			src_cpu = cpu_of(src_rq);
-
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
@@ -1782,11 +1769,6 @@ skip:
 		double_unlock_balance(this_rq, src_rq);
 	}
 
-	if (moved && task_notify_on_migrate(p))
-		atomic_notifier_call_chain(&migration_notifier_head,
-					   this_cpu,
-					   (void *)src_cpu);
-
 	return ret;
 }