Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-11-07 04:09:21 +00:00)
sched: remove extra call overhead for schedule()
Lai Jiangshan's patch reminded me that I promised Nick to remove that extra call overhead in schedule().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090313112300.927414207@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 13318a7186
commit ff743345bf

2 changed files with 7 additions and 9 deletions
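Before this patch, schedule() was a thin wrapper that looped over an inner __schedule(), so every trip through the scheduler paid one extra function call. The patch merges the wrapper into the scheduler body itself and updates the one external caller of __schedule(), the mutex slowpath, accordingly. For orientation, the old wrapper as reconstructed from the kernel/sched.c hunks below:

asmlinkage void __sched schedule(void)
{
need_resched:
	preempt_disable();
	__schedule();			/* the extra call this patch removes */
	preempt_enable_no_resched();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		goto need_resched;
}
EXPORT_SYMBOL(schedule);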
kernel/mutex.c

@@ -248,7 +248,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		__schedule();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
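Note the preemption dance around the new call: the mutex slowpath runs with preemption disabled (via a preempt_disable() earlier in __mutex_lock_common(), outside this hunk), and the merged schedule() now manages the preempt count itself, so the caller must drop its own count first. preempt_enable_no_resched() is used rather than preempt_enable() because checking for a pending reschedule is pointless when schedule() is invoked on the very next line. For reference, the two macros as they were defined in kernels of that era (include/linux/preempt.h, CONFIG_PREEMPT builds, abridged):

/* The _no_resched variant only lowers the preempt count; plain
 * preempt_enable() may additionally enter the scheduler right away
 * through preempt_check_resched().
 */
#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)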
kernel/sched.c

@@ -5131,13 +5131,15 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched __schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
+need_resched:
+	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -5194,15 +5196,9 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
-}
 
-asmlinkage void __sched schedule(void)
-{
-need_resched:
-	preempt_disable();
-	__schedule();
 	preempt_enable_no_resched();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+	if (need_resched())
 		goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
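Putting the two hunks together, the merged schedule() loops on a label instead of re-entering the scheduler through a wrapper, and the TIF_NEED_RESCHED test becomes a plain need_resched() check. Its resulting shape, reconstructed from the diff with the unchanged body elided:

asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();		/* folded in from the old wrapper */
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_qsctr_inc(cpu);

	/* ... task-switch logic, unchanged and elided; it contains the
	 * need_resched_nonpreemptible: label targeted below ... */

	if (unlikely(reacquire_kernel_lock(current) < 0))
		goto need_resched_nonpreemptible;

	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;	/* loop, instead of the old extra call */
}
EXPORT_SYMBOL(schedule);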