mutex: Use p->on_cpu for the adaptive spin
Since we now have p->on_cpu unconditionally available, use it to
re-implement mutex_spin_on_owner.

Requested-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110405152728.826338173@chello.nl
parent 3ca7a440da
commit c6eb3dda25

7 changed files with 40 additions and 57 deletions
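At a glance: the old mutex_spin_on_owner() translated the owner's thread_info into a CPU number and runqueue and compared rq->curr against the owner, while the new version simply polls owner->on_cpu (made unconditionally available by the parent commit) under rcu_read_lock(). Below is a minimal userspace analogue of the resulting spin-wait pattern; the names (struct task, spin_mutex, spin_on_owner) are illustrative only, and the RCU lifetime handling of the real code is omitted:

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct task {
	atomic_bool on_cpu;              /* true while running on a CPU */
};

struct spin_mutex {
	_Atomic(struct task *) owner;    /* NULL when unlocked */
};

/* Userspace analogue of the new mutex_spin_on_owner(): spin while the
 * same task owns the lock and is executing; return true if the lock
 * was released while we watched (worth retrying the acquisition),
 * false if we should stop spinning and block. */
static bool spin_on_owner(struct spin_mutex *lock, struct task *owner)
{
	while (atomic_load(&lock->owner) == owner &&
	       atomic_load(&owner->on_cpu)) {
		sched_yield();           /* stand-in for arch_mutex_cpu_relax() */
	}

	/* Lock handed to another task: heavy contention, give up. */
	return atomic_load(&lock->owner) == NULL;
}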
include/linux/mutex.h
@@ -51,7 +51,7 @@ struct mutex {
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
-	struct thread_info	*owner;
+	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char 		*name;
include/linux/sched.h
@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
kernel/mutex-debug.c
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	DEBUG_LOCKS_WARN_ON(lock->owner != current);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }
kernel/mutex-debug.h
@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
kernel/mutex.c
@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 */
 
 	for (;;) {
-		struct thread_info *owner;
+		struct task_struct *owner;
 
 		/*
 		 * If we own the BKL, then don't spin. The owner of
kernel/mutex.h
@@ -19,7 +19,7 @@
 #ifdef CONFIG_SMP
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
kernel/sched.c
@@ -4173,70 +4173,53 @@ need_resched:
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	bool ret = false;
+
+	rcu_read_lock();
+	if (lock->owner != owner)
+		goto fail;
+
+	/*
+	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
+	 * lock->owner still matches owner, if that fails, owner might
+	 * point to free()d memory, if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
+	 */
+	barrier();
+
+	ret = owner->on_cpu;
+fail:
+	rcu_read_unlock();
+
+	return ret;
+}
+
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
  */
-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 {
-	unsigned int cpu;
-	struct rq *rq;
-
 	if (!sched_feat(OWNER_SPIN))
 		return 0;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Need to access the cpu field knowing that
-	 * DEBUG_PAGEALLOC could have unmapped it if
-	 * the mutex owner just released it and exited.
-	 */
-	if (probe_kernel_address(&owner->cpu, cpu))
-		return 0;
-#else
-	cpu = owner->cpu;
-#endif
-
-	/*
-	 * Even if the access succeeded (likely case),
-	 * the cpu field may no longer be valid.
-	 */
-	if (cpu >= nr_cpumask_bits)
-		return 0;
-
-	/*
-	 * We need to validate that we can do a
-	 * get_cpu() and that we have the percpu area.
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	rq = cpu_rq(cpu);
-
-	for (;;) {
-		/*
-		 * Owner changed, break to re-assess state.
-		 */
-		if (lock->owner != owner) {
-			/*
-			 * If the lock has switched to a different owner,
-			 * we likely have heavy contention. Return 0 to quit
-			 * optimistic spinning and not contend further:
-			 */
-			if (lock->owner)
-				return 0;
-			break;
-		}
-
-		/*
-		 * Is that owner really running on that cpu?
-		 */
-		if (task_thread_info(rq->curr) != owner || need_resched())
+	while (owner_running(lock, owner)) {
+		if (need_resched())
 			return 0;
 
 		arch_mutex_cpu_relax();
 	}
 
+	/*
+	 * If the owner changed to another task there is likely
+	 * heavy contention, stop spinning.
+	 */
+	if (lock->owner)
+		return 0;
+
 	return 1;
 }
 #endif
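For context (not part of this patch): the caller interprets the return value as 0 = the owner changed or we must reschedule, so stop optimistic spinning and block; 1 = the lock was released while we watched, so retry the trylock. A simplified sketch of the surrounding loop in __mutex_lock_common() in kernel/mutex.c, with debug and failure paths elided:

	for (;;) {
		struct task_struct *owner;

		/* Spin only while the current owner is running on a CPU. */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;		/* owner scheduled out: block instead */

		/* The lock may now be free: try to take it. */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/* Give up the spin if someone else needs this CPU. */
		if (!owner && need_resched())
			break;

		arch_mutex_cpu_relax();
	}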