locking/mcs: Allow architecture specific asm files to be used for contended case

This patch allows each architecture to add its own assembly-optimized
arch_mcs_spin_lock_contended and arch_mcs_spin_unlock_contended for the
MCS lock and unlock functions.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: George Spelvin <linux@horizon.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Alex Shi <alex.shi@linaro.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Figo.zhang" <figo1802@gmail.com>
Cc: "Paul E.McKenney" <paulmck@linux.vnet.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew R Wilcox <matthew.r.wilcox@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1390347382.3138.67.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Git-commit: ddf1d169c0a489d498c1799a7043904a43b0c159
[joonwoop@codeaurora.org: Resolved merge conflicts; we don't have changes
for arches other than ARM/ARM64]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Authored by Tim Chen on 2014-01-21 15:36:22 -08:00; committed by Joonwoo Park
parent 9306555406
commit 3caa6fcb90
31 changed files with 157 additions and 0 deletions

@@ -2,4 +2,5 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
generic-y += trace_clock.h

@@ -20,6 +20,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h

@@ -13,6 +13,7 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h

@@ -21,6 +21,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h

@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += trace_clock.h

@@ -19,6 +19,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += mutex.h

@@ -24,6 +24,7 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += mmu.h
generic-y += mmu_context.h

@@ -5,5 +5,6 @@ header-y += arch-v32/
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += trace_clock.h

@@ -1,4 +1,5 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
generic-y += trace_clock.h

@@ -25,6 +25,7 @@ generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += pci.h

@@ -2,4 +2,5 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += trace_clock.h

@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += trace_clock.h

@@ -15,6 +15,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += mutex.h
generic-y += percpu.h

@@ -23,6 +23,7 @@ generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h

@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
generic-y += syscalls.h
generic-y += trace_clock.h

@@ -1,2 +1,3 @@
# MIPS headers
generic-y += mcs_spinlock.h
generic-y += trace_clock.h

@@ -1,4 +1,6 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += mcs_spinlock.h
>>>>>>> ddf1d16... locking/mcs: Allow architecture specific asm files to be used for contended case
generic-y += trace_clock.h

@@ -34,6 +34,7 @@ generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h

@@ -11,6 +11,7 @@ generic-y += kdebug.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h

@@ -1,4 +1,5 @@
generic-y += clkdev.h
generic-y += mcs_spinlock.h
generic-y += rwsem.h
generic-y += trace_clock.h

@@ -1,4 +1,5 @@
generic-y += clkdev.h
generic-y += mcs_spinlock.h
generic-y += trace_clock.h

@@ -2,4 +2,5 @@
header-y +=
generic-y += clkdev.h
generic-y += mcs_spinlock.h
generic-y += trace_clock.h

@@ -14,6 +14,7 @@ generic-y += irq_regs.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += param.h

@@ -10,6 +10,7 @@ generic-y += irq_regs.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += mutex.h
generic-y += serial.h

@@ -17,6 +17,7 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h

@@ -13,6 +13,7 @@ generic-y += hw_irq.h
generic-y += io.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mutex.h
generic-y += param.h
generic-y += pci.h

@@ -24,6 +24,7 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mman.h
generic-y += module.h
generic-y += msgbuf.h

@@ -5,3 +5,4 @@ genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
generic-y += clkdev.h
generic-y += mcs_spinlock.h

@@ -18,6 +18,7 @@ generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += percpu.h
generic-y += resource.h
generic-y += scatterlist.h

@@ -0,0 +1,13 @@
#ifndef __ASM_MCS_SPINLOCK_H
#define __ASM_MCS_SPINLOCK_H

/*
 * Architectures can define their own:
 *
 *   arch_mcs_spin_lock_contended(l)
 *   arch_mcs_spin_unlock_contended(l)
 *
 * See kernel/locking/mcs_spinlock.c.
 */

#endif /* __ASM_MCS_SPINLOCK_H */
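
For illustration, an architecture that wants an assembly-optimized contended
path ships its own <asm/mcs_spinlock.h> defining the two hooks, instead of
picking up the stub above via the generic-y lines added throughout this patch.
The sketch below is hypothetical and not part of this patch: the file path is
invented, and the ARM-style wfe()/dsb_sev() event helpers merely stand in for
whatever wait/wake sequence an architecture actually provides (production ARM
code would also use the exclusive monitor to close the missed-event window
between the load and the wfe).

/* Hypothetical arch/arm/include/asm/mcs_spinlock.h -- sketch only */
#ifndef __ASM_MCS_SPINLOCK_H
#define __ASM_MCS_SPINLOCK_H

/* Park the CPU in WFE between polls instead of busy-spinning. */
#define arch_mcs_spin_lock_contended(l)                                 \
do {                                                                    \
        while (!(smp_load_acquire(l)))                                  \
                wfe();                                                  \
} while (0)

/* Publish the handoff, then wake any CPU parked in WFE. */
#define arch_mcs_spin_unlock_contended(l)                               \
do {                                                                    \
        smp_store_release((l), 1);                                      \
        dsb_sev();                                                      \
} while (0)

#endif /* __ASM_MCS_SPINLOCK_H */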

@@ -0,0 +1,114 @@
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked; /* 1 if lock acquired */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)                                 \
do {                                                                    \
        while (!(smp_load_acquire(l)))                                  \
                arch_mutex_cpu_relax();                                 \
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)                               \
        smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across
 * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
 * For applications that need a full barrier across multiple cpus
 * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
 * used after mcs_lock.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference of the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on this node->locked until the previous lock holder sets the node->locked
 * in mcs_spin_unlock().
 *
 * We don't inline mcs_spin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
        struct mcs_spinlock *prev;

        /* Init node */
        node->locked = 0;
        node->next   = NULL;

        prev = xchg(lock, node);
        if (likely(prev == NULL)) {
                /*
                 * Lock acquired; there is no need to set node->locked to 1.
                 * Threads spin only on their own node->locked value for lock
                 * acquisition. However, since this thread can immediately
                 * acquire the lock and does not proceed to spin on its own
                 * node->locked, this value won't be used. If a debug mode is
                 * needed to audit lock status, then set node->locked here.
                 */
                return;
        }
        ACCESS_ONCE(prev->next) = node;

        /* Wait until the lock holder passes the lock down. */
        arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
        struct mcs_spinlock *next = ACCESS_ONCE(node->next);

        if (likely(!next)) {
                /*
                 * Release the lock by setting it to NULL.
                 */
                if (likely(cmpxchg(lock, node, NULL) == node))
                        return;
                /* Wait until the next pointer is set. */
                while (!(next = ACCESS_ONCE(node->next)))
                        arch_mutex_cpu_relax();
        }

        /* Pass lock to next waiter. */
        arch_mcs_spin_unlock_contended(&next->locked);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
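
To make the calling convention concrete, here is a minimal usage sketch with
invented names (example_lock, example_work); it assumes a context where
spinning is acceptable. The node must stay alive until mcs_spin_unlock()
returns, hence the stack variable spanning the critical section.

static struct mcs_spinlock *example_lock;       /* NULL when unlocked */

static void example_work(void)
{
        struct mcs_spinlock node;               /* this CPU's queue entry */

        mcs_spin_lock(&example_lock, &node);
        /*
         * Per the barrier note above, a caller that needs a full barrier
         * across an unlock/lock pair would add smp_mb__after_unlock_lock()
         * here.
         */
        /* ... critical section; later arrivals spin on their own nodes ... */
        mcs_spin_unlock(&example_lock, &node);
}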