/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>

/*
 * BluesMan: Porting from the 3.10.61 kernel to 3.10.73.
 * Without these definitions, compilation fails when the queue
 * spinlock code is built.
 */
#ifdef CONFIG_OSQ_MUTEX_AND_QUEUE_SPINLOCK
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#endif
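
/*
 * Illustrative sketch (hypothetical caller and field names): the macros
 * above simply expand to a full barrier, so a caller pairing a barrier
 * with an atomic decrement gets ordering like this:
 *
 *	obj->state = OBJ_CLOSED;
 *	smp_mb__before_atomic_dec();	(orders the store above...)
 *	atomic_dec(&obj->refcnt);	(...against this decrement)
 */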
/*
 * Provide __deprecated wrappers for the new interface, avoid flag day changes.
 * We need the ugly external functions to break header recursion hell.
 */
#ifndef smp_mb__before_atomic_inc
static inline void __deprecated smp_mb__before_atomic_inc(void)
{
	extern void __smp_mb__before_atomic(void);
	__smp_mb__before_atomic();
}
#endif

#ifndef smp_mb__after_atomic_inc
static inline void __deprecated smp_mb__after_atomic_inc(void)
{
	extern void __smp_mb__after_atomic(void);
	__smp_mb__after_atomic();
}
#endif

#ifndef smp_mb__before_atomic_dec
static inline void __deprecated smp_mb__before_atomic_dec(void)
{
	extern void __smp_mb__before_atomic(void);
	__smp_mb__before_atomic();
}
#endif

#ifndef smp_mb__after_atomic_dec
static inline void __deprecated smp_mb__after_atomic_dec(void)
{
	extern void __smp_mb__after_atomic(void);
	__smp_mb__after_atomic();
}
#endif
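
/*
 * Migration sketch (hypothetical caller): new code should use the two
 * generic barriers, which pair with any atomic RMW operation, instead
 * of the six deprecated op-specific variants wrapped above:
 *
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->refcnt);
 *	smp_mb__after_atomic();
 */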
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}
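
/*
 * Usage sketch (hypothetical counter): saturate a statistics counter at
 * INT_MAX instead of letting it wrap:
 *
 *	if (!atomic_add_unless(&stats->hits, 1, INT_MAX))
 *		pr_debug("hit counter saturated\n");
 */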
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
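
/*
 * Usage sketch (hypothetical object): the classic "take a reference only
 * if the object is still live" pattern, where a refcount of zero means
 * teardown has already begun:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is going away; do not touch it)
 */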
/**
 * atomic_inc_not_zero_hint - increment unless the number is zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() takes a hint of the probable
 * value of the atomic.  This lets the processor skip the initial memory
 * read before starting the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
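
/*
 * Usage sketch (hypothetical pool): if the counter is known to sit at a
 * steady value most of the time, passing that value skips the initial
 * read on the fast path:
 *
 *	if (!atomic_inc_not_zero_hint(&pool->users, 1))
 *		return -ENOENT;
 */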
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;

	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif
#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;

	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif
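
/*
 * Usage sketch (hypothetical semaphore-like counter): these two helpers
 * suit counters whose sign encodes a state, e.g. negative meaning
 * "blocked" and non-negative meaning "available":
 *
 *	if (!atomic_inc_unless_negative(&s->count))
 *		return -EAGAIN;	(counter was negative; caller must wait)
 */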
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
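
/*
 * Usage sketch (hypothetical resource pool): a try-acquire on a counting
 * resource; a negative return means nothing was available and the
 * counter was left unchanged:
 *
 *	if (atomic_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;
 */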
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
	int old;
	int new;

	do {
		old = atomic_read(v);
		new = old | i;
	} while (atomic_cmpxchg(v, old, new) != old);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
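
/*
 * Usage sketch (hypothetical flag bits): atomically OR in flag bits that
 * other CPUs may be updating concurrently; the cmpxchg loop above
 * retries until its read-modify-write wins:
 *
 *	atomic_or(PAGE_DIRTY_FLAG | PAGE_REFERENCED_FLAG, &page->state);
 */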
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#endif /* _LINUX_ATOMIC_H */