Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-11-07 04:09:21 +00:00)
29671f22a8
rwsem_is_locked() tests ->activity without taking any locks, so we should always keep ->activity consistent. However, the code in __rwsem_do_wake() breaks this rule: it updates ->activity only after _all_ readers have been woken up, which may give some reader a stale ->activity value and thus make rwsem_is_locked() behave incorrectly.

Quote from Andrew:

" - we have one or more processes sleeping in down_read(), waiting for access.
  - we wake one or more processes up without altering ->activity.
  - they start to run and they do rwsem_is_locked(). This incorrectly returns "false", because the waker process is still crunching away in __rwsem_do_wake().
  - the waker now alters ->activity, but it was too late. "

So we need a spinlock to protect this. And rwsem_is_locked() should not block, so we use spin_trylock_irqsave().

[akpm@linux-foundation.org: simplify code]
Reported-by: Brian Behlendorf <behlendorf1@llnl.gov>
Cc: Ben Woodard <bwoodard@llnl.gov>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: WANG Cong <amwang@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
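The change itself lives in lib/rwsem-spinlock.c rather than in the header shown below. A minimal sketch of the fixed rwsem_is_locked(), assuming the spinlock-based rwsem implementation described by this header (the exact code in the commit may differ slightly): if the trylock fails, another CPU holds ->wait_lock and is manipulating the semaphore, so it is reported as locked.

/* Sketch of the fix: sample ->activity only while holding ->wait_lock,
 * but never block; a failed trylock means the rwsem is busy anyway. */
int rwsem_is_locked(struct rw_semaphore *sem)
{
	int ret = 1;
	unsigned long flags;

	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
		ret = (sem->activity != 0);
		spin_unlock_irqrestore(&sem->wait_lock, flags);
	}
	return ret;
}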
74 lines · 2.2 KiB · C
/* rwsem-spinlock.h: fallback C implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */

#ifndef _LINUX_RWSEM_SPINLOCK_H
#define _LINUX_RWSEM_SPINLOCK_H

#ifndef _LINUX_RWSEM_H
#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
#endif

#include <linux/spinlock.h>
#include <linux/list.h>

#ifdef __KERNEL__

#include <linux/types.h>

struct rwsem_waiter;

/*
 * the rw-semaphore definition
 * - if activity is 0 then there are no active readers or writers
 * - if activity is +ve then that is the number of active readers
 * - if activity is -1 then there is one active writer
 * - if wait_list is not empty, then there are processes waiting for the semaphore
 */
struct rw_semaphore {
	__s32			activity;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

extern void __down_read(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
extern int rwsem_is_locked(struct rw_semaphore *sem);

#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
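For context, a hypothetical usage sketch of the API this header backs. The semaphore name my_sem and the function reader_example() are illustrative only; real callers use the down_read()/up_read() wrappers from linux/rwsem.h rather than the double-underscore primitives declared above.

/* Illustrative only: a statically declared rwsem and a reader-side
 * critical section, showing how ->activity evolves. */
#include <linux/rwsem.h>

static DECLARE_RWSEM(my_sem);		/* ->activity starts at 0: unlocked */

static void reader_example(void)
{
	down_read(&my_sem);		/* ->activity becomes +1 (one reader) */
	/* ... read shared data ... */
	up_read(&my_sem);		/* ->activity drops back to 0 */
}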