mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
39d2f1ab2a
XFS object flushing doesn't quite match existing completion semantics. It mixed exclusive access with completion. That is, we need to mark an object as being flushed before flushing it to disk, and then block any other attempt to flush it until the completion occurs. We do this by adding an extra count to the completion before we start using them. However, we still need to determine if there is a completion in progress, and allow non-blocking attempts for completions to decrement the count. To do this we introduce: int try_wait_for_completion(struct completion *x) returns a failure status if done == 0, otherwise decrements done to zero and returns a "started" status. This is provided to allow counted completions to begin safely while holding object locks in inverted order. int completion_done(struct completion *x) returns 1 if there is no waiter, 0 if there is a waiter (i.e. a completion in progress). This replaces the use of semaphores for providing this exclusion and completion mechanism. SGI-PV: 981498 SGI-Modid: xfs-linux-melb:xfs-kern:31816a Signed-off-by: David Chinner <david@fromorbit.com> Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
103 lines
2.5 KiB
C
103 lines
2.5 KiB
C
#ifndef __LINUX_COMPLETION_H
|
|
#define __LINUX_COMPLETION_H
|
|
|
|
/*
|
|
* (C) Copyright 2001 Linus Torvalds
|
|
*
|
|
* Atomic wait-for-completion handler data structures.
|
|
* See kernel/sched.c for details.
|
|
*/
|
|
|
|
#include <linux/wait.h>
|
|
|
|
/*
 * struct completion - counted "wait for an event" synchronization primitive.
 *
 * done counts complete() calls that have not yet been consumed by a
 * waiter; wait queues tasks sleeping in wait_for_completion().  All
 * accesses to done are serialized by wait.lock (see the inline helpers
 * below and kernel/sched.c).
 */
struct completion {
	unsigned int done;	/* number of outstanding completions */
	wait_queue_head_t wait;	/* tasks blocked in wait_for_completion() */
};
|
|
|
|
/* Static initializer: zero outstanding completions, empty wait queue. */
#define COMPLETION_INITIALIZER(work) \
	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

/*
 * Runtime initializer usable in an expression context; needed when the
 * completion lives on the stack and lockdep requires a non-constant
 * initialization (see DECLARE_COMPLETION_ONSTACK below).
 */
#define COMPLETION_INITIALIZER_ONSTACK(work) \
	({ init_completion(&work); work; })

/* Define and statically initialize a completion in one statement. */
#define DECLARE_COMPLETION(work) \
	struct completion work = COMPLETION_INITIALIZER(work)

/*
 * Lockdep needs to run a non-constant initializer for on-stack
 * completions - so we use the _ONSTACK() variant for those that
 * are on the kernel stack:
 */
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
	struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
#endif
|
|
|
|
static inline void init_completion(struct completion *x)
|
|
{
|
|
x->done = 0;
|
|
init_waitqueue_head(&x->wait);
|
|
}
|
|
|
|
/*
 * Blocking waiters - implemented in kernel/sched.c.  Each consumes one
 * complete() (decrements done) before returning.
 */
extern void wait_for_completion(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
						   unsigned long timeout);
extern unsigned long wait_for_completion_interruptible_timeout(
			struct completion *x, unsigned long timeout);

/* Signal one waiter / all current and future waiters - kernel/sched.c. */
extern void complete(struct completion *);
extern void complete_all(struct completion *);

/*
 * Reinitialize a completion for reuse.  NOTE: not serialized against
 * concurrent waiters or completers; callers must ensure exclusivity.
 */
#define INIT_COMPLETION(x)	((x).done = 0)
|
|
|
|
|
|
/**
|
|
* try_wait_for_completion - try to decrement a completion without blocking
|
|
* @x: completion structure
|
|
*
|
|
* Returns: 0 if a decrement cannot be done without blocking
|
|
* 1 if a decrement succeeded.
|
|
*
|
|
* If a completion is being used as a counting completion,
|
|
* attempt to decrement the counter without blocking. This
|
|
* enables us to avoid waiting if the resource the completion
|
|
* is protecting is not available.
|
|
*/
|
|
static inline bool try_wait_for_completion(struct completion *x)
|
|
{
|
|
int ret = 1;
|
|
|
|
spin_lock_irq(&x->wait.lock);
|
|
if (!x->done)
|
|
ret = 0;
|
|
else
|
|
x->done--;
|
|
spin_unlock_irq(&x->wait.lock);
|
|
return ret;
|
|
}
|
|
|
|
/**
|
|
* completion_done - Test to see if a completion has any waiters
|
|
* @x: completion structure
|
|
*
|
|
* Returns: 0 if there are waiters (wait_for_completion() in progress)
|
|
* 1 if there are no waiters.
|
|
*
|
|
*/
|
|
static inline bool completion_done(struct completion *x)
|
|
{
|
|
int ret = 1;
|
|
|
|
spin_lock_irq(&x->wait.lock);
|
|
if (!x->done)
|
|
ret = 0;
|
|
spin_unlock_irq(&x->wait.lock);
|
|
return ret;
|
|
}
|
|
|
|
#endif
|