[PATCH] x86-64: get rid of ARCH_HAVE_XTIME_LOCK

ARCH_HAVE_XTIME_LOCK is used by the x86_64 arch.  This arch needs to place a
read-only copy of xtime_lock into the vsyscall page.  This read-only copy is
named __xtime_lock, and xtime_lock is defined in
arch/x86_64/kernel/vmlinux.lds.S as an alias.  So the declaration of
xtime_lock in kernel/timer.c was guarded by an ARCH_HAVE_XTIME_LOCK define,
defined to true on x86_64.

We can get the same result with __attribute__((weak)) in the declaration.  The
linker should do the job.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
This commit is contained in:
Eric Dumazet 2007-02-13 13:26:21 +01:00 committed by Andi Kleen
parent 26054ed02b
commit 5809f9d442
3 changed files with 2 additions and 9 deletions

View file

@@ -56,11 +56,6 @@ extern struct vxtime_data vxtime;
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
 extern int sysctl_vsyscall;
-extern seqlock_t xtime_lock;
-
-extern int sysctl_vsyscall;
-
-#define ARCH_HAVE_XTIME_LOCK 1
 #endif /* __KERNEL__ */

View file

@@ -90,7 +90,7 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 extern struct timespec xtime;
 extern struct timespec wall_to_monotonic;
-extern seqlock_t xtime_lock;
+extern seqlock_t xtime_lock __attribute__((weak));
 void timekeeping_init(void);

View file

@@ -1162,11 +1162,9 @@ static inline void calc_load(unsigned long ticks)
  * This read-write spinlock protects us from races in SMP while
  * playing with xtime and avenrun.
  */
-#ifndef ARCH_HAVE_XTIME_LOCK
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
 EXPORT_SYMBOL(xtime_lock);
-#endif
 
 /*
  * This function runs timers and the timer-tq in bottom half context.