arm: Support the safe WFE sequence for Krait CPUs

Certain versions of the Krait processor require a specific
code sequence to be executed prior to executing a WFE
instruction to permit that instruction to place the
processor into a low-power state.

Change-Id: I308adc691f110a323cbd84e9779675ac045826fa
Signed-off-by: Stepan Moskovchenko <stepanm@codeaurora.org>
This commit is contained in:
Stepan Moskovchenko 2012-07-13 20:40:46 -07:00 committed by Stephen Boyd
parent 359cd77da2
commit 43ec03179a
3 changed files with 86 additions and 11 deletions

View File

@ -7,6 +7,8 @@
#include <asm/processor.h>
extern int msm_krait_need_wfe_fixup;
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
@ -34,6 +36,31 @@
#define WFE() ALT_SMP("wfe", "nop")
#endif
/*
 * The fixup involves disabling interrupts during execution of the WFE
 * instruction. This could potentially lead to deadlock if a thread is trying
 * to acquire a spinlock which is being released from an interrupt context.
 */
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
/*
 * WFE_SAFE(fixup, tmp): inline-asm fragment that issues WFE with the Krait
 * workaround applied.
 *
 *   fixup - asm operand holding the runtime need-fixup flag; also reused as
 *           scratch for the CP15 read-modify-write, so it is clobbered.
 *   tmp   - asm operand used to save and later restore CPSR; clobbered.
 *
 * If "fixup" is zero this CPU does not need the workaround and a plain WFE
 * is executed (wfeeq) before branching past the fixup path.  Otherwise:
 * IRQs and FIQs are masked (cpsid if), bit 16 of the p15,7,c15,c0,5
 * register is cleared (bic/mcr, isb), the WFE executes, bit 16 is set
 * again (orr/mcr, isb), and the saved CPSR is restored at local label 10,
 * unmasking interrupts.  NOTE(review): the semantics of the CP15 register
 * at p15, 7, c15, c0, 5 and of bit 0x10000 are Krait-specific and not
 * visible here -- confirm against the Qualcomm Krait documentation.
 */
#define WFE_SAFE(fixup, tmp) \
" mrs " tmp ", cpsr\n" \
" cmp " fixup ", #0\n" \
" wfeeq\n" \
" beq 10f\n" \
" cpsid if\n" \
" mrc p15, 7, " fixup ", c15, c0, 5\n" \
" bic " fixup ", " fixup ", #0x10000\n" \
" mcr p15, 7, " fixup ", c15, c0, 5\n" \
" isb\n" \
" wfe\n" \
" orr " fixup ", " fixup ", #0x10000\n" \
" mcr p15, 7, " fixup ", c15, c0, 5\n" \
" isb\n" \
"10: msr cpsr_cf, " tmp "\n"
#else
/* CPUs without the erratum: WFE_SAFE degenerates to a plain WFE. */
#define WFE_SAFE(fixup, tmp) " wfe\n"
#endif
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
@ -65,7 +92,7 @@ static inline void dsb_sev(void)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long tmp, flags = 0;
u32 newval;
arch_spinlock_t lockval;
@ -80,7 +107,32 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
: "cc");
while (lockval.tickets.next != lockval.tickets.owner) {
if (msm_krait_need_wfe_fixup) {
local_irq_save(flags);
__asm__ __volatile__(
"mrc p15, 7, %0, c15, c0, 5\n"
: "=r" (tmp)
:
: "cc");
tmp &= ~(0x10000);
__asm__ __volatile__(
"mcr p15, 7, %0, c15, c0, 5\n"
:
: "r" (tmp)
: "cc");
isb();
}
wfe();
if (msm_krait_need_wfe_fixup) {
tmp |= 0x10000;
__asm__ __volatile__(
"mcr p15, 7, %0, c15, c0, 5\n"
:
: "r" (tmp)
: "cc");
isb();
local_irq_restore(flags);
}
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
}
@ -139,18 +191,18 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
"1: ldrex %0, [%2]\n"
" teq %0, #0\n"
" beq 2f\n"
WFE()
WFE_SAFE("%1", "%0")
"2:\n"
" strexeq %0, %2, [%1]\n"
" strexeq %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "=&r" (tmp), "+r" (fixup)
: "r" (&rw->lock), "r" (0x80000000)
: "cc");
@ -207,18 +259,18 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
"1: ldrex %0, [%3]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
" strexpl %1, %0, [%3]\n"
" bpl 2f\n"
WFE()
WFE_SAFE("%2", "%0")
"2:\n"
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "=&r" (tmp), "=&r" (tmp2), "+r" (fixup)
: "r" (&rw->lock)
: "cc");

View File

@ -173,6 +173,7 @@ config ARCH_MSM8960
select HOLES_IN_ZONE if SPARSEMEM
select MSM_RUN_QUEUE_STATS
select ARM_HAS_SG_CHAIN
select MSM_KRAIT_WFE_FIXUP
config ARCH_MSM8930
bool "MSM8930"
@ -204,6 +205,7 @@ config ARCH_MSM8930
select MSM_PM8X60 if PM
select HOLES_IN_ZONE if SPARSEMEM
select ARM_HAS_SG_CHAIN
select MSM_KRAIT_WFE_FIXUP
config ARCH_APQ8064
bool "APQ8064"
@ -230,6 +232,7 @@ config ARCH_APQ8064
select MIGHT_HAVE_PCI
select ARCH_SUPPORTS_MSI
select ARM_HAS_SG_CHAIN
select MSM_KRAIT_WFE_FIXUP
config ARCH_MSM8974
bool "MSM8974"
@ -374,6 +377,9 @@ config ARCH_MSM_CORTEXMP
select MSM_SMP
bool
config MSM_KRAIT_WFE_FIXUP
bool
config ARCH_MSM_CORTEX_A5
bool
select HAVE_HW_BRKPT_RESERVED_RW_ACCESS

View File

@ -32,6 +32,7 @@
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/cputype.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@ -40,6 +41,8 @@
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
int msm_krait_need_wfe_fixup;
EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
static int __init early_initrd(char *p)
{
@ -910,3 +913,17 @@ static int __init keepinitrd_setup(char *__unused)
__setup("keepinitrd", keepinitrd_setup);
#endif
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
/*
 * Boot-time detection of whether this CPU needs the Krait WFE workaround.
 *
 * Compares the CPU ID (MIDR) -- with the low byte masked off, so the
 * revision field is ignored -- against two specific part values, and on a
 * match samples bit 16 of the p15,7,c15,c0,5 register to decide whether
 * the WFE_SAFE sequence must be used.  The result is published in
 * msm_krait_need_wfe_fixup for the lock slow paths to consult.
 * NOTE(review): 0x511f0400 / 0x510f0600 are presumably the affected Krait
 * MIDR values, and bit 0x10000 presumably indicates the erratum is live --
 * confirm both against Qualcomm documentation.
 */
static int __init msm_krait_wfe_init(void)
{
unsigned int val, midr;
/* Drop the revision byte before matching the part number. */
midr = read_cpuid_id() & 0xffffff00;
if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
/* Fixup is needed only while bit 16 is set in the CP15 register. */
msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
}
return 0;
}
/* Runs early (pure_initcall) so the flag is valid before SMP lock use. */
pure_initcall(msm_krait_wfe_init);
#endif