Skip to content

Commit fc4df71

Browse files
Stepan Moskovchenko authored and Shruthi Krishna committed
arm: Support the safe WFE sequence for Krait CPUs
Certain versions of the Krait processor require a specific code sequence to be executed prior to executing a WFE instruction, to permit that instruction to place the processor into a low-power state.

Change-Id: I614f8ce24936c793c91d5c43c7a7931a04f11dda
Signed-off-by: Shruthi Krishna <[email protected]>
1 parent 0b8ece3 commit fc4df71

File tree

3 files changed

+67
-14
lines changed

3 files changed

+67
-14
lines changed

arch/arm/include/asm/spinlock.h

Lines changed: 44 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77

88
#include <asm/processor.h>
99

10+
extern int msm_krait_need_wfe_fixup;
11+
1012
/*
1113
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
1214
* extensions, so when running on UP, we have to patch these instructions away.
@@ -34,6 +36,31 @@
3436
#define WFE() ALT_SMP("wfe", "nop")
3537
#endif
3638

39+
/*
 * The fixup involves disabling interrupts during execution of the WFE
 * instruction. This could potentially lead to deadlock if a thread is trying
 * to acquire a spinlock which is being released from an interrupt context.
 *
 * WFE_SAFE(fixup, tmp) emits inline-asm text. Both arguments are string
 * operand names (e.g. "%[fixup]", "%[tmp]"): `fixup` holds the runtime
 * msm_krait_need_wfe_fixup flag on entry and is clobbered; `tmp` is a
 * scratch register used to save/restore CPSR.
 */
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
/*
 * Fast path: if the fixup flag is zero, issue a plain conditional WFE and
 * branch past the workaround. Slow path: mask interrupts, clear bit 16 of
 * the CP15 c15/c0/5 register (presumably a Krait power-control register —
 * confirm against Qualcomm errata), WFE, set the bit back, then restore
 * the saved CPSR (which also re-enables interrupts).
 */
#define WFE_SAFE(fixup, tmp) 				\
"	mrs	" tmp ", cpsr\n"			\
"	cmp	" fixup ", #0\n"			\
"	wfeeq\n"					\
"	beq	10f\n"					\
"	cpsid	if\n"					\
"	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
"	bic	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"	wfe\n"						\
"	orr	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"10:	msr	cpsr_cf, " tmp "\n"
#else
/* No fixup configured: a bare WFE is safe. */
#define WFE_SAFE(fixup, tmp)	"	wfe\n"
#endif
63+
3764
static inline void dsb_sev(void)
3865
{
3966
#if __LINUX_ARM_ARCH__ >= 7
@@ -71,18 +98,18 @@ static inline void dsb_sev(void)
7198

7299
static inline void arch_spin_lock(arch_spinlock_t *lock)
73100
{
74-
unsigned long tmp;
101+
unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
75102

76103
__asm__ __volatile__(
77104
"1: ldrex %[tmp], [%[lock]]\n"
78105
" teq %[tmp], #0\n"
79106
" beq 2f\n"
80-
WFE()
107+
WFE_SAFE("%[fixup]", "%[tmp]")
81108
"2:\n"
82109
" strexeq %[tmp], %[bit0], [%[lock]]\n"
83110
" teqeq %[tmp], #0\n"
84111
" bne 1b"
85-
: [tmp] "=&r" (tmp)
112+
: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
86113
: [lock] "r" (&lock->lock), [bit0] "r" (1)
87114
: "cc");
88115

@@ -149,6 +176,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
149176
static inline void arch_spin_lock(arch_spinlock_t *lock)
150177
{
151178
unsigned long tmp, ticket, next_ticket;
179+
unsigned long fixup = msm_krait_need_wfe_fixup;
152180

153181
/* Grab the next ticket and wait for it to be "served" */
154182
__asm__ __volatile__(
@@ -161,13 +189,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
161189
"2:\n"
162190
#ifdef CONFIG_CPU_32v6K
163191
" beq 3f\n"
164-
WFE()
192+
WFE_SAFE("%[fixup]", "%[tmp]")
165193
"3:\n"
166194
#endif
167195
" ldr %[tmp], [%[lockaddr]]\n"
168196
" cmp %[ticket], %[tmp], lsr #16\n"
169197
" bne 2b"
170-
: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
198+
: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
199+
[next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
171200
: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
172201
: "cc");
173202
smp_mb();
@@ -216,15 +245,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
216245

217246
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
218247
{
219-
unsigned long ticket;
248+
unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;
220249

221250
/* Wait for now_serving == next_ticket */
222251
__asm__ __volatile__(
223252
#ifdef CONFIG_CPU_32v6K
224253
" cmpne %[lockaddr], %[lockaddr]\n"
225254
"1:\n"
226255
" beq 2f\n"
227-
WFE()
256+
WFE_SAFE("%[fixup]", "%[tmp]")
228257
"2:\n"
229258
#else
230259
"1:\n"
@@ -234,7 +263,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
234263
" uxth %[ticket], %[ticket]\n"
235264
" cmp %[ticket], #0\n"
236265
" bne 1b"
237-
: [ticket]"=&r" (ticket)
266+
: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
267+
[fixup]"+r" (fixup)
238268
: [lockaddr]"r" (&lock->lock)
239269
: "cc");
240270
}
@@ -262,18 +292,18 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
262292

263293
static inline void arch_write_lock(arch_rwlock_t *rw)
264294
{
265-
unsigned long tmp;
295+
unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
266296

267297
__asm__ __volatile__(
268298
"1: ldrex %[tmp], [%[lock]]\n"
269299
" teq %[tmp], #0\n"
270300
" beq 2f\n"
271-
WFE()
301+
WFE_SAFE("%[fixup]", "%[tmp]")
272302
"2:\n"
273303
" strexeq %[tmp], %[bit31], [%[lock]]\n"
274304
" teq %[tmp], #0\n"
275305
" bne 1b"
276-
: [tmp] "=&r" (tmp)
306+
: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
277307
: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
278308
: "cc");
279309

@@ -330,18 +360,18 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
330360
*/
331361
static inline void arch_read_lock(arch_rwlock_t *rw)
332362
{
333-
unsigned long tmp, tmp2;
363+
unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;
334364

335365
__asm__ __volatile__(
336366
"1: ldrex %[tmp], [%[lock]]\n"
337367
" adds %[tmp], %[tmp], #1\n"
338368
" strexpl %[tmp2], %[tmp], [%[lock]]\n"
339369
" bpl 2f\n"
340-
WFE()
370+
WFE_SAFE("%[fixup]", "%[tmp]")
341371
"2:\n"
342372
" rsbpls %[tmp], %[tmp2], #0\n"
343373
" bmi 1b"
344-
: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2)
374+
: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
345375
: [lock] "r" (&rw->lock)
346376
: "cc");
347377

arch/arm/mach-msm/Kconfig

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,7 @@ config ARCH_MSM8960
168168
select MULTI_IRQ_HANDLER
169169
select MSM_PM8X60 if PM
170170
select HOLES_IN_ZONE if SPARSEMEM
171+
select MSM_KRAIT_WFE_FIXUP
171172

172173
config ARCH_MSM8930
173174
bool "MSM8930"
@@ -205,6 +206,7 @@ config ARCH_MSM8930
205206
select MULTI_IRQ_HANDLER
206207
select MSM_PM8X60 if PM
207208
select HOLES_IN_ZONE if SPARSEMEM
209+
select MSM_KRAIT_WFE_FIXUP
208210

209211
config ARCH_APQ8064
210212
bool "APQ8064"
@@ -222,6 +224,7 @@ config ARCH_APQ8064
222224
select MULTI_IRQ_HANDLER
223225
select MSM_PM8X60 if PM
224226
select HOLES_IN_ZONE if SPARSEMEM
227+
select MSM_KRAIT_WFE_FIXUP
225228

226229
config ARCH_MSMCOPPER
227230
bool "MSM Copper"
@@ -308,6 +311,9 @@ config ARCH_MSM_KRAITMP
308311
select MSM_SMP
309312
bool
310313

314+
config MSM_KRAIT_WFE_FIXUP
315+
bool
316+
311317
config ARCH_MSM_CORTEX_A5
312318
bool
313319
select HAVE_HW_BRKPT_RESERVED_RW_ACCESS

arch/arm/mm/init.c

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
#include <asm/sizes.h>
2929
#include <asm/tlb.h>
3030
#include <asm/fixmap.h>
31+
#include <asm/cputype.h>
3132

3233
#include <asm/mach/arch.h>
3334
#include <asm/mach/map.h>
@@ -36,6 +37,8 @@
3637

3738
static unsigned long phys_initrd_start __initdata = 0;
3839
static unsigned long phys_initrd_size __initdata = 0;
40+
int msm_krait_need_wfe_fixup;
41+
EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
3942

4043
static int __init early_initrd(char *p)
4144
{
@@ -891,3 +894,17 @@ static int __init keepinitrd_setup(char *__unused)
891894

892895
__setup("keepinitrd", keepinitrd_setup);
893896
#endif
897+
898+
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
/*
 * Detect whether this CPU needs the safe-WFE workaround and publish the
 * result in msm_krait_need_wfe_fixup for the spinlock WFE_SAFE() sequence.
 *
 * Runs once at boot (pure_initcall). Always returns 0; on non-affected
 * CPUs msm_krait_need_wfe_fixup keeps its zero initial value.
 */
static int __init msm_krait_wfe_init(void)
{
	unsigned int val, midr;

	/* Drop the revision field; compare implementer/variant/part only. */
	midr = read_cpuid_id() & 0xffffff00;
	/*
	 * 0x511f0400 / 0x510f0600 presumably identify the affected Krait
	 * variants — confirm against the Qualcomm errata list.
	 */
	if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
		/* Read CP15 c15/c0/5; bit 16 set means the fixup is needed. */
		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
		msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
	}
	return 0;
}
pure_initcall(msm_krait_wfe_init);
#endif

0 commit comments

Comments (0)