77
88#include <asm/processor.h>
99
10+ extern int msm_krait_need_wfe_fixup ;
11+
1012/*
1113 * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
1214 * extensions, so when running on UP, we have to patch these instructions away.
3436#define WFE () ALT_SMP("wfe", "nop")
3537#endif
3638
/*
 * The fixup involves disabling interrupts during execution of the WFE
 * instruction. This could potentially lead to deadlock if a thread is
 * trying to acquire a spinlock which is being released from an interrupt
 * context.
 *
 * WFE_SAFE(fixup, tmp) expands to a string fragment for inline assembly:
 *   fixup - register operand initially holding msm_krait_need_wfe_fixup;
 *           clobbered (used as scratch) when the fixup path is taken.
 *   tmp   - scratch register used to save and restore the CPSR.
 *
 * When @fixup is zero, a plain WFE is issued (wfeeq) and the fixup path
 * is branched over (beq 10f). Otherwise IRQs and FIQs are masked
 * (cpsid if), bit 16 of the implementation-defined CP15 register
 * (p15, 7, <reg>, c15, c0, 5) is cleared around the WFE and set again
 * afterwards — presumably a Krait-specific power-control register; see
 * the SoC errata documentation — and finally the saved CPSR is restored
 * at local label 10.
 */
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
#define WFE_SAFE(fixup, tmp) 				\
"	mrs	" tmp ", cpsr\n"			\
"	cmp	" fixup ", #0\n"			\
"	wfeeq\n"					\
"	beq	10f\n"					\
"	cpsid	if\n"					\
"	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
"	bic	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"	wfe\n"						\
"	orr	" fixup ", " fixup ", #0x10000\n"	\
"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
"	isb\n"						\
"10:	msr	cpsr_cf, " tmp "\n"
#else
/* No fixup required on this configuration: a bare WFE is safe. */
#define WFE_SAFE(fixup, tmp)	"	wfe\n"
#endif
63+
3764static inline void dsb_sev (void )
3865{
3966#if __LINUX_ARM_ARCH__ >= 7
@@ -71,18 +98,18 @@ static inline void dsb_sev(void)
7198
7299static inline void arch_spin_lock (arch_spinlock_t * lock )
73100{
74- unsigned long tmp ;
101+ unsigned long tmp , fixup = msm_krait_need_wfe_fixup ;
75102
76103 __asm__ __volatile__(
77104"1: ldrex %[tmp], [%[lock]]\n"
78105" teq %[tmp], #0\n"
79106" beq 2f\n"
80- WFE ( )
107+ WFE_SAFE ( "%[fixup]" , "%[tmp]" )
81108"2:\n"
82109" strexeq %[tmp], %[bit0], [%[lock]]\n"
83110" teqeq %[tmp], #0\n"
84111" bne 1b"
85- : [tmp ] "= & r " (tmp)
112+ : [tmp ] "= & r " (tmp), [fixup] " + r " (fixup)
86113 : [lock ] "r " (&lock->lock), [bit0] " r " (1)
87114 : " cc ");
88115
@@ -149,6 +176,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
149176static inline void arch_spin_lock (arch_spinlock_t * lock )
150177{
151178 unsigned long tmp , ticket , next_ticket ;
179+ unsigned long fixup = msm_krait_need_wfe_fixup ;
152180
153181 /* Grab the next ticket and wait for it to be "served" */
154182 __asm__ __volatile__(
@@ -161,13 +189,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
161189"2:\n"
162190#ifdef CONFIG_CPU_32v6K
163191" beq 3f\n"
164- WFE ( )
192+ WFE_SAFE ( "%[fixup]" , "%[tmp]" )
165193"3:\n"
166194#endif
167195" ldr %[tmp], [%[lockaddr]]\n"
168196" cmp %[ticket], %[tmp], lsr #16\n"
169197" bne 2b"
170- : [ticket ]"= & r " (ticket), [tmp]" = & r " (tmp), [next_ticket]" = & r " (next_ticket)
198+ : [ticket ]"= & r " (ticket), [tmp]" = & r " (tmp),
199+ [next_ticket ]"= & r " (next_ticket), [fixup]" + r " (fixup)
171200 : [lockaddr ]"r" (& lock -> lock ), [val1 ]"r" (1 )
172201 : "cc ");
173202 smp_mb ();
@@ -216,15 +245,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
216245
217246static inline void arch_spin_unlock_wait (arch_spinlock_t * lock )
218247{
219- unsigned long ticket ;
248+ unsigned long ticket , tmp , fixup = msm_krait_need_wfe_fixup ;
220249
221250 /* Wait for now_serving == next_ticket */
222251 __asm__ __volatile__(
223252#ifdef CONFIG_CPU_32v6K
224253" cmpne %[lockaddr], %[lockaddr]\n"
225254"1:\n"
226255" beq 2f\n"
227- WFE ( )
256+ WFE_SAFE ( "%[fixup]" , "%[tmp]" )
228257"2:\n"
229258#else
230259"1:\n"
@@ -234,7 +263,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
234263" uxth %[ticket], %[ticket]\n"
235264" cmp %[ticket], #0\n"
236265" bne 1b"
237- : [ticket ]"= & r " (ticket)
266+ : [ticket ]"= & r " (ticket), [tmp]" = & r " (tmp),
267+ [fixup ]"+ r " (fixup)
238268 : [lockaddr ]"r" (& lock -> lock )
239269 : "cc" );
240270}
@@ -262,18 +292,18 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
262292
263293static inline void arch_write_lock (arch_rwlock_t * rw )
264294{
265- unsigned long tmp ;
295+ unsigned long tmp , fixup = msm_krait_need_wfe_fixup ;
266296
267297 __asm__ __volatile__(
268298"1: ldrex %[tmp], [%[lock]]\n"
269299" teq %[tmp], #0\n"
270300" beq 2f\n"
271- WFE ( )
301+ WFE_SAFE ( "%[fixup]" , "%[tmp]" )
272302"2:\n"
273303" strexeq %[tmp], %[bit31], [%[lock]]\n"
274304" teq %[tmp], #0\n"
275305" bne 1b"
276- : [tmp ] "= & r " (tmp)
306+ : [tmp ] "= & r " (tmp), [fixup] " + r " (fixup)
277307 : [lock ] "r " (&rw->lock), [bit31] " r " (0x80000000)
278308 : " cc ");
279309
@@ -330,18 +360,18 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
330360 */
331361static inline void arch_read_lock (arch_rwlock_t * rw )
332362{
333- unsigned long tmp , tmp2 ;
363+ unsigned long tmp , tmp2 , fixup = msm_krait_need_wfe_fixup ;
334364
335365 __asm__ __volatile__(
336366"1: ldrex %[tmp], [%[lock]]\n"
337367" adds %[tmp], %[tmp], #1\n"
338368" strexpl %[tmp2], %[tmp], [%[lock]]\n"
339369" bpl 2f\n"
340- WFE ( )
370+ WFE_SAFE ( "%[fixup]" , "%[tmp]" )
341371"2:\n"
342372" rsbpls %[tmp], %[tmp2], #0\n"
343373" bmi 1b"
344- : [tmp ] "= & r " (tmp), [tmp2] " = & r " (tmp2)
374+ : [tmp ] "= & r " (tmp), [tmp2] " = & r " (tmp2), [fixup] " + r " (fixup)
345375 : [lock ] "r" (& rw -> lock )
346376 : "cc ");
347377
0 commit comments