[rtems commit] Fix interrupt epilogue for ARMv7-AR and PowerPC
Sebastian Huber
sebh at rtems.org
Tue Nov 17 06:49:11 UTC 2015
Module: rtems
Branch: 4.11
Commit: 0c9bf40b89cd6763f9ec5d913d440c8b0074a092
Changeset: http://git.rtems.org/rtems/commit/?id=0c9bf40b89cd6763f9ec5d913d440c8b0074a092
Author: Sebastian Huber <sebastian.huber at embedded-brains.de>
Date: Wed Nov 11 11:49:45 2015 +0100
Fix interrupt epilogue for ARMv7-AR and PowerPC
Close #2470.
---
.../bspsupport/ppc_exc_async_normal.S | 51 ++++++++++++++++------
cpukit/score/cpu/arm/arm_exc_interrupt.S | 27 ++++++++++++
2 files changed, 65 insertions(+), 13 deletions(-)
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
index 59e621f..0e71dad 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
@@ -29,29 +29,29 @@
#define SCRATCH_5_REGISTER r12
#define FRAME_REGISTER r14
-#define VECTOR_OFFSET(reg) GPR4_OFFSET(reg)
-#define SELF_CPU_OFFSET(reg) GPR5_OFFSET(reg)
-#define ISR_NEST_OFFSET(reg) GPR6_OFFSET(reg)
-#define DISPATCH_LEVEL_OFFSET(reg) GPR7_OFFSET(reg)
-#define HANDLER_OFFSET(reg) GPR8_OFFSET(reg)
-#define SCRATCH_0_OFFSET(reg) GPR0_OFFSET(reg)
-#define SCRATCH_1_OFFSET(reg) GPR3_OFFSET(reg)
-#define SCRATCH_2_OFFSET(reg) GPR9_OFFSET(reg)
-#define SCRATCH_3_OFFSET(reg) GPR10_OFFSET(reg)
-#define SCRATCH_4_OFFSET(reg) GPR11_OFFSET(reg)
-#define SCRATCH_5_OFFSET(reg) GPR12_OFFSET(reg)
+#define VECTOR_OFFSET GPR4_OFFSET
+#define SELF_CPU_OFFSET GPR5_OFFSET
+#define ISR_NEST_OFFSET GPR6_OFFSET
+#define DISPATCH_LEVEL_OFFSET GPR7_OFFSET
+#define HANDLER_OFFSET GPR8_OFFSET
+#define SCRATCH_0_OFFSET GPR0_OFFSET
+#define SCRATCH_1_OFFSET GPR3_OFFSET
+#define SCRATCH_2_OFFSET GPR9_OFFSET
+#define SCRATCH_3_OFFSET GPR10_OFFSET
+#define SCRATCH_4_OFFSET GPR11_OFFSET
+#define SCRATCH_5_OFFSET GPR12_OFFSET
/*
* The register 2 slot is free, since this is the read-only small data anchor.
*/
-#define FRAME_OFFSET(reg) GPR2_OFFSET(reg)
+#define FRAME_OFFSET GPR2_OFFSET
#ifdef RTEMS_PROFILING
/*
* The PPC_EXC_MINIMAL_FRAME_SIZE is enough to store this additional register.
*/
#define ENTRY_INSTANT_REGISTER r15
-#define ENTRY_INSTANT_OFFSET(reg) GPR13_OFFSET(reg)
+#define ENTRY_INSTANT_OFFSET GPR13_OFFSET
.macro GET_TIME_BASE REG
#ifdef ppc8540
@@ -399,6 +399,31 @@ thread_dispatching_done:
evldd HANDLER_REGISTER, PPC_EXC_ACC_OFFSET(r1)
#endif
+ /*
+ * We must clear reservations here, since otherwise compare-and-swap
+ * atomic operations with interrupts enabled may yield wrong results.
+ * A compare-and-swap atomic operation is generated by the compiler
+ * like this:
+ *
+ * .L1:
+ * lwarx r9, r0, r3
+ * cmpw r9, r4
+ * bne- .L2
+ * stwcx. r5, r0, r3
+ * bne- .L1
+ * .L2:
+ *
+ * Consider the following scenario. A thread is interrupted right
+ * before its stwcx. The interrupt handler updates the value with a
+ * compare-and-swap sequence. Everything is fine up to this point.
+ * The interrupt handler then performs another compare-and-swap
+ * sequence, which fails and branches to .L2, leaving the processor
+ * with a reservation. The interrupt returns without a further stwcx.
+ * The thread's stwcx. then wrongly succeeds via this stale reservation.
+ */
+ li SCRATCH_0_REGISTER, FRAME_OFFSET
+ stwcx. SCRATCH_0_REGISTER, r1, SCRATCH_0_REGISTER
+
/* Load SRR0, SRR1, CR, CTR, XER, and LR */
lwz SCRATCH_0_REGISTER, SRR0_FRAME_OFFSET(r1)
lwz SCRATCH_1_REGISTER, SRR1_FRAME_OFFSET(r1)
diff --git a/cpukit/score/cpu/arm/arm_exc_interrupt.S b/cpukit/score/cpu/arm/arm_exc_interrupt.S
index 7930c32..fcb1510 100644
--- a/cpukit/score/cpu/arm/arm_exc_interrupt.S
+++ b/cpukit/score/cpu/arm/arm_exc_interrupt.S
@@ -209,6 +209,33 @@ thread_dispatch_done:
/* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
ldmia sp!, {EXCHANGE_LR, EXCHANGE_SPSR}
+#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
+ /*
+ * We must clear reservations here, since otherwise compare-and-swap
+ * atomic operations with interrupts enabled may yield wrong results.
+ * A compare-and-swap atomic operation is generated by the compiler
+ * like this:
+ *
+ * .L1:
+ * ldrex r1, [r0]
+ * cmp r1, r3
+ * bne .L2
+ * strex r3, r2, [r0]
+ * cmp r3, #0
+ * bne .L1
+ * .L2:
+ *
+ * Consider the following scenario. A thread is interrupted right
+ * before its strex. The interrupt handler updates the value with a
+ * compare-and-swap sequence. Everything is fine up to this point.
+ * The interrupt handler then performs another compare-and-swap
+ * sequence, which fails and branches to .L2, leaving the processor
+ * with a reservation. The interrupt returns without a further strex.
+ * The thread's strex then wrongly succeeds via this stale reservation.
+ */
+ clrex
+#endif
+
/* Return from interrupt */
subs pc, lr, #4
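For context, the compare-and-swap sequences quoted in the comments of both
hunks are what GCC emits for its atomic builtins on these targets. Below is a
minimal C sketch of such an operation; the function and variable names are
illustrative only and not part of the patch. GCC lowers the builtin to a
lwarx/stwcx. loop on PowerPC and to a ldrex/strex loop on ARMv7-AR.

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * Atomically replace *value by desired if it still equals expected and
   * report success.  On PowerPC this compiles to a lwarx/stwcx. loop, on
   * ARMv7-AR to a ldrex/strex loop, i.e. the sequences quoted above.
   */
  static bool compare_and_swap(uint32_t *value, uint32_t expected,
                               uint32_t desired)
  {
    return __atomic_compare_exchange_n(value, &expected, desired, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }

  /* Example use: increment a counter only if nobody changed it meanwhile. */
  static void try_increment(uint32_t *counter)
  {
    uint32_t seen = *counter;

    (void) compare_and_swap(counter, seen, seen + 1);
  }

If an interrupt's failed attempt leaves a reservation (the exclusive monitor
state on ARM) behind, the interrupted thread's next stwcx. or strex can
succeed even though its comparison used a stale value. The dummy stwcx. to
the exception frame and the clrex added by this commit discard any such
reservation before the interrupt epilogue returns.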