[rtems commit] smp: Add PowerPC support

Sebastian Huber <sebh@rtems.org>
Fri May 31 13:15:45 UTC 2013


Module:    rtems
Branch:    master
Commit:    ffbeb6f6a382d3a7d2d406f4496d0259b7364854
Changeset: http://git.rtems.org/rtems/commit/?id=ffbeb6f6a382d3a7d2d406f4496d0259b7364854

Author:    Sebastian Huber <sebastian.huber@embedded-brains.de>
Date:      Fri Jan 18 09:42:49 2013 +0100

smp: Add PowerPC support

---

 aclocal/enable-smp.m4                              |    2 +-
 c/src/aclocal/enable-smp.m4                        |    2 +-
 .../bspsupport/ppc_exc_async_normal.S              |   23 +++++
 cpukit/aclocal/enable-smp.m4                       |    2 +-
 cpukit/score/cpu/powerpc/Makefile.am               |    1 +
 cpukit/score/cpu/powerpc/preinstall.am             |    4 +
 cpukit/score/cpu/powerpc/rtems/score/cpu.h         |   17 ++++-
 cpukit/score/cpu/powerpc/rtems/score/cpusmplock.h  |   95 ++++++++++++++++++++
 8 files changed, 142 insertions(+), 4 deletions(-)

diff --git a/aclocal/enable-smp.m4 b/aclocal/enable-smp.m4
index b290ac3..d2a3565 100644
--- a/aclocal/enable-smp.m4
+++ b/aclocal/enable-smp.m4
@@ -6,7 +6,7 @@ AC_ARG_ENABLE(smp,
 [AS_HELP_STRING([--enable-smp],[enable smp interface])],
 [case "${enableval}" in 
   yes) case "${RTEMS_CPU}" in
-         sparc|i386) RTEMS_HAS_SMP=yes ;;
+         powerpc|sparc|i386) RTEMS_HAS_SMP=yes ;;
          *)          RTEMS_HAS_SMP=no ;;
        esac
        ;;
diff --git a/c/src/aclocal/enable-smp.m4 b/c/src/aclocal/enable-smp.m4
index b290ac3..d2a3565 100644
--- a/c/src/aclocal/enable-smp.m4
+++ b/c/src/aclocal/enable-smp.m4
@@ -6,7 +6,7 @@ AC_ARG_ENABLE(smp,
 [AS_HELP_STRING([--enable-smp],[enable smp interface])],
 [case "${enableval}" in 
   yes) case "${RTEMS_CPU}" in
-         sparc|i386) RTEMS_HAS_SMP=yes ;;
+         powerpc|sparc|i386) RTEMS_HAS_SMP=yes ;;
          *)          RTEMS_HAS_SMP=no ;;
        esac
        ;;
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
index f123166..399c227 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
@@ -152,6 +152,7 @@ ppc_exc_wrap_async_normal:
 	evstdd	SCRATCH_1_REGISTER, PPC_EXC_ACC_OFFSET(r1)
 #endif
 
+#ifndef RTEMS_SMP
 	/* Increment ISR nest level and thread dispatch disable level */
 	cmpwi	ISR_NEST_REGISTER, 0
 	addi	ISR_NEST_REGISTER, ISR_NEST_REGISTER, 1
@@ -196,6 +197,28 @@ ppc_exc_wrap_async_normal:
 	subic.	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_REGISTER, 1
 	stw	ISR_NEST_REGISTER, ISR_NEST_LEVEL@l(ISR_NEST_HADDR_REGISTER)
 	stw	DISPATCH_LEVEL_REGISTER, _Thread_Dispatch_disable_level@sdarel(r13)
+#else /* RTEMS_SMP */
+	/* ISR Enter */
+	bl	_ISR_SMP_Enter
+	cmpwi	r3, 0
+
+	/* Switch stack if necessary */
+	mfspr	SCRATCH_0_REGISTER, SPRG1
+	iselgt	r1, r1, SCRATCH_0_REGISTER
+
+	bl      bsp_interrupt_dispatch
+
+	/*
+	 * Switch back to original stack (FRAME_REGISTER == r1 if we are still
+	 * on the IRQ stack) and restore FRAME_REGISTER.
+	 */
+	mr	r1, FRAME_REGISTER
+	lwz	FRAME_REGISTER, FRAME_OFFSET(r1)
+
+	/* ISR Leave */
+	bl	_ISR_SMP_Exit
+	cmpwi	r3, 1
+#endif /* RTEMS_SMP */
 
 	/* Call thread dispatcher if necessary */
 	bne	thread_dispatching_done
diff --git a/cpukit/aclocal/enable-smp.m4 b/cpukit/aclocal/enable-smp.m4
index b290ac3..d2a3565 100644
--- a/cpukit/aclocal/enable-smp.m4
+++ b/cpukit/aclocal/enable-smp.m4
@@ -6,7 +6,7 @@ AC_ARG_ENABLE(smp,
 [AS_HELP_STRING([--enable-smp],[enable smp interface])],
 [case "${enableval}" in 
   yes) case "${RTEMS_CPU}" in
-         sparc|i386) RTEMS_HAS_SMP=yes ;;
+         powerpc|sparc|i386) RTEMS_HAS_SMP=yes ;;
          *)          RTEMS_HAS_SMP=no ;;
        esac
        ;;
diff --git a/cpukit/score/cpu/powerpc/Makefile.am b/cpukit/score/cpu/powerpc/Makefile.am
index b205762..f5664a3 100644
--- a/cpukit/score/cpu/powerpc/Makefile.am
+++ b/cpukit/score/cpu/powerpc/Makefile.am
@@ -8,6 +8,7 @@ include_rtems_score_HEADERS = rtems/score/powerpc.h
 include_rtems_score_HEADERS += rtems/score/cpu.h
 include_rtems_score_HEADERS += rtems/score/types.h
 include_rtems_score_HEADERS += rtems/score/cpuatomic.h
+include_rtems_score_HEADERS += rtems/score/cpusmplock.h
 
 include_rtems_powerpcdir = $(includedir)/rtems/powerpc
 include_rtems_powerpc_HEADERS = rtems/powerpc/registers.h
diff --git a/cpukit/score/cpu/powerpc/preinstall.am b/cpukit/score/cpu/powerpc/preinstall.am
index 3293498..1d7fd8b 100644
--- a/cpukit/score/cpu/powerpc/preinstall.am
+++ b/cpukit/score/cpu/powerpc/preinstall.am
@@ -43,6 +43,10 @@ $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h: rtems/score/cpuatomic.h $(PROJECT_IN
 	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h
 PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpuatomic.h
 
+$(PROJECT_INCLUDE)/rtems/score/cpusmplock.h: rtems/score/cpusmplock.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpusmplock.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpusmplock.h
+
 $(PROJECT_INCLUDE)/rtems/powerpc/$(dirstamp):
 	@$(MKDIR_P) $(PROJECT_INCLUDE)/rtems/powerpc
 	@: > $(PROJECT_INCLUDE)/rtems/powerpc/$(dirstamp)
diff --git a/cpukit/score/cpu/powerpc/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/rtems/score/cpu.h
index 080c5bb..fdb2886 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/cpu.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/cpu.h
@@ -472,7 +472,7 @@ typedef struct CPU_Interrupt_frame {
  *  This should be TRUE is CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
  */
 
-#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+#define CPU_ALLOCATE_INTERRUPT_STACK TRUE
 
 /*
  *  Does the RTEMS invoke the user's ISR with the vector number and
@@ -996,6 +996,21 @@ void _CPU_Context_volatile_clobber( uintptr_t pattern );
 
 void _CPU_Context_validate( uintptr_t pattern );
 
+#ifdef RTEMS_SMP
+  #define _CPU_Context_switch_to_first_task_smp( _context ) \
+    _CPU_Context_restore( _context )
+
+  static inline void _CPU_Processor_event_broadcast( void )
+  {
+    __asm__ volatile ( "" : : : "memory" );
+  }
+
+  static inline void _CPU_Processor_event_receive( void )
+  {
+    __asm__ volatile ( "" : : : "memory" );
+  }
+#endif
+
 typedef struct {
   uint32_t EXC_SRR0;
   uint32_t EXC_SRR1;
diff --git a/cpukit/score/cpu/powerpc/rtems/score/cpusmplock.h b/cpukit/score/cpu/powerpc/rtems/score/cpusmplock.h
new file mode 100644
index 0000000..f5ff962
--- /dev/null
+++ b/cpukit/score/cpu/powerpc/rtems/score/cpusmplock.h
@@ -0,0 +1,95 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLockPowerPC
+ *
+ * @brief PowerPC SMP Lock Implementation
+ */
+
+/*
+ * Copyright (c) 2013 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_POWERPC_SMPLOCK_H
+#define _RTEMS_SCORE_POWERPC_SMPLOCK_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreSMPLockPowerPC PowerPC SMP Locks
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * A ticket lock implementation is used.
+ *
+ * @{
+ */
+
+typedef struct {
+  uint32_t next_ticket;
+  uint32_t now_serving;
+} CPU_SMP_lock_Control;
+
+#define CPU_SMP_LOCK_INITIALIZER { 0, 0 }
+
+static inline void _CPU_SMP_lock_Initialize( CPU_SMP_lock_Control *lock )
+{
+  lock->next_ticket = 0;
+  lock->now_serving = 0;
+}
+
+static inline void _CPU_SMP_lock_Acquire( CPU_SMP_lock_Control *lock )
+{
+  uint32_t my_ticket;
+  uint32_t next_ticket;
+
+  __asm__ volatile (
+    "1: lwarx %[my_ticket], 0, %[next_ticket_addr]\n"
+    "addi %[next_ticket], %[my_ticket], 1\n"
+    "stwcx. %[next_ticket], 0, %[next_ticket_addr]\n"
+    "bne 1b\n"
+    "isync"
+    : [my_ticket] "=&r" (my_ticket),
+      [next_ticket] "=&r" (next_ticket)
+    : [next_ticket_addr] "r" (&lock->next_ticket)
+    : "cc", "memory"
+  );
+
+  while ( my_ticket != lock->now_serving ) {
+    __asm__ volatile ( "" : : : "memory" );
+  }
+}
+
+static inline void _CPU_SMP_lock_Release( CPU_SMP_lock_Control *lock )
+{
+  __asm__ volatile ( "msync" : : : "memory" );
+  ++lock->now_serving;
+}
+
+#define _CPU_SMP_lock_ISR_disable_and_acquire( lock, isr_cookie ) \
+  do { \
+    _CPU_ISR_Disable( isr_cookie ); \
+    _CPU_SMP_lock_Acquire( lock ); \
+  } while (0)
+
+#define _CPU_SMP_lock_Release_and_ISR_enable( lock, isr_cookie ) \
+  do { \
+    _CPU_SMP_lock_Release( lock ); \
+    _CPU_ISR_Enable( isr_cookie ); \
+  } while (0)
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_POWERPC_SMPLOCK_H */




More information about the vc mailing list