change log for rtems (2011-07-21)

rtems-vc at rtems.org
Thu Jul 21 15:11:01 UTC 2011


 *sh*:
2011-07-21	Sebastian Huber <sebastian.huber at embedded-brains.de>

	* rtems/score/cpu.h: Added SPE support to CPU context.

M  1.166  cpukit/score/cpu/powerpc/ChangeLog
M   1.46  cpukit/score/cpu/powerpc/rtems/score/cpu.h

diff -u rtems/cpukit/score/cpu/powerpc/ChangeLog:1.165 rtems/cpukit/score/cpu/powerpc/ChangeLog:1.166
--- rtems/cpukit/score/cpu/powerpc/ChangeLog:1.165	Tue May 17 09:53:12 2011
+++ rtems/cpukit/score/cpu/powerpc/ChangeLog	Thu Jul 21 09:49:47 2011
@@ -1,3 +1,7 @@
+2011-07-21	Sebastian Huber <sebastian.huber at embedded-brains.de>
+
+	* rtems/score/cpu.h: Added SPE support to CPU context.
+
 2011-05-17	Ralf Corsépius <ralf.corsepius at rtems.org>
 
 	* Makefile.am: Reformat.

diff -u rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h:1.45 rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h:1.46
--- rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h:1.45	Fri Feb 11 03:24:09 2011
+++ rtems/cpukit/score/cpu/powerpc/rtems/score/cpu.h	Thu Jul 21 09:49:47 2011
@@ -23,7 +23,7 @@
  *
  *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
  *
- *  Copyright (c) 2010 embedded brains GmbH.
+ *  Copyright (c) 2010-2011 embedded brains GmbH.
  *
  *  The license and distribution terms for this file may be
  *  found in the file LICENSE in this distribution or at
@@ -252,6 +252,7 @@
 #ifndef ASM
 
 typedef struct {
+  #ifndef __SPE__
     uint32_t   gpr1;	/* Stack pointer for all */
     uint32_t   gpr2;	/* Reserved SVR4, section ptr EABI + */
     uint32_t   gpr13;	/* Section ptr SVR4/EABI */
@@ -276,24 +277,105 @@
     uint32_t   cr;	/* PART of the CR is non volatile for all */
     uint32_t   pc;	/* Program counter/Link register */
     uint32_t   msr;	/* Initial interrupt level */
-#ifdef __ALTIVEC__
-	/* 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
-	 * and padding to ensure cache-alignment.
-	 * Unfortunately, we can't verify the cache line size here
-	 * in the cpukit but altivec support code will produce an
-	 * error if this is ever different from 32 bytes.
-	 * 
-	 * Note: it is the BSP/CPU-support's responsibility to
-	 *       save/restore volatile vregs across interrupts
-	 *       and exceptions.
-	 */
-	uint8_t    altivec[16*12 + 32 + 32];
-#endif
+    #ifdef __ALTIVEC__
+      /*
+       * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
+       * and padding to ensure cache-alignment.  Unfortunately, we can't verify
+       * the cache line size here in the cpukit but altivec support code will
+       * produce an error if this is ever different from 32 bytes.
+       * 
+       * Note: it is the BSP/CPU-support's responsibility to save/restore
+       *       volatile vregs across interrupts and exceptions.
+       */
+      uint8_t altivec[16*12 + 32 + 32];
+    #endif
+  #else
+    /* Non-volatile context according to E500ABIUG and EABI */
+    uint32_t context [
+      8 /* Cache line padding */
+      + 1 /* Stack pointer */
+      + 1 /* MSR */
+      + 1 /* LR */
+      + 1 /* CR */
+      + 18 * 2 /* GPR 14 to GPR 31 */
+    ];
+  #endif
 } Context_Control;
+#endif /* ASM */
 
-#define _CPU_Context_Get_SP( _context ) \
-  (_context)->gpr1
+#ifndef __SPE__
+  #define PPC_CONTEXT_SET_SP( _context, _sp ) \
+    do { \
+      (_context)->gpr1 = _sp; \
+    } while (0)
+
+  #define PPC_CONTEXT_SET_MSR( _context, _msr ) \
+    do { \
+      (_context)->msr = _msr; \
+    } while (0)
+
+  #define PPC_CONTEXT_SET_PC( _context, _pc ) \
+    do { \
+      (_context)->pc = _pc; \
+    } while (0)
+
+  #define _CPU_Context_Get_SP( _context ) \
+    (_context)->gpr1
+#else
+  #define PPC_CONTEXT_CACHE_LINE_0 32
+  #define PPC_CONTEXT_OFFSET_SP 32
+  #define PPC_CONTEXT_OFFSET_MSR 36
+  #define PPC_CONTEXT_OFFSET_LR 40
+  #define PPC_CONTEXT_OFFSET_CR 44
+  #define PPC_CONTEXT_OFFSET_GPR14 48
+  #define PPC_CONTEXT_OFFSET_GPR15 56
+  #define PPC_CONTEXT_CACHE_LINE_1 64
+  #define PPC_CONTEXT_OFFSET_GPR16 64
+  #define PPC_CONTEXT_OFFSET_GPR17 72
+  #define PPC_CONTEXT_OFFSET_GPR18 80
+  #define PPC_CONTEXT_OFFSET_GPR19 88
+  #define PPC_CONTEXT_CACHE_LINE_2 96
+  #define PPC_CONTEXT_OFFSET_GPR20 96
+  #define PPC_CONTEXT_OFFSET_GPR21 104
+  #define PPC_CONTEXT_OFFSET_GPR22 112
+  #define PPC_CONTEXT_OFFSET_GPR23 120
+  #define PPC_CONTEXT_CACHE_LINE_3 128
+  #define PPC_CONTEXT_OFFSET_GPR24 128
+  #define PPC_CONTEXT_OFFSET_GPR25 136
+  #define PPC_CONTEXT_OFFSET_GPR26 144
+  #define PPC_CONTEXT_OFFSET_GPR27 152
+  #define PPC_CONTEXT_CACHE_LINE_4 160
+  #define PPC_CONTEXT_OFFSET_GPR28 160
+  #define PPC_CONTEXT_OFFSET_GPR29 168
+  #define PPC_CONTEXT_OFFSET_GPR30 176
+  #define PPC_CONTEXT_OFFSET_GPR31 184
+
+  #define PPC_CONTEXT_AREA( _context ) \
+    ((uint32_t *) (((uintptr_t) (_context)) & ~0x1fU))
+
+  #define PPC_CONTEXT_FIELD( _context, _offset ) \
+    PPC_CONTEXT_AREA( _context ) [(_offset) / 4]
+
+  #define PPC_CONTEXT_SET_SP( _context, _sp ) \
+    do { \
+      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_SP ) = _sp; \
+    } while (0)
+
+  #define PPC_CONTEXT_SET_MSR( _context, _msr ) \
+    do { \
+      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_MSR ) = _msr; \
+    } while (0)
+
+  #define PPC_CONTEXT_SET_PC( _context, _pc ) \
+    do { \
+      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_LR ) = _pc; \
+    } while (0)
 
+  #define _CPU_Context_Get_SP( _context ) \
+    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_SP )
+#endif
+
+#ifndef ASM
 typedef struct {
     /* The ABIs (PowerOpen/SVR4/EABI) only require saving f14-f31 over
      * procedure calls.  However, this would mean that the interrupt

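In the SPE case the named structure members are replaced by a raw, cache-line padded word array, and every access goes through the alignment macros added above in this diff. The following host-side sketch (plain C, illustrative only, not from the commit) shows how PPC_CONTEXT_AREA() and PPC_CONTEXT_FIELD() resolve a slot such as the stack pointer; presumably the eight words of cache line padding at the start of the array give the masked-down base address room to stay inside the structure when the structure itself is not 32-byte aligned.

#include <stdint.h>
#include <stdio.h>

/* Copied from the cpu.h hunk above: align the context pointer down to a
 * 32-byte boundary and address fields by byte offset within that area. */
#define PPC_CONTEXT_AREA( _context ) \
  ((uint32_t *) (((uintptr_t) (_context)) & ~0x1fU))

#define PPC_CONTEXT_FIELD( _context, _offset ) \
  PPC_CONTEXT_AREA( _context ) [(_offset) / 4]

#define PPC_CONTEXT_OFFSET_SP 32

int main(void)
{
  /* Stand-in for a Context_Control instance on a 32-byte boundary */
  static uint32_t context[64] __attribute__((aligned(32)));

  /* Any pointer into the first cache line resolves to the same slot,
   * because the low five address bits are masked off first. */
  PPC_CONTEXT_FIELD(&context[3], PPC_CONTEXT_OFFSET_SP) = 0xdeadbeef;

  printf("SP slot: 0x%08x (word index %d)\n",
         (unsigned) context[PPC_CONTEXT_OFFSET_SP / 4],
         PPC_CONTEXT_OFFSET_SP / 4);
  return 0;
}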

 *sh*:
2011-07-21	Sebastian Huber <sebastian.huber at embedded-brains.de>

	PR 1799/bsps
	* new-exceptions/bspsupport/ppc_exc_async_normal.S: New file.
	* new-exceptions/cpu.c, new-exceptions/cpu_asm.S,
	new-exceptions/bspsupport/ppc_exc_asm_macros.h,
	new-exceptions/bspsupport/ppc_exc_global_handler.c,
	new-exceptions/bspsupport/ppc_exc_prologue.c,
	new-exceptions/bspsupport/vectors.h: Added support for SPE.
	* configure.ac, preinstall.am, Makefile.am: Added support for qoriq
	BSPs.

M  1.389  c/src/lib/libcpu/powerpc/ChangeLog
M   1.66  c/src/lib/libcpu/powerpc/Makefile.am
M   1.59  c/src/lib/libcpu/powerpc/configure.ac
M   1.14  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h
A    1.1  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
M    1.3  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_global_handler.c
M    1.6  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_prologue.c
M   1.11  c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
M   1.29  c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
M   1.16  c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
M   1.28  c/src/lib/libcpu/powerpc/preinstall.am

diff -u rtems/c/src/lib/libcpu/powerpc/ChangeLog:1.388 rtems/c/src/lib/libcpu/powerpc/ChangeLog:1.389
--- rtems/c/src/lib/libcpu/powerpc/ChangeLog:1.388	Sat Jun 18 02:17:04 2011
+++ rtems/c/src/lib/libcpu/powerpc/ChangeLog	Thu Jul 21 10:03:31 2011
@@ -1,3 +1,15 @@
+2011-07-21	Sebastian Huber <sebastian.huber at embedded-brains.de>
+
+	PR 1799/bsps
+	* new-exceptions/bspsupport/ppc_exc_async_normal.S: New file.
+	* new-exceptions/cpu.c, new-exceptions/cpu_asm.S,
+	new-exceptions/bspsupport/ppc_exc_asm_macros.h,
+	new-exceptions/bspsupport/ppc_exc_global_handler.c,
+	new-exceptions/bspsupport/ppc_exc_prologue.c,
+	new-exceptions/bspsupport/vectors.h: Added support for SPE.
+	* configure.ac, preinstall.am, Makefile.am: Added support for qoriq
+	BSPs.
+
 2011-06-18	Ralf Corsépius <ralf.corsepius at rtems.org>
 
 	* Makefile.am: Remove reference to non-existing file

diff -u rtems/c/src/lib/libcpu/powerpc/Makefile.am:1.65 rtems/c/src/lib/libcpu/powerpc/Makefile.am:1.66
--- rtems/c/src/lib/libcpu/powerpc/Makefile.am:1.65	Sat Jun 18 02:17:04 2011
+++ rtems/c/src/lib/libcpu/powerpc/Makefile.am	Thu Jul 21 10:03:31 2011
@@ -35,6 +35,7 @@
 new_exceptions_exc_bspsupport_rel_SOURCES = \
     new-exceptions/bspsupport/ppc-code-copy.c \
     new-exceptions/bspsupport/ppc_exc.S \
+    new-exceptions/bspsupport/ppc_exc_async_normal.S \
     new-exceptions/bspsupport/ppc_exc_naked.S \
     new-exceptions/bspsupport/ppc_exc_hdl.c \
     new-exceptions/bspsupport/ppc_exc_initialize.c \
@@ -475,5 +476,24 @@
 # END: MPC55XX                                                               #
 ##############################################################################
 
+##############################################################################
+# START: QorIQ                                                               #
+##############################################################################
+if qoriq
+
+# Network
+include_bsp_HEADERS += mpc83xx/network/tsec.h
+if HAS_NETWORKING
+noinst_PROGRAMS += tsec.rel
+tsec_rel_SOURCES = mpc83xx/network/tsec.c
+tsec_rel_CPPFLAGS = -D__INSIDE_RTEMS_BSD_TCPIP_STACK__ -D__BSD_VISIBLE
+tsec_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
+endif
+
+endif
+##############################################################################
+# END: QorIQ                                                                 #
+##############################################################################
+
 include $(srcdir)/preinstall.am
 include $(top_srcdir)/../../../automake/local.am

diff -u rtems/c/src/lib/libcpu/powerpc/configure.ac:1.58 rtems/c/src/lib/libcpu/powerpc/configure.ac:1.59
--- rtems/c/src/lib/libcpu/powerpc/configure.ac:1.58	Wed Feb  2 09:01:00 2011
+++ rtems/c/src/lib/libcpu/powerpc/configure.ac	Thu Jul 21 10:03:31 2011
@@ -50,6 +50,7 @@
 || test "$RTEMS_CPU_MODEL" = "mpc8245" \
 || test "$RTEMS_CPU_MODEL" = "mpc8260" \
 || test "$RTEMS_CPU_MODEL" = "mpc83xx" \
+|| test "$RTEMS_CPU_MODEL" = "qoriq" \
 || test "$RTEMS_CPU_MODEL" = "e500")
 
 # test on CPU type
@@ -71,13 +72,15 @@
 || test "$RTEMS_CPU_MODEL" = "mpc860" )
 AM_CONDITIONAL(mpc8260, test "$RTEMS_CPU_MODEL" = "mpc8260")
 AM_CONDITIONAL(mpc83xx, test "$RTEMS_CPU_MODEL" = "mpc83xx")
+AM_CONDITIONAL(qoriq, test "$RTEMS_CPU_MODEL" = "qoriq")
 
 # the ppc405 shares files with the ppc403
 AM_CONDITIONAL(ppc403,[test "$RTEMS_CPU_MODEL" = "ppc403" \
 || test "$RTEMS_CPU_MODEL" = "ppc405"])
 AM_CONDITIONAL(ppc405, test "$RTEMS_CPU_MODEL" = "ppc405")
 
-AM_CONDITIONAL(e500, test "$RTEMS_CPU_MODEL" = "e500")
+AM_CONDITIONAL(e500, test "$RTEMS_CPU_MODEL" = "e500" \
+|| test "$RTEMS_CPU_MODEL" = "qoriq" )
 
 RTEMS_CHECK_NETWORKING
 AM_CONDITIONAL(HAS_NETWORKING,test "$HAS_NETWORKING" = "yes")

diff -u rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h:1.13 rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h:1.14
--- rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h:1.13	Mon Jan 31 10:12:24 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_asm_macros.h	Thu Jul 21 10:03:31 2011
@@ -758,7 +758,27 @@
 	/* r14 is the FRAME_REGISTER and will be saved elsewhere */
 
 	/* Save non-volatile registers r15 .. r31 */
+#ifndef __SPE__
 	stmw	r15, GPR15_OFFSET(FRAME_REGISTER)
+#else
+	stw	r15, GPR15_OFFSET(FRAME_REGISTER)
+	stw	r16, GPR16_OFFSET(FRAME_REGISTER)
+	stw	r17, GPR17_OFFSET(FRAME_REGISTER)
+	stw	r18, GPR18_OFFSET(FRAME_REGISTER)
+	stw	r19, GPR19_OFFSET(FRAME_REGISTER)
+	stw	r20, GPR20_OFFSET(FRAME_REGISTER)
+	stw	r21, GPR21_OFFSET(FRAME_REGISTER)
+	stw	r22, GPR22_OFFSET(FRAME_REGISTER)
+	stw	r23, GPR23_OFFSET(FRAME_REGISTER)
+	stw	r24, GPR24_OFFSET(FRAME_REGISTER)
+	stw	r25, GPR25_OFFSET(FRAME_REGISTER)
+	stw	r26, GPR26_OFFSET(FRAME_REGISTER)
+	stw	r27, GPR27_OFFSET(FRAME_REGISTER)
+	stw	r28, GPR28_OFFSET(FRAME_REGISTER)
+	stw	r29, GPR29_OFFSET(FRAME_REGISTER)
+	stw	r30, GPR30_OFFSET(FRAME_REGISTER)
+	stw	r31, GPR31_OFFSET(FRAME_REGISTER)
+#endif
 
 	b	wrap_disable_thread_dispatching_done_\_FLVR
 
@@ -773,7 +793,27 @@
 	/* r14 is the FRAME_REGISTER and will be restored elsewhere */
 
 	/* Restore non-volatile registers r15 .. r31 */
+#ifndef __SPE__
 	lmw	r15, GPR15_OFFSET(r1)
+#else
+	lwz	r15, GPR15_OFFSET(FRAME_REGISTER)
+	lwz	r16, GPR16_OFFSET(FRAME_REGISTER)
+	lwz	r17, GPR17_OFFSET(FRAME_REGISTER)
+	lwz	r18, GPR18_OFFSET(FRAME_REGISTER)
+	lwz	r19, GPR19_OFFSET(FRAME_REGISTER)
+	lwz	r20, GPR20_OFFSET(FRAME_REGISTER)
+	lwz	r21, GPR21_OFFSET(FRAME_REGISTER)
+	lwz	r22, GPR22_OFFSET(FRAME_REGISTER)
+	lwz	r23, GPR23_OFFSET(FRAME_REGISTER)
+	lwz	r24, GPR24_OFFSET(FRAME_REGISTER)
+	lwz	r25, GPR25_OFFSET(FRAME_REGISTER)
+	lwz	r26, GPR26_OFFSET(FRAME_REGISTER)
+	lwz	r27, GPR27_OFFSET(FRAME_REGISTER)
+	lwz	r28, GPR28_OFFSET(FRAME_REGISTER)
+	lwz	r29, GPR29_OFFSET(FRAME_REGISTER)
+	lwz	r30, GPR30_OFFSET(FRAME_REGISTER)
+	lwz	r31, GPR31_OFFSET(FRAME_REGISTER)
+#endif
 
 	/* Restore stack pointer */
 	stw	SCRATCH_REGISTER_0, 0(r1)

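The stmw/lmw forms are kept only for the non-SPE build. A plausible reading of the vectors.h change later in this commit is that the SPE exception frame spaces its GPR slots eight bytes apart, which no longer matches the consecutive 32-bit words transferred by stmw/lmw, hence the per-register stw/lwz sequences above. A quick arithmetic sketch (plain C, illustrative only, not from the commit):

#include <stdio.h>

/* The two formulas mirror PPC_EXC_GPR_OFFSET() from the vectors.h hunk
 * further down: 4-byte slot spacing without SPE, 8-byte spacing with SPE. */
#define GPR_OFFSET_CLASSIC(gpr) ((gpr) * 4 + 36)
#define GPR_OFFSET_SPE(gpr)     ((gpr) * 8 + 48)

int main(void)
{
  int gpr;

  for (gpr = 15; gpr <= 17; ++gpr) {
    printf("GPR%d offset: classic %3d, SPE %3d\n",
           gpr, GPR_OFFSET_CLASSIC(gpr), GPR_OFFSET_SPE(gpr));
  }
  return 0;
}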
diff -u /dev/null rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S:1.1
--- /dev/null	Thu Jul 21 10:11:00 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S	Thu Jul 21 10:03:31 2011
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2011 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Obere Lagerstr. 30
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ *
+ * $Id$
+ */
+
+#include <bspopts.h>
+#include <rtems/score/percpu.h>
+#include <bsp/vectors.h>
+
+#define VECTOR_REGISTER r4
+#define ISR_NEST_HADDR_REGISTER r5
+#define ISR_NEST_REGISTER r6
+#define DISPATCH_LEVEL_REGISTER r7
+#define HANDLER_REGISTER r8
+#define SCRATCH_0_REGISTER r0
+#define SCRATCH_1_REGISTER r3
+#define SCRATCH_2_REGISTER r9
+#define SCRATCH_3_REGISTER r10
+#define SCRATCH_4_REGISTER r11
+#define SCRATCH_5_REGISTER r12
+#define FRAME_REGISTER r14
+
+#define VECTOR_OFFSET(reg) GPR4_OFFSET(reg)
+#define ISR_NEST_HADDR_OFFSET(reg) GPR5_OFFSET(reg)
+#define ISR_NEST_OFFSET(reg) GPR6_OFFSET(reg)
+#define DISPATCH_LEVEL_OFFSET(reg) GPR7_OFFSET(reg)
+#define HANDLER_OFFSET(reg) GPR8_OFFSET(reg)
+#define SCRATCH_0_OFFSET(reg) GPR0_OFFSET(reg)
+#define SCRATCH_1_OFFSET(reg) GPR3_OFFSET(reg)
+#define SCRATCH_2_OFFSET(reg) GPR9_OFFSET(reg)
+#define SCRATCH_3_OFFSET(reg) GPR10_OFFSET(reg)
+#define SCRATCH_4_OFFSET(reg) GPR11_OFFSET(reg)
+#define SCRATCH_5_OFFSET(reg) GPR12_OFFSET(reg)
+
+/*
+ * The register 2 slot is free, since this is the read-only small data anchor.
+ */
+#define FRAME_OFFSET(reg) GPR2_OFFSET(reg)
+
+	.global	ppc_exc_min_prolog_async_tmpl_normal
+	.global ppc_exc_wrap_async_normal
+
+ppc_exc_min_prolog_async_tmpl_normal:
+
+	stwu	r1, -PPC_EXC_MINIMAL_FRAME_SIZE(r1)
+	stw	VECTOR_REGISTER, PPC_EXC_VECTOR_PROLOGUE_OFFSET(r1)
+	li	VECTOR_REGISTER, 0xffff8000
+
+	/*
+	 * We store the absolute branch target address here.  It will be used
+	 * to generate the branch operation in ppc_exc_make_prologue().
+	 */
+	.int	ppc_exc_wrap_async_normal
+
+ppc_exc_wrap_async_normal:
+
+	/* Save non-volatile FRAME_REGISTER */
+	stw	FRAME_REGISTER, FRAME_OFFSET(r1)
+
+#ifdef __SPE__
+	/* Enable SPE */
+	mfmsr	FRAME_REGISTER
+	oris	FRAME_REGISTER, FRAME_REGISTER, MSR_SPE >> 16
+	mtmsr	FRAME_REGISTER
+#endif
+
+	/* Move frame pointer to non-volatile FRAME_REGISTER */
+	mr	FRAME_REGISTER, r1
+
+	/* Load ISR nest level and thread dispatch disable level */
+	PPC_EXC_GPR_STORE	ISR_NEST_HADDR_REGISTER, ISR_NEST_HADDR_OFFSET(r1)
+	lis	ISR_NEST_HADDR_REGISTER, ISR_NEST_LEVEL@ha
+	PPC_EXC_GPR_STORE	ISR_NEST_REGISTER, ISR_NEST_OFFSET(r1)
+	lwz	ISR_NEST_REGISTER, ISR_NEST_LEVEL@l(ISR_NEST_HADDR_REGISTER)
+	PPC_EXC_GPR_STORE	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_OFFSET(r1)
+	lwz	DISPATCH_LEVEL_REGISTER, _Thread_Dispatch_disable_level@sdarel(r13)
+
+	PPC_EXC_GPR_STORE	SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
+
+#ifdef __SPE__
+	/*
+	 * Save high order part of VECTOR_REGISTER here.  The low order part
+	 * was saved in the minimal prologue.
+	 */
+	evmergehi	SCRATCH_0_REGISTER, SCRATCH_0_REGISTER, VECTOR_REGISTER
+	stw	SCRATCH_0_REGISTER, VECTOR_OFFSET(r1)
+#endif
+
+	PPC_EXC_GPR_STORE	HANDLER_REGISTER, HANDLER_OFFSET(r1)
+
+	/*
+	 * Load the handler address.  Get the handler table index from the
+	 * vector number.  We have to discard the exception type.  Take only
+	 * the least significant five bits (= LAST_VALID_EXC + 1) from the
+	 * vector register.  Multiply by four (= size of function pointer).
+	 */
+	rlwinm	SCRATCH_0_REGISTER, VECTOR_REGISTER, 2, 25, 29
+	lis	HANDLER_REGISTER, ppc_exc_handler_table@h
+	ori	HANDLER_REGISTER, HANDLER_REGISTER, ppc_exc_handler_table@l
+	lwzx	HANDLER_REGISTER, HANDLER_REGISTER, SCRATCH_0_REGISTER
+
+	PPC_EXC_GPR_STORE	SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
+	PPC_EXC_GPR_STORE	SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
+	PPC_EXC_GPR_STORE	SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
+	PPC_EXC_GPR_STORE	SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
+	PPC_EXC_GPR_STORE	SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
+
+	/* Save SRR0, SRR1, CR, CTR, XER, and LR */
+	mfsrr0	SCRATCH_0_REGISTER
+	mfsrr1	SCRATCH_1_REGISTER
+	mfcr	SCRATCH_2_REGISTER
+	mfctr	SCRATCH_3_REGISTER
+	mfxer	SCRATCH_4_REGISTER
+	mflr	SCRATCH_5_REGISTER
+	stw	SCRATCH_0_REGISTER, SRR0_FRAME_OFFSET(r1)
+	stw	SCRATCH_1_REGISTER, SRR1_FRAME_OFFSET(r1)
+	stw	SCRATCH_2_REGISTER, EXC_CR_OFFSET(r1)
+	stw	SCRATCH_3_REGISTER, EXC_CTR_OFFSET(r1)
+	stw	SCRATCH_4_REGISTER, EXC_XER_OFFSET(r1)
+	stw	SCRATCH_5_REGISTER, EXC_LR_OFFSET(r1)
+
+#ifdef __SPE__
+	/* Save SPEFSCR and ACC */
+	mfspr	SCRATCH_0_REGISTER, FSL_EIS_SPEFSCR
+	evxor	SCRATCH_1_REGISTER, SCRATCH_1_REGISTER, SCRATCH_1_REGISTER
+	evmwumiaa	SCRATCH_1_REGISTER, SCRATCH_1_REGISTER, SCRATCH_1_REGISTER
+	stw	SCRATCH_0_REGISTER, PPC_EXC_SPEFSCR_OFFSET(r1)
+	evstdd	SCRATCH_1_REGISTER, PPC_EXC_ACC_OFFSET(r1)
+#endif
+
+	/* Increment ISR nest level and thread dispatch disable level */
+	cmpwi	ISR_NEST_REGISTER, 0
+	addi	ISR_NEST_REGISTER, ISR_NEST_REGISTER, 1
+	addi	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_REGISTER, 1
+	stw	ISR_NEST_REGISTER, ISR_NEST_LEVEL@l(ISR_NEST_HADDR_REGISTER)
+	stw	DISPATCH_LEVEL_REGISTER, _Thread_Dispatch_disable_level@sdarel(r13)
+
+	/* Switch stack if necessary */
+	mfspr	SCRATCH_0_REGISTER, SPRG1
+	iselgt	r1, r1, SCRATCH_0_REGISTER
+
+	/*
+	 * Call high level exception handler.
+	 *
+	 * First parameter = exception frame pointer + FRAME_LINK_SPACE
+	 * Second parameter = vector number (r4 is the VECTOR_REGISTER)
+	 */
+	addi	r3, FRAME_REGISTER, FRAME_LINK_SPACE
+	rlwinm	VECTOR_REGISTER, VECTOR_REGISTER, 0, 27, 31
+	mtctr	HANDLER_REGISTER
+	bctrl
+
+	/* Load ISR nest level and thread dispatch disable level */
+	lis	ISR_NEST_HADDR_REGISTER, ISR_NEST_LEVEL@ha
+	lwz	ISR_NEST_REGISTER, ISR_NEST_LEVEL@l(ISR_NEST_HADDR_REGISTER)
+	lwz	DISPATCH_LEVEL_REGISTER, _Thread_Dispatch_disable_level@sdarel(r13)
+
+	/*
+	 * Switch back to original stack (FRAME_REGISTER == r1 if we are still
+	 * on the IRQ stack) and restore FRAME_REGISTER.
+	 */
+	mr	r1, FRAME_REGISTER
+	lwz	FRAME_REGISTER, FRAME_OFFSET(r1)
+
+	/* Decrement ISR nest level and thread dispatch disable level */
+	subi	ISR_NEST_REGISTER, ISR_NEST_REGISTER, 1
+	subic.	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_REGISTER, 1
+	stw	ISR_NEST_REGISTER, ISR_NEST_LEVEL@l(ISR_NEST_HADDR_REGISTER)
+	stw	DISPATCH_LEVEL_REGISTER, _Thread_Dispatch_disable_level@sdarel(r13)
+
+	/* Call thread dispatcher if necessary */
+	bne	thread_dispatching_done
+	bl	_Thread_Dispatch
+thread_dispatching_done:
+
+#ifdef __SPE__
+	/* Load SPEFSCR and ACC */
+	lwz	DISPATCH_LEVEL_REGISTER, PPC_EXC_SPEFSCR_OFFSET(r1)
+	evldd	HANDLER_REGISTER, PPC_EXC_ACC_OFFSET(r1)
+#endif
+
+	/* Load SRR0, SRR1, CR, CTR, XER, and LR */
+	lwz	SCRATCH_0_REGISTER, SRR0_FRAME_OFFSET(r1)
+	lwz	SCRATCH_1_REGISTER, SRR1_FRAME_OFFSET(r1)
+	lwz	SCRATCH_2_REGISTER, EXC_CR_OFFSET(r1)
+	lwz	SCRATCH_3_REGISTER, EXC_CTR_OFFSET(r1)
+	lwz	SCRATCH_4_REGISTER, EXC_XER_OFFSET(r1)
+	lwz	SCRATCH_5_REGISTER, EXC_LR_OFFSET(r1)
+
+	PPC_EXC_GPR_LOAD	VECTOR_REGISTER, VECTOR_OFFSET(r1)
+	PPC_EXC_GPR_LOAD	ISR_NEST_HADDR_REGISTER, ISR_NEST_HADDR_OFFSET(r1)
+	PPC_EXC_GPR_LOAD	ISR_NEST_REGISTER, ISR_NEST_OFFSET(r1)
+
+#ifdef __SPE__
+	/* Restore SPEFSCR */
+	mtspr	FSL_EIS_SPEFSCR, DISPATCH_LEVEL_REGISTER
+#endif
+	PPC_EXC_GPR_LOAD	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_OFFSET(r1)
+
+#ifdef __SPE__
+	/* Restore ACC */
+	evmra	HANDLER_REGISTER, HANDLER_REGISTER
+#endif
+	PPC_EXC_GPR_LOAD	HANDLER_REGISTER, HANDLER_OFFSET(r1)
+
+	/* Restore SRR0, SRR1, CR, CTR, XER, and LR */
+	mtsrr0	SCRATCH_0_REGISTER
+	PPC_EXC_GPR_LOAD	SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
+	mtsrr1	SCRATCH_1_REGISTER
+	PPC_EXC_GPR_LOAD	SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
+	mtcr	SCRATCH_2_REGISTER
+	PPC_EXC_GPR_LOAD	SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
+	mtctr	SCRATCH_3_REGISTER
+	PPC_EXC_GPR_LOAD	SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
+	mtxer	SCRATCH_4_REGISTER
+	PPC_EXC_GPR_LOAD	SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
+	mtlr	SCRATCH_5_REGISTER
+	PPC_EXC_GPR_LOAD	SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
+
+	/* Pop stack */
+	addi	r1, r1, PPC_EXC_MINIMAL_FRAME_SIZE
+
+	/* Return */
+	rfi

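The wrapper keeps the vector number in the least significant five bits of the vector register and derives from it both the handler table byte offset (rlwinm ..., 2, 25, 29) and the plain vector number handed to the high level handler (rlwinm ..., 0, 27, 31). A host-side sketch of the same bit manipulation (plain C, illustrative only, not from the commit):

#include <stdint.h>
#include <stdio.h>

/* rlwinm rD, rS, 2, 25, 29 rotates left by two and keeps a five-bit
 * field, i.e. (vector & 0x1f) * 4, the byte offset of a 32-bit function
 * pointer in ppc_exc_handler_table.  rlwinm rD, rS, 0, 27, 31 keeps the
 * low five bits, i.e. the vector number itself. */
static uint32_t table_byte_offset(uint32_t vector_register)
{
  return (vector_register << 2) & 0x7c;
}

static uint32_t vector_number(uint32_t vector_register)
{
  return vector_register & 0x1f;
}

int main(void)
{
  uint32_t vr = 0xffff8000U | 0x10;  /* example value with high bits set */

  printf("table offset %u, vector %u\n",
         (unsigned) table_byte_offset(vr), (unsigned) vector_number(vr));
  return 0;
}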
diff -u rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_global_handler.c:1.2 rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_global_handler.c:1.3
--- rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_global_handler.c:1.2	Fri Jan 28 14:38:13 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_global_handler.c	Thu Jul 21 10:03:31 2011
@@ -21,6 +21,12 @@
 
 #include <bsp/vectors.h>
 
+#ifndef __SPE__
+  #define GET_GPR(gpr) (gpr)
+#else
+  #define GET_GPR(gpr) ((int) ((gpr) >> 32))
+#endif
+
 exception_handler_t globalExceptHdl = C_exception_handler;
 
 /* T. Straumann: provide a stack trace
@@ -62,7 +68,7 @@
   printk("Stack Trace: \n  ");
   if (excPtr) {
     printk("IP: 0x%08x, ", excPtr->EXC_SRR0);
-    sp = (LRFrame) excPtr->GPR1;
+    sp = (LRFrame) GET_GPR(excPtr->GPR1);
     lr = (void *) excPtr->EXC_LR;
   } else {
     /* there's no macro for this */
@@ -133,44 +139,44 @@
 
   /* Dump registers */
 
-  printk("\t R0  = %08x", excPtr->GPR0);
+  printk("\t R0  = %08x", GET_GPR(excPtr->GPR0));
   if (synch) {
-    printk(" R1  = %08x", excPtr->GPR1);
-    printk(" R2  = %08x", excPtr->GPR2);
+    printk(" R1  = %08x", GET_GPR(excPtr->GPR1));
+    printk(" R2  = %08x", GET_GPR(excPtr->GPR2));
   } else {
     printk("               ");
     printk("               ");
   }
-  printk(" R3  = %08x\n", excPtr->GPR3);
-  printk("\t R4  = %08x", excPtr->GPR4);
-  printk(" R5  = %08x", excPtr->GPR5);
-  printk(" R6  = %08x", excPtr->GPR6);
-  printk(" R7  = %08x\n", excPtr->GPR7);
-  printk("\t R8  = %08x", excPtr->GPR8);
-  printk(" R9  = %08x", excPtr->GPR9);
-  printk(" R10 = %08x", excPtr->GPR10);
-  printk(" R11 = %08x\n", excPtr->GPR11);
-  printk("\t R12 = %08x", excPtr->GPR12);
+  printk(" R3  = %08x\n", GET_GPR(excPtr->GPR3));
+  printk("\t R4  = %08x", GET_GPR(excPtr->GPR4));
+  printk(" R5  = %08x", GET_GPR(excPtr->GPR5));
+  printk(" R6  = %08x", GET_GPR(excPtr->GPR6));
+  printk(" R7  = %08x\n", GET_GPR(excPtr->GPR7));
+  printk("\t R8  = %08x", GET_GPR(excPtr->GPR8));
+  printk(" R9  = %08x", GET_GPR(excPtr->GPR9));
+  printk(" R10 = %08x", GET_GPR(excPtr->GPR10));
+  printk(" R11 = %08x\n", GET_GPR(excPtr->GPR11));
+  printk("\t R12 = %08x", GET_GPR(excPtr->GPR12));
   if (synch) {
-    printk(" R13 = %08x", excPtr->GPR13);
-    printk(" R14 = %08x", excPtr->GPR14);
-    printk(" R15 = %08x\n", excPtr->GPR15);
-    printk("\t R16 = %08x", excPtr->GPR16);
-    printk(" R17 = %08x", excPtr->GPR17);
-    printk(" R18 = %08x", excPtr->GPR18);
-    printk(" R19 = %08x\n", excPtr->GPR19);
-    printk("\t R20 = %08x", excPtr->GPR20);
-    printk(" R21 = %08x", excPtr->GPR21);
-    printk(" R22 = %08x", excPtr->GPR22);
-    printk(" R23 = %08x\n", excPtr->GPR23);
-    printk("\t R24 = %08x", excPtr->GPR24);
-    printk(" R25 = %08x", excPtr->GPR25);
-    printk(" R26 = %08x", excPtr->GPR26);
-    printk(" R27 = %08x\n", excPtr->GPR27);
-    printk("\t R28 = %08x", excPtr->GPR28);
-    printk(" R29 = %08x", excPtr->GPR29);
-    printk(" R30 = %08x", excPtr->GPR30);
-    printk(" R31 = %08x\n", excPtr->GPR31);
+    printk(" R13 = %08x", GET_GPR(excPtr->GPR13));
+    printk(" R14 = %08x", GET_GPR(excPtr->GPR14));
+    printk(" R15 = %08x\n", GET_GPR(excPtr->GPR15));
+    printk("\t R16 = %08x", GET_GPR(excPtr->GPR16));
+    printk(" R17 = %08x", GET_GPR(excPtr->GPR17));
+    printk(" R18 = %08x", GET_GPR(excPtr->GPR18));
+    printk(" R19 = %08x\n", GET_GPR(excPtr->GPR19));
+    printk("\t R20 = %08x", GET_GPR(excPtr->GPR20));
+    printk(" R21 = %08x", GET_GPR(excPtr->GPR21));
+    printk(" R22 = %08x", GET_GPR(excPtr->GPR22));
+    printk(" R23 = %08x\n", GET_GPR(excPtr->GPR23));
+    printk("\t R24 = %08x", GET_GPR(excPtr->GPR24));
+    printk(" R25 = %08x", GET_GPR(excPtr->GPR25));
+    printk(" R26 = %08x", GET_GPR(excPtr->GPR26));
+    printk(" R27 = %08x\n", GET_GPR(excPtr->GPR27));
+    printk("\t R28 = %08x", GET_GPR(excPtr->GPR28));
+    printk(" R29 = %08x", GET_GPR(excPtr->GPR29));
+    printk(" R30 = %08x", GET_GPR(excPtr->GPR30));
+    printk(" R31 = %08x\n", GET_GPR(excPtr->GPR31));
   } else {
     printk("\n");
   }

diff -u rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_prologue.c:1.5 rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_prologue.c:1.6
--- rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_prologue.c:1.5	Mon Jan 31 10:12:24 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_prologue.c	Thu Jul 21 10:03:31 2011
@@ -49,6 +49,7 @@
 extern const uint32_t ppc_exc_min_prolog_sync_tmpl_e500_mchk [];
 extern const uint32_t ppc_exc_min_prolog_async_tmpl_e500_mchk [];
 extern const uint32_t ppc_exc_min_prolog_tmpl_naked [];
+extern const uint32_t ppc_exc_min_prolog_async_tmpl_normal [];
 
 static const uint32_t *const ppc_exc_prologue_templates [] = {
   [PPC_EXC_CLASSIC] = ppc_exc_min_prolog_sync_tmpl_std,
@@ -126,10 +127,18 @@
   } else if (
     category == PPC_EXC_CLASSIC
       && ppc_cpu_is_bookE() != PPC_BOOKE_STD
-        && ppc_cpu_is_bookE() != PPC_BOOKE_E500
+      && ppc_cpu_is_bookE() != PPC_BOOKE_E500
   ) {
     prologue_template = ppc_exc_min_prolog_auto;
     prologue_template_size = (size_t) ppc_exc_min_prolog_size;
+  } else if (
+    category == PPC_EXC_CLASSIC_ASYNC
+      && ppc_cpu_is_bookE() == PPC_BOOKE_E500
+      && (ppc_interrupt_get_disable_mask() & MSR_CE) == 0
+  ) {
+    prologue_template = ppc_exc_min_prolog_async_tmpl_normal;
+    prologue_template_size = (size_t) ppc_exc_min_prolog_size;
+    fixup_vector = true;
   } else {
     prologue_template = ppc_exc_prologue_templates [category];
     prologue_template_size = (size_t) ppc_exc_min_prolog_size;

diff -u rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h:1.10 rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h:1.11
--- rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h:1.10	Tue Jun  7 08:58:23 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h	Thu Jul 21 10:03:31 2011
@@ -143,11 +143,27 @@
 
 /** @} */
 
-#define PPC_EXC_GPR_TYPE unsigned
-#define PPC_EXC_GPR_SIZE 4
-#define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_EXC_GPR_SIZE + 36)
-#define PPC_EXC_MINIMAL_FRAME_SIZE 96
-#define PPC_EXC_FRAME_SIZE 176
+#ifndef __SPE__
+  #define PPC_EXC_GPR_TYPE unsigned
+  #define PPC_EXC_GPR_SIZE 4
+  #define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_EXC_GPR_SIZE + 36)
+  #define PPC_EXC_VECTOR_PROLOGUE_OFFSET PPC_EXC_GPR_OFFSET(4)
+  #define PPC_EXC_GPR_LOAD lwz
+  #define PPC_EXC_GPR_STORE stw
+  #define PPC_EXC_MINIMAL_FRAME_SIZE 96
+  #define PPC_EXC_FRAME_SIZE 176
+#else
+  #define PPC_EXC_GPR_TYPE uint64_t
+  #define PPC_EXC_GPR_SIZE 8
+  #define PPC_EXC_SPEFSCR_OFFSET 36
+  #define PPC_EXC_ACC_OFFSET 40
+  #define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_EXC_GPR_SIZE + 48)
+  #define PPC_EXC_VECTOR_PROLOGUE_OFFSET (PPC_EXC_GPR_OFFSET(4) + 4)
+  #define PPC_EXC_GPR_LOAD evldd
+  #define PPC_EXC_GPR_STORE evstdd
+  #define PPC_EXC_MINIMAL_FRAME_SIZE 160
+  #define PPC_EXC_FRAME_SIZE 320
+#endif
 
 /**
  * @defgroup ppc_exc_frame PowerPC Exception Frame
@@ -250,6 +266,10 @@
   unsigned EXC_CTR;
   unsigned EXC_XER;
   unsigned EXC_LR;
+  #ifdef __SPE__
+    uint32_t EXC_SPEFSCR;
+    uint64_t EXC_ACC;
+  #endif
   PPC_EXC_GPR_TYPE GPR0;
   PPC_EXC_GPR_TYPE GPR1;
   PPC_EXC_GPR_TYPE GPR2;

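A quick consistency check of the frame geometry above (plain C sketch, illustrative only, not from the commit): in the SPE layout the 32-bit SPEFSCR slot and the 64-bit accumulator slot sit directly in front of GPR0, and the enlarged frame sizes still cover the last GPR slot.

#include <stdio.h>

/* Constants taken from the vectors.h hunk above */
#define CLASSIC_GPR_SIZE 4
#define CLASSIC_GPR_OFFSET(gpr) ((gpr) * CLASSIC_GPR_SIZE + 36)
#define CLASSIC_FRAME_SIZE 176

#define SPE_GPR_SIZE 8
#define SPE_SPEFSCR_OFFSET 36
#define SPE_ACC_OFFSET 40
#define SPE_GPR_OFFSET(gpr) ((gpr) * SPE_GPR_SIZE + 48)
#define SPE_FRAME_SIZE 320

int main(void)
{
  printf("ACC follows SPEFSCR: %d, GPR0 follows ACC: %d\n",
         SPE_SPEFSCR_OFFSET + 4 == SPE_ACC_OFFSET,
         SPE_ACC_OFFSET + 8 == SPE_GPR_OFFSET(0));
  printf("classic: GPR31 ends at %d of %d, SPE: GPR31 ends at %d of %d\n",
         CLASSIC_GPR_OFFSET(31) + CLASSIC_GPR_SIZE, CLASSIC_FRAME_SIZE,
         SPE_GPR_OFFSET(31) + SPE_GPR_SIZE, SPE_FRAME_SIZE);
  return 0;
}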
diff -u rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c:1.28 rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c:1.29
--- rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c:1.28	Fri Feb 11 03:46:53 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c	Thu Jul 21 10:03:31 2011
@@ -28,6 +28,8 @@
  *  $Id$
  */
 
+#include <string.h>
+
 #include <rtems/system.h>
 #include <rtems/score/isr.h>
 #include <rtems/score/context.h>
@@ -73,7 +75,6 @@
   sp &= ~(CPU_STACK_ALIGNMENT-1);
 
   *((uint32_t*)sp) = 0;
-  the_context->gpr1 = sp;
 
   _CPU_MSR_GET( msr_value );
 
@@ -97,8 +98,6 @@
     msr_value &= ~ppc_interrupt_get_disable_mask();
   }
 
-  the_context->msr = msr_value;
-
   /*
    *  The FP bit of the MSR should only be enabled if this is a floating
    *  point task.  Unfortunately, the vfprintf_r routine in newlib
@@ -119,12 +118,17 @@
    * only way...)
    */
   if ( is_fp )
-    the_context->msr |= PPC_MSR_FP;
+    msr_value |= PPC_MSR_FP;
   else
-    the_context->msr &= ~PPC_MSR_FP;
+    msr_value &= ~PPC_MSR_FP;
+
+  memset( the_context, 0, sizeof( *the_context ) );
 
-  the_context->pc = (uint32_t)entry_point;
+  PPC_CONTEXT_SET_SP( the_context, sp );
+  PPC_CONTEXT_SET_PC( the_context, (uint32_t) entry_point );
+  PPC_CONTEXT_SET_MSR( the_context, msr_value );
 
+#ifndef __SPE__
 #if (PPC_ABI == PPC_ABI_SVR4)
   /*
    * SVR4 says R2 is for 'system-reserved' use; it cannot hurt to
@@ -148,6 +152,7 @@
 #else
 #error unsupported PPC_ABI
 #endif
+#endif /* __SPE__ */
 
 #ifdef __ALTIVEC__
   _CPU_Context_initialize_altivec(the_context);

diff -u rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S:1.15 rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S:1.16
--- rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S:1.15	Tue Jun  7 07:59:39 2011
+++ rtems/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S	Thu Jul 21 10:03:31 2011
@@ -24,6 +24,8 @@
  *  COPYRIGHT (c) 1989-1997.
  *  On-Line Applications Research Corporation (OAR).
  *
+ *  Copyright (c) 2011 embedded brains GmbH.
+ *
  *  The license and distribution terms for this file may in
  *  the file LICENSE in this distribution or at
  *  http://www.rtems.com/license/LICENSE.
@@ -33,7 +35,25 @@
 
 #include <rtems/asm.h>
 #include <rtems/powerpc/powerpc.h>
-#include <rtems/powerpc/registers.h>
+#include <rtems/score/cpu.h>
+#include <bspopts.h>
+
+#if BSP_DATA_CACHE_ENABLED && PPC_CACHE_ALIGNMENT == 32
+  #define DATA_CACHE_ALIGNMENT(reg) \
+    li reg, PPC_CACHE_ALIGNMENT
+  #define DATA_CACHE_ZERO(rega, regb) \
+    dcbz rega, regb
+  #define DATA_CACHE_TOUCH(rega, regb) \
+    dcbt rega, regb
+  #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset) \
+    li reg, offset; dcbz reg, r3; dcbt reg, r4
+#else
+  #define DATA_CACHE_ALIGNMENT(reg)
+  #define DATA_CACHE_ZERO(rega, regb)
+  #define DATA_CACHE_TOUCH(rega, regb)
+  #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset) \
+    li reg, offset
+#endif
 
 /*
  * Offsets for various Contexts
@@ -290,26 +310,26 @@
 	ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
 	PUBLIC_PROC (_CPU_Context_switch)
 PROC (_CPU_Context_switch):
+#ifndef __SPE__
 	sync
 	isync
 	/* This assumes that all the registers are in the given order */
-#if ( BSP_DATA_CACHE_ENABLED )
-#if PPC_CACHE_ALIGNMENT != 32
-#error "code assumes PPC_CACHE_ALIGNMENT == 32!"
-#endif
-	li	r5, PPC_CACHE_ALIGNMENT
-#endif
+	DATA_CACHE_ALIGNMENT(r5)
 	addi	r9,r3,-4
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbz	r5, r9
+	DATA_CACHE_ZERO(r5, r9)
+#ifdef RTEMS_MULTIPROCESSING
+	/*
+	 * We have to clear the reservation of the executing thread.  See also
+	 * Book E section 6.1.6.2 "Atomic Update Primitives".
+	 */
+	li	r10, GP_1 + 4
+	stwcx.	r1, r9, r10
 #endif
 	stw	r1, GP_1+4(r9)
 	stw	r2, GP_2+4(r9)
 #if (PPC_USE_MULTIPLE == 1)
 	addi	r9, r9, GP_18+4
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbz	r5, r9
-#endif
+	DATA_CACHE_ZERO(r5, r9)
 	stmw	r13, GP_13-GP_18(r9)
 #else
 	stw	r13, GP_13+4(r9)
@@ -318,9 +338,7 @@
 	stw	r16, GP_16+4(r9)
 	stw	r17, GP_17+4(r9)
 	stwu	r18, GP_18+4(r9)
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbz	r5, r9
-#endif
+	DATA_CACHE_ZERO(r5, r9)
 	stw	r19, GP_19-GP_18(r9)
 	stw	r20, GP_20-GP_18(r9)
 	stw	r21, GP_21-GP_18(r9)
@@ -335,9 +353,7 @@
 	stw	r30, GP_30-GP_18(r9)
 	stw	r31, GP_31-GP_18(r9)
 #endif
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbt	r0, r4
-#endif
+	DATA_CACHE_TOUCH(r0, r4)
 	mfcr	r6
 	stw	r6, GP_CR-GP_18(r9)
 	mflr	r7
@@ -350,21 +366,15 @@
 	EXTERN_PROC(_CPU_Context_switch_altivec)
 	bl		_CPU_Context_switch_altivec
 	mr      r4, r14
-#if ( BSP_DATA_CACHE_ENABLED )
-	li      r5, PPC_CACHE_ALIGNMENT
-#endif
+	DATA_CACHE_ALIGNMENT(r5)
 #endif
 
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbt	r5, r4
-#endif
+	DATA_CACHE_TOUCH(r5, r4)
 	lwz	r1, GP_1(r4)
 	lwz	r2, GP_2(r4)
 #if (PPC_USE_MULTIPLE == 1)
 	addi	r4, r4, GP_19
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbt	r5, r4
-#endif
+	DATA_CACHE_TOUCH(r5, r4)
 	lmw	r13, GP_13-GP_19(r4)
 #else
 	lwz	r13, GP_13(r4)
@@ -374,9 +384,7 @@
 	lwz	r17, GP_17(r4)
 	lwz	r18, GP_18(r4)
 	lwzu	r19, GP_19(r4)
-#if ( BSP_DATA_CACHE_ENABLED )
-	dcbt	r5, r4
-#endif
+	DATA_CACHE_TOUCH(r5, r4)
 	lwz	r20, GP_20-GP_19(r4)
 	lwz	r21, GP_21-GP_19(r4)
 	lwz	r22, GP_22-GP_19(r4)
@@ -399,6 +407,100 @@
 	isync
 
 	blr
+#else /* __SPE__ */
+	/* Align to a cache line */
+	clrrwi	r3, r3, 5
+	clrrwi	r4, r4, 5
+
+	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_0)
+	DATA_CACHE_ZERO_AND_TOUCH(r11, PPC_CONTEXT_CACHE_LINE_1)
+
+	/* Save context to r3 */
+
+	mfmsr	r5
+	mflr	r6
+	mfcr	r7
+#ifdef RTEMS_MULTIPROCESSING
+	/*
+	 * We have to clear the reservation of the executing thread.  See also
+	 * Book E section 6.1.6.2 "Atomic Update Primitives".
+	 *
+	 * Here we assume PPC_CONTEXT_OFFSET_SP == PPC_CONTEXT_CACHE_LINE_0.
+	 */
+	stwcx.	r1, r3, r10
+#endif
+	stw	r1, PPC_CONTEXT_OFFSET_SP(r3)
+	stw	r5, PPC_CONTEXT_OFFSET_MSR(r3)
+	stw	r6, PPC_CONTEXT_OFFSET_LR(r3)
+	stw	r7, PPC_CONTEXT_OFFSET_CR(r3)
+	evstdd	r14, PPC_CONTEXT_OFFSET_GPR14(r3)
+	evstdd	r15, PPC_CONTEXT_OFFSET_GPR15(r3)
+
+	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_2)
+
+	evstdd	r16, PPC_CONTEXT_OFFSET_GPR16(r3)
+	evstdd	r17, PPC_CONTEXT_OFFSET_GPR17(r3)
+	evstdd	r18, PPC_CONTEXT_OFFSET_GPR18(r3)
+	evstdd	r19, PPC_CONTEXT_OFFSET_GPR19(r3)
+
+	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_3)
+
+	evstdd	r20, PPC_CONTEXT_OFFSET_GPR20(r3)
+	evstdd	r21, PPC_CONTEXT_OFFSET_GPR21(r3)
+	evstdd	r22, PPC_CONTEXT_OFFSET_GPR22(r3)
+	evstdd	r23, PPC_CONTEXT_OFFSET_GPR23(r3)
+
+	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_4)
+
+	evstdd	r24, PPC_CONTEXT_OFFSET_GPR24(r3)
+	evstdd	r25, PPC_CONTEXT_OFFSET_GPR25(r3)
+	evstdd	r26, PPC_CONTEXT_OFFSET_GPR26(r3)
+	evstdd	r27, PPC_CONTEXT_OFFSET_GPR27(r3)
+
+	evstdd	r28, PPC_CONTEXT_OFFSET_GPR28(r3)
+	evstdd	r29, PPC_CONTEXT_OFFSET_GPR29(r3)
+	evstdd	r30, PPC_CONTEXT_OFFSET_GPR30(r3)
+	evstdd	r31, PPC_CONTEXT_OFFSET_GPR31(r3)
+
+	/* Restore context from r4 */
+restore_context:
+
+	lwz	r1, PPC_CONTEXT_OFFSET_SP(r4)
+	lwz	r5, PPC_CONTEXT_OFFSET_MSR(r4)
+	lwz	r6, PPC_CONTEXT_OFFSET_LR(r4)
+	lwz	r7, PPC_CONTEXT_OFFSET_CR(r4)
+
+	evldd	r14, PPC_CONTEXT_OFFSET_GPR14(r4)
+	evldd	r15, PPC_CONTEXT_OFFSET_GPR15(r4)
+
+	DATA_CACHE_TOUCH(r0, r1)
+
+	evldd	r16, PPC_CONTEXT_OFFSET_GPR16(r4)
+	evldd	r17, PPC_CONTEXT_OFFSET_GPR17(r4)
+	evldd	r18, PPC_CONTEXT_OFFSET_GPR18(r4)
+	evldd	r19, PPC_CONTEXT_OFFSET_GPR19(r4)
+
+	evldd	r20, PPC_CONTEXT_OFFSET_GPR20(r4)
+	evldd	r21, PPC_CONTEXT_OFFSET_GPR21(r4)
+	evldd	r22, PPC_CONTEXT_OFFSET_GPR22(r4)
+	evldd	r23, PPC_CONTEXT_OFFSET_GPR23(r4)
+
+	evldd	r24, PPC_CONTEXT_OFFSET_GPR24(r4)
+	evldd	r25, PPC_CONTEXT_OFFSET_GPR25(r4)
+	evldd	r26, PPC_CONTEXT_OFFSET_GPR26(r4)
+	evldd	r27, PPC_CONTEXT_OFFSET_GPR27(r4)
+
+	evldd	r28, PPC_CONTEXT_OFFSET_GPR28(r4)
+	evldd	r29, PPC_CONTEXT_OFFSET_GPR29(r4)
+	evldd	r30, PPC_CONTEXT_OFFSET_GPR30(r4)
+	evldd	r31, PPC_CONTEXT_OFFSET_GPR31(r4)
+
+	mtcr	r7
+	mtlr	r6
+	mtmsr	r5
+
+	blr
+#endif /* __SPE__ */
 
 /*
  *  _CPU_Context_restore
@@ -414,6 +516,7 @@
 	ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
 	PUBLIC_PROC (_CPU_Context_restore)
 PROC (_CPU_Context_restore):
+#ifndef __SPE__
 	lwz	r5, GP_CR(r3)
 	lwz	r6, GP_PC(r3)
 	lwz	r7, GP_MSR(r3)
@@ -451,3 +554,9 @@
 	b _CPU_Context_restore_altivec
 #endif
 	blr
+#else /* __SPE__ */
+	/* Align to a cache line */
+	clrrwi	r4, r3, 5
+
+	b	restore_context
+#endif /* __SPE__ */

diff -u rtems/c/src/lib/libcpu/powerpc/preinstall.am:1.27 rtems/c/src/lib/libcpu/powerpc/preinstall.am:1.28
--- rtems/c/src/lib/libcpu/powerpc/preinstall.am:1.27	Thu Dec 30 07:12:03 2010
+++ rtems/c/src/lib/libcpu/powerpc/preinstall.am	Thu Jul 21 10:03:31 2011
@@ -286,3 +286,9 @@
 	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/irq.h
 PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/irq.h
 endif
+if qoriq
+$(PROJECT_INCLUDE)/bsp/tsec.h: mpc83xx/network/tsec.h $(PROJECT_INCLUDE)/bsp/$(dirstamp)
+	$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/bsp/tsec.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/bsp/tsec.h
+
+endif


 *sh*:
2011-07-21	Sebastian Huber <sebastian.huber at embedded-brains.de>

	* shared/clock/clock.c: Fix to clear the first pending decrementer
	exception.

M  1.262  c/src/lib/libbsp/powerpc/ChangeLog
M   1.11  c/src/lib/libbsp/powerpc/shared/clock/clock.c

diff -u rtems/c/src/lib/libbsp/powerpc/ChangeLog:1.261 rtems/c/src/lib/libbsp/powerpc/ChangeLog:1.262
--- rtems/c/src/lib/libbsp/powerpc/ChangeLog:1.261	Wed Jul 20 11:39:30 2011
+++ rtems/c/src/lib/libbsp/powerpc/ChangeLog	Thu Jul 21 10:10:07 2011
@@ -1,3 +1,8 @@
+2011-07-21	Sebastian Huber <sebastian.huber at embedded-brains.de>
+
+	* shared/clock/clock.c: Fix to clear the first pending decrementer
+	exception.
+
 2011-07-20	Till Straumann <strauman at slac.stanford.edu>
 
 	PR 1837/bsps

diff -u rtems/c/src/lib/libbsp/powerpc/shared/clock/clock.c:1.10 rtems/c/src/lib/libbsp/powerpc/shared/clock/clock.c:1.11
--- rtems/c/src/lib/libbsp/powerpc/shared/clock/clock.c:1.10	Sun Nov 29 22:28:46 2009
+++ rtems/c/src/lib/libbsp/powerpc/shared/clock/clock.c	Thu Jul 21 10:10:07 2011
@@ -7,12 +7,13 @@
  */
 
 /*
- * Copyright (c) 2008, 2009
- * Embedded Brains GmbH
- * Obere Lagerstr. 30
- * D-82178 Puchheim
- * Germany
- * rtems at embedded-brains.de
+ * Copyright (c) 2008-2011 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Obere Lagerstr. 30
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems at embedded-brains.de>
  *
  * The license and distribution terms for this file may be
  * found in the file LICENSE in this distribution or at
@@ -94,6 +95,18 @@
 	return 0;
 }
 
+static int ppc_clock_exception_handler_first( BSP_Exception_frame *frame, unsigned number)
+{
+	/* We have to clear the first pending decrementer exception this way */
+
+	if (ppc_decrementer_register() >= 0x80000000) {
+		ppc_clock_exception_handler( frame, number);
+	}
+
+	ppc_exc_set_handler( ASM_DEC_VECTOR, ppc_clock_exception_handler);
+
+	return 0;
+}
 
 static int ppc_clock_exception_handler_booke( BSP_Exception_frame *frame, unsigned number)
 {
@@ -208,7 +221,7 @@
 		ppc_clock_next_time_base = ppc_time_base() + ppc_clock_decrementer_value;
 
 		/* Install exception handler */
-		ppc_exc_set_handler( ASM_DEC_VECTOR, ppc_clock_exception_handler);
+		ppc_exc_set_handler( ASM_DEC_VECTOR, ppc_clock_exception_handler_first);
 	}
 
 	/* Set the decrementer value */

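The replacement handler runs exactly once: it presumably distinguishes a genuine decrementer roll-over (register value with the top bit set) from an exception that was already pending when the driver started, accounts the tick only in the former case, and then installs the steady-state handler. A minimal sketch of that one-shot pattern (plain C, hypothetical mock_* names, not RTEMS APIs):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: the mock_* and *_tick names are hypothetical stand-ins,
 * not RTEMS APIs. */
typedef void (*mock_handler)(void);

static mock_handler installed_handler;
static uint32_t mock_decrementer;

static void steady_state_tick(void)
{
	printf("tick\n");
}

static void first_tick(void)
{
	/*
	 * Account a tick only if the decrementer really rolled over (top
	 * bit set); otherwise this exception was already pending before
	 * the clock was programmed and is simply consumed.
	 */
	if (mock_decrementer >= 0x80000000U) {
		steady_state_tick();
	}

	/* Hand over to the normal handler either way */
	installed_handler = steady_state_tick;
}

static void simulate_first_exception(uint32_t dec_value)
{
	installed_handler = first_tick;
	mock_decrementer = dec_value;
	installed_handler();
	printf("handler is now %s\n",
		installed_handler == steady_state_tick ? "steady state" : "first");
}

int main(void)
{
	simulate_first_exception(0x00001234);	/* stale exception: no tick */
	simulate_first_exception(0xffffff00);	/* genuine roll-over: one tick */
	return 0;
}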

