[rtems commit] powerpc: Simplify context switch

Sebastian Huber sebh at rtems.org
Mon Jun 4 07:51:40 UTC 2012


Module:    rtems
Branch:    master
Commit:    1869bb7101de25205f325287419aaa25a13143c7
Changeset: http://git.rtems.org/rtems/commit/?id=1869bb7101de25205f325287419aaa25a13143c7

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Fri May 18 15:47:23 2012 +0200

powerpc: Simplify context switch

PowerPC cores with the SPE (Signal Processing Extension) have 64-bit
general-purpose registers.  The SPE context switch code has been merged
with the standard context switch code.  The context switch code may use
cache operations to improve performance.  The context is now guaranteed
to be 32-byte aligned (PPC_DEFAULT_CACHE_LINE_SIZE), which slightly
increases the overall memory size of the context area in the thread
control block.  The general-purpose registers GPR2 and GPR13 are no
longer part of the context.  The BSP must initialize these registers
during startup (they are usually set up by the __eabi() function).
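For illustration only, here is a minimal stand-alone C sketch of the
alignment scheme (the helper mirrors ppc_get_context() from the diff
below; the extra cache line of padding is where the slight memory
increase comes from):

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_LINE 32u /* PPC_DEFAULT_CACHE_LINE_SIZE */

    /* Advance to the next cache line boundary.  Like ppc_get_context(),
     * this moves forward even if addr is already aligned, so the
     * context area must be over-allocated by one full cache line. */
    static inline uintptr_t align_up_to_next_line(uintptr_t addr)
    {
      uintptr_t mask = CACHE_LINE - 1;

      return (addr & ~mask) + CACHE_LINE;
    }

    int main(void)
    {
      uint8_t raw[6 * CACHE_LINE]; /* stand-in for Context_Control */
      uintptr_t ctx = align_up_to_next_line((uintptr_t) raw);

      printf("raw %p -> context 0x%lx\n", (void *) raw, (unsigned long) ctx);
      return 0;
    }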

The new BSP option BSP_USE_DATA_CACHE_BLOCK_TOUCH can be used to enable
the dcbt instruction in the context switch.
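As a rough illustration (not part of the commit), the same hint can be
issued from C with GCC inline assembly; the assembly code in the diff
wraps it in the DATA_CACHE_TOUCH macro instead:

    /* Hypothetical helper: ask the data cache to fetch the line
     * containing addr.  dcbt is a performance hint only. */
    static inline void ppc_data_cache_touch(const void *addr)
    {
      __asm__ volatile ("dcbt 0, %0" : : "r" (addr));
    }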

The new BSP option BSP_USE_SYNC_IN_CONTEXT_SWITCH can be used to enable
sync and isync instructions in the context switch.  This should not be
necessary in most cases.
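For completeness, an illustrative C rendering of the two barriers this
option inserts (the diff emits the instructions directly in
_CPU_Context_switch):

    /* sync orders all prior storage accesses; isync discards
     * prefetched instructions, e.g. after the mtmsr in the switch. */
    static inline void ppc_sync(void)
    {
      __asm__ volatile ("sync" : : : "memory");
    }

    static inline void ppc_isync(void)
    {
      __asm__ volatile ("isync" : : : "memory");
    }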

---

 c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c  |    2 +-
 .../libcpu/powerpc/mpc6xx/altivec/vec_sup_asm.S    |   11 -
 .../bspsupport/ppc_exc_async_normal.S              |   42 ++--
 .../powerpc/new-exceptions/bspsupport/vectors.h    |   76 ++---
 c/src/lib/libcpu/powerpc/new-exceptions/cpu.c      |   34 +--
 c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S  |  354 +++++---------------
 cpukit/score/cpu/powerpc/cpu.c                     |   39 ++-
 cpukit/score/cpu/powerpc/rtems/score/cpu.h         |  241 ++++++--------
 8 files changed, 285 insertions(+), 514 deletions(-)

diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c b/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c
index 0eba0b7..07b9fd2 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup.c
@@ -233,7 +233,7 @@ unsigned          pvr;
 	 * for use by assembly code.
 	 * Therefore, we compute it here and store it in memory...
 	 */
-	_CPU_altivec_ctxt_off  = (uint32_t) &((Context_Control*)0)->altivec;
+	_CPU_altivec_ctxt_off  = offsetof(ppc_context, altivec);
 	/* 
 	 * Add space possibly needed for alignment
 	 */
diff --git a/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup_asm.S b/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup_asm.S
index 6b78c0b..1a5c906 100644
--- a/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup_asm.S
+++ b/c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup_asm.S
@@ -623,17 +623,6 @@ _CPU_load_altivec_volatile:
 #endif
 	blr
 
-	.global _CPU_Context_restore_altivec
-_CPU_Context_restore_altivec:
-	/* Restore is like 'switch' but we don't have
-	 * to save an old context.
-	 * Move argument to second arg and load NULL pointer
-	 * to first one, then jump to 'switch' routine.
-	 */
-	mr	r4, r3
-	li  r3,  0
-	b _CPU_Context_switch_altivec
-
 	.global _CPU_Context_switch_altivec
 _CPU_Context_switch_altivec:
 
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
index 3165d6b..dd6f694 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
@@ -78,14 +78,14 @@ ppc_exc_wrap_async_normal:
 	mr	FRAME_REGISTER, r1
 
 	/* Load ISR nest level and thread dispatch disable level */
-	PPC_EXC_GPR_STORE	ISR_NEST_HADDR_REGISTER, ISR_NEST_HADDR_OFFSET(r1)
+	PPC_GPR_STORE	ISR_NEST_HADDR_REGISTER, ISR_NEST_HADDR_OFFSET(r1)
 	lis	ISR_NEST_HADDR_REGISTER, ISR_NEST_LEVEL@ha
-	PPC_EXC_GPR_STORE	ISR_NEST_REGISTER, ISR_NEST_OFFSET(r1)
+	PPC_GPR_STORE	ISR_NEST_REGISTER, ISR_NEST_OFFSET(r1)
 	lwz	ISR_NEST_REGISTER, ISR_NEST_LEVEL@l(ISR_NEST_HADDR_REGISTER)
-	PPC_EXC_GPR_STORE	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_OFFSET(r1)
+	PPC_GPR_STORE	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_OFFSET(r1)
 	lwz	DISPATCH_LEVEL_REGISTER, _Thread_Dispatch_disable_level@sdarel(r13)
 
-	PPC_EXC_GPR_STORE	SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
+	PPC_GPR_STORE	SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
 
 #ifdef __SPE__
 	/*
@@ -96,7 +96,7 @@ ppc_exc_wrap_async_normal:
 	stw	SCRATCH_0_REGISTER, VECTOR_OFFSET(r1)
 #endif
 
-	PPC_EXC_GPR_STORE	HANDLER_REGISTER, HANDLER_OFFSET(r1)
+	PPC_GPR_STORE	HANDLER_REGISTER, HANDLER_OFFSET(r1)
 
 	/*
 	 * Load the handler address.  Get the handler table index from the
@@ -109,11 +109,11 @@ ppc_exc_wrap_async_normal:
 	ori	HANDLER_REGISTER, HANDLER_REGISTER, ppc_exc_handler_table@l
 	lwzx	HANDLER_REGISTER, HANDLER_REGISTER, SCRATCH_0_REGISTER
 
-	PPC_EXC_GPR_STORE	SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
-	PPC_EXC_GPR_STORE	SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
-	PPC_EXC_GPR_STORE	SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
-	PPC_EXC_GPR_STORE	SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
-	PPC_EXC_GPR_STORE	SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
+	PPC_GPR_STORE	SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
+	PPC_GPR_STORE	SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
+	PPC_GPR_STORE	SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
+	PPC_GPR_STORE	SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
+	PPC_GPR_STORE	SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
 
 	/* Save SRR0, SRR1, CR, CTR, XER, and LR */
 	mfsrr0	SCRATCH_0_REGISTER
@@ -197,35 +197,35 @@ thread_dispatching_done:
 	lwz	SCRATCH_4_REGISTER, EXC_XER_OFFSET(r1)
 	lwz	SCRATCH_5_REGISTER, EXC_LR_OFFSET(r1)
 
-	PPC_EXC_GPR_LOAD	VECTOR_REGISTER, VECTOR_OFFSET(r1)
-	PPC_EXC_GPR_LOAD	ISR_NEST_HADDR_REGISTER, ISR_NEST_HADDR_OFFSET(r1)
-	PPC_EXC_GPR_LOAD	ISR_NEST_REGISTER, ISR_NEST_OFFSET(r1)
+	PPC_GPR_LOAD	VECTOR_REGISTER, VECTOR_OFFSET(r1)
+	PPC_GPR_LOAD	ISR_NEST_HADDR_REGISTER, ISR_NEST_HADDR_OFFSET(r1)
+	PPC_GPR_LOAD	ISR_NEST_REGISTER, ISR_NEST_OFFSET(r1)
 
 #ifdef __SPE__
 	/* Restore SPEFSCR */
 	mtspr	FSL_EIS_SPEFSCR, DISPATCH_LEVEL_REGISTER
 #endif
-	PPC_EXC_GPR_LOAD	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_OFFSET(r1)
+	PPC_GPR_LOAD	DISPATCH_LEVEL_REGISTER, DISPATCH_LEVEL_OFFSET(r1)
 
 #ifdef __SPE__
 	/* Restore ACC */
 	evmra	HANDLER_REGISTER, HANDLER_REGISTER
 #endif
-	PPC_EXC_GPR_LOAD	HANDLER_REGISTER, HANDLER_OFFSET(r1)
+	PPC_GPR_LOAD	HANDLER_REGISTER, HANDLER_OFFSET(r1)
 
 	/* Restore SRR0, SRR1, CR, CTR, XER, and LR */
 	mtsrr0	SCRATCH_0_REGISTER
-	PPC_EXC_GPR_LOAD	SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
+	PPC_GPR_LOAD	SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
 	mtsrr1	SCRATCH_1_REGISTER
-	PPC_EXC_GPR_LOAD	SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
+	PPC_GPR_LOAD	SCRATCH_1_REGISTER, SCRATCH_1_OFFSET(r1)
 	mtcr	SCRATCH_2_REGISTER
-	PPC_EXC_GPR_LOAD	SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
+	PPC_GPR_LOAD	SCRATCH_2_REGISTER, SCRATCH_2_OFFSET(r1)
 	mtctr	SCRATCH_3_REGISTER
-	PPC_EXC_GPR_LOAD	SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
+	PPC_GPR_LOAD	SCRATCH_3_REGISTER, SCRATCH_3_OFFSET(r1)
 	mtxer	SCRATCH_4_REGISTER
-	PPC_EXC_GPR_LOAD	SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
+	PPC_GPR_LOAD	SCRATCH_4_REGISTER, SCRATCH_4_OFFSET(r1)
 	mtlr	SCRATCH_5_REGISTER
-	PPC_EXC_GPR_LOAD	SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
+	PPC_GPR_LOAD	SCRATCH_5_REGISTER, SCRATCH_5_OFFSET(r1)
 
 	/* Pop stack */
 	addi	r1, r1, PPC_EXC_MINIMAL_FRAME_SIZE
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
index 8eda3a1..1a071c2 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/vectors.h
@@ -142,23 +142,15 @@ extern "C" {
 /** @} */
 
 #ifndef __SPE__
-  #define PPC_EXC_GPR_TYPE unsigned
-  #define PPC_EXC_GPR_SIZE 4
-  #define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_EXC_GPR_SIZE + 36)
+  #define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_GPR_SIZE + 36)
   #define PPC_EXC_VECTOR_PROLOGUE_OFFSET PPC_EXC_GPR_OFFSET(4)
-  #define PPC_EXC_GPR_LOAD lwz
-  #define PPC_EXC_GPR_STORE stw
   #define PPC_EXC_MINIMAL_FRAME_SIZE 96
   #define PPC_EXC_FRAME_SIZE 176
 #else
-  #define PPC_EXC_GPR_TYPE uint64_t
-  #define PPC_EXC_GPR_SIZE 8
   #define PPC_EXC_SPEFSCR_OFFSET 36
   #define PPC_EXC_ACC_OFFSET 40
-  #define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_EXC_GPR_SIZE + 48)
+  #define PPC_EXC_GPR_OFFSET(gpr) ((gpr) * PPC_GPR_SIZE + 48)
   #define PPC_EXC_VECTOR_PROLOGUE_OFFSET (PPC_EXC_GPR_OFFSET(4) + 4)
-  #define PPC_EXC_GPR_LOAD evldd
-  #define PPC_EXC_GPR_STORE evstdd
   #define PPC_EXC_MINIMAL_FRAME_SIZE 160
   #define PPC_EXC_FRAME_SIZE 320
 #endif
@@ -268,38 +260,38 @@ typedef struct {
     uint32_t EXC_SPEFSCR;
     uint64_t EXC_ACC;
   #endif
-  PPC_EXC_GPR_TYPE GPR0;
-  PPC_EXC_GPR_TYPE GPR1;
-  PPC_EXC_GPR_TYPE GPR2;
-  PPC_EXC_GPR_TYPE GPR3;
-  PPC_EXC_GPR_TYPE GPR4;
-  PPC_EXC_GPR_TYPE GPR5;
-  PPC_EXC_GPR_TYPE GPR6;
-  PPC_EXC_GPR_TYPE GPR7;
-  PPC_EXC_GPR_TYPE GPR8;
-  PPC_EXC_GPR_TYPE GPR9;
-  PPC_EXC_GPR_TYPE GPR10;
-  PPC_EXC_GPR_TYPE GPR11;
-  PPC_EXC_GPR_TYPE GPR12;
-  PPC_EXC_GPR_TYPE GPR13;
-  PPC_EXC_GPR_TYPE GPR14;
-  PPC_EXC_GPR_TYPE GPR15;
-  PPC_EXC_GPR_TYPE GPR16;
-  PPC_EXC_GPR_TYPE GPR17;
-  PPC_EXC_GPR_TYPE GPR18;
-  PPC_EXC_GPR_TYPE GPR19;
-  PPC_EXC_GPR_TYPE GPR20;
-  PPC_EXC_GPR_TYPE GPR21;
-  PPC_EXC_GPR_TYPE GPR22;
-  PPC_EXC_GPR_TYPE GPR23;
-  PPC_EXC_GPR_TYPE GPR24;
-  PPC_EXC_GPR_TYPE GPR25;
-  PPC_EXC_GPR_TYPE GPR26;
-  PPC_EXC_GPR_TYPE GPR27;
-  PPC_EXC_GPR_TYPE GPR28;
-  PPC_EXC_GPR_TYPE GPR29;
-  PPC_EXC_GPR_TYPE GPR30;
-  PPC_EXC_GPR_TYPE GPR31;
+  PPC_GPR_TYPE GPR0;
+  PPC_GPR_TYPE GPR1;
+  PPC_GPR_TYPE GPR2;
+  PPC_GPR_TYPE GPR3;
+  PPC_GPR_TYPE GPR4;
+  PPC_GPR_TYPE GPR5;
+  PPC_GPR_TYPE GPR6;
+  PPC_GPR_TYPE GPR7;
+  PPC_GPR_TYPE GPR8;
+  PPC_GPR_TYPE GPR9;
+  PPC_GPR_TYPE GPR10;
+  PPC_GPR_TYPE GPR11;
+  PPC_GPR_TYPE GPR12;
+  PPC_GPR_TYPE GPR13;
+  PPC_GPR_TYPE GPR14;
+  PPC_GPR_TYPE GPR15;
+  PPC_GPR_TYPE GPR16;
+  PPC_GPR_TYPE GPR17;
+  PPC_GPR_TYPE GPR18;
+  PPC_GPR_TYPE GPR19;
+  PPC_GPR_TYPE GPR20;
+  PPC_GPR_TYPE GPR21;
+  PPC_GPR_TYPE GPR22;
+  PPC_GPR_TYPE GPR23;
+  PPC_GPR_TYPE GPR24;
+  PPC_GPR_TYPE GPR25;
+  PPC_GPR_TYPE GPR26;
+  PPC_GPR_TYPE GPR27;
+  PPC_GPR_TYPE GPR28;
+  PPC_GPR_TYPE GPR29;
+  PPC_GPR_TYPE GPR30;
+  PPC_GPR_TYPE GPR31;
   unsigned EXC_MSR;
   unsigned EXC_DAR;
 } BSP_Exception_frame;
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
index 1a34609..5c0f8d1 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu.c
@@ -65,6 +65,7 @@ void _CPU_Context_Initialize(
   bool              is_fp
 )
 {
+  ppc_context *the_ppc_context;
   uint32_t   msr_value;
   uint32_t   sp;
 
@@ -122,35 +123,10 @@ void _CPU_Context_Initialize(
 
   memset( the_context, 0, sizeof( *the_context ) );
 
-  PPC_CONTEXT_SET_SP( the_context, sp );
-  PPC_CONTEXT_SET_PC( the_context, (uint32_t) entry_point );
-  PPC_CONTEXT_SET_MSR( the_context, msr_value );
-
-#ifndef __SPE__
-#if (PPC_ABI == PPC_ABI_SVR4)
-  /*
-   * SVR4 says R2 is for 'system-reserved' use; it cannot hurt to
-   * propagate R2 to all task contexts.
-   */
-  { uint32_t    r2 = 0;
-    unsigned    r13 = 0;
-    __asm__ volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
-
-    the_context->gpr2 = r2;
-    the_context->gpr13 = r13;
-  }
-#elif (PPC_ABI == PPC_ABI_EABI)
-  { uint32_t    r2 = 0;
-    unsigned    r13 = 0;
-    __asm__ volatile ("mr %0,2; mr %1,13" : "=r" ((r2)), "=r" ((r13)));
-
-    the_context->gpr2 = r2;
-    the_context->gpr13 = r13;
-  }
-#else
-#error unsupported PPC_ABI
-#endif
-#endif /* __SPE__ */
+  the_ppc_context = ppc_get_context( the_context );
+  the_ppc_context->gpr1 = sp;
+  the_ppc_context->msr = msr_value;
+  the_ppc_context->lr = (uint32_t) entry_point;
 
 #ifdef __ALTIVEC__
   _CPU_Context_initialize_altivec(the_context);
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
index 3568a0e..04fb8b1 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
@@ -23,7 +23,7 @@
  *  COPYRIGHT (c) 1989-1997.
  *  On-Line Applications Research Corporation (OAR).
  *
- *  Copyright (c) 2011 embedded brains GmbH.
+ *  Copyright (c) 2011-2012 embedded brains GmbH.
  *
  *  The license and distribution terms for this file may in
  *  the file LICENSE in this distribution or at
@@ -35,55 +35,33 @@
 #include <rtems/score/cpu.h>
 #include <bspopts.h>
 
-#if BSP_DATA_CACHE_ENABLED && PPC_CACHE_ALIGNMENT == 32
-  #define DATA_CACHE_ALIGNMENT(reg) \
-    li reg, PPC_CACHE_ALIGNMENT
-  #define DATA_CACHE_ZERO(rega, regb) \
-    dcbz rega, regb
+#if PPC_DEFAULT_CACHE_LINE_SIZE != 32
+  #error "unexpected PPC_DEFAULT_CACHE_LINE_SIZE value"
+#endif
+
+#ifdef BSP_USE_DATA_CACHE_BLOCK_TOUCH
   #define DATA_CACHE_TOUCH(rega, regb) \
-    dcbt rega, regb
-  #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset) \
-    li reg, offset; dcbz reg, r3; dcbt reg, r4
+	dcbt rega, regb
 #else
-  #define DATA_CACHE_ALIGNMENT(reg)
-  #define DATA_CACHE_ZERO(rega, regb)
   #define DATA_CACHE_TOUCH(rega, regb)
+#endif
+
+#if BSP_DATA_CACHE_ENABLED && PPC_CACHE_ALIGNMENT == 32
   #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset) \
-    li reg, offset
+	li reg, offset; dcbz reg, r3; DATA_CACHE_TOUCH(reg, r4)
+#else
+  #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset)
 #endif
 
+#define PPC_CONTEXT_CACHE_LINE_0 32
+#define PPC_CONTEXT_CACHE_LINE_1 64
+#define PPC_CONTEXT_CACHE_LINE_2 96
+#define PPC_CONTEXT_CACHE_LINE_3 128
+#define PPC_CONTEXT_CACHE_LINE_4 160
+
 /*
  * Offsets for various Contexts
  */
-	.set	GP_1, 0
-	.set	GP_2, (GP_1 + 4)
-	.set	GP_13, (GP_2 + 4)
-	.set	GP_14, (GP_13 + 4)
-
-	.set	GP_15, (GP_14 + 4)
-	.set	GP_16, (GP_15 + 4)
-	.set	GP_17, (GP_16 + 4)
-	.set	GP_18, (GP_17 + 4)
-
-	.set	GP_19, (GP_18 + 4)
-	.set	GP_20, (GP_19 + 4)
-	.set	GP_21, (GP_20 + 4)
-	.set	GP_22, (GP_21 + 4)
-
-	.set	GP_23, (GP_22 + 4)
-	.set	GP_24, (GP_23 + 4)
-	.set	GP_25, (GP_24 + 4)
-	.set	GP_26, (GP_25 + 4)
-
-	.set	GP_27, (GP_26 + 4)
-	.set	GP_28, (GP_27 + 4)
-	.set	GP_29, (GP_28 + 4)
-	.set	GP_30, (GP_29 + 4)
-
-	.set	GP_31, (GP_30 + 4)
-	.set	GP_CR, (GP_31 + 4)
-	.set	GP_PC, (GP_CR + 4)
-	.set	GP_MSR, (GP_PC + 4)
 
 #if (PPC_HAS_DOUBLE==1)
 	.set	FP_SIZE,	8
@@ -129,38 +107,6 @@
 	.set	FP_31, (FP_30 + FP_SIZE)
 	.set	FP_FPSCR, (FP_31 + FP_SIZE)
 
-	.set	IP_LINK, 0
-	.set	IP_0, (IP_LINK + 8)
-	.set	IP_2, (IP_0 + 4)
-
-	.set	IP_3, (IP_2 + 4)
-	.set	IP_4, (IP_3 + 4)
-	.set	IP_5, (IP_4 + 4)
-	.set	IP_6, (IP_5 + 4)
-
-	.set	IP_7, (IP_6 + 4)
-	.set	IP_8, (IP_7 + 4)
-	.set	IP_9, (IP_8 + 4)
-	.set	IP_10, (IP_9 + 4)
-
-	.set	IP_11, (IP_10 + 4)
-	.set	IP_12, (IP_11 + 4)
-	.set	IP_13, (IP_12 + 4)
-	.set	IP_28, (IP_13 + 4)
-
-	.set	IP_29, (IP_28 + 4)
-	.set	IP_30, (IP_29 + 4)
-	.set	IP_31, (IP_30 + 4)
-	.set	IP_CR, (IP_31 + 4)
-
-	.set	IP_CTR, (IP_CR + 4)
-	.set	IP_XER, (IP_CTR + 4)
-	.set	IP_LR, (IP_XER + 4)
-	.set	IP_PC, (IP_LR + 4)
-
-	.set	IP_MSR, (IP_PC + 4)
-	.set	IP_END, (IP_MSR + 16)
-
 	BEGIN_CODE
 /*
  *  _CPU_Context_save_fp_context
@@ -300,111 +246,15 @@ PROC (_CPU_Context_restore_fp):
 #endif
 	blr
 
-/*  _CPU_Context_switch
- *
- *  This routine performs a normal non-FP context switch.
- */
 	ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
 	PUBLIC_PROC (_CPU_Context_switch)
 PROC (_CPU_Context_switch):
-#ifndef __SPE__
+
+#ifdef BSP_USE_SYNC_IN_CONTEXT_SWITCH
 	sync
 	isync
-	/* This assumes that all the registers are in the given order */
-	DATA_CACHE_ALIGNMENT(r5)
-	addi	r9,r3,-4
-	DATA_CACHE_ZERO(r5, r9)
-#ifdef RTEMS_MULTIPROCESSING
-	/*
-	 * We have to clear the reservation of the executing thread.  See also
-	 * Book E section 6.1.6.2 "Atomic Update Primitives".
-	 */
-	li	r10, GP_1 + 4
-	stwcx.	r1, r9, r10
-#endif
-	stw	r1, GP_1+4(r9)
-	stw	r2, GP_2+4(r9)
-#if (PPC_USE_MULTIPLE == 1)
-	addi	r9, r9, GP_18+4
-	DATA_CACHE_ZERO(r5, r9)
-	stmw	r13, GP_13-GP_18(r9)
-#else
-	stw	r13, GP_13+4(r9)
-	stw	r14, GP_14+4(r9)
-	stw	r15, GP_15+4(r9)
-	stw	r16, GP_16+4(r9)
-	stw	r17, GP_17+4(r9)
-	stwu	r18, GP_18+4(r9)
-	DATA_CACHE_ZERO(r5, r9)
-	stw	r19, GP_19-GP_18(r9)
-	stw	r20, GP_20-GP_18(r9)
-	stw	r21, GP_21-GP_18(r9)
-	stw	r22, GP_22-GP_18(r9)
-	stw	r23, GP_23-GP_18(r9)
-	stw	r24, GP_24-GP_18(r9)
-	stw	r25, GP_25-GP_18(r9)
-	stw	r26, GP_26-GP_18(r9)
-	stw	r27, GP_27-GP_18(r9)
-	stw	r28, GP_28-GP_18(r9)
-	stw	r29, GP_29-GP_18(r9)
-	stw	r30, GP_30-GP_18(r9)
-	stw	r31, GP_31-GP_18(r9)
-#endif
-	DATA_CACHE_TOUCH(r0, r4)
-	mfcr	r6
-	stw	r6, GP_CR-GP_18(r9)
-	mflr	r7
-	stw	r7, GP_PC-GP_18(r9)
-	mfmsr	r8
-	stw	r8, GP_MSR-GP_18(r9)
-
-#ifdef __ALTIVEC__
-	mr      r14, r4 
-	EXTERN_PROC(_CPU_Context_switch_altivec)
-	bl		_CPU_Context_switch_altivec
-	mr      r4, r14
-	DATA_CACHE_ALIGNMENT(r5)
-#endif
-
-	DATA_CACHE_TOUCH(r5, r4)
-	lwz	r1, GP_1(r4)
-	lwz	r2, GP_2(r4)
-#if (PPC_USE_MULTIPLE == 1)
-	addi	r4, r4, GP_19
-	DATA_CACHE_TOUCH(r5, r4)
-	lmw	r13, GP_13-GP_19(r4)
-#else
-	lwz	r13, GP_13(r4)
-	lwz	r14, GP_14(r4)
-	lwz	r15, GP_15(r4)
-	lwz	r16, GP_16(r4)
-	lwz	r17, GP_17(r4)
-	lwz	r18, GP_18(r4)
-	lwzu	r19, GP_19(r4)
-	DATA_CACHE_TOUCH(r5, r4)
-	lwz	r20, GP_20-GP_19(r4)
-	lwz	r21, GP_21-GP_19(r4)
-	lwz	r22, GP_22-GP_19(r4)
-	lwz	r23, GP_23-GP_19(r4)
-	lwz	r24, GP_24-GP_19(r4)
-	lwz	r25, GP_25-GP_19(r4)
-	lwz	r26, GP_26-GP_19(r4)
-	lwz	r27, GP_27-GP_19(r4)
-	lwz	r28, GP_28-GP_19(r4)
-	lwz	r29, GP_29-GP_19(r4)
-	lwz	r30, GP_30-GP_19(r4)
-	lwz	r31, GP_31-GP_19(r4)
 #endif
-	lwz	r6, GP_CR-GP_19(r4)
-	lwz	r7, GP_PC-GP_19(r4)
-	lwz	r8, GP_MSR-GP_19(r4)
-	mtcrf	255, r6
-	mtlr	r7
-	mtmsr	r8
-	isync
 
-	blr
-#else /* __SPE__ */
 	/* Align to a cache line */
 	clrrwi	r3, r3, 5
 	clrrwi	r4, r4, 5
@@ -421,139 +271,115 @@ PROC (_CPU_Context_switch):
 	/*
 	 * We have to clear the reservation of the executing thread.  See also
 	 * Book E section 6.1.6.2 "Atomic Update Primitives".
-	 *
-	 * Here we assume PPC_CONTEXT_OFFSET_SP == PPC_CONTEXT_CACHE_LINE_0.
 	 */
+  #if PPC_CONTEXT_OFFSET_GPR1 != PPC_CONTEXT_CACHE_LINE_0 \
+    || !BSP_DATA_CACHE_ENABLED \
+    || PPC_CACHE_ALIGNMENT != 32
+	li	r10, PPC_CONTEXT_OFFSET_GPR1
+  #endif
 	stwcx.	r1, r3, r10
 #endif
-	stw	r1, PPC_CONTEXT_OFFSET_SP(r3)
+	stw	r1, PPC_CONTEXT_OFFSET_GPR1(r3)
 	stw	r5, PPC_CONTEXT_OFFSET_MSR(r3)
 	stw	r6, PPC_CONTEXT_OFFSET_LR(r3)
 	stw	r7, PPC_CONTEXT_OFFSET_CR(r3)
-	evstdd	r14, PPC_CONTEXT_OFFSET_GPR14(r3)
-	evstdd	r15, PPC_CONTEXT_OFFSET_GPR15(r3)
+	PPC_GPR_STORE	r14, PPC_CONTEXT_OFFSET_GPR14(r3)
+	PPC_GPR_STORE	r15, PPC_CONTEXT_OFFSET_GPR15(r3)
+
+#if PPC_CONTEXT_OFFSET_GPR20 == PPC_CONTEXT_CACHE_LINE_2
+	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_2)
+#endif
+
+	PPC_GPR_STORE	r16, PPC_CONTEXT_OFFSET_GPR16(r3)
+	PPC_GPR_STORE	r17, PPC_CONTEXT_OFFSET_GPR17(r3)
 
+#if PPC_CONTEXT_OFFSET_GPR26 == PPC_CONTEXT_CACHE_LINE_2
 	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_2)
+#endif
 
-	evstdd	r16, PPC_CONTEXT_OFFSET_GPR16(r3)
-	evstdd	r17, PPC_CONTEXT_OFFSET_GPR17(r3)
-	evstdd	r18, PPC_CONTEXT_OFFSET_GPR18(r3)
-	evstdd	r19, PPC_CONTEXT_OFFSET_GPR19(r3)
+	PPC_GPR_STORE	r18, PPC_CONTEXT_OFFSET_GPR18(r3)
+	PPC_GPR_STORE	r19, PPC_CONTEXT_OFFSET_GPR19(r3)
 
+#if PPC_CONTEXT_OFFSET_GPR24 == PPC_CONTEXT_CACHE_LINE_3
 	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_3)
+#endif
 
-	evstdd	r20, PPC_CONTEXT_OFFSET_GPR20(r3)
-	evstdd	r21, PPC_CONTEXT_OFFSET_GPR21(r3)
-	evstdd	r22, PPC_CONTEXT_OFFSET_GPR22(r3)
-	evstdd	r23, PPC_CONTEXT_OFFSET_GPR23(r3)
+	PPC_GPR_STORE	r20, PPC_CONTEXT_OFFSET_GPR20(r3)
+	PPC_GPR_STORE	r21, PPC_CONTEXT_OFFSET_GPR21(r3)
+	PPC_GPR_STORE	r22, PPC_CONTEXT_OFFSET_GPR22(r3)
+	PPC_GPR_STORE	r23, PPC_CONTEXT_OFFSET_GPR23(r3)
 
+#if PPC_CONTEXT_OFFSET_GPR28 == PPC_CONTEXT_CACHE_LINE_4
 	DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_4)
+#endif
 
-	evstdd	r24, PPC_CONTEXT_OFFSET_GPR24(r3)
-	evstdd	r25, PPC_CONTEXT_OFFSET_GPR25(r3)
-	evstdd	r26, PPC_CONTEXT_OFFSET_GPR26(r3)
-	evstdd	r27, PPC_CONTEXT_OFFSET_GPR27(r3)
+	PPC_GPR_STORE	r24, PPC_CONTEXT_OFFSET_GPR24(r3)
+	PPC_GPR_STORE	r25, PPC_CONTEXT_OFFSET_GPR25(r3)
+	PPC_GPR_STORE	r26, PPC_CONTEXT_OFFSET_GPR26(r3)
+	PPC_GPR_STORE	r27, PPC_CONTEXT_OFFSET_GPR27(r3)
 
-	evstdd	r28, PPC_CONTEXT_OFFSET_GPR28(r3)
-	evstdd	r29, PPC_CONTEXT_OFFSET_GPR29(r3)
-	evstdd	r30, PPC_CONTEXT_OFFSET_GPR30(r3)
-	evstdd	r31, PPC_CONTEXT_OFFSET_GPR31(r3)
+	PPC_GPR_STORE	r28, PPC_CONTEXT_OFFSET_GPR28(r3)
+	PPC_GPR_STORE	r29, PPC_CONTEXT_OFFSET_GPR29(r3)
+	PPC_GPR_STORE	r30, PPC_CONTEXT_OFFSET_GPR30(r3)
+	PPC_GPR_STORE	r31, PPC_CONTEXT_OFFSET_GPR31(r3)
 
 	/* Restore context from r4 */
 restore_context:
 
-	lwz	r1, PPC_CONTEXT_OFFSET_SP(r4)
+#ifdef __ALTIVEC__
+	mr	r14, r4 
+	.extern	_CPU_Context_switch_altivec
+	bl	_CPU_Context_switch_altivec
+	mr	r4, r14
+#endif
+
+	lwz	r1, PPC_CONTEXT_OFFSET_GPR1(r4)
 	lwz	r5, PPC_CONTEXT_OFFSET_MSR(r4)
 	lwz	r6, PPC_CONTEXT_OFFSET_LR(r4)
 	lwz	r7, PPC_CONTEXT_OFFSET_CR(r4)
 
-	evldd	r14, PPC_CONTEXT_OFFSET_GPR14(r4)
-	evldd	r15, PPC_CONTEXT_OFFSET_GPR15(r4)
+	PPC_GPR_LOAD	r14, PPC_CONTEXT_OFFSET_GPR14(r4)
+	PPC_GPR_LOAD	r15, PPC_CONTEXT_OFFSET_GPR15(r4)
 
 	DATA_CACHE_TOUCH(r0, r1)
 
-	evldd	r16, PPC_CONTEXT_OFFSET_GPR16(r4)
-	evldd	r17, PPC_CONTEXT_OFFSET_GPR17(r4)
-	evldd	r18, PPC_CONTEXT_OFFSET_GPR18(r4)
-	evldd	r19, PPC_CONTEXT_OFFSET_GPR19(r4)
+	PPC_GPR_LOAD	r16, PPC_CONTEXT_OFFSET_GPR16(r4)
+	PPC_GPR_LOAD	r17, PPC_CONTEXT_OFFSET_GPR17(r4)
+	PPC_GPR_LOAD	r18, PPC_CONTEXT_OFFSET_GPR18(r4)
+	PPC_GPR_LOAD	r19, PPC_CONTEXT_OFFSET_GPR19(r4)
 
-	evldd	r20, PPC_CONTEXT_OFFSET_GPR20(r4)
-	evldd	r21, PPC_CONTEXT_OFFSET_GPR21(r4)
-	evldd	r22, PPC_CONTEXT_OFFSET_GPR22(r4)
-	evldd	r23, PPC_CONTEXT_OFFSET_GPR23(r4)
+	PPC_GPR_LOAD	r20, PPC_CONTEXT_OFFSET_GPR20(r4)
+	PPC_GPR_LOAD	r21, PPC_CONTEXT_OFFSET_GPR21(r4)
+	PPC_GPR_LOAD	r22, PPC_CONTEXT_OFFSET_GPR22(r4)
+	PPC_GPR_LOAD	r23, PPC_CONTEXT_OFFSET_GPR23(r4)
 
-	evldd	r24, PPC_CONTEXT_OFFSET_GPR24(r4)
-	evldd	r25, PPC_CONTEXT_OFFSET_GPR25(r4)
-	evldd	r26, PPC_CONTEXT_OFFSET_GPR26(r4)
-	evldd	r27, PPC_CONTEXT_OFFSET_GPR27(r4)
+	PPC_GPR_LOAD	r24, PPC_CONTEXT_OFFSET_GPR24(r4)
+	PPC_GPR_LOAD	r25, PPC_CONTEXT_OFFSET_GPR25(r4)
+	PPC_GPR_LOAD	r26, PPC_CONTEXT_OFFSET_GPR26(r4)
+	PPC_GPR_LOAD	r27, PPC_CONTEXT_OFFSET_GPR27(r4)
 
-	evldd	r28, PPC_CONTEXT_OFFSET_GPR28(r4)
-	evldd	r29, PPC_CONTEXT_OFFSET_GPR29(r4)
-	evldd	r30, PPC_CONTEXT_OFFSET_GPR30(r4)
-	evldd	r31, PPC_CONTEXT_OFFSET_GPR31(r4)
+	PPC_GPR_LOAD	r28, PPC_CONTEXT_OFFSET_GPR28(r4)
+	PPC_GPR_LOAD	r29, PPC_CONTEXT_OFFSET_GPR29(r4)
+	PPC_GPR_LOAD	r30, PPC_CONTEXT_OFFSET_GPR30(r4)
+	PPC_GPR_LOAD	r31, PPC_CONTEXT_OFFSET_GPR31(r4)
 
 	mtcr	r7
 	mtlr	r6
 	mtmsr	r5
 
+#ifdef BSP_USE_SYNC_IN_CONTEXT_SWITCH
+	isync
+#endif
+
 	blr
-#endif /* __SPE__ */
 
-/*
- *  _CPU_Context_restore
- *
- *  This routine is generally used only to restart self in an
- *  efficient manner.  It may simply be a label in _CPU_Context_switch.
- *
- *  NOTE: May be unnecessary to reload some registers.
- */
-/*
- * ACB: Don't worry about cache optimisation here - this is not THAT critical.
- */
-	ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
 	PUBLIC_PROC (_CPU_Context_restore)
 PROC (_CPU_Context_restore):
-#ifndef __SPE__
-	lwz	r5, GP_CR(r3)
-	lwz	r6, GP_PC(r3)
-	lwz	r7, GP_MSR(r3)
-	mtcrf	255, r5
-	mtlr	r6
-	mtmsr	r7
-	isync
-	lwz	r1, GP_1(r3)
-	lwz	r2, GP_2(r3)
-#if (PPC_USE_MULTIPLE == 1)
-	lmw	r13, GP_13(r3)
-#else
-	lwz	r13, GP_13(r3)
-	lwz	r14, GP_14(r3)
-	lwz	r15, GP_15(r3)
-	lwz	r16, GP_16(r3)
-	lwz	r17, GP_17(r3)
-	lwz	r18, GP_18(r3)
-	lwz	r19, GP_19(r3)
-	lwz	r20, GP_20(r3)
-	lwz	r21, GP_21(r3)
-	lwz	r22, GP_22(r3)
-	lwz	r23, GP_23(r3)
-	lwz	r24, GP_24(r3)
-	lwz	r25, GP_25(r3)
-	lwz	r26, GP_26(r3)
-	lwz	r27, GP_27(r3)
-	lwz	r28, GP_28(r3)
-	lwz	r29, GP_29(r3)
-	lwz	r30, GP_30(r3)
-	lwz	r31, GP_31(r3)
-#endif
-#ifdef __ALTIVEC__
-	EXTERN_PROC(_CPU_Context_restore_altivec)
-	b _CPU_Context_restore_altivec
-#endif
-	blr
-#else /* __SPE__ */
 	/* Align to a cache line */
 	clrrwi	r4, r3, 5
 
+#ifdef __ALTIVEC__
+	li	r3, 0
+#endif
+
 	b	restore_context
-#endif /* __SPE__ */
diff --git a/cpukit/score/cpu/powerpc/cpu.c b/cpukit/score/cpu/powerpc/cpu.c
index b12deae..a1ede94 100644
--- a/cpukit/score/cpu/powerpc/cpu.c
+++ b/cpukit/score/cpu/powerpc/cpu.c
@@ -10,6 +10,43 @@
  */
 
 #ifdef HAVE_CONFIG_H
-#include "config.h"
+  #include "config.h"
 #endif
 
+#include <rtems/system.h>
+#include <rtems/score/cpu.h>
+
+#define PPC_ASSERT_OFFSET(field, off) \
+  RTEMS_STATIC_ASSERT( \
+    offsetof(ppc_context, field) + PPC_DEFAULT_CACHE_LINE_SIZE \
+      == PPC_CONTEXT_OFFSET_ ## off, \
+    ppc_context_offset_ ## field \
+  )
+
+PPC_ASSERT_OFFSET(gpr1, GPR1);
+PPC_ASSERT_OFFSET(msr, MSR);
+PPC_ASSERT_OFFSET(lr, LR);
+PPC_ASSERT_OFFSET(cr, CR);
+PPC_ASSERT_OFFSET(gpr14, GPR14);
+PPC_ASSERT_OFFSET(gpr15, GPR15);
+PPC_ASSERT_OFFSET(gpr16, GPR16);
+PPC_ASSERT_OFFSET(gpr17, GPR17);
+PPC_ASSERT_OFFSET(gpr18, GPR18);
+PPC_ASSERT_OFFSET(gpr19, GPR19);
+PPC_ASSERT_OFFSET(gpr20, GPR20);
+PPC_ASSERT_OFFSET(gpr21, GPR21);
+PPC_ASSERT_OFFSET(gpr22, GPR22);
+PPC_ASSERT_OFFSET(gpr23, GPR23);
+PPC_ASSERT_OFFSET(gpr24, GPR24);
+PPC_ASSERT_OFFSET(gpr25, GPR25);
+PPC_ASSERT_OFFSET(gpr26, GPR26);
+PPC_ASSERT_OFFSET(gpr27, GPR27);
+PPC_ASSERT_OFFSET(gpr28, GPR28);
+PPC_ASSERT_OFFSET(gpr29, GPR29);
+PPC_ASSERT_OFFSET(gpr30, GPR30);
+PPC_ASSERT_OFFSET(gpr31, GPR31);
+
+RTEMS_STATIC_ASSERT(
+  sizeof(Context_Control) % PPC_DEFAULT_CACHE_LINE_SIZE == 0,
+  ppc_context_size
+);
diff --git a/cpukit/score/cpu/powerpc/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/rtems/score/cpu.h
index 3ed76df..e845362 100644
--- a/cpukit/score/cpu/powerpc/rtems/score/cpu.h
+++ b/cpukit/score/cpu/powerpc/rtems/score/cpu.h
@@ -23,7 +23,7 @@
  *
  *  Copyright (c) 2001 Surrey Satellite Technology Limited (SSTL).
  *
- *  Copyright (c) 2010-2011 embedded brains GmbH.
+ *  Copyright (c) 2010-2012 embedded brains GmbH.
  *
  *  The license and distribution terms for this file may be
  *  found in the file LICENSE in this distribution or at
@@ -249,159 +249,110 @@ extern "C" {
  *  a debugger such as gdb.  But that is another problem.
  */
 
-#ifndef ASM
-
-typedef struct {
-  #ifndef __SPE__
-    uint32_t   gpr1;	/* Stack pointer for all */
-    uint32_t   gpr2;	/* Reserved SVR4, section ptr EABI + */
-    uint32_t   gpr13;	/* Section ptr SVR4/EABI */
-    uint32_t   gpr14;	/* Non volatile for all */
-    uint32_t   gpr15;	/* Non volatile for all */
-    uint32_t   gpr16;	/* Non volatile for all */
-    uint32_t   gpr17;	/* Non volatile for all */
-    uint32_t   gpr18;	/* Non volatile for all */
-    uint32_t   gpr19;	/* Non volatile for all */
-    uint32_t   gpr20;	/* Non volatile for all */
-    uint32_t   gpr21;	/* Non volatile for all */
-    uint32_t   gpr22;	/* Non volatile for all */
-    uint32_t   gpr23;	/* Non volatile for all */
-    uint32_t   gpr24;	/* Non volatile for all */
-    uint32_t   gpr25;	/* Non volatile for all */
-    uint32_t   gpr26;	/* Non volatile for all */
-    uint32_t   gpr27;	/* Non volatile for all */
-    uint32_t   gpr28;	/* Non volatile for all */
-    uint32_t   gpr29;	/* Non volatile for all */
-    uint32_t   gpr30;	/* Non volatile for all */
-    uint32_t   gpr31;	/* Non volatile for all */
-    uint32_t   cr;	/* PART of the CR is non volatile for all */
-    uint32_t   pc;	/* Program counter/Link register */
-    uint32_t   msr;	/* Initial interrupt level */
-    #ifdef __ALTIVEC__
-      /*
-       * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
-       * and padding to ensure cache-alignment.  Unfortunately, we can't verify
-       * the cache line size here in the cpukit but altivec support code will
-       * produce an error if this is ever different from 32 bytes.
-       * 
-       * Note: it is the BSP/CPU-support's responsibility to save/restore
-       *       volatile vregs across interrupts and exceptions.
-       */
-      uint8_t altivec[16*12 + 32 + 32];
-    #endif
-  #else
-    /* Non-volatile context according to E500ABIUG and EABI */
-    uint32_t context [
-      8 /* Cache line padding */
-      + 1 /* Stack pointer */
-      + 1 /* MSR */
-      + 1 /* LR */
-      + 1 /* CR */
-      + 18 * 2 /* GPR 14 to GPR 31 */
-    ];
-  #endif
-} Context_Control;
-#endif /* ASM */
-
 #ifndef __SPE__
-  #define PPC_CONTEXT_SET_SP( _context, _sp ) \
-    do { \
-      (_context)->gpr1 = _sp; \
-    } while (0)
+  #define PPC_GPR_TYPE uint32_t
+  #define PPC_GPR_SIZE 4
+  #define PPC_GPR_LOAD lwz
+  #define PPC_GPR_STORE stw
+#else
+  #define PPC_GPR_TYPE uint64_t
+  #define PPC_GPR_SIZE 8
+  #define PPC_GPR_LOAD evldd
+  #define PPC_GPR_STORE evstdd
+#endif
 
-  #define PPC_CONTEXT_GET_CR( _context ) \
-    (_context)->cr
+#define PPC_DEFAULT_CACHE_LINE_SIZE 32
 
-  #define PPC_CONTEXT_GET_MSR( _context ) \
-    (_context)->msr
+#ifndef ASM
 
-  #define PPC_CONTEXT_SET_MSR( _context, _msr ) \
-    do { \
-      (_context)->msr = _msr; \
-    } while (0)
+/* Non-volatile context according to E500ABIUG and EABI */
+typedef struct {
+  uint32_t gpr1;
+  uint32_t msr;
+  uint32_t lr;
+  uint32_t cr;
+  PPC_GPR_TYPE gpr14;
+  PPC_GPR_TYPE gpr15;
+  PPC_GPR_TYPE gpr16;
+  PPC_GPR_TYPE gpr17;
+  PPC_GPR_TYPE gpr18;
+  PPC_GPR_TYPE gpr19;
+  PPC_GPR_TYPE gpr20;
+  PPC_GPR_TYPE gpr21;
+  PPC_GPR_TYPE gpr22;
+  PPC_GPR_TYPE gpr23;
+  PPC_GPR_TYPE gpr24;
+  PPC_GPR_TYPE gpr25;
+  PPC_GPR_TYPE gpr26;
+  PPC_GPR_TYPE gpr27;
+  PPC_GPR_TYPE gpr28;
+  PPC_GPR_TYPE gpr29;
+  PPC_GPR_TYPE gpr30;
+  PPC_GPR_TYPE gpr31;
+  #ifdef __ALTIVEC__
+    /*
+     * 12 non-volatile vector registers, cache-aligned area for vscr/vrsave
+     * and padding to ensure cache-alignment.  Unfortunately, we can't verify
+     * the cache line size here in the cpukit but altivec support code will
+     * produce an error if this is ever different from 32 bytes.
+     * 
+     * Note: it is the BSP/CPU-support's responsibility to save/restore
+     *       volatile vregs across interrupts and exceptions.
+     */
+    uint8_t altivec[16*12 + 32 + PPC_DEFAULT_CACHE_LINE_SIZE];
+  #endif
+} ppc_context;
 
-  #define PPC_CONTEXT_FIRST_SAVED_GPR 13
+typedef struct {
+  uint8_t context [
+    PPC_DEFAULT_CACHE_LINE_SIZE
+      + sizeof(ppc_context)
+      + (sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE == 0
+        ? 0
+          : PPC_DEFAULT_CACHE_LINE_SIZE
+            - sizeof(ppc_context) % PPC_DEFAULT_CACHE_LINE_SIZE)
+  ];
+} Context_Control;
 
-  #define PPC_CONTEXT_GET_FIRST_SAVED( _context ) \
-    (_context)->gpr13
+static inline ppc_context *ppc_get_context( Context_Control *context )
+{
+  uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
+  uintptr_t mask = clsz - 1;
+  uintptr_t addr = (uintptr_t) context;
 
-  #define PPC_CONTEXT_GET_PC( _context ) \
-    (_context)->pc
+  return (ppc_context *) ((addr & ~mask) + clsz);
+}
 
-  #define PPC_CONTEXT_SET_PC( _context, _pc ) \
-    do { \
-      (_context)->pc = _pc; \
-    } while (0)
+#define _CPU_Context_Get_SP( _context ) \
+  ppc_get_context(_context)->gpr1
+#endif /* ASM */
 
-  #define _CPU_Context_Get_SP( _context ) \
-    (_context)->gpr1
-#else
-  #define PPC_CONTEXT_CACHE_LINE_0 32
-  #define PPC_CONTEXT_OFFSET_SP 32
-  #define PPC_CONTEXT_OFFSET_MSR 36
-  #define PPC_CONTEXT_OFFSET_LR 40
-  #define PPC_CONTEXT_OFFSET_CR 44
-  #define PPC_CONTEXT_OFFSET_GPR14 48
-  #define PPC_CONTEXT_OFFSET_GPR15 56
-  #define PPC_CONTEXT_CACHE_LINE_1 64
-  #define PPC_CONTEXT_OFFSET_GPR16 64
-  #define PPC_CONTEXT_OFFSET_GPR17 72
-  #define PPC_CONTEXT_OFFSET_GPR18 80
-  #define PPC_CONTEXT_OFFSET_GPR19 88
-  #define PPC_CONTEXT_CACHE_LINE_2 96
-  #define PPC_CONTEXT_OFFSET_GPR20 96
-  #define PPC_CONTEXT_OFFSET_GPR21 104
-  #define PPC_CONTEXT_OFFSET_GPR22 112
-  #define PPC_CONTEXT_OFFSET_GPR23 120
-  #define PPC_CONTEXT_CACHE_LINE_3 128
-  #define PPC_CONTEXT_OFFSET_GPR24 128
-  #define PPC_CONTEXT_OFFSET_GPR25 136
-  #define PPC_CONTEXT_OFFSET_GPR26 144
-  #define PPC_CONTEXT_OFFSET_GPR27 152
-  #define PPC_CONTEXT_CACHE_LINE_4 160
-  #define PPC_CONTEXT_OFFSET_GPR28 160
-  #define PPC_CONTEXT_OFFSET_GPR29 168
-  #define PPC_CONTEXT_OFFSET_GPR30 176
-  #define PPC_CONTEXT_OFFSET_GPR31 184
-
-  #define PPC_CONTEXT_AREA( _context ) \
-    ((uint32_t *) (((uintptr_t) (_context)) & ~0x1fU))
-
-  #define PPC_CONTEXT_FIELD( _context, _offset ) \
-    PPC_CONTEXT_AREA( _context ) [(_offset) / 4]
-
-  #define PPC_CONTEXT_SET_SP( _context, _sp ) \
-    do { \
-      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_SP ) = _sp; \
-    } while (0)
-
-  #define PPC_CONTEXT_GET_CR( _context ) \
-    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_CR )
-
-  #define PPC_CONTEXT_GET_MSR( _context ) \
-    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_MSR )
-
-  #define PPC_CONTEXT_SET_MSR( _context, _msr ) \
-    do { \
-      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_MSR ) = _msr; \
-    } while (0)
-
-  #define PPC_CONTEXT_FIRST_SAVED_GPR 14
-
-  #define PPC_CONTEXT_GET_FIRST_SAVED( _context ) \
-    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_GPR14 )
-
-  #define PPC_CONTEXT_GET_PC( _context ) \
-    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_LR )
-
-  #define PPC_CONTEXT_SET_PC( _context, _pc ) \
-    do { \
-      PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_LR ) = _pc; \
-    } while (0)
-
-  #define _CPU_Context_Get_SP( _context ) \
-    PPC_CONTEXT_FIELD( _context, PPC_CONTEXT_OFFSET_SP )
-#endif
+#define PPC_CONTEXT_OFFSET_GPR1 32
+#define PPC_CONTEXT_OFFSET_MSR 36
+#define PPC_CONTEXT_OFFSET_LR 40
+#define PPC_CONTEXT_OFFSET_CR 44
+
+#define PPC_CONTEXT_GPR_OFFSET( gpr ) \
+  (((gpr) - 14) * PPC_GPR_SIZE + 48)
+
+#define PPC_CONTEXT_OFFSET_GPR14 PPC_CONTEXT_GPR_OFFSET( 14 )
+#define PPC_CONTEXT_OFFSET_GPR15 PPC_CONTEXT_GPR_OFFSET( 15 )
+#define PPC_CONTEXT_OFFSET_GPR16 PPC_CONTEXT_GPR_OFFSET( 16 )
+#define PPC_CONTEXT_OFFSET_GPR17 PPC_CONTEXT_GPR_OFFSET( 17 )
+#define PPC_CONTEXT_OFFSET_GPR18 PPC_CONTEXT_GPR_OFFSET( 18 )
+#define PPC_CONTEXT_OFFSET_GPR19 PPC_CONTEXT_GPR_OFFSET( 19 )
+#define PPC_CONTEXT_OFFSET_GPR20 PPC_CONTEXT_GPR_OFFSET( 20 )
+#define PPC_CONTEXT_OFFSET_GPR21 PPC_CONTEXT_GPR_OFFSET( 21 )
+#define PPC_CONTEXT_OFFSET_GPR22 PPC_CONTEXT_GPR_OFFSET( 22 )
+#define PPC_CONTEXT_OFFSET_GPR23 PPC_CONTEXT_GPR_OFFSET( 23 )
+#define PPC_CONTEXT_OFFSET_GPR24 PPC_CONTEXT_GPR_OFFSET( 24 )
+#define PPC_CONTEXT_OFFSET_GPR25 PPC_CONTEXT_GPR_OFFSET( 25 )
+#define PPC_CONTEXT_OFFSET_GPR26 PPC_CONTEXT_GPR_OFFSET( 26 )
+#define PPC_CONTEXT_OFFSET_GPR27 PPC_CONTEXT_GPR_OFFSET( 27 )
+#define PPC_CONTEXT_OFFSET_GPR28 PPC_CONTEXT_GPR_OFFSET( 28 )
+#define PPC_CONTEXT_OFFSET_GPR29 PPC_CONTEXT_GPR_OFFSET( 29 )
+#define PPC_CONTEXT_OFFSET_GPR30 PPC_CONTEXT_GPR_OFFSET( 30 )
+#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
 
 #ifndef ASM
 typedef struct {