[PATCH v2 3/6] cpukit/aarch64: Add Exception Manager support

Kinsey Moore kinsey.moore at oarcorp.com
Thu Sep 23 00:16:51 UTC 2021


This adds the calls and support functions necessary to support the
Exception Manager on AArch64.
---
 .../cpu/aarch64/aarch64-exception-default.S   |  81 +--------
 .../cpu/aarch64/aarch64-exception-default.c   |  62 +++++++
 .../cpu/aarch64/aarch64-exception-interrupt.S | 165 ++++++++++++++++++
 spec/build/cpukit/optexceptionmanager.yml     |   4 +
 4 files changed, 238 insertions(+), 74 deletions(-)
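
As a quick illustration for reviewers, here is a minimal sketch of how the
classification helpers added in this patch could be used. Only
Exception_Manager_Get_class() and
Exception_Manager_Set_resume_next_instruction() come from this patch; the
handler shown is hypothetical and its registration mechanism belongs to the
Exception Manager patches earlier in this series, not to this one.

  #include <stdbool.h>
  #include <rtems/exception.h>
  #include <rtems/score/cpu.h>

  /*
   * Hypothetical handler: classify the exception and, for data aborts,
   * resume execution at the instruction after the faulting access.
   */
  static bool skip_faulting_access( CPU_Exception_frame *ef )
  {
    Exception_Class ec = Exception_Manager_Get_class( ef );

    if ( ec == EXCEPTION_DATA_ABORT_READ || ec == EXCEPTION_DATA_ABORT_WRITE ) {
      Exception_Manager_Set_resume_next_instruction( ef );
      return true;
    }

    return false;
  }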

diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index 2a4ddbcc61..ef95619fb1 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -203,14 +203,13 @@ curr_el_spx_sync_get_pc:			/* The current PC is now in LR */
 /* Store the vector */
 	str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
 	mov x0, sp
-	blr x1
-/* bl to CEF restore routine (doesn't restore lr) */
-	bl .pop_exception_context
-	ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]	/* get lr from CEF */
-/* drop space reserved for CEF and clear exclusive */
-	add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
-	msr spsel, #1				/* switch to thread stack */
-	eret					/* exception return */
+/* Not expected to return */
+	br x1
+	nop
+	nop
+	nop
+	nop
+	nop
 	nop
 	nop
 	nop
@@ -475,69 +474,3 @@ twiddle:
 	stp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
 /* Done, return to exception handler */
 	ret
-
-/*
- * Apply the exception frame to the current register status, SP points to the EF
- */
-.pop_exception_context:
-/* Pop daif and spsr */
-	ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
-/* Restore daif and spsr */
-	msr DAIF, x2
-	msr SPSR_EL1, x3
-/* Pop FAR and ESR */
-	ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
-/* Restore ESR and FAR */
-	msr ESR_EL1, x2
-	msr FAR_EL1, x3
-/* Pop fpcr and fpsr */
-	ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
-/* Restore fpcr and fpsr */
-	msr FPSR, x2
-	msr FPCR, x3
-/* Pop VFP registers */
-	ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
-	ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
-	ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
-	ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
-	ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
-	ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
-	ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
-	ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
-	ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
-	ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
-	ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
-	ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
-	ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
-	ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
-	ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
-	ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
-/* Pop x0-x29(fp) */
-	ldp x2,  x3,  [sp, #0x10]
-	ldp x4,  x5,  [sp, #0x20]
-	ldp x6,  x7,  [sp, #0x30]
-	ldp x8,  x9,  [sp, #0x40]
-	ldp x10, x11, [sp, #0x50]
-	ldp x12, x13, [sp, #0x60]
-	ldp x14, x15, [sp, #0x70]
-	ldp x16, x17, [sp, #0x80]
-	ldp x18, x19, [sp, #0x90]
-	ldp x20, x21, [sp, #0xa0]
-	ldp x22, x23, [sp, #0xb0]
-	ldp x24, x25, [sp, #0xc0]
-	ldp x26, x27, [sp, #0xd0]
-	ldp x28, x29, [sp, #0xe0]
-/* Pop sp and ELR */
-	ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
-/* Restore thread SP */
-	msr spsel, #1
-	mov sp, x0
-	msr spsel, #0
-/* Restore exception LR */
-	msr ELR_EL1, x1
-	ldp x0,  x1,  [sp, #0x00]
-
-/* We must clear reservations to ensure consistency with atomic operations */
-	clrex
-
-	ret
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.c b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
index 2ebb3dee9f..4e7484f718 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.c
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
@@ -43,8 +43,70 @@
 
 #include <rtems/score/cpu.h>
 #include <rtems/fatal.h>
+#include <rtems/exception.h>
 
 void _AArch64_Exception_default( CPU_Exception_frame *frame )
 {
   rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
 }
+
+void Exception_Manager_Copy_CPU_Exception_frame(
+  CPU_Exception_frame *new_ef,
+  CPU_Exception_frame *old_ef
+)
+{
+  *new_ef = *old_ef;
+}
+
+#define ESR_EC_GET(reg) ( ( ( reg ) >> 26 ) & 0x3f )
+#define ESR_ISS_GET(reg) ( ( reg ) & 0x1ffffff )
+#define WnR ( 1 << 6 )
+
+Exception_Class Exception_Manager_Get_class( CPU_Exception_frame *ef )
+{
+  uint64_t EC = ESR_EC_GET( ef->register_syndrome );
+  uint64_t ISS = ESR_ISS_GET( ef->register_syndrome );
+
+  switch ( EC ) {
+    case 0x1:   /* WFI/WFE */
+    case 0x7:   /* SVE/SIMD/FP */
+    case 0xa:   /* LD64B/ST64B* */
+    case 0x18:  /* MSR/MRS/system instruction */
+    case 0x19:  /* SVE */
+      return EXCEPTION_TRAPPED_INSTRUCTION;
+    case 0x15:
+      return EXCEPTION_SUPERVISOR;
+    case 0x21:
+      return EXCEPTION_INSTRUCTION_ABORT;
+    case 0x22:
+      return EXCEPTION_PC_ALIGNMENT;
+    case 0x25:
+      return ( ISS & WnR ) ? EXCEPTION_DATA_ABORT_WRITE : EXCEPTION_DATA_ABORT_READ;
+    case 0x26:
+      return EXCEPTION_SP_ALIGNMENT;
+    case 0x2c:
+      return EXCEPTION_FPU;
+    case 0x31:
+      return EXCEPTION_BREAKPOINT;
+    case 0x33:
+      return EXCEPTION_STEP;
+    case 0x35:
+      return EXCEPTION_WATCHPOINT;
+    case 0x3c:
+      return EXCEPTION_BREAK_INSTRUCTION;
+    /* AArch32-specific, from-EL0, etc. */
+    default:
+      return EXCEPTION_UNKNOWN;
+  }
+}
+
+void Exception_Manager_Set_resume( CPU_Exception_frame *ef, void *address )
+{
+  ef->register_pc = address;
+}
+
+#define AARCH64_INSTRUCTION_SIZE 4
+void Exception_Manager_Set_resume_next_instruction( CPU_Exception_frame *ef )
+{
+  ef->register_pc += AARCH64_INSTRUCTION_SIZE;
+}
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index b206f5764b..4ba05a17e7 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -45,6 +45,8 @@
 
 .globl	_AArch64_Exception_interrupt_no_nest
 .globl	_AArch64_Exception_interrupt_nest
+.globl	Exception_Manager_dispatch_and_resume
+.globl	Exception_Manager_resume
 
 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
   #ifdef RTEMS_SMP
@@ -324,3 +326,166 @@ Return to embedded exception vector code
 	pop_interrupt_context
 /* Return to vector for final cleanup */
 	ret
+
+/*
+ * This function is expected to resume execution using the CPU_Exception_frame
+ * provided in x0. This function does not adhere to the AAPCS64 calling
+ * convention because all necessary state is contained within the exception
+ * frame.
+ */
+Exception_Manager_resume:
+/* Reset stack pointer */
+	mov	sp, x0
+
+/* call CEF restore routine (doesn't restore lr) */
+	bl .pop_exception_context
+
+/* get lr from CEF */
+	ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+	add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+
+/* switch to thread stack */
+	msr spsel, #1
+	eret
+
+/*
+ * This function is expected to undo dispatch disabling, perform dispatch, and
+ * resume execution using the CPU_Exception_frame provided in x0. This function
+ * does not adhere to the AAPCS64 calling convention because all necessary
+ * state is contained within the exception frame.
+ */
+Exception_Manager_dispatch_and_resume:
+/* Get per-CPU control of current processor */
+	GET_SELF_CPU_CONTROL	SELF_CPU_CONTROL_GET_REG
+
+/* Reset stack pointer */
+	mov	sp, x0
+
+/* Check dispatch disable and perform dispatch if necessary */
+/* Load some per-CPU variables */
+	ldr	w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+	ldrb	w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
+	ldr	w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
+	ldr	w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* Decrement levels and determine thread dispatch state */
+	eor	w1, w1, w0
+	sub	w0, w0, #1
+	orr	w1, w1, w0
+	orr	w1, w1, w2
+	sub	w3, w3, #1
+
+/* Store thread dispatch disable and ISR nest levels */
+	str	w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+	str	w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* Store should_skip_thread_dispatch in x22 */
+	mov x22, x1
+
+/*
+ * It is now safe to assume that the source of the exception has been resolved.
+ * Copy the exception frame to the thread stack to be compatible with thread
+ * dispatch. This may arbitrarily clobber corruptible registers since all
+ * important state is contained in the exception frame.
+ *
+ * No need to save current LR since this will never return to the caller.
+ */
+	bl .move_exception_frame_and_switch_to_thread_stack
+
+/*
+ * Check the combined thread dispatch necessary, ISR dispatch disable, and
+ * thread dispatch disable level result computed above (held in x22).
+ */
+	cmp     x22, #0
+	bne     .Lno_need_thread_dispatch_resume
+	bl .AArch64_Perform_Thread_Dispatch
+.Lno_need_thread_dispatch_resume:
+/* call CEF restore routine (doesn't restore lr) */
+	bl .pop_exception_context
+
+/* get lr from CEF */
+	ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+	add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+	eret
+
+/* Assumes sp currently points to the EF on the exception stack and SPSel is 0 */
+.move_exception_frame_and_switch_to_thread_stack:
+	mov x1, sp                                                      /* Set x1 to the current exception frame */
+	msr spsel, #1                                                   /* switch to thread stack */
+	ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]       /* Get thread SP from exception frame since it may have been updated */
+	mov sp, x0
+	sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* reserve space for CEF */
+	mov x0, sp                                                      /* Set x0 to the new exception frame */
+	mov x20, lr                                                     /* Save LR */
+	bl Exception_Manager_Copy_CPU_Exception_frame                   /* Copy exception frame to reserved thread stack space */
+	mov lr, x20                                                     /* Restore LR */
+	msr spsel, #0							/* switch to exception stack */
+	add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE			/* release space for CEF on exception stack */
+	msr spsel, #1							/* switch to thread stack */
+	ret
+
+/*
+ * Apply the exception frame to the current register status, SP points to the EF
+ */
+.pop_exception_context:
+/* Pop daif and spsr */
+	ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
+/* Restore daif and spsr */
+	msr DAIF, x2
+	msr SPSR_EL1, x3
+/* Pop FAR and ESR */
+	ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
+/* Restore ESR and FAR */
+	msr ESR_EL1, x2
+	msr FAR_EL1, x3
+/* Pop fpcr and fpsr */
+	ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
+/* Restore fpcr and fpsr */
+	msr FPSR, x2
+	msr FPCR, x3
+/* Pop VFP registers */
+	ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
+	ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
+	ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
+	ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
+	ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
+	ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
+	ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
+	ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
+	ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
+	ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
+	ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
+	ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
+	ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
+	ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
+	ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
+	ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
+/* Pop x0-x29(fp) */
+	ldp x2,  x3,  [sp, #0x10]
+	ldp x4,  x5,  [sp, #0x20]
+	ldp x6,  x7,  [sp, #0x30]
+	ldp x8,  x9,  [sp, #0x40]
+	ldp x10, x11, [sp, #0x50]
+	ldp x12, x13, [sp, #0x60]
+	ldp x14, x15, [sp, #0x70]
+	ldp x16, x17, [sp, #0x80]
+	ldp x18, x19, [sp, #0x90]
+	ldp x20, x21, [sp, #0xa0]
+	ldp x22, x23, [sp, #0xb0]
+	ldp x24, x25, [sp, #0xc0]
+	ldp x26, x27, [sp, #0xd0]
+	ldp x28, x29, [sp, #0xe0]
+/* Pop ELR, SP already popped */
+	ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)]
+/* Restore exception LR */
+	msr ELR_EL1, x1
+	ldp x0,  x1,  [sp, #0x00]
+
+/* We must clear reservations to ensure consistency with atomic operations */
+	clrex
+
+	ret
diff --git a/spec/build/cpukit/optexceptionmanager.yml b/spec/build/cpukit/optexceptionmanager.yml
index 2188e16cf1..4d4e09c86a 100644
--- a/spec/build/cpukit/optexceptionmanager.yml
+++ b/spec/build/cpukit/optexceptionmanager.yml
@@ -12,6 +12,10 @@ default-by-variant: []
 description: |
   Enable the Exception Manager
 enabled-by:
+- aarch64/xilinx_zynqmp_ilp32_qemu
+- aarch64/xilinx_zynqmp_ilp32_zu3eg
+- aarch64/xilinx_zynqmp_lp64_qemu
+- aarch64/xilinx_zynqmp_lp64_zu3eg
 links: []
 name: RTEMS_EXCEPTION_MANAGER
 type: build
-- 
2.30.2


