[PATCH v1 3/5] cpukit/aarch64: Add Exception Manager support
Kinsey Moore
kinsey.moore at oarcorp.com
Mon Aug 23 23:50:43 UTC 2021
This adds the calls and support functions necessary to provide Exception
Manager support on AArch64.
---
.../cpu/aarch64/aarch64-exception-default.S | 50 +++++++++++++----
.../cpu/aarch64/aarch64-exception-default.c | 55 ++++++++++++++++++-
.../cpu/aarch64/aarch64-exception-interrupt.S | 18 ++++--
spec/build/cpukit/optexceptionmanager.yml | 4 ++
4 files changed, 110 insertions(+), 17 deletions(-)
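As an aid to review, here is an illustrative C rendering of the new
synchronous-exception flow that the assembly below implements. Every name
in this sketch is a stand-in invented for illustration (the real work is
done in assembly); it only mirrors the control flow: wrap the handler
call, copy the exception frame to the thread stack, then perform a thread
dispatch only if one became necessary.

#include <stdbool.h>

/* Stand-in frame type for the sketch; not the real CPU_Exception_frame. */
typedef struct { int placeholder; } Example_exception_frame;

/* Models _AArch64_Wrap_Dispatch: call the handler passed in x1 with the
 * frame in x0; a nonzero result means no thread dispatch is needed. */
static bool example_wrap_dispatch(
  void ( *handler )( Example_exception_frame * ),
  Example_exception_frame *frame
)
{
  handler( frame );
  return true; /* placeholder result */
}

static void example_handler( Example_exception_frame *frame )
{
  (void) frame; /* the handler resolves the exception source here */
}

int main( void )
{
  Example_exception_frame frame = { 0 };
  bool no_dispatch_needed = example_wrap_dispatch( example_handler, &frame );

  /* Models .move_exception_frame_and_switch_to_thread_stack: the frame
   * is copied to the thread stack so a dispatch can safely occur. */

  if ( !no_dispatch_needed ) {
    /* Models _AArch64_Perform_Thread_Dispatch. */
  }

  /* The assembly then restores the context (.pop_exception_context)
   * and returns via eret. */
  return 0;
}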
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index 2a4ddbcc61..0065cf9e87 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -44,6 +44,8 @@
#include <rtems/asm.h>
.extern _AArch64_Exception_default
+.extern _AArch64_Wrap_Dispatch
+.extern _AArch64_Perform_Thread_Dispatch
.globl bsp_start_vector_table_begin
.globl bsp_start_vector_table_end
@@ -203,22 +205,34 @@ curr_el_spx_sync_get_pc: /* The current PC is now in LR */
/* Store the vector */
str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
mov x0, sp
- blr x1
+/* x1 contains the branch target to be wrapped, x0 contains the argument to x1 */
+ bl _AArch64_Wrap_Dispatch
+/* Save x0 (the thread-dispatch check result) for the comparison below */
+ mov x22, x0
+/*
+ * It is now safe to assume that the source of the exception has been resolved.
+ * Copy the exception frame to the thread stack to be compatible with thread
+ * dispatch. This may arbitrarily clobber corruptible registers.
+ */
+ bl .move_exception_frame_and_switch_to_thread_stack
+/*
+ * Check thread dispatch necessary, ISR dispatch disable and thread dispatch
+ * disable level.
+ */
+ cmp x22, #0
+ bne .Lno_need_thread_dispatch
+ bl _AArch64_Perform_Thread_Dispatch
+.Lno_need_thread_dispatch:
/* bl to CEF restore routine (doesn't restore lr) */
bl .pop_exception_context
ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET] /* get lr from CEF */
/* drop space reserved for CEF and clear exclusive */
add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
- msr spsel, #1 /* switch to thread stack */
eret /* exception return */
nop
nop
nop
nop
- nop
- nop
- nop
- nop
/* Takes up the space of 2 instructions */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
.word _AArch64_Exception_default
@@ -527,12 +541,8 @@ twiddle:
ldp x24, x25, [sp, #0xc0]
ldp x26, x27, [sp, #0xd0]
ldp x28, x29, [sp, #0xe0]
-/* Pop sp and ELR */
- ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
-/* Restore thread SP */
- msr spsel, #1
- mov sp, x0
- msr spsel, #0
+/* Pop ELR; SP was already restored during the switch to the thread stack */
+ ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)]
/* Restore exception LR */
msr ELR_EL1, x1
ldp x0, x1, [sp, #0x00]
@@ -541,3 +551,19 @@ twiddle:
clrex
ret
+
+/* Assumes sp currently points to the exception frame on the exception stack and SPSel is 0 */
+.move_exception_frame_and_switch_to_thread_stack:
+ mov x1, sp /* Set x1 to the current exception frame */
+ msr spsel, #1 /* switch to thread stack */
+ ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET] /* Get thread SP from exception frame since it may have been updated */
+ mov sp, x0
+ sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* reserve space for CEF */
+ mov x0, sp /* Set x0 to the new exception frame */
+ mov x19, lr /* Save LR */
+ bl Exception_Manager_Copy_CPU_Exception_frame /* Copy exception frame to reserved thread stack space */
+ mov lr, x19 /* Restore LR */
+ msr spsel, #0 /* switch to exception stack */
+ add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE /* release space for CEF on exception stack */
+ msr spsel, #1 /* switch to thread stack */
+ ret
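To make the stack choreography above easier to follow, here is a small
standalone C model of the same steps. The frame size is a placeholder
(the real value is AARCH64_EXCEPTION_FRAME_SIZE from the port headers)
and the helper name is invented for this sketch.

#include <stdint.h>
#include <string.h>

#define EXAMPLE_FRAME_SIZE 0x100u /* placeholder frame size */

/* Reserve room on the thread stack (sub sp, sp, #SIZE), copy the frame
 * across (the Exception_Manager_Copy_CPU_Exception_frame call), and
 * continue with SP on the thread stack. */
static uint8_t *example_move_frame(
  uint8_t *thread_sp,            /* value loaded from the frame's SP slot */
  const uint8_t *exception_frame /* frame on the exception stack */
)
{
  uint8_t *new_frame = thread_sp - EXAMPLE_FRAME_SIZE;
  memcpy( new_frame, exception_frame, EXAMPLE_FRAME_SIZE );
  return new_frame;
}

int main( void )
{
  static uint8_t exception_stack_frame[ EXAMPLE_FRAME_SIZE ];
  static uint8_t thread_stack[ 4 * EXAMPLE_FRAME_SIZE ];
  uint8_t *top = thread_stack + sizeof( thread_stack );

  return example_move_frame( top, exception_stack_frame )
    == top - EXAMPLE_FRAME_SIZE ? 0 : 1;
}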
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.c b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
index 2ebb3dee9f..e51e9453e1 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.c
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
@@ -43,8 +43,61 @@
#include <rtems/score/cpu.h>
#include <rtems/fatal.h>
+#include <rtems/exception.h>
void _AArch64_Exception_default( CPU_Exception_frame *frame )
{
- rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
+ if ( rtems_exception_manage( frame ) == false ) {
+ rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
+ }
+}
+
+void Exception_Manager_Copy_CPU_Exception_frame(
+ CPU_Exception_frame *new_ef,
+ CPU_Exception_frame *old_ef
+)
+{
+ *new_ef = *old_ef;
+}
+
+#define ESR_EC_GET(reg) ( ( ( reg ) >> 26 ) & 0x3f )
+#define ESR_ISS_GET(reg) ( ( reg ) & 0x1ffffff )
+#define WnR ( 1 << 6 )
+
+Exception_Class Exception_Manager_Get_class( CPU_Exception_frame *ef )
+{
+ uint64_t EC = ESR_EC_GET( ef->register_syndrome );
+ uint64_t ISS = ESR_ISS_GET( ef->register_syndrome );
+
+ switch ( EC ) {
+ case 0x1: /* WFI */
+ case 0x7: /* SVE/SIMD/FP */
+ case 0xa: /* LD64B/ST64B* */
+ case 0x18: /* MSR/MRS/system instruction */
+ case 0x19: /* SVE */
+ return EXCEPTION_TRAPPED_INSTRUCTION;
+ case 0x15:
+ return EXCEPTION_SUPERVISOR;
+ case 0x21:
+ return EXCEPTION_INSTRUCTION_ABORT;
+ case 0x22:
+ return EXCEPTION_PC_ALIGNMENT;
+ case 0x25:
+ return ( ISS & WnR ) ? EXCEPTION_DATA_ABORT_WRITE : EXCEPTION_DATA_ABORT_READ;
+ case 0x26:
+ return EXCEPTION_SP_ALIGNMENT;
+ case 0x2c:
+ return EXCEPTION_FPU;
+ case 0x31:
+ return EXCEPTION_BREAKPOINT;
+ case 0x33:
+ return EXCEPTION_STEP;
+ case 0x35:
+ return EXCEPTION_WATCHPOINT;
+ case 0x3c:
+ return EXCEPTION_BREAK_INSTRUCTION;
+ /* AArch32-specific, from-EL0, etc. */
+ default:
+ return EXCEPTION_UNKNOWN;
+ }
}
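As a worked example of the decode above (standalone and illustrative): a
data abort taken from the current EL reports EC == 0x25 in ESR_EL1 bits
[31:26], and the WnR bit (ISS bit 6) distinguishes writes from reads, so
Exception_Manager_Get_class would classify the value constructed below
as EXCEPTION_DATA_ABORT_WRITE.

#include <assert.h>
#include <stdint.h>

#define ESR_EC_GET(reg) ( ( ( reg ) >> 26 ) & 0x3f )
#define ESR_ISS_GET(reg) ( ( reg ) & 0x1ffffff )
#define WnR ( 1 << 6 )

int main( void )
{
  /* Same-EL data abort with the write-not-read bit set */
  uint64_t esr = ( UINT64_C( 0x25 ) << 26 ) | WnR;

  assert( ESR_EC_GET( esr ) == 0x25 );         /* data abort, same EL */
  assert( ( ESR_ISS_GET( esr ) & WnR ) != 0 ); /* write, not read */
  return 0;
}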
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index cb0954a29b..942ad04866 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -45,6 +45,8 @@
.globl _AArch64_Exception_interrupt_no_nest
.globl _AArch64_Exception_interrupt_nest
+.globl _AArch64_Wrap_Dispatch
+.globl _AArch64_Perform_Thread_Dispatch
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
#define SELF_CPU_CONTROL_GET_REG w19
@@ -61,6 +63,14 @@
* hence the blind usage of x19, x20, and x21
*/
.AArch64_Interrupt_Handler:
+ adr x1, bsp_interrupt_dispatch
+ b _AArch64_Wrap_Dispatch
+
+/*
+ * x1 is the address of the dispatch function, x0 is a single argument that the
+ * function may or may not use
+ */
+_AArch64_Wrap_Dispatch:
/* Get per-CPU control of current processor */
GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG
@@ -75,8 +85,8 @@
/* Save LR */
mov x21, LR
-/* Call BSP dependent interrupt dispatcher */
- bl bsp_interrupt_dispatch
+/* Call dispatcher */
+ blr x1
/* Restore LR */
mov LR, x21
@@ -106,7 +116,7 @@
/* NOTE: This function does not follow the AArch64 procedure call specification
* because all relevant state is known to be saved in the interrupt context,
* hence the blind usage of x19, x20, and x21 */
-.AArch64_Perform_Thread_Dispatch:
+_AArch64_Perform_Thread_Dispatch:
/* Get per-CPU control of current processor */
GET_SELF_CPU_CONTROL SELF_CPU_CONTROL_GET_REG
@@ -309,7 +319,7 @@ Return to embedded exception vector code
*/
cmp x0, #0
bne .Lno_need_thread_dispatch
- bl .AArch64_Perform_Thread_Dispatch
+ bl _AArch64_Perform_Thread_Dispatch
.Lno_need_thread_dispatch:
/*
diff --git a/spec/build/cpukit/optexceptionmanager.yml b/spec/build/cpukit/optexceptionmanager.yml
index 2188e16cf1..4d4e09c86a 100644
--- a/spec/build/cpukit/optexceptionmanager.yml
+++ b/spec/build/cpukit/optexceptionmanager.yml
@@ -12,6 +12,10 @@ default-by-variant: []
description: |
Enable the Exception Manager
enabled-by:
+- aarch64/xilinx_zynqmp_ilp32_qemu
+- aarch64/xilinx_zynqmp_ilp32_zu3eg
+- aarch64/xilinx_zynqmp_lp64_qemu
+- aarch64/xilinx_zynqmp_lp64_zu3eg
links: []
name: RTEMS_EXCEPTION_MANAGER
type: build
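For completeness, assuming this build option surfaces as a preprocessor
define named after the option (its actions are not visible in this hunk,
so that is an assumption), code could guard on it as sketched here:

#include <stdio.h>

int main( void )
{
#ifdef RTEMS_EXCEPTION_MANAGER
  /* Assumed define; enabled for the zynqmp variants listed above. */
  puts( "Exception Manager enabled" );
#else
  puts( "Exception Manager disabled" );
#endif
  return 0;
}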
--
2.20.1