Blackfin interrupt handling patches
Allan Hessenflow
allanh-rtems2 at kallisti.com
Thu Aug 14 05:54:04 UTC 2008
Quite some time ago I was involved in a discussion on this list about
the Blackfin interrupt and context handling. The gist was that the
existing code has some problems; I believed I had a fix but had not had
time to test it adequately. I did provide it to another user at that
time but never heard back about any test results, so I assume he has not
had time either. Anyway, I've had another request for Blackfin
RTEMS info, and decided to go ahead and submit the changes. I still
haven't had time to test them as thoroughly as I'd like, but I'm certain
they work better than the existing code in CVS.
The attached patch is against the CVS head as of a little earlier this
evening.
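In case it helps review: the core change is that the interrupt handler no
longer tries to call _Thread_Dispatch directly. When the outermost
interrupt exit decides a dispatch is needed, it executes "raise 15", and a
new handler installed on interrupt 15 (_ISR15_Handler) saves the full
register set, calls _Thread_Dispatch, and returns with rti. Self-nesting
is enabled so that handler can nest, up to a depth of the number of tasks.
Very roughly, in C-like pseudocode (this is only an illustration, not code
from the patch; raise_15() stands in for the "raise 15" instruction, and
the RTEMS globals are declared loosely here just to make the sketch hang
together):

    #include <stdint.h>

    /* RTEMS globals, declared approximately for illustration only */
    extern volatile uint32_t _Thread_Dispatch_disable_level;
    extern volatile uint32_t _Context_Switch_necessary;
    extern volatile uint32_t _ISR_Signals_to_thread_executing;
    extern void _Thread_Dispatch(void);
    extern void raise_15(void);  /* stand-in for the "raise 15" instruction */

    /* tail of _ISR_Handler (IVG5..IVG14), after the user ISR has returned
       and the interrupt stack has been unwound */
    void isr_exit_sketch(void)
    {
      if (--_Thread_Dispatch_disable_level == 0 &&
          (_Context_Switch_necessary || _ISR_Signals_to_thread_executing)) {
        _ISR_Signals_to_thread_executing = 0;
        raise_15();  /* IVG15 becomes pending; it fires when this ISR does rti */
      }
    }

    /* _ISR15_Handler, installed on vector 15 by _CPU_Initialize */
    void isr15_sketch(void)
    {
      /* the real handler saves the full register set first */
      _Thread_Dispatch();
      /* ... then restores the registers and does rti back to the
         interrupted code */
    }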
allan
--
Allan N. Hessenflow allanh at kallisti.com
-------------- next part --------------
2008-08-13 Allan Hessenflow <allanh at kallisti.com>
* cpu.c, cpu_asm.S, irq.c, rtems/score/cpu.h,
rtems/score/cpu_asm.h: reworked interrupt handling to fix
context switching.
-------------- next part --------------
Index: cpukit/score/cpu/bfin/cpu.c
===================================================================
RCS file: /usr1/CVS/rtems/cpukit/score/cpu/bfin/cpu.c,v
retrieving revision 1.6
diff -c -3 -p -r1.6 cpu.c
*** cpukit/score/cpu/bfin/cpu.c 17 Dec 2007 16:12:37 -0000 1.6
--- cpukit/score/cpu/bfin/cpu.c 14 Aug 2008 05:28:58 -0000
***************
*** 30,35 ****
--- 30,42 ----
*/
+ extern void _ISR15_Handler(void);
+ extern void _CPU_Emulation_handler(void);
+ extern void _CPU_Reset_handler(void);
+ extern void _CPU_NMI_handler(void);
+ extern void _CPU_Exception_handler(void);
+ extern void _CPU_Unhandled_Interrupt_handler(void);
+
void _CPU_Initialize(
void (*thread_dispatch) /* ignored on this CPU */
)
*************** void _CPU_Initialize(
*** 45,51 ****
* dependent variable.
*/
! _CPU_Thread_dispatch_pointer = thread_dispatch;
/*
* If there is not an easy way to initialize the FP context
--- 52,58 ----
* dependent variable.
*/
! /*_CPU_Thread_dispatch_pointer = thread_dispatch;*/
/*
* If there is not an easy way to initialize the FP context
*************** void _CPU_Initialize(
*** 55,62 ****
--- 62,92 ----
*/
/* FP context initialization support goes here */
+
+
+
+ proc_ptr ignored;
+
+ #if 0
+ /* occasionally useful debug stuff */
+ int i;
+ _CPU_ISR_install_raw_handler(0, _CPU_Emulation_handler, &ignored);
+ _CPU_ISR_install_raw_handler(1, _CPU_Reset_handler, &ignored);
+ _CPU_ISR_install_raw_handler(2, _CPU_NMI_handler, &ignored);
+ _CPU_ISR_install_raw_handler(3, _CPU_Exception_handler, &ignored);
+ for (i = 5; i < 15; i++)
+ _CPU_ISR_install_raw_handler(i, _CPU_Unhandled_Interrupt_handler, &ignored);
+ #endif
+
+ /* install handler that will be used to call _Thread_Dispatch */
+ _CPU_ISR_install_raw_handler( 15, _ISR15_Handler, &ignored );
+ /* enable self nesting */
+ __asm__ __volatile__ ("syscfg = %0" : : "d" (0x00000004));
}
+
+
+
/*PAGE
*
* _CPU_ISR_Get_level
*************** uint32_t _CPU_ISR_Get_level( void )
*** 78,84 ****
_tmpimask = *((uint32_t*)IMASK);
! return _tmpimask;
}
/*PAGE
--- 108,114 ----
_tmpimask = *((uint32_t*)IMASK);
! return (_tmpimask & 0xffe0) ? 0 : 1;
}
/*PAGE
*************** void _CPU_ISR_install_vector(
*** 136,150 ****
proc_ptr *old_handler
)
{
! *old_handler = _ISR_Vector_table[ vector ];
! /*
! * If the interrupt vector table is a table of pointer to isr entry
! * points, then we need to install the appropriate RTEMS interrupt
! * handler for this vector number.
! */
!
! _CPU_ISR_install_raw_handler( vector, _ISR_Handler, old_handler );
/*
* We put the actual user ISR address in '_ISR_vector_table'. This will
--- 166,174 ----
proc_ptr *old_handler
)
{
! proc_ptr ignored;
! *old_handler = _ISR_Vector_table[ vector ];
/*
* We put the actual user ISR address in '_ISR_vector_table'. This will
*************** void _CPU_ISR_install_vector(
*** 152,159 ****
--- 176,194 ----
*/
_ISR_Vector_table[ vector ] = new_handler;
+
+ _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
}
+ #if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
+ void *_CPU_Thread_Idle_body(uint32_t ignored) {
+
+ while (1) {
+ __asm__ __volatile__("ssync; idle; ssync");
+ }
+ }
+ #endif
+
/*
* Copied from the arm port.
*/
*************** void _CPU_Context_Initialize(
*** 170,181 ****
stack_high = ((uint32_t )(stack_base) + size);
the_context->register_sp = stack_high;
- // gcc/config/bfin/bfin.h defines CPU_MINIMUM_STACK_FRAME_SIZE = 0 thus we do sp=fp
- // is this correct ?????
- the_context->register_fp = stack_high;
the_context->register_rets = (uint32_t) entry_point;
!
! //mask the interrupt level
}
--- 205,212 ----
stack_high = ((uint32_t )(stack_base) + size);
the_context->register_sp = stack_high;
the_context->register_rets = (uint32_t) entry_point;
! the_context->imask = new_level ? 0 : 0xffff;
}
Index: cpukit/score/cpu/bfin/cpu_asm.S
===================================================================
RCS file: /usr1/CVS/rtems/cpukit/score/cpu/bfin/cpu_asm.S,v
retrieving revision 1.4
diff -c -3 -p -r1.4 cpu_asm.S
*** cpukit/score/cpu/bfin/cpu_asm.S 10 Sep 2007 22:24:57 -0000 1.4
--- cpukit/score/cpu/bfin/cpu_asm.S 14 Aug 2008 05:28:58 -0000
***************
*** 4,12 ****
* in the Blackfin port of RTEMS. These algorithms must be implemented
* in assembly language
*
* Copyright (c) 2006 by Atos Automacao Industrial Ltda.
! * written by Alain Schaefer <alain.schaefer at easc.ch>
! * and Antonio Giovanini <antonio at atos.com.br>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
--- 4,17 ----
* in the Blackfin port of RTEMS. These algorithms must be implemented
* in assembly language
*
+ * Copyright (c) 2008 Kallisti Labs, Los Gatos, CA, USA
+ * written by Allan Hessenflow <allanh at kallisti.com>
+ *
+ * Based on earlier version:
+ *
* Copyright (c) 2006 by Atos Automacao Industrial Ltda.
! * written by Alain Schaefer <alain.schaefer at easc.ch>
! * and Antonio Giovanini <antonio at atos.com.br>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
***************
*** 14,21 ****
*
* $Id: cpu_asm.S,v 1.4 2007/09/10 22:24:57 joel Exp $
*/
!
!
#include <rtems/asm.h>
#include <rtems/score/cpu_asm.h>
#include <rtems/score/bfin.h>
--- 19,26 ----
*
* $Id: cpu_asm.S,v 1.4 2007/09/10 22:24:57 joel Exp $
*/
!
!
#include <rtems/asm.h>
#include <rtems/score/cpu_asm.h>
#include <rtems/score/bfin.h>
***************
*** 24,29 ****
--- 29,229 ----
#define LO(con32) ((con32) & 0xFFFF)
#define HI(con32) (((con32) >> 16) & 0xFFFF)
+
+ #if 0
+ /* some debug routines */
+ .globl __CPU_write_char;
+ __CPU_write_char:
+ p0.h = 0xffc0;
+ p0.l = 0x0400;
+ txWaitLoop:
+ r1 = w[p0 + 0x14];
+ cc = bittst(r1, 5);
+ if !cc jump txWaitLoop;
+ w[p0 + 0x00] = r0;
+ rts;
+
+ .globl __CPU_write_crlf;
+ __CPU_write_crlf:
+ r0 = '\r';
+ [--sp] = rets;
+ call __CPU_write_char;
+ rets = [sp++];
+ r0 = '\n';
+ jump __CPU_write_char;
+
+ __CPU_write_space:
+ r0 = ' ';
+ jump __CPU_write_char;
+
+ .globl __CPU_write_nybble;
+ __CPU_write_nybble:
+ r1 = 0x0f;
+ r0 = r0 & r1;
+ r0 += '0';
+ r1 = '9';
+ cc = r0 <= r1;
+ if cc jump __CPU_write_char;
+ r0 += 'a' - '0' - 10;
+ jump __CPU_write_char;
+
+ .globl __CPU_write_byte;
+ __CPU_write_byte:
+ [--sp] = r0;
+ [--sp] = rets;
+ r0 >>= 4;
+ call __CPU_write_nybble;
+ rets = [sp++];
+ r0 = [sp++];
+ jump __CPU_write_nybble;
+
+ __CPU_write_chawmp:
+ [--sp] = r0;
+ [--sp] = rets;
+ r0 >>= 8;
+ call __CPU_write_byte;
+ rets = [sp++];
+ r0 = [sp++];
+ jump __CPU_write_byte;
+
+ __CPU_write_gawble:
+ [--sp] = r0;
+ [--sp] = rets;
+ r0 >>= 16;
+ call __CPU_write_chawmp;
+ rets = [sp++];
+ r0 = [sp++];
+ jump __CPU_write_chawmp;
+
+ __CPU_dump_registers:
+ [--sp] = rets;
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = p0;
+ r0 = [sp + 8];
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = [sp + 4];
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = r2;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = r3;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = r4;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = r5;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = r6;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = r7;
+ call __CPU_write_gawble;
+ call __CPU_write_crlf;
+ r0 = [sp];
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = p1;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = p2;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = p3;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = p4;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = p5;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = fp;
+ call __CPU_write_gawble;
+ call __CPU_write_space;
+ r0 = sp;
+ r0 += 16;
+ call __CPU_write_gawble;
+ call __CPU_write_crlf;
+
+ p0 = [sp++];
+ r1 = [sp++];
+ r0 = [sp++];
+ rets = [sp++];
+ rts;
+
+ .globl __CPU_Exception_handler;
+ __CPU_Exception_handler:
+ usp = sp;
+ sp.h = 0xffb0;
+ sp.l = 0x1000;
+ [--sp] = (r7:0,p5:0);
+
+ r0 = 'x';
+ call __CPU_write_char;
+ jump hcf;
+
+
+ .globl __CPU_Emulation_handler;
+ __CPU_Emulation_handler:
+ usp = sp;
+ sp.h = 0xffb0;
+ sp.l = 0x1000;
+ [--sp] = (r7:0,p5:0);
+
+ r0 = 'e';
+ call __CPU_write_char;
+ jump hcf;
+
+ .globl __CPU_Reset_handler;
+ __CPU_Reset_handler:
+ usp = sp;
+ sp.h = 0xffb0;
+ sp.l = 0x1000;
+ [--sp] = (r7:0,p5:0);
+
+ r0 = 'r';
+ call __CPU_write_char;
+ jump hcf;
+
+ .globl __CPU_NMI_handler;
+ __CPU_NMI_handler:
+ usp = sp;
+ sp.h = 0xffb0;
+ sp.l = 0x1000;
+ [--sp] = (r7:0,p5:0);
+
+ r0 = 'n';
+ call __CPU_write_char;
+ jump hcf;
+
+ .globl __CPU_Unhandled_Interrupt_handler;
+ __CPU_Unhandled_Interrupt_handler:
+ usp = sp;
+ sp.h = 0xffb0;
+ sp.l = 0x1000;
+ [--sp] = (r7:0,p5:0);
+
+ call __CPU_write_crlf;
+ r0 = 'i';
+ call __CPU_write_char;
+ p0.h = HI(IPEND);
+ p0.l = LO(IPEND);
+ r0 = [p0];
+ call __CPU_write_chawmp;
+ jump hcf;
+
+ hcf:
+ idle;
+ jump hcf;
+
+ #endif
+
+
/* _CPU_Context_switch
*
* This routine performs a normal non-FP context switch.
***************
*** 31,217 ****
* bfin Specific Information:
*
* For now we simply save all registers.
! *
*/
! .globl __CPU_Context_switch
__CPU_Context_switch:
! /* Start saving context R0 = current, R1=heir */
! /*save P0 first*/
! [FP+0x8] = P0;
! P0 = R0;
! [ P0 + R0_OFFSET ] = R0;
! [ P0 + R1_OFFSET] = R1;
! [ P0 + R2_OFFSET] = R2;
! [ P0 + R4_OFFSET] = R4;
! [ P0 + R3_OFFSET] = R3;
! [ P0 + R5_OFFSET] = R5;
! [ P0 + R6_OFFSET] = R6;
! [ P0 + R7_OFFSET] = R7;
! [ P0 + P1_OFFSET] = P1;
! /* save the original value of P0 */
! P1 = [FP+0x8];
! [ P0 + P0_OFFSET] = P1;
! [ P0 + P2_OFFSET] = P2;
! [ P0 + P3_OFFSET] = P3;
! [ P0 + P4_OFFSET] = P4;
! [ P0 + P5_OFFSET] = P5;
! [ P0 + FP_OFFSET] = FP;
! [ P0 + SP_OFFSET] = SP;
!
! /* save ASTAT */
! R0 = ASTAT;
! [P0 + ASTAT_OFFSET] = R0;
!
! /* save Loop Counters */
! R0 = LC0;
! [P0 + LC0_OFFSET] = R0;
! R0 = LC1;
! [P0 + LC1_OFFSET] = R0;
!
! /* save Accumulators */
! R0 = A0.W;
! [P0 + A0W_OFFSET] = R0;
! R0 = A0.X;
! [P0 + A0X_OFFSET] = R0;
! R0 = A1.W;
! [P0 + A1W_OFFSET] = R0;
! R0 = A1.X;
! [P0 + A1X_OFFSET] = R0;
!
! /* save Index Registers */
! R0 = I0;
! [P0 + I0_OFFSET] = R0;
! R0 = I1;
! [P0 + I1_OFFSET] = R0;
! R0 = I2;
! [P0 + I2_OFFSET] = R0;
! R0 = I3;
! [P0 + I3_OFFSET] = R0;
!
! /* save Modifier Registers */
! R0 = M0;
! [P0 + M0_OFFSET] = R0;
! R0 = M1;
! [P0 + M1_OFFSET] = R0;
! R0 = M2;
! [P0 + M2_OFFSET] = R0;
! R0 = M3;
! [P0 + M3_OFFSET] = R0;
!
! /* save Length Registers */
! R0 = L0;
! [P0 + L0_OFFSET] = R0;
! R0 = L1;
! [P0 + L1_OFFSET] = R0;
! R0 = L2;
! [P0 + L2_OFFSET] = R0;
! R0 = L3;
! [P0 + L3_OFFSET] = R0;
!
! /* Base Registers */
! R0 = B0;
! [P0 + B0_OFFSET] = R0;
! R0 = B1;
! [P0 + B1_OFFSET] = R0;
! R0 = B2;
! [P0 + B2_OFFSET] = R0;
! R0 = B3;
! [P0 + B3_OFFSET] = R0;
!
! /* save RETS */
! R0 = RETS;
! [ P0 + RETS_OFFSET] = R0;
restore:
! P0 = R1;
! R1 = [P0 + R1_OFFSET];
! R2 = [P0 + R2_OFFSET];
! R3 = [P0 + R3_OFFSET];
! R4 = [P0 + R4_OFFSET];
! R5 = [P0 + R5_OFFSET];
! R6 = [P0 + R6_OFFSET];
! R7 = [P0 + R7_OFFSET];
!
! P2 = [P0 + P2_OFFSET];
! P3 = [P0 + P3_OFFSET];
! P4 = [P0 + P4_OFFSET];
! P5 = [P0 + P5_OFFSET];
!
! /* might have to be placed more to the end */
! FP = [P0 + FP_OFFSET];
! SP = [P0 + SP_OFFSET];
!
! /* save ASTAT */
! R0 = [P0 + ASTAT_OFFSET];
! ASTAT = R0;
!
! /* save Loop Counters */
! R0 = [P0 + LC0_OFFSET];
! LC0 = R0;
! R0 = [P0 + LC1_OFFSET];
! LC1 = R0;
!
! /* save Accumulators */
! R0 = [P0 + A0W_OFFSET];
! A0.W = R0;
! R0 = [P0 + A0X_OFFSET];
! A0.X = R0;
! R0 = [P0 + A1W_OFFSET];
! A1.W = R0;
! R0 = [P0 + A1X_OFFSET];
! A1.X = R0;
!
! /* save Index Registers */
! R0 = [P0 + I0_OFFSET];
! I0 = R0;
! R0 = [P0 + I1_OFFSET];
! I1 = R0;
! R0 = [P0 + I2_OFFSET];
! I2 = R0;
! R0 = [P0 + I3_OFFSET];
! I3 = R0;
!
! /* save Modifier Registers */
! R0 = [P0 + M0_OFFSET];
! M0 = R0;
! R0 = [P0 + M1_OFFSET];
! M1 = R0;
! R0 = [P0 + M2_OFFSET];
! M2 = R0;
! R0 = [P0 + M3_OFFSET];
! M3 = R0;
!
! /* save Length Registers */
! R0 = [P0 + L0_OFFSET];
! L0 = R0;
! R0 = [P0 + L1_OFFSET];
! L1 = R0;
! R0 = [P0 + L2_OFFSET];
! L2 = R0;
! R0 = [P0 + L3_OFFSET];
! L3 = R0;
!
! /* Base Registers */
! R0 = [P0 + B0_OFFSET];
! B0 = R0;
! R0 = [P0 + B1_OFFSET];
! B1 = R0;
! R0 = [P0 + B2_OFFSET];
! B2 = R0;
! R0 = [P0 + B3_OFFSET];
! B3 = R0;
!
! /* restore RETS */
! P1 = [P0 + RETS_OFFSET];
! RETS = P1;
!
! /* now restore the P1 + P0 */
! P1 = [P0 + R1_OFFSET];
! P0 = [P0 + P0_OFFSET];
!
! rts;
!
/*
* _CPU_Context_restore
--- 231,314 ----
* bfin Specific Information:
*
* For now we simply save all registers.
! *
*/
! /* make sure this sequence stays in sync with the definition for
! Context_Control in rtems/score/cpu.h */
! .globl __CPU_Context_switch
__CPU_Context_switch:
! /* Start saving context R0 = current, R1=heir */
! p0 = r0;
! [p0++] = r4;
! [p0++] = r5;
! [p0++] = r6;
! [p0++] = r7;
!
! /* save pointer registers */
! [p0++] = p3;
! [p0++] = p4;
! [p0++] = p5;
! [p0++] = fp;
! [p0++] = sp;
!
! /* save length registers */
! r0 = l0;
! [p0++] = r0;
! r0 = l1;
! [p0++] = r0;
! r0 = l2;
! [p0++] = r0;
! r0 = l3;
! [p0++] = r0;
!
! /* save rets */
! r0 = rets;
! [p0++] = r0;
!
! /* save IMASK */
! p1.h = HI(IMASK);
! p1.l = LO(IMASK);
! r0 = [p1];
! [p0++] = r0;
+ p0 = r1;
restore:
! /* restore data registers */
! r4 = [p0++];
! r5 = [p0++];
! r6 = [p0++];
! r7 = [p0++];
!
! /* restore pointer registers */
! p3 = [p0++];
! p4 = [p0++];
! p5 = [p0++];
! fp = [p0++];
! sp = [p0++];
!
! /* restore length registers */
! r0 = [p0++];
! l0 = r0;
! r0 = [p0++];
! l1 = r0;
! r0 = [p0++];
! l2 = r0;
! r0 = [p0++];
! l3 = r0;
!
! /* restore rets */
! r0 = [p0++];
! rets = r0;
!
! /* restore IMASK */
! r0 = [p0++];
! p1.h = HI(IMASK);
! p1.l = LO(IMASK);
! [p1] = r0;
!
! rts;
!
/*
* _CPU_Context_restore
*************** restore:
*** 226,419 ****
* none
*
*/
! .globl __CPU_Context_restore
__CPU_Context_restore:
! jump restore;
!
!
!
! .globl __ISR_Thread_Dispatch
! __ISR_Thread_Dispatch:
!
! .extern __Thread_Dispatch
! R0.l = __Thread_Dispatch;
! R0.h = __Thread_Dispatch;
!
! /* Puts the address of th Thread_Dispatch function on Stack
! * Where it will be restored to the RTI register
! */
! P0 = [FP];
! /* save the old reti */
! R1 = [P0+0xc];
! [P0+0xc] = R0;
! /*
! * Overwriting the RETS Register is save because Thread_Dispatch is
! * disabled when we are between call/link or unlink/rts
! */
! [P0+0x8] = R1;
!
! /* save old rets */
!
! rts;
! .globl __ISR_Handler
__ISR_Handler:
! /* First of all check the Stackpointer and */
! /* switch to Scratchpad if necessary */
!
! /* save P0 and R0 in the scratchpad */
! USP = P0;
!
! /* load base adress of scratchpad */
! P0.H = HI(SCRATCH);
! P0.L = LO(SCRATCH);
!
! [--SP] = ASTAT; /* save cc flag*/
! /* if SP is already inside the SCRATCHPAD */
! CC=SP<P0 (iu)
! if !CC jump continue;
!
! /* set PO to top of scratchpad */
! P0.h=HI(SCRATCH_TOP);
! P0.l=LO(SCRATCH_TOP);
! /*save the old SP*/
! [P0] = SP;
! /*P0 += -4;*/
! /*set the new Stackpointer*/
! SP = P0;
! /*restore the old PO*/
!
! /* The Stackpointer is now setup as we want */
! continue:
! /* restore P0 and save some context */
! P0 = USP;
! /* save some state on the isr stack (scratchpad), this enables interrupt nesting */
! [--SP] = RETI;
! [--SP] = RETS;
! [--SP] = ASTAT;
! [--SP] = FP;
! FP = SP;
! [--SP] = (R7:0, P5:0) ;
!
!
! /* Context is saved, now check which Instruction we were executing
! * If we were between a call and link or between a unlink and rts
! * we have to disable Thread_Dispatch because correct restore of context after
! * Thread_Dispatch would not be possible. */
!
! P0 = RETI;
! R0 = P0;
! R0.L = 0x0000;
! R1.H = 0xffa0;
! R1.L = 0x0000;
! CC = R0 == R1;
! if CC jump disablethreaddispatch;
!
! R0 = W[P0](Z);
!
! /* shift 16 bits to the right (select the high nibble ) */
! /*R0 >>= 16;*/
!
! R3 = 0;
! /* Check if RETI is a LINK instruction */
! R1.h = HI(0x0000);
! R1.l = LO(0xE800);
! CC=R0==R1;
! if cc jump disablethreaddispatch;
!
! /* Check if RETI is a RTS instruction */
! R1.h = HI(0x0000);
! R1.l = LO(0x0010);
! CC=R0==R1;
! if cc jump disablethreaddispatch;
!
! jump afterthreaddispatch;
!
! disablethreaddispatch:
! /* _Thread_Dispatch_disable_level++ */
! .extern _Thread_Dispatch_disable_level
! P0.H = __Thread_Dispatch_disable_level;
! P0.L = __Thread_Dispatch_disable_level;
! R0 = [P0];
! R0 += 1;
! [P0] = R0;
! R3 = 1;
!
! afterthreaddispatch:
! /* Put R3 on the stack */
! [--SP] = R3;
!
! /* Obtain a bitlist of the pending interrupts. */
! P0.H = HI(IPEND);
! P0.L = LO(IPEND);
! R1 = [P0];
!
! /*
! * Search through the bit list stored in R0 to find the first enabled
! * bit. The offset of this bit is the index of the interrupt that is
! * to be handled.
! */
! R0 = -1;
! intloop:
! R0 += 1;
! R1 = ROT R1 by -1;
! if !cc jump intloop;
!
!
! /* pass SP as parameter to the C function */
! R1 = SP
!
! /* pass values by register as well as by stack */
! /* to comply with the c calling conventions */
! [--SP] = R0;
! [--SP] = R1;
!
! .extern _ISR_Handler2
! call _ISR_Handler2
!
! /* inc 2 to compensate the passing of arguments */
! R3 = [SP++];
! R3 = [SP++];
! /* check if _Thread_Dispatch_disable_level has been incremented */
! R3 = [SP++]
! CC=R3==0
! if cc jump dont_decrement;
! .extern _Thread_Dispatch_disable_level
! P0.H = __Thread_Dispatch_disable_level;
! P0.L = __Thread_Dispatch_disable_level;
! R0 = [P0];
! R0 += -1;
! [P0] = R0;
!
! dont_decrement:
!
! (R7:0, P5:0) = [SP++];
! FP = [SP++];
! ASTAT = [SP++];
! RETS = [SP++];
! RETI = [SP++];
! /* Interrupts are now disabled again */
!
! /*should restore the old stack !!!*/
! /*if sp now points to SCRATCH_TOP */
!
! /* load base adress of scratchpad */
! USP = P0;
! P0.H = HI(SCRATCH_TOP);
! P0.L = LO(SCRATCH_TOP);
!
! CC=SP==P0
! if !cc jump restoreP0
! /* restore the stack */
! SP=[P0];
!
! restoreP0:
! P0 = USP;
! ASTAT = [SP++]; /* restore cc flag */
!
! /*now we should be on the old "user-stack" again */
!
! /* return from interrupt, will jump to adress stored in RETI */
! RTI;
--- 323,598 ----
* none
*
*/
! .globl __CPU_Context_restore
__CPU_Context_restore:
! p0 = r0;
! jump restore;
! .globl __ISR_Handler
! .extern __CPU_Interrupt_stack_high;
! .extern __ISR_Nest_level
! .extern __Thread_Dispatch_disable_level
! .extern __Context_Switch_necessary
! .extern __ISR_Signals_to_thread_executing
__ISR_Handler:
! /* all interrupts are disabled at this point */
! /* the following few items are pushed onto the task stack for at
! most one interrupt; nested interrupts will be using the interrupt
! stack for everything. */
! [--sp] = astat;
! [--sp] = p1;
! [--sp] = p0;
! [--sp] = r1;
! [--sp] = r0;
! p0.h = __ISR_Nest_level;
! p0.l = __ISR_Nest_level;
! r0 = [p0];
! r0 += 1;
! [p0] = r0;
! cc = r0 <= 1 (iu);
! if !cc jump noStackSwitch;
! /* setup interrupt stack */
! r0 = sp;
! p0.h = __CPU_Interrupt_stack_high;
! p0.l = __CPU_Interrupt_stack_high;
! sp = [p0];
! [--sp] = r0;
! noStackSwitch:
! /* disable thread dispatch */
! p0.h = __Thread_Dispatch_disable_level;
! p0.l = __Thread_Dispatch_disable_level;
! r0 = [p0];
! r0 += 1;
! [p0] = r0;
!
! [--sp] = reti; /* interrupts are now enabled */
!
! /* figure out what vector we are */
! p0.h = HI(IPEND);
! p0.l = LO(IPEND);
! r1 = [p0];
! /* we should only get here for events that require RTI to return */
! r1 = r1 >> 5;
! r0 = 4;
! /* at least one bit must be set, so this loop will exit */
! vectorIDLoop:
! r0 += 1;
! r1 = rot r1 by -1;
! if !cc jump vectorIDLoop;
!
! [--sp] = r2;
! p0.h = __ISR_Vector_table;
! p0.l = __ISR_Vector_table;
! r2 = [p0];
! r1 = r0 << 2;
! r1 = r1 + r2;
! p0 = r1;
! p0 = [p0];
! cc = p0 == 0;
! if cc jump noHandler;
!
! /* r2, r0, r1, p0, p1, astat are already saved */
! [--sp] = a1.x;
! [--sp] = a1.w;
! [--sp] = a0.x;
! [--sp] = a0.w;
! [--sp] = r3;
! [--sp] = p3;
! [--sp] = p2;
! [--sp] = lt1;
! [--sp] = lt0;
! [--sp] = lc1;
! [--sp] = lc0;
! [--sp] = lb1;
! [--sp] = lb0;
! [--sp] = i3;
! [--sp] = i2;
! [--sp] = i1;
! [--sp] = i0;
! [--sp] = m3;
! [--sp] = m2;
! [--sp] = m1;
! [--sp] = m0;
! [--sp] = l3;
! [--sp] = l2;
! [--sp] = l1;
! [--sp] = l0;
! [--sp] = b3;
! [--sp] = b2;
! [--sp] = b1;
! [--sp] = b0;
! [--sp] = rets;
! r1 = fp; /* is this really what should be passed here? */
! /* call user isr; r0 = vector number, r1 = frame pointer */
! sp += -12; /* bizarre abi... */
! call (p0);
! sp += 12;
! rets = [sp++];
! b0 = [sp++];
! b1 = [sp++];
! b2 = [sp++];
! b3 = [sp++];
! l0 = [sp++];
! l1 = [sp++];
! l2 = [sp++];
! l3 = [sp++];
! m0 = [sp++];
! m1 = [sp++];
! m2 = [sp++];
! m3 = [sp++];
! i0 = [sp++];
! i1 = [sp++];
! i2 = [sp++];
! i3 = [sp++];
! lb0 = [sp++];
! lb1 = [sp++];
! lc0 = [sp++];
! lc1 = [sp++];
! lt0 = [sp++];
! lt1 = [sp++];
! p2 = [sp++];
! p3 = [sp++];
! r3 = [sp++];
! a0.w = [sp++];
! a0.x = [sp++];
! a1.w = [sp++];
! a1.x = [sp++];
!
! noHandler:
! r2 = [sp++];
! /* this disables interrupts again */
! reti = [sp++];
!
! p0.h = __ISR_Nest_level;
! p0.l = __ISR_Nest_level;
! r0 = [p0];
! r0 += -1;
! [p0] = r0;
! cc = r0 == 0;
! if !cc jump noStackRestore;
! sp = [sp];
! noStackRestore:
!
! /* check this stuff to ensure context_switch_necessary and
! isr_signals_to_thread_executing are being handled appropriately. */
! p0.h = __Thread_Dispatch_disable_level;
! p0.l = __Thread_Dispatch_disable_level;
! r0 = [p0];
! r0 += -1;
! [p0] = r0;
! cc = r0 == 0;
! if !cc jump noDispatch
!
! /* do thread dispatch if necessary */
! p0.h = __Context_Switch_necessary;
! p0.l = __Context_Switch_necessary;
! r0 = [p0];
! cc = r0 == 0;
! p0.h = __ISR_Signals_to_thread_executing;
! p0.l = __ISR_Signals_to_thread_executing;
! if !cc jump doDispatch
! r0 = [p0];
! cc = r0 == 0;
! if cc jump noDispatch
! doDispatch:
! r0 = 0;
! [p0] = r0;
! raise 15;
! noDispatch:
! r0 = [sp++];
! r1 = [sp++];
! p0 = [sp++];
! p1 = [sp++];
! astat = [sp++];
! rti
!
!
! /* the approach here is for the main interrupt handler, when a dispatch is
! wanted, to do a "raise 15". when the main interrupt handler does its
! "rti", the "raise 15" takes effect and we end up here. we can now
! safely call _Thread_Dispatch, and do an "rti" to get back to the
! original interrupted function. this does require self-nesting to be
! enabled; the maximum nest depth is the number of tasks. */
! .global __ISR15_Handler
! .extern __Thread_Dispatch
! __ISR15_Handler:
! [--sp] = reti;
! [--sp] = rets;
! [--sp] = astat;
! [--sp] = a1.x;
! [--sp] = a1.w;
! [--sp] = a0.x;
! [--sp] = a0.w;
! [--sp] = r3;
! [--sp] = r2;
! [--sp] = r1;
! [--sp] = r0;
! [--sp] = p3;
! [--sp] = p2;
! [--sp] = p1;
! [--sp] = p0;
! [--sp] = lt1;
! [--sp] = lt0;
! [--sp] = lc1;
! [--sp] = lc0;
! [--sp] = lb1;
! [--sp] = lb0;
! [--sp] = i3;
! [--sp] = i2;
! [--sp] = i1;
! [--sp] = i0;
! [--sp] = m3;
! [--sp] = m2;
! [--sp] = m1;
! [--sp] = m0;
! [--sp] = l3;
! [--sp] = l2;
! [--sp] = l1;
! [--sp] = l0;
! [--sp] = b3;
! [--sp] = b2;
! [--sp] = b1;
! [--sp] = b0;
! sp += -12; /* bizarre abi... */
! call __Thread_Dispatch;
! sp += 12;
! b0 = [sp++];
! b1 = [sp++];
! b2 = [sp++];
! b3 = [sp++];
! l0 = [sp++];
! l1 = [sp++];
! l2 = [sp++];
! l3 = [sp++];
! m0 = [sp++];
! m1 = [sp++];
! m2 = [sp++];
! m3 = [sp++];
! i0 = [sp++];
! i1 = [sp++];
! i2 = [sp++];
! i3 = [sp++];
! lb0 = [sp++];
! lb1 = [sp++];
! lc0 = [sp++];
! lc1 = [sp++];
! lt0 = [sp++];
! lt1 = [sp++];
! p0 = [sp++];
! p1 = [sp++];
! p2 = [sp++];
! p3 = [sp++];
! r0 = [sp++];
! r1 = [sp++];
! r2 = [sp++];
! r3 = [sp++];
! a0.w = [sp++];
! a0.x = [sp++];
! a1.w = [sp++];
! a1.x = [sp++];
! astat = [sp++];
! rets = [sp++];
! reti = [sp++];
! rti;
Index: cpukit/score/cpu/bfin/irq.c
===================================================================
RCS file: /usr1/CVS/rtems/cpukit/score/cpu/bfin/irq.c,v
retrieving revision 1.3
diff -c -3 -p -r1.3 irq.c
*** cpukit/score/cpu/bfin/irq.c 17 Dec 2007 16:12:37 -0000 1.3
--- cpukit/score/cpu/bfin/irq.c 14 Aug 2008 05:28:58 -0000
***************
*** 11,16 ****
--- 11,18 ----
* $Id: irq.c,v 1.3 2007/12/17 16:12:37 joel Exp $
*/
+
+ #if 0 /* this file no longer used */
#include <rtems/system.h>
#include <rtems/score/cpu.h>
*************** uint32_t SIC_IAR_Value ( uint8_t Vector
*** 104,106 ****
--- 106,111 ----
return 0x88888888;
}
}
+
+ #endif /* 0 */
+
Index: cpukit/score/cpu/bfin/rtems/score/cpu.h
===================================================================
RCS file: /usr1/CVS/rtems/cpukit/score/cpu/bfin/rtems/score/cpu.h,v
retrieving revision 1.12
diff -c -3 -p -r1.12 cpu.h
*** cpukit/score/cpu/bfin/rtems/score/cpu.h 31 Jul 2008 14:55:34 -0000 1.12
--- cpukit/score/cpu/bfin/rtems/score/cpu.h 14 Aug 2008 05:28:59 -0000
*************** extern "C" {
*** 105,111 ****
*
* XXX document implementation including references if appropriate
*/
! #define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
/*
* Does the CPU follow the simple vectored interrupt model?
--- 105,124 ----
*
* XXX document implementation including references if appropriate
*/
! #define CPU_HAS_SOFTWARE_INTERRUPT_STACK TRUE
!
! /*
! * Does the CPU follow the simple vectored interrupt model?
! *
! * If TRUE, then RTEMS allocates the vector table it internally manages.
! * If FALSE, then the BSP is assumed to allocate and manage the vector
! * table
! *
! * BFIN Specific Information:
! *
! * XXX document implementation including references if appropriate
! */
! #define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE
/*
* Does the CPU follow the simple vectored interrupt model?
*************** extern "C" {
*** 152,158 ****
*
* XXX document implementation including references if appropriate
*/
! #define CPU_ALLOCATE_INTERRUPT_STACK FALSE
/**
* Does the RTEMS invoke the user's ISR with the vector number and
--- 165,171 ----
*
* XXX document implementation including references if appropriate
*/
! #define CPU_ALLOCATE_INTERRUPT_STACK TRUE
/**
* Does the RTEMS invoke the user's ISR with the vector number and
*************** extern "C" {
*** 305,311 ****
*
* XXX document implementation including references if appropriate
*/
! #define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
/**
* Does the stack grow up (toward higher addresses) or down
--- 318,324 ----
*
* XXX document implementation including references if appropriate
*/
! #define CPU_PROVIDES_IDLE_THREAD_BODY TRUE
/**
* Does the stack grow up (toward higher addresses) or down
*************** extern "C" {
*** 453,541 ****
* that must be saved during a voluntary context switch from one thread
* to another.
*/
typedef struct {
- /* we are saving all registers, maybe we should not */
-
- uint32_t register_r0;
- uint32_t register_r1;
- uint32_t register_r2;
- uint32_t register_r3;
uint32_t register_r4;
uint32_t register_r5;
uint32_t register_r6;
uint32_t register_r7;
! uint32_t register_p0;
! uint32_t register_p1;
! uint32_t register_p2;
uint32_t register_p3;
uint32_t register_p4;
uint32_t register_p5;
uint32_t register_fp;
uint32_t register_sp;
- uint32_t register_i0;
- uint32_t register_i1;
- uint32_t register_i2;
- uint32_t register_i3;
-
- uint32_t register_m0;
- uint32_t register_m1;
- uint32_t register_m2;
- uint32_t register_m3;
-
- uint32_t register_b0;
- uint32_t register_b1;
- uint32_t register_b2;
- uint32_t register_b3;
-
uint32_t register_l0;
uint32_t register_l1;
uint32_t register_l2;
uint32_t register_l3;
- uint32_t register_a0dotx;
- uint32_t register_a0dotw;
- uint32_t register_a1dotx;
- uint32_t register_a1dotw;
- uint32_t register_astat;
uint32_t register_rets;
! uint32_t register_lc0;
! uint32_t register_lt0;
! uint32_t register_lb0;
! uint32_t register_lc1;
! uint32_t register_lt1;
! uint32_t register_lb1;
!
! /*BFIN_CYCLES_REGNUM,
! BFIN_CYCLES2_REGNUM, */
!
! uint32_t register_usp;
! uint32_t register_seqstat;
! uint32_t register_syscfg;
! uint32_t register_reti;
! uint32_t register_retx;
! uint32_t register_retn;
! uint32_t register_rete;
!
! uint32_t register_pc;
!
! /*
! Pseudo Registers
! BFIN_PC_REGNUM,
! BFIN_CC_REGNUM,
! BFIN_EXTRA1, Address of .text section.
! BFIN_EXTRA2, Address of .data section.
! BFIN_EXTRA3, Address of .bss section.
!
! BFIN_FDPIC_EXEC_REGNUM,
! BFIN_FDPIC_INTERP_REGNUM,
!
! MMRs
! BFIN_IPEND_REGNUM,
!
! LAST ENTRY SHOULD NOT BE CHANGED.
! BFIN_NUM_REGS The number of all registers.
! */
} Context_Control;
#define _CPU_Context_Get_SP( _context ) \
--- 466,494 ----
* that must be saved during a voluntary context switch from one thread
* to another.
*/
+
+ /* make sure this stays in sync with the assembly function
+ __CPU_Context_switch in cpu_asm.S */
typedef struct {
uint32_t register_r4;
uint32_t register_r5;
uint32_t register_r6;
uint32_t register_r7;
!
uint32_t register_p3;
uint32_t register_p4;
uint32_t register_p5;
uint32_t register_fp;
uint32_t register_sp;
uint32_t register_l0;
uint32_t register_l1;
uint32_t register_l2;
uint32_t register_l3;
uint32_t register_rets;
!
! uint32_t imask;
} Context_Control;
#define _CPU_Context_Get_SP( _context ) \
*************** SCORE_EXTERN void *_CPU_In
*** 624,630 ****
*
* XXX document implementation including references if appropriate
*/
! SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();
/*
* Nothing prevents the porter from declaring more CPU specific variables.
--- 577,583 ----
*
* XXX document implementation including references if appropriate
*/
! /* SCORE_EXTERN void (*_CPU_Thread_dispatch_pointer)();*/
/*
* Nothing prevents the porter from declaring more CPU specific variables.
*************** SCORE_EXTERN void (*_CPU_Threa
*** 790,798 ****
*/
#define _CPU_ISR_Disable( _level ) \
{ \
! asm volatile ("cli %0 \n" \
! : "=d" (_level) ); \
! \
}
--- 743,749 ----
*/
#define _CPU_ISR_Disable( _level ) \
{ \
! asm volatile ("cli %0 \n" : "=d" (_level) ); \
}
*************** SCORE_EXTERN void (*_CPU_Threa
*** 808,817 ****
*
* XXX document implementation including references if appropriate
*/
! #define _CPU_ISR_Enable( _level ) \
! { \
! asm volatile ("STI %0 \n" \
! : : "d" (_level) ); \
}
/**
--- 759,766 ----
*
* XXX document implementation including references if appropriate
*/
! #define _CPU_ISR_Enable( _level ) { \
! __asm__ __volatile__ ("sti %0 \n" : : "d" (_level) ); \
}
/**
*************** SCORE_EXTERN void (*_CPU_Threa
*** 827,839 ****
*
* XXX document implementation including references if appropriate
*/
! #define _CPU_ISR_Flash( _level )
! /* { \
! asm volatile ("cli %0;\n" \
! "ssync; \n" \
! "sti %1; \n" \
! : "=r" (_level) : "0"(_level) ); \
! }*/
/**
* @ingroup CPUInterrupt
--- 776,785 ----
*
* XXX document implementation including references if appropriate
*/
! #define _CPU_ISR_Flash( _level ) { \
! __asm__ __volatile__ ("sti %0; ssync; sti %1" \
! : : "d"(0xffff), "d"(_level)); \
! }
/**
* @ingroup CPUInterrupt
*************** SCORE_EXTERN void (*_CPU_Threa
*** 854,862 ****
*/
#define _CPU_ISR_Set_level( _new_level ) \
{ \
! if ( _new_level ) asm volatile ( "cli R0;" : : : "R0" ); \
! else asm volatile ( "R0.l = 0xFFFF;\n"\
! "sti R0;" : : : "R0" ); \
}
--- 800,806 ----
*/
#define _CPU_ISR_Set_level( _new_level ) \
{ \
! __asm__ __volatile__ ( "sti %0" : : "d"(_new_level ? 0 : 0xffff) ); \
}
*************** void _CPU_Context_Initialize(
*** 1001,1008 ****
asm volatile ( "cli R1; \
R1 = %0; \
_halt: \
jump _halt;"\
! : "=r" (_error) ); \
}
/* end of Fatal Error manager macros */
--- 945,953 ----
asm volatile ( "cli R1; \
R1 = %0; \
_halt: \
+ idle; \
jump _halt;"\
! : : "r" (_error) ); \
}
/* end of Fatal Error manager macros */
Index: cpukit/score/cpu/bfin/rtems/score/cpu_asm.h
===================================================================
RCS file: /usr1/CVS/rtems/cpukit/score/cpu/bfin/rtems/score/cpu_asm.h,v
retrieving revision 1.2
diff -c -3 -p -r1.2 cpu_asm.h
*** cpukit/score/cpu/bfin/rtems/score/cpu_asm.h 17 Dec 2007 16:12:37 -0000 1.2
--- cpukit/score/cpu/bfin/rtems/score/cpu_asm.h 14 Aug 2008 05:28:59 -0000
***************
*** 20,88 ****
#ifndef _RTEMS_SCORE_CPU_ASM_H
#define _RTEMS_SCORE_CPU_ASM_H
- /* offsets for the registers in the thread context */
- #define R0_OFFSET 0
- #define R1_OFFSET 4
- #define R2_OFFSET 8
- #define R3_OFFSET 12
- #define R4_OFFSET 16
- #define R5_OFFSET 20
- #define R6_OFFSET 24
- #define R7_OFFSET 28
- #define P0_OFFSET 32
- #define P1_OFFSET 36
- #define P2_OFFSET 40
- #define P3_OFFSET 44
- #define P4_OFFSET 48
- #define P5_OFFSET 52
- #define FP_OFFSET 56
- #define SP_OFFSET 60
-
- #define I0_OFFSET 64
- #define I1_OFFSET 68
- #define I2_OFFSET 72
- #define I3_OFFSET 76
-
- #define M0_OFFSET 80
- #define M1_OFFSET 84
- #define M2_OFFSET 88
- #define M3_OFFSET 92
-
- #define B0_OFFSET 96
- #define B1_OFFSET 100
- #define B2_OFFSET 104
- #define B3_OFFSET 108
-
- #define L0_OFFSET 112
- #define L1_OFFSET 116
- #define L2_OFFSET 120
- #define L3_OFFSET 124
-
- #define A0X_OFFSET 128
- #define A0W_OFFSET 132
- #define A1X_OFFSET 136
- #define A1W_OFFSET 140
-
- #define ASTAT_OFFSET 144
- #define RETS_OFFSET 148
- #define LC0_OFFSET 152
- #define LT0_OFFSET 156
-
- #define LB0_OFFSET 160
- #define LC1_OFFSET 164
- #define LT1_OFFSET 168
- #define LB1_OFFSET 172
-
- #define USP_OFFSET 174
- #define SEQSTAT_OFFSET 178
- #define SYSCFG_OFFSET 182
- #define RETI_OFFSET 184
-
- #define RETX_OFFSET 188
- #define RETN_OFFSET 192
- #define RETE_OFFSET 296
-
- #define PC_OFFSET 200
#endif
--- 20,25 ----