[PATCH 18/26] score: Allow interrupts during thread dispatch

Sebastian Huber sebastian.huber at embedded-brains.de
Tue Nov 15 13:51:50 UTC 2016


Use a processor-specific interrupt frame during context switches in case
the executing thread no longer executes on the processor and the heir
thread is about to start execution.  During this period we must not use
a thread stack for interrupt processing.

Update #2809.
---
 c/src/lib/libbsp/sparc/shared/irq_asm.S           | 19 +++++++++------
 c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S | 29 +++++++++++++----------
 cpukit/score/cpu/arm/cpu_asm.S                    | 10 +++++---
 cpukit/score/include/rtems/score/userextimpl.h    | 13 +++++++++-
 cpukit/score/src/threaddispatch.c                 | 17 +------------
 cpukit/score/src/threadhandler.c                  |  3 ---
 cpukit/score/src/threadloadenv.c                  | 16 +------------
 7 files changed, 50 insertions(+), 57 deletions(-)

diff --git a/c/src/lib/libbsp/sparc/shared/irq_asm.S b/c/src/lib/libbsp/sparc/shared/irq_asm.S
index 7a595d1..a842a62 100644
--- a/c/src/lib/libbsp/sparc/shared/irq_asm.S
+++ b/c/src/lib/libbsp/sparc/shared/irq_asm.S
@@ -178,17 +178,22 @@ done_flushing:
         mov     %g4, %wim
 
 #if defined(RTEMS_SMP)
-        ! The executing context no longer executes on this processor
+        /*
+         * The executing thread no longer executes on this processor.  Switch
+         * the stack to the temporary interrupt stack of this processor.  Mark
+         * the context of the executing thread as not executing.
+         */
+        add     %g6, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE, %sp
         st      %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
 
         ! Try to update the is executing indicator of the heir context
         mov     1, %g1
 
-try_update_is_executing:
+.Ltry_update_is_executing:
 
         swap    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
         cmp     %g1, 0
-        bne     check_is_executing
+        bne     .Lcheck_is_executing
 
         ! The next load is in a delay slot, which is all right
 #endif
@@ -225,12 +230,12 @@ try_update_is_executing:
         nop                                   ! delay slot
 
 #if defined(RTEMS_SMP)
-check_is_executing:
+.Lcheck_is_executing:
 
         ! Check the is executing indicator of the heir context
         ld      [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
         cmp     %g1, 0
-        beq     try_update_is_executing
+        beq     .Ltry_update_is_executing
          mov    1, %g1
 
         ! We may have a new heir
@@ -242,7 +247,7 @@ check_is_executing:
         ! Update the executing only if necessary to avoid cache line
         ! monopolization.
         cmp     %g2, %g4
-        beq     try_update_is_executing
+        beq     .Ltry_update_is_executing
          mov    1, %g1
 
         ! Calculate the heir context pointer
@@ -252,7 +257,7 @@ check_is_executing:
         ! Update the executing
         st      %g4, [%g6 + PER_CPU_OFFSET_EXECUTING]
 
-        ba      try_update_is_executing
+        ba      .Ltry_update_is_executing
          mov    1, %g1
 #endif
 
diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
index 7f3c036..e5b4fcd 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
@@ -402,23 +402,30 @@ PROC (_CPU_Context_switch):
 #endif
 
 #ifdef RTEMS_SMP
-	/* The executing context no longer executes on this processor */
+	/*
+	 * The executing thread no longer executes on this processor.  Switch
+	 * the stack to the temporary interrupt stack of this processor.  Mark
+	 * the context of the executing thread as not executing.
+	 */
 	msync
+
+	GET_SELF_CPU_CONTROL	r12
+	addi	r1, r12, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE
 	li	r6, 0
 	stw	r6, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
 
-check_is_executing:
+.Lcheck_is_executing:
 
 	/* Check the is executing indicator of the heir context */
 	addi	r6, r5, PPC_CONTEXT_OFFSET_IS_EXECUTING
 	lwarx	r7, r0, r6
 	cmpwi	r7, 0
-	bne	get_potential_new_heir
+	bne	.Lget_potential_new_heir
 
 	/* Try to update the is executing indicator of the heir context */
 	li	r7, 1
 	stwcx.	r7, r0, r6
-	bne	get_potential_new_heir
+	bne	.Lget_potential_new_heir
 	isync
 #endif
 
@@ -537,22 +544,20 @@ PROC (_CPU_Context_restore):
 	b	restore_context
 
 #ifdef RTEMS_SMP
-get_potential_new_heir:
-
-	GET_SELF_CPU_CONTROL	r6
+.Lget_potential_new_heir:
 
 	/* We may have a new heir */
 
 	/* Read the executing and heir */
-	lwz	r7, PER_CPU_OFFSET_EXECUTING(r6)
-	lwz	r8, PER_CPU_OFFSET_HEIR(r6)
+	lwz	r7, PER_CPU_OFFSET_EXECUTING(r12)
+	lwz	r8, PER_CPU_OFFSET_HEIR(r12)
 
 	/*
 	 * Update the executing only if necessary to avoid cache line
 	 * monopolization.
 	 */
 	cmpw	r7, r8
-	beq	check_is_executing
+	beq	.Lcheck_is_executing
 
 	/* Calculate the heir context pointer */
 	sub	r7, r4, r7
@@ -560,7 +565,7 @@ get_potential_new_heir:
 	clrrwi	r5, r4, PPC_DEFAULT_CACHE_LINE_POWER
 
 	/* Update the executing */
-	stw	r8, PER_CPU_OFFSET_EXECUTING(r6)
+	stw	r8, PER_CPU_OFFSET_EXECUTING(r12)
 
-	b	check_is_executing
+	b	.Lcheck_is_executing
 #endif
diff --git a/cpukit/score/cpu/arm/cpu_asm.S b/cpukit/score/cpu/arm/cpu_asm.S
index cf94822..1ad3a51 100644
--- a/cpukit/score/cpu/arm/cpu_asm.S
+++ b/cpukit/score/cpu/arm/cpu_asm.S
@@ -69,8 +69,14 @@ DEFINE_FUNCTION_ARM(_CPU_Context_switch)
 #endif
 
 #ifdef RTEMS_SMP
-	/* The executing context no longer executes on this processor */
+	/*
+	 * The executing thread no longer executes on this processor.  Switch
+	 * the stack to the temporary interrupt stack of this processor.  Mark
+	 * the context of the executing thread as not executing.
+	 */
 	dmb
+	GET_SELF_CPU_CONTROL	r2
+	add	sp, r2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
 	mov	r3, #0
 	strb	r3, [r0, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
 
@@ -128,8 +134,6 @@ DEFINE_FUNCTION_ARM(_CPU_Context_restore)
 #ifdef RTEMS_SMP
 .L_get_potential_new_heir:
 
-	GET_SELF_CPU_CONTROL	r2
-
 	/* We may have a new heir */
 
 	/* Read the executing and heir */
diff --git a/cpukit/score/include/rtems/score/userextimpl.h b/cpukit/score/include/rtems/score/userextimpl.h
index e175c9f..e6692c8 100644
--- a/cpukit/score/include/rtems/score/userextimpl.h
+++ b/cpukit/score/include/rtems/score/userextimpl.h
@@ -261,8 +261,16 @@ static inline void _User_extensions_Thread_switch(
   const Chain_Node    *node = _Chain_Immutable_first( chain );
 
   if ( node != tail ) {
-    Per_CPU_Control *cpu_self = _Per_CPU_Get();
+    Per_CPU_Control *cpu_self;
+#if defined(RTEMS_SMP)
+    ISR_Level        level;
+#endif
+
+    cpu_self = _Per_CPU_Get();
 
+#if defined(RTEMS_SMP)
+    _ISR_Local_disable( level );
+#endif
     _Per_CPU_Acquire( cpu_self );
 
     while ( node != tail ) {
@@ -275,6 +283,9 @@ static inline void _User_extensions_Thread_switch(
     }
 
     _Per_CPU_Release( cpu_self );
+#if defined(RTEMS_SMP)
+    _ISR_Local_enable( level );
+#endif
   }
 }
 
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index c96299c..f2c2490 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -174,14 +174,7 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
     if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
       heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();
 
-    /*
-     * On SMP the complete context switch must be atomic with respect to one
-     * processor.  See also _Thread_Handler() since _Context_switch() may branch
-     * to this function.
-     */
-#if !defined( RTEMS_SMP )
     _ISR_Local_enable( level );
-#endif
 
     _User_extensions_Thread_switch( executing, heir );
     _Thread_Save_fp( executing );
@@ -195,16 +188,8 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
      */
     cpu_self = _Per_CPU_Get();
 
-#if !defined( RTEMS_SMP )
     _ISR_Local_disable( level );
-#endif
-  } while (
-#if defined( RTEMS_SMP )
-    false
-#else
-    cpu_self->dispatch_necessary
-#endif
-  );
+  } while ( cpu_self->dispatch_necessary );
 
 post_switch:
   _Assert( cpu_self->thread_dispatch_disable_level == 1 );
diff --git a/cpukit/score/src/threadhandler.c b/cpukit/score/src/threadhandler.c
index 397e0cf..a8d6580 100644
--- a/cpukit/score/src/threadhandler.c
+++ b/cpukit/score/src/threadhandler.c
@@ -38,9 +38,6 @@ void _Thread_Handler( void )
   _Context_Initialization_at_thread_begin();
   executing = _Thread_Executing;
 
-  /* On SMP we enter _Thread_Handler() with interrupts disabled */
-  _SMP_Assert( _ISR_Get_level() != 0 );
-
   /*
    * have to put level into a register for those cpu's that use
    * inline asm here
diff --git a/cpukit/score/src/threadloadenv.c b/cpukit/score/src/threadloadenv.c
index 43564af..22606dd 100644
--- a/cpukit/score/src/threadloadenv.c
+++ b/cpukit/score/src/threadloadenv.c
@@ -25,8 +25,6 @@ void _Thread_Load_environment(
   Thread_Control *the_thread
 )
 {
-  uint32_t isr_level;
-
 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   if ( the_thread->Start.fp_context ) {
     the_thread->fp_context = the_thread->Start.fp_context;
@@ -38,25 +36,13 @@ void _Thread_Load_environment(
   the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
   the_thread->budget_callout   = the_thread->Start.budget_callout;
 
-#if defined( RTEMS_SMP )
-  /*
-   * On SMP we have to start the threads with interrupts disabled, see also
-   * _Thread_Handler() and _Thread_Dispatch().  In _Thread_Handler() the
-   * _ISR_Set_level() is used to set the desired interrupt state of the thread.
-   */
-  isr_level = CPU_MODES_INTERRUPT_MASK;
-#else
-  isr_level = the_thread->Start.isr_level;
-#endif
-
   _Context_Initialize(
     &the_thread->Registers,
     the_thread->Start.Initial_stack.area,
     the_thread->Start.Initial_stack.size,
-    isr_level,
+    the_thread->Start.isr_level,
     _Thread_Handler,
     the_thread->is_fp,
     the_thread->Start.tls_area
   );
-
 }
-- 
1.8.4.5




More information about the devel mailing list