[rtems commit] score: Fix context switch extensions (SMP)
Sebastian Huber
sebh at rtems.org
Mon Mar 2 06:52:03 UTC 2020
Module: rtems
Branch: master
Commit: fcb11510c6b38a7a1f4da7205acc5461a4e18214
Changeset: http://git.rtems.org/rtems/commit/?id=fcb11510c6b38a7a1f4da7205acc5461a4e18214
Author: Sebastian Huber <sebastian.huber at embedded-brains.de>
Date: Wed Feb 26 10:02:37 2020 +0100
score: Fix context switch extensions (SMP)
In uniprocessor and SMP configurations, the context switch extensions
were called during _Thread_Do_dispatch():
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
Thread_Control *executing;
executing = cpu_self->executing;
...
do {
Thread_Control *heir;
heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
...
_User_extensions_Thread_switch( executing, heir );
...
_Context_Switch( &executing->Registers, &heir->Registers );
...
} while ( cpu_self->dispatch_necessary );
...
}
In uniprocessor configurations, this is fine and the context switch
extensions are called for all thread switches except the very first
thread switch to the initialization thread. However, in SMP
configurations, the context switch may be invalidated and updated in the
low-level _Context_Switch() routine. See:
https://docs.rtems.org/branches/master/c-user/symmetric_multiprocessing_services.html#thread-dispatch-details
In case such an update happens, a thread will execute on the processor
which was not seen in the previous call of the context switch
extensions. This can confuse, for example, event record consumers which
use events generated by a context switch extension.
Fixing this is not straightforward. The context switch extensions call
must move after the low-level context switch. The problem here is that
we may end up in _Thread_Handler(). Adding the context switch
extensions call to _Thread_Handler() now also covers the thread switch
to the initialization thread. We also have to save the last executing
thread (ancestor) of the processor. Neither registers nor the stack can be
used for this purpose. We have to add it to the per-processor
information. Existing extensions may be affected, since now context
switch extensions use the stack of the heir thread. The stack checker
is affected by this.
Calling the thread switch extensions in the low-level context switch is
difficult since at this point an intermediate stack is used which is
only large enough to enable servicing of interrupts.
Update #3885.
---
cpukit/include/rtems/score/percpu.h | 7 +++++++
cpukit/include/rtems/score/userextimpl.h | 11 +++++++++++
cpukit/libmisc/stackchk/check.c | 26 +++++++++++++++++++++++---
cpukit/score/src/threadcreateidle.c | 3 +++
cpukit/score/src/threaddispatch.c | 5 +++++
cpukit/score/src/threadhandler.c | 4 ++++
cpukit/score/src/userextaddset.c | 21 +++++++++++++++++++++
testsuites/sptests/spextensions01/init.c | 4 ++++
8 files changed, 78 insertions(+), 3 deletions(-)
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 7c95a96..31bc2b0 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -529,6 +529,13 @@ typedef struct Per_CPU_Control {
} Scheduler;
/**
+ * @brief The ancestor of the executing thread.
+ *
+ * This member is used by _User_extensions_Thread_switch().
+ */
+ struct _Thread_Control *ancestor;
+
+ /**
* @brief Begin of the per-CPU data area.
*
* Contains items defined via PER_CPU_DATA_ITEM().
diff --git a/cpukit/include/rtems/score/userextimpl.h b/cpukit/include/rtems/score/userextimpl.h
index 8b456c0..23ee957 100644
--- a/cpukit/include/rtems/score/userextimpl.h
+++ b/cpukit/include/rtems/score/userextimpl.h
@@ -386,7 +386,16 @@ static inline void _User_extensions_Thread_switch(
_ISR_lock_ISR_disable( &lock_context );
_Per_CPU_Acquire( cpu_self, &lock_context );
+ executing = cpu_self->ancestor;
+ cpu_self->ancestor = heir;
node = _Chain_Immutable_first( chain );
+
+ /*
+ * An executing thread equal to the heir thread may happen in two
+ * situations. Firstly, in case context switch extensions are created after
+ * system initialization. Secondly, during a thread self restart.
+ */
+ if ( executing != heir ) {
#endif
while ( node != tail ) {
@@ -398,6 +407,8 @@ static inline void _User_extensions_Thread_switch(
}
#if defined(RTEMS_SMP)
+ }
+
_Per_CPU_Release( cpu_self, &lock_context );
_ISR_lock_ISR_enable( &lock_context );
#endif
diff --git a/cpukit/libmisc/stackchk/check.c b/cpukit/libmisc/stackchk/check.c
index eec3a91..0c859f6 100644
--- a/cpukit/libmisc/stackchk/check.c
+++ b/cpukit/libmisc/stackchk/check.c
@@ -295,8 +295,8 @@ static void Stack_check_report_blown_task(
* rtems_stack_checker_switch_extension
*/
void rtems_stack_checker_switch_extension(
- Thread_Control *running RTEMS_UNUSED,
- Thread_Control *heir RTEMS_UNUSED
+ Thread_Control *running,
+ Thread_Control *heir
)
{
bool sp_ok;
@@ -306,6 +306,22 @@ void rtems_stack_checker_switch_extension(
/*
* Check for an out of bounds stack pointer or an overwrite
*/
+#if defined(RTEMS_SMP)
+ sp_ok = Stack_check_Frame_pointer_in_range( heir );
+
+ if ( !sp_ok ) {
+ pattern_ok = Stack_check_Is_sanity_pattern_valid(
+ &heir->Start.Initial_stack
+ );
+ Stack_check_report_blown_task( heir, pattern_ok );
+ }
+
+ pattern_ok = Stack_check_Is_sanity_pattern_valid( &running->Start.Initial_stack );
+
+ if ( !pattern_ok ) {
+ Stack_check_report_blown_task( running, pattern_ok );
+ }
+#else
sp_ok = Stack_check_Frame_pointer_in_range( running );
pattern_ok = Stack_check_Is_sanity_pattern_valid( &running->Start.Initial_stack );
@@ -313,6 +329,7 @@ void rtems_stack_checker_switch_extension(
if ( !sp_ok || !pattern_ok ) {
Stack_check_report_blown_task( running, pattern_ok );
}
+#endif
stack = &Stack_check_Interrupt_stack[ _SMP_Get_current_processor() ];
@@ -329,7 +346,10 @@ void rtems_stack_checker_switch_extension(
*/
bool rtems_stack_checker_is_blown( void )
{
- rtems_stack_checker_switch_extension( _Thread_Get_executing(), NULL );
+ Thread_Control *executing;
+
+ executing = _Thread_Get_executing();
+ rtems_stack_checker_switch_extension( executing, executing );
/*
* The Stack Pointer and the Pattern Area are OK so return false.
diff --git a/cpukit/score/src/threadcreateidle.c b/cpukit/score/src/threadcreateidle.c
index 52c3ad4..d713150 100644
--- a/cpukit/score/src/threadcreateidle.c
+++ b/cpukit/score/src/threadcreateidle.c
@@ -75,6 +75,9 @@ static void _Thread_Create_idle_for_CPU( Per_CPU_Control *cpu )
*/
cpu->heir =
cpu->executing = idle;
+#if defined(RTEMS_SMP)
+ cpu->ancestor = idle;
+#endif
idle->is_idle = true;
idle->Start.Entry.adaptor = _Thread_Entry_adaptor_idle;
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index cd32f5a..3b611f7 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -297,10 +297,15 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
_ISR_Local_enable( level );
+#if !defined(RTEMS_SMP)
_User_extensions_Thread_switch( executing, heir );
+#endif
_Thread_Save_fp( executing );
_Context_Switch( &executing->Registers, &heir->Registers );
_Thread_Restore_fp( executing );
+#if defined(RTEMS_SMP)
+ _User_extensions_Thread_switch( NULL, executing );
+#endif
/*
* We have to obtain this value again after the context switch since the
diff --git a/cpukit/score/src/threadhandler.c b/cpukit/score/src/threadhandler.c
index 6ddb303..4b8e386 100644
--- a/cpukit/score/src/threadhandler.c
+++ b/cpukit/score/src/threadhandler.c
@@ -97,6 +97,10 @@ void _Thread_Handler( void )
*/
_Thread_Restore_fp( executing );
+#if defined(RTEMS_SMP)
+ _User_extensions_Thread_switch( NULL, executing );
+#endif
+
/*
* Do not use the level of the thread control block, since it has a
* different format.
diff --git a/cpukit/score/src/userextaddset.c b/cpukit/score/src/userextaddset.c
index 2b13dfa..70e9fb0 100644
--- a/cpukit/score/src/userextaddset.c
+++ b/cpukit/score/src/userextaddset.c
@@ -20,8 +20,28 @@
#endif
#include <rtems/score/userextimpl.h>
+#include <rtems/score/smp.h>
#include <rtems/score/percpu.h>
+static void _User_extensions_Set_ancestors( void )
+{
+#if defined(RTEMS_SMP)
+ if ( _Chain_Is_empty( &_User_extensions_Switches_list ) ) {
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+
+ cpu_max = _SMP_Get_processor_maximum();
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ Per_CPU_Control *cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ cpu->ancestor = cpu->executing;
+ }
+ }
+#endif
+}
+
void _User_extensions_Add_set(
User_extensions_Control *the_extension
)
@@ -45,6 +65,7 @@ void _User_extensions_Add_set(
the_extension->Callouts.thread_switch;
_Per_CPU_Acquire_all( &lock_context );
+ _User_extensions_Set_ancestors();
_Chain_Initialize_node( &the_extension->Switch.Node );
_Chain_Append_unprotected(
&_User_extensions_Switches_list,
diff --git a/testsuites/sptests/spextensions01/init.c b/testsuites/sptests/spextensions01/init.c
index f9e3c11..7ce1da5 100644
--- a/testsuites/sptests/spextensions01/init.c
+++ b/testsuites/sptests/spextensions01/init.c
@@ -453,8 +453,12 @@ static void test(void)
#endif
active_extensions = 4;
+#ifdef RTEMS_SMP
+ assert(counter == 12);
+#else
assert(counter == 10);
counter = 12;
+#endif
sc = rtems_task_create(
rtems_build_name('W', 'O', 'R', 'K'),
More information about the vc
mailing list