[PATCH 13/30] score: Protect thread CPU with the thread scheduler lock

Sebastian Huber sebastian.huber at embedded-brains.de
Mon Oct 31 08:51:44 UTC 2016


The processor of a thread may change once its scheduler state changes
and the thread scheduler lock is released.  Obtain the thread CPU
inside the thread scheduler lock critical section and pass it by value
to the processor allocation functions, instead of re-reading it later
via _Thread_Get_CPU() without synchronization.

Update #2556.
---
 cpukit/score/include/rtems/score/schedulerimpl.h   | 13 +++++---
 .../score/include/rtems/score/schedulersmpimpl.h   | 36 ++++++++++++++--------
 2 files changed, 32 insertions(+), 17 deletions(-)
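
For reviewers, a minimal sketch of the locking pattern this patch
establishes.  The helper below is hypothetical and not part of the
patch; only the lock and accessor names are taken from the diff:

  /* Hypothetical helper, shown to illustrate the pattern applied in
   * _Scheduler_Block_node() and _Scheduler_SMP_Preempt(): sample the
   * thread CPU while the thread scheduler lock is held and hand the
   * sampled value on, instead of re-reading it without the lock.
   */
  static Per_CPU_Control *_Example_Get_thread_cpu( Thread_Control *thread )
  {
    ISR_lock_Context  lock_context;
    Per_CPU_Control  *thread_cpu;

    _Thread_Scheduler_acquire_critical( thread, &lock_context );
    thread_cpu = _Thread_Get_CPU( thread );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    return thread_cpu;
  }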

diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 7c74765..6d2f5a1 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -1102,10 +1102,11 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  * @param[in] is_scheduled This node is scheduled.
  * @param[in] get_idle_thread Function to get an idle thread.
  *
- * @retval true Continue with the blocking operation.
- * @retval false Otherwise.
+ * @retval thread_cpu The processor of the thread.  Indicates that the
+ *   blocking operation shall continue.
+ * @retval NULL Otherwise.
  */
-RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
+RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
   Scheduler_Context         *context,
   Thread_Control            *thread,
   Scheduler_Node            *node,
@@ -1116,15 +1117,17 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
   ISR_lock_Context  lock_context;
   Thread_Control   *old_user;
   Thread_Control   *new_user;
+  Per_CPU_Control  *thread_cpu;
 
   _Thread_Scheduler_acquire_critical( thread, &lock_context );
+  thread_cpu = _Thread_Get_CPU( thread );
   _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
   _Thread_Scheduler_release_critical( thread, &lock_context );
 
   if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
     _Assert( thread == _Scheduler_Node_get_user( node ) );
 
-    return true;
+    return thread_cpu;
   }
 
   new_user = NULL;
@@ -1166,7 +1169,7 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
     _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
   }
 
-  return false;
+  return NULL;
 }
 
 /**
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index e5423ff..6b1ccc3 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -319,8 +319,9 @@ typedef Thread_Control *( *Scheduler_SMP_Enqueue_scheduled )(
 
 typedef void ( *Scheduler_SMP_Allocate_processor )(
   Scheduler_Context *context,
-  Thread_Control    *scheduled,
-  Thread_Control    *victim
+  Thread_Control    *scheduled_thread,
+  Thread_Control    *victim_thread,
+  Per_CPU_Control   *victim_cpu
 );
 
 static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
@@ -470,11 +471,11 @@ static inline void _Scheduler_SMP_Release_idle_thread(
 static inline void _Scheduler_SMP_Allocate_processor_lazy(
   Scheduler_Context *context,
   Thread_Control    *scheduled_thread,
-  Thread_Control    *victim_thread
+  Thread_Control    *victim_thread,
+  Per_CPU_Control   *victim_cpu
 )
 {
   Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
-  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
   Per_CPU_Control *cpu_self = _Per_CPU_Get();
   Thread_Control *heir;
 
@@ -511,10 +512,10 @@ static inline void _Scheduler_SMP_Allocate_processor_lazy(
 static inline void _Scheduler_SMP_Allocate_processor_exact(
   Scheduler_Context *context,
   Thread_Control    *scheduled_thread,
-  Thread_Control    *victim_thread
+  Thread_Control    *victim_thread,
+  Per_CPU_Control   *victim_cpu
 )
 {
-  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
   Per_CPU_Control *cpu_self = _Per_CPU_Get();
 
   (void) context;
@@ -527,6 +528,7 @@ static inline void _Scheduler_SMP_Allocate_processor(
   Scheduler_Context                *context,
   Scheduler_Node                   *scheduled,
   Thread_Control                   *victim_thread,
+  Per_CPU_Control                  *victim_cpu,
   Scheduler_SMP_Allocate_processor  allocate_processor
 )
 {
@@ -534,7 +536,12 @@ static inline void _Scheduler_SMP_Allocate_processor(
 
   _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
 
-  ( *allocate_processor )( context, scheduled_thread, victim_thread );
+  ( *allocate_processor )(
+    context,
+    scheduled_thread,
+    victim_thread,
+    victim_cpu
+  );
 }
 
 static inline Thread_Control *_Scheduler_SMP_Preempt(
@@ -546,11 +553,13 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
 {
   Thread_Control   *victim_thread;
   ISR_lock_Context  lock_context;
+  Per_CPU_Control  *victim_cpu;
 
   victim_thread = _Scheduler_Node_get_user( victim );
   _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
 
   _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
+  victim_cpu = _Thread_Get_CPU( victim_thread );
   _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
   _Thread_Scheduler_release_critical( victim_thread, &lock_context );
 
@@ -558,6 +567,7 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
     context,
     scheduled,
     victim_thread,
+    victim_cpu,
     allocate_processor
   );
 
@@ -823,6 +833,7 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
 static inline void _Scheduler_SMP_Schedule_highest_ready(
   Scheduler_Context                *context,
   Scheduler_Node                   *victim,
+  Per_CPU_Control                  *victim_cpu,
   Scheduler_SMP_Extract             extract_from_ready,
   Scheduler_SMP_Get_highest_ready   get_highest_ready,
   Scheduler_SMP_Move                move_from_ready_to_scheduled,
@@ -846,6 +857,7 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
         context,
         highest_ready,
         _Scheduler_Node_get_user( victim ),
+        victim_cpu,
         allocate_processor
       );
 
@@ -885,28 +897,28 @@ static inline void _Scheduler_SMP_Block(
   Scheduler_SMP_Allocate_processor  allocate_processor
 )
 {
-  Scheduler_SMP_Node_state node_state;
-  bool                     block;
+  Scheduler_SMP_Node_state  node_state;
+  Per_CPU_Control          *thread_cpu;
 
   node_state = _Scheduler_SMP_Node_state( node );
   _Assert( node_state != SCHEDULER_SMP_NODE_BLOCKED );
 
-  block = _Scheduler_Block_node(
+  thread_cpu = _Scheduler_Block_node(
     context,
     thread,
     node,
     node_state == SCHEDULER_SMP_NODE_SCHEDULED,
     _Scheduler_SMP_Get_idle_thread
   );
-  if ( block ) {
+  if ( thread_cpu != NULL ) {
     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
 
     if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
       _Scheduler_SMP_Extract_from_scheduled( node );
-
       _Scheduler_SMP_Schedule_highest_ready(
         context,
         node,
+        thread_cpu,
         extract_from_ready,
         get_highest_ready,
         move_from_ready_to_scheduled,
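
The same discipline reduced to a self-contained toy program (plain C
with POSIX threads, not RTEMS code; all names below are invented for
illustration): read a lock-protected field once inside the critical
section and pass the sampled value on by value, exactly as thread_cpu
and victim_cpu are passed through the scheduler operations above.

  #include <pthread.h>
  #include <stdio.h>

  typedef struct {
    pthread_mutex_t lock; /* stands in for the thread scheduler lock */
    int             cpu;  /* stands in for the thread-to-CPU mapping */
  } toy_thread;

  /* Sample the CPU under the lock, as _Scheduler_Block_node() does */
  static int toy_block( toy_thread *t )
  {
    int cpu;

    pthread_mutex_lock( &t->lock );
    cpu = t->cpu; /* the only read of the protected field */
    pthread_mutex_unlock( &t->lock );

    return cpu;
  }

  int main( void )
  {
    toy_thread t = { PTHREAD_MUTEX_INITIALIZER, 1 };

    /* The caller works with the sampled value from here on */
    printf( "victim cpu: %d\n", toy_block( &t ) );

    return 0;
  }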
-- 
1.8.4.5