[rtems commit] score: Use thread state lock for current state

Sebastian Huber sebh at rtems.org
Thu May 12 11:34:39 UTC 2016


Module:    rtems
Branch:    master
Commit:    bd12dda405e1bab16c522f7ef0dd2b455230d269
Changeset: http://git.rtems.org/rtems/commit/?id=bd12dda405e1bab16c522f7ef0dd2b455230d269

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed May 11 11:54:49 2016 +0200

score: Use thread state lock for current state

In addition, protect the scheduler of a thread by the thread state lock.
This enables the use of per-instance scheduler locks.

Update #2555.

---

 cpukit/libmisc/cpuuse/cpuusagereset.c            |  13 ++-
 cpukit/rtems/src/taskmode.c                      |   4 +-
 cpukit/score/include/rtems/score/schedulerimpl.h | 134 +++++++++++++++--------
 cpukit/score/include/rtems/score/thread.h        |   7 +-
 cpukit/score/src/schedulercbsunblock.c           |   7 +-
 cpukit/score/src/threadchangepriority.c          |   4 +-
 cpukit/score/src/threadclearstate.c              |   4 +-
 cpukit/score/src/threadgetcputimeused.c          |  11 +-
 cpukit/score/src/threadsetstate.c                |   4 +-
 cpukit/score/src/threadyield.c                   |   4 +-
 testsuites/smptests/smpscheduler03/init.c        |  56 +++++++---
 testsuites/tmtests/tm27/task1.c                  |  18 ++-
 12 files changed, 182 insertions(+), 84 deletions(-)

diff --git a/cpukit/libmisc/cpuuse/cpuusagereset.c b/cpukit/libmisc/cpuuse/cpuusagereset.c
index 6ef50f9..74f273a 100644
--- a/cpukit/libmisc/cpuuse/cpuusagereset.c
+++ b/cpukit/libmisc/cpuuse/cpuusagereset.c
@@ -28,11 +28,18 @@ static void CPU_usage_Per_thread_handler(
   Thread_Control *the_thread
 )
 {
-  ISR_lock_Context lock_context;
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         state_lock_context;
+  ISR_lock_Context         scheduler_lock_context;
+
+  _Thread_State_acquire( the_thread, &state_lock_context );
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
 
-  _Scheduler_Acquire( the_thread, &lock_context );
   _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
-  _Scheduler_Release( the_thread, &lock_context );
+
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( the_thread, &state_lock_context );
 }
 
 /*
diff --git a/cpukit/rtems/src/taskmode.c b/cpukit/rtems/src/taskmode.c
index 131632d..a345409 100644
--- a/cpukit/rtems/src/taskmode.c
+++ b/cpukit/rtems/src/taskmode.c
@@ -120,9 +120,9 @@ rtems_status_code rtems_task_mode(
     Per_CPU_Control  *cpu_self;
 
     cpu_self = _Thread_Dispatch_disable();
-    _Scheduler_Acquire( executing, &lock_context );
+    _Thread_State_acquire( executing, &lock_context );
     _Scheduler_Schedule( executing );
-    _Scheduler_Release( executing, &lock_context );
+    _Thread_State_release( executing, &lock_context );
     _Thread_Dispatch_enable( cpu_self );
   }
 
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 5cf3503..c888237 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -10,7 +10,7 @@
 /*
  *  Copyright (C) 2010 Gedare Bloom.
  *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
- *  Copyright (c) 2014-2015 embedded brains GmbH
+ *  Copyright (c) 2014, 2016 embedded brains GmbH
  *
  *  The license and distribution terms for this file may be
  *  found in the file LICENSE in this distribution or at
@@ -118,6 +118,42 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
 }
 #endif
 
+ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
+
+/**
+ * @brief Acquires the scheduler instance inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] lock_context The lock context to use for
+ *   _Scheduler_Release_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
+  const Scheduler_Control *scheduler,
+  ISR_lock_Context        *lock_context
+)
+{
+  (void) scheduler;
+  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
+}
+
+/**
+ * @brief Releases the scheduler instance inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] lock_context The lock context used for
+ *   _Scheduler_Acquire_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
+  const Scheduler_Control *scheduler,
+  ISR_lock_Context        *lock_context
+)
+{
+  (void) scheduler;
+  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
+}
+
 /**
  * The preferred method to add a new scheduler is to define the jump table
  * entries and add a case to the _Scheduler_Initialize routine.
@@ -143,9 +179,15 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         lock_context;
+
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &lock_context );
 
   ( *scheduler->Operations.schedule )( scheduler, the_thread );
+
+  _Scheduler_Release_critical( scheduler, &lock_context );
 }
 
 #if defined(RTEMS_SMP)
@@ -252,10 +294,16 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         lock_context;
 #if defined(RTEMS_SMP)
-  Thread_Control *needs_help;
+  Thread_Control          *needs_help;
+#endif
+
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &lock_context );
 
+#if defined(RTEMS_SMP)
   needs_help =
 #endif
   ( *scheduler->Operations.yield )( scheduler, the_thread );
@@ -263,6 +311,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
 #if defined(RTEMS_SMP)
   _Scheduler_Ask_for_help_if_necessary( needs_help );
 #endif
+
+  _Scheduler_Release_critical( scheduler, &lock_context );
 }
 
 /**
@@ -277,9 +327,15 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         lock_context;
+
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &lock_context );
 
   ( *scheduler->Operations.block )( scheduler, the_thread );
+
+  _Scheduler_Release_critical( scheduler, &lock_context );
 }
 
 /**
@@ -294,10 +350,16 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         lock_context;
 #if defined(RTEMS_SMP)
-  Thread_Control *needs_help;
+  Thread_Control          *needs_help;
+#endif
 
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+#if defined(RTEMS_SMP)
   needs_help =
 #endif
   ( *scheduler->Operations.unblock )( scheduler, the_thread );
@@ -305,6 +367,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
 #if defined(RTEMS_SMP)
   _Scheduler_Ask_for_help_if_necessary( needs_help );
 #endif
+
+  _Scheduler_Release_critical( scheduler, &lock_context );
 }
 
 /**
@@ -329,14 +393,20 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
   bool                     prepend_it
 )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread );
+  const Scheduler_Control *own_scheduler;
+  ISR_lock_Context         lock_context;
 #if defined(RTEMS_SMP)
-  Thread_Control *needs_help;
+  Thread_Control          *needs_help;
+#endif
 
+  own_scheduler = _Scheduler_Get_own( the_thread );
+  _Scheduler_Acquire_critical( own_scheduler, &lock_context );
+
+#if defined(RTEMS_SMP)
   needs_help =
 #endif
-  ( *scheduler->Operations.change_priority )(
-    scheduler,
+  ( *own_scheduler->Operations.change_priority )(
+    own_scheduler,
     the_thread,
     new_priority,
     prepend_it
@@ -345,6 +415,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
 #if defined(RTEMS_SMP)
   _Scheduler_Ask_for_help_if_necessary( needs_help );
 #endif
+
+  _Scheduler_Release_critical( own_scheduler, &lock_context );
 }
 
 /**
@@ -394,13 +466,19 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority(
   Priority_Control  new_priority
 )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         lock_context;
+
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &lock_context );
 
   ( *scheduler->Operations.update_priority )(
     scheduler,
     the_thread,
     new_priority
   );
+
+  _Scheduler_Release_critical( scheduler, &lock_context );
 }
 
 /**
@@ -1341,8 +1419,6 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
 }
 #endif
 
-ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
-
 RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
   Thread_Control *new_heir,
   bool            force_dispatch
@@ -1367,36 +1443,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
   }
 }
 
-/**
- * @brief Acquires the scheduler instance of the thread.
- *
- * @param[in] the_thread The thread.
- * @param[in] lock_context The lock context for _Scheduler_Release().
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Acquire(
-  Thread_Control   *the_thread,
-  ISR_lock_Context *lock_context
-)
-{
-  (void) the_thread;
-  _ISR_lock_ISR_disable_and_acquire( &_Scheduler_Lock, lock_context );
-}
-
-/**
- * @brief Releases the scheduler instance of the thread.
- *
- * @param[in] the_thread The thread.
- * @param[in] lock_context The lock context used for _Scheduler_Acquire().
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Release(
-  Thread_Control   *the_thread,
-  ISR_lock_Context *lock_context
-)
-{
-  (void) the_thread;
-  _ISR_lock_Release_and_ISR_enable( &_Scheduler_Lock, lock_context );
-}
-
 /** @} */
 
 #ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index d2a6d4b..15b068d 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -718,8 +718,11 @@ struct _Thread_Control {
    * The lock of this thread queue is used for various purposes.  It protects
    * the following fields
    *
-   * - RTEMS_API_Control::Signal, and
-   * - Thread_Control::Post_switch_actions.
+   * - RTEMS_API_Control::Signal,
+   * - Thread_Control::current_state,
+   * - Thread_Control::Post_switch_actions,
+   * - Thread_Control::Scheduler::control, and
+   * - Thread_Control::Scheduler::own_control.
    *
    * @see _Thread_State_acquire().
    */
diff --git a/cpukit/score/src/schedulercbsunblock.c b/cpukit/score/src/schedulercbsunblock.c
index 9170889..7898588 100644
--- a/cpukit/score/src/schedulercbsunblock.c
+++ b/cpukit/score/src/schedulercbsunblock.c
@@ -56,7 +56,12 @@ Scheduler_Void_or_thread _Scheduler_CBS_Unblock(
       the_thread->real_priority = new_priority;
       if ( the_thread->current_priority != new_priority ) {
         the_thread->current_priority = new_priority;
-        _Scheduler_Change_priority(the_thread, new_priority, true);
+        _Scheduler_EDF_Change_priority(
+          scheduler,
+          the_thread,
+          new_priority,
+          true
+        );
       }
     }
   }
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index 35e5e5b..152646f 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -64,7 +64,7 @@ void _Thread_Change_priority(
 
     _Thread_Lock_release( lock, &lock_context );
 
-    _Scheduler_Acquire( the_thread, &lock_context );
+    _Thread_State_acquire( the_thread, &lock_context );
 
     if ( the_thread->priority_generation == my_generation ) {
       if ( _States_Is_ready( the_thread->current_state ) ) {
@@ -78,7 +78,7 @@ void _Thread_Change_priority(
       }
     }
 
-    _Scheduler_Release( the_thread, &lock_context );
+    _Thread_State_release( the_thread, &lock_context );
   } else {
     _Thread_Lock_release( lock, &lock_context );
   }
diff --git a/cpukit/score/src/threadclearstate.c b/cpukit/score/src/threadclearstate.c
index ae54e3a..3da3538 100644
--- a/cpukit/score/src/threadclearstate.c
+++ b/cpukit/score/src/threadclearstate.c
@@ -32,7 +32,7 @@ States_Control _Thread_Clear_state(
 
   _Assert( state != 0 );
 
-  _Scheduler_Acquire( the_thread, &lock_context );
+  _Thread_State_acquire( the_thread, &lock_context );
 
   previous_state = the_thread->current_state;
 
@@ -47,7 +47,7 @@ States_Control _Thread_Clear_state(
     }
   }
 
-  _Scheduler_Release( the_thread, &lock_context );
+  _Thread_State_release( the_thread, &lock_context );
 
   return previous_state;
 }
diff --git a/cpukit/score/src/threadgetcputimeused.c b/cpukit/score/src/threadgetcputimeused.c
index 6bfe8ea..9026007 100644
--- a/cpukit/score/src/threadgetcputimeused.c
+++ b/cpukit/score/src/threadgetcputimeused.c
@@ -33,9 +33,13 @@ void _Thread_Get_CPU_time_used(
   Timestamp_Control *cpu_time_used
 )
 {
-  ISR_lock_Context lock_context;
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context         state_lock_context;
+  ISR_lock_Context         scheduler_lock_context;
 
-  _Scheduler_Acquire( the_thread, &lock_context );
+  _Thread_State_acquire( the_thread, &state_lock_context );
+  scheduler = _Scheduler_Get( the_thread );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
 
   if ( _Thread_Is_scheduled( the_thread ) ) {
     _Thread_Update_CPU_time_used( the_thread, _Thread_Get_CPU( the_thread ) );
@@ -43,5 +47,6 @@ void _Thread_Get_CPU_time_used(
 
   *cpu_time_used = the_thread->cpu_time_used;
 
-  _Scheduler_Release( the_thread, &lock_context );
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( the_thread, &state_lock_context );
 }
diff --git a/cpukit/score/src/threadsetstate.c b/cpukit/score/src/threadsetstate.c
index 5112827..3aaa463 100644
--- a/cpukit/score/src/threadsetstate.c
+++ b/cpukit/score/src/threadsetstate.c
@@ -36,7 +36,7 @@ States_Control _Thread_Set_state(
 
   _Assert( state != 0 );
 
-  _Scheduler_Acquire( the_thread, &lock_context );
+  _Thread_State_acquire( the_thread, &lock_context );
 
   previous_state = the_thread->current_state;
   next_state = _States_Set( state, previous_state);
@@ -46,7 +46,7 @@ States_Control _Thread_Set_state(
     _Scheduler_Block( the_thread );
   }
 
-  _Scheduler_Release( the_thread, &lock_context );
+  _Thread_State_release( the_thread, &lock_context );
 
   return previous_state;
 }
diff --git a/cpukit/score/src/threadyield.c b/cpukit/score/src/threadyield.c
index 7f1c175..cfd8118 100644
--- a/cpukit/score/src/threadyield.c
+++ b/cpukit/score/src/threadyield.c
@@ -31,11 +31,11 @@ void _Thread_Yield( Thread_Control *executing )
 {
   ISR_lock_Context lock_context;
 
-  _Scheduler_Acquire( executing, &lock_context );
+  _Thread_State_acquire( executing, &lock_context );
 
   if ( _States_Is_ready( executing->current_state ) ) {
     _Scheduler_Yield( executing );
   }
 
-  _Scheduler_Release( executing, &lock_context );
+  _Thread_State_release( executing, &lock_context );
 }
diff --git a/testsuites/smptests/smpscheduler03/init.c b/testsuites/smptests/smpscheduler03/init.c
index 4a3aa54..1888048 100644
--- a/testsuites/smptests/smpscheduler03/init.c
+++ b/testsuites/smptests/smpscheduler03/init.c
@@ -193,11 +193,15 @@ static Thread_Control *change_priority_op(
   bool prepend_it
 )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get(thread);
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context state_lock_context;
+  ISR_lock_Context scheduler_lock_context;
   Thread_Control *needs_help;
-  ISR_lock_Context lock_context;
 
-  _Scheduler_Acquire(thread, &lock_context);
+  _Thread_State_acquire( thread, &state_lock_context );
+  scheduler = _Scheduler_Get( thread );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
+
   thread->current_priority = new_priority;
   needs_help = (*scheduler->Operations.change_priority)(
     scheduler,
@@ -205,7 +209,9 @@ static Thread_Control *change_priority_op(
     new_priority,
     prepend_it
   );
-  _Scheduler_Release(thread, &lock_context);
+
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( thread, &state_lock_context );
 
   return needs_help;
 }
@@ -302,13 +308,19 @@ static void test_change_priority_op(void)
 
 static Thread_Control *yield_op(Thread_Control *thread)
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get(thread);
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context state_lock_context;
+  ISR_lock_Context scheduler_lock_context;
   Thread_Control *needs_help;
-  ISR_lock_Context lock_context;
 
-  _Scheduler_Acquire(thread, &lock_context);
+  _Thread_State_acquire( thread, &state_lock_context );
+  scheduler = _Scheduler_Get( thread );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
+
   needs_help = (*scheduler->Operations.yield)(scheduler, thread);
-  _Scheduler_Release(thread, &lock_context);
+
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( thread, &state_lock_context );
 
   return needs_help;
 }
@@ -429,23 +441,35 @@ static void test_yield_op(void)
 
 static void block_op(Thread_Control *thread)
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get(thread);
-  ISR_lock_Context lock_context;
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context state_lock_context;
+  ISR_lock_Context scheduler_lock_context;
+
+  _Thread_State_acquire( thread, &state_lock_context );
+  scheduler = _Scheduler_Get( thread );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
 
-  _Scheduler_Acquire(thread, &lock_context);
   (*scheduler->Operations.block)(scheduler, thread);
-  _Scheduler_Release(thread, &lock_context);
+
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( thread, &state_lock_context );
 }
 
 static Thread_Control *unblock_op(Thread_Control *thread)
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get(thread);
+  const Scheduler_Control *scheduler;
+  ISR_lock_Context state_lock_context;
+  ISR_lock_Context scheduler_lock_context;
   Thread_Control *needs_help;
-  ISR_lock_Context lock_context;
 
-  _Scheduler_Acquire(thread, &lock_context);
+  _Thread_State_acquire( thread, &state_lock_context );
+  scheduler = _Scheduler_Get( thread );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
+
   needs_help = (*scheduler->Operations.unblock)(scheduler, thread);
-  _Scheduler_Release(thread, &lock_context);
+
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( thread, &state_lock_context );
 
   return needs_help;
 }
diff --git a/testsuites/tmtests/tm27/task1.c b/testsuites/tmtests/tm27/task1.c
index d351b41..77072bb 100644
--- a/testsuites/tmtests/tm27/task1.c
+++ b/testsuites/tmtests/tm27/task1.c
@@ -225,9 +225,15 @@ rtems_task Task_2(
 )
 {
   Thread_Control *executing = _Thread_Get_executing();
-  Scheduler_priority_Context *scheduler_context =
-    _Scheduler_priority_Get_context( _Scheduler_Get( executing ) );
-  ISR_lock_Context lock_context;
+  const Scheduler_Control    *scheduler;
+  Scheduler_priority_Context *scheduler_context;
+  ISR_lock_Context state_lock_context;
+  ISR_lock_Context scheduler_lock_context;
+
+  _Thread_State_acquire( executing, &state_lock_context );
+  scheduler = _Scheduler_Get( executing );
+  scheduler_context = _Scheduler_priority_Get_context( scheduler );
+  _Thread_State_release( executing, &state_lock_context );
 
 #if (MUST_WAIT_FOR_INTERRUPT == 1)
   while ( Interrupt_occurred == 0 );
@@ -256,14 +262,16 @@ rtems_task Task_2(
    *  Switch back to the other task to exit the test.
    */
 
-  _Scheduler_Acquire( executing, &lock_context );
+  _Thread_State_acquire( executing, &state_lock_context );
+  _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
 
   _Thread_Executing =
         (Thread_Control *) _Chain_First(&scheduler_context->Ready[LOW_PRIORITY]);
 
   _Thread_Dispatch_necessary = 1;
 
-  _Scheduler_Release( executing, &lock_context );
+  _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+  _Thread_State_release( executing, &state_lock_context );
 
   _Thread_Dispatch();
 

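Callers that only need to protect the thread state and the scheduler
assignment no longer take the scheduler lock themselves; the scheduler
operation wrappers acquire the instance lock internally.  A minimal
caller-side sketch (not part of the commit), mirroring the new
_Thread_Yield() above; the function name is illustrative:

#include <rtems/score/schedulerimpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/threadimpl.h>

/* The thread state lock alone protects current_state and the scheduler
 * of the thread; _Scheduler_Yield() takes the scheduler instance lock
 * internally via _Scheduler_Acquire_critical().
 */
static void yield_if_ready( Thread_Control *executing )
{
  ISR_lock_Context lock_context;

  _Thread_State_acquire( executing, &lock_context );

  if ( _States_Is_ready( executing->current_state ) ) {
    _Scheduler_Yield( executing );
  }

  _Thread_State_release( executing, &lock_context );
}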