[rtems commit] score: Add new SMP scheduler helping protocol

Sebastian Huber sebh at rtems.org
Wed Nov 2 09:08:44 UTC 2016


Module:    rtems
Branch:    master
Commit:    351c14dfd00e1bdaced2823242532cab4bccb58c
Changeset: http://git.rtems.org/rtems/commit/?id=351c14dfd00e1bdaced2823242532cab4bccb58c

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Sep 27 11:33:36 2016 +0200

score: Add new SMP scheduler helping protocol

Update #2556.

---

 cpukit/score/include/rtems/score/percpu.h          |   8 +
 cpukit/score/include/rtems/score/scheduler.h       |  92 ++++++++++
 cpukit/score/include/rtems/score/schedulerimpl.h   | 158 +++++++++++++----
 .../rtems/score/schedulerpriorityaffinitysmp.h     |  22 +++
 .../include/rtems/score/schedulerprioritysmp.h     |  24 ++-
 .../score/include/rtems/score/schedulersimplesmp.h |  24 ++-
 .../score/include/rtems/score/schedulersmpimpl.h   | 173 +++++++++++++++++-
 .../score/include/rtems/score/schedulerstrongapa.h |  22 +++
 cpukit/score/include/rtems/score/thread.h          |  15 ++
 cpukit/score/include/rtems/score/threadimpl.h      |  24 +++
 cpukit/score/src/schedulerdefaultaskforhelp.c      |  39 ++++-
 cpukit/score/src/schedulerpriorityaffinitysmp.c    |  70 +++++++-
 cpukit/score/src/schedulerprioritysmp.c            |  70 +++++++-
 cpukit/score/src/schedulersimplesmp.c              |  84 ++++++++-
 cpukit/score/src/schedulersmpdebug.c               |   6 +-
 cpukit/score/src/schedulerstrongapa.c              |  70 +++++++-
 cpukit/score/src/smp.c                             |   1 +
 cpukit/score/src/threaddispatch.c                  |  78 ++++++++-
 cpukit/score/src/threadscheduler.c                 |  49 ++++++
 testsuites/smptests/smpmutex01/init.c              | 193 ++++++++++++++++++++-
 20 files changed, 1160 insertions(+), 62 deletions(-)

diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
index cb28cd9..3e0c348 100644
--- a/cpukit/score/include/rtems/score/percpu.h
+++ b/cpukit/score/include/rtems/score/percpu.h
@@ -23,6 +23,7 @@
   #include <rtems/asm.h>
 #else
   #include <rtems/score/assert.h>
+  #include <rtems/score/chain.h>
   #include <rtems/score/isrlock.h>
   #include <rtems/score/smp.h>
   #include <rtems/score/smplock.h>
@@ -395,6 +396,13 @@ typedef struct Per_CPU_Control {
     #endif
 
     /**
+     * @brief Chain of threads in need for help.
+     *
+     * This field is protected by the Per_CPU_Control::Lock lock.
+     */
+    Chain_Control Threads_in_need_for_help;
+
+    /**
      * @brief Bit field for SMP messages.
      *
     * This bit field is not protected by locks.  Atomic operations are used to
diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index ad04e7f..d13277a 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -104,6 +104,52 @@ typedef struct {
 
 #if defined(RTEMS_SMP)
   /**
+   * @brief Ask for help operation.
+   *
+   * @param[in] scheduler The scheduler instance to ask for help.
+   * @param[in] the_thread The thread needing help.
+   * @param[in] node The scheduler node.
+   *
+   * @retval true Ask for help was successful.
+   * @retval false Otherwise.
+   */
+  bool ( *ask_for_help )(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *the_thread,
+    Scheduler_Node          *node
+  );
+
+  /**
+   * @brief Reconsider help operation.
+   *
+   * @param[in] scheduler The scheduler instance to reconsider the help
+   *   request.
+   * @param[in] the_thread The thread reconsidering a help request.
+   * @param[in] node The scheduler node.
+   */
+  void ( *reconsider_help_request )(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *the_thread,
+    Scheduler_Node          *node
+  );
+
+  /**
+   * @brief Withdraw node operation.
+   *
+   * @param[in] scheduler The scheduler instance to withdraw the node.
+   * @param[in] the_thread The thread using the node.
+   * @param[in] node The scheduler node to withdraw.
+   * @param[in] next_state The next thread scheduler state in case the node is
+   *   scheduled.
+   */
+  void ( *withdraw_node )(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *the_thread,
+    Scheduler_Node          *node,
+    Thread_Scheduler_state   next_state
+  );
+
+  /**
    * Ask for help operation.
    *
    * @param[in] scheduler The scheduler of the thread offering help.
@@ -322,6 +368,49 @@ Priority_Control _Scheduler_default_Map_priority(
    * @brief Does nothing.
    *
    * @param[in] scheduler Unused.
+   * @param[in] the_thread Unused.
+   * @param[in] node Unused.
+   *
+   * @retval false Always.
+   */
+  bool _Scheduler_default_Ask_for_help(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *the_thread,
+    Scheduler_Node          *node
+  );
+
+  /**
+   * @brief Does nothing.
+   *
+   * @param[in] scheduler Unused.
+   * @param[in] the_thread Unused.
+   * @param[in] node Unused.
+   */
+  void _Scheduler_default_Reconsider_help_request(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *the_thread,
+    Scheduler_Node          *node
+  );
+
+  /**
+   * @brief Does nothing.
+   *
+   * @param[in] scheduler Unused.
+   * @param[in] the_thread Unused.
+   * @param[in] node Unused.
+   * @param[in] next_state Unused.
+   */
+  void _Scheduler_default_Withdraw_node(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *the_thread,
+    Scheduler_Node          *node,
+    Thread_Scheduler_state   next_state
+  );
+
+  /**
+   * @brief Does nothing.
+   *
+   * @param[in] scheduler Unused.
    * @param[in] offers_help Unused.
    * @param[in] needs_help Unused.
    *
@@ -334,6 +423,9 @@ Priority_Control _Scheduler_default_Map_priority(
   );
 
   #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+    _Scheduler_default_Ask_for_help, \
+    _Scheduler_default_Reconsider_help_request, \
+    _Scheduler_default_Withdraw_node, \
     _Scheduler_default_Ask_for_help_X,
 #else
   #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index e33e8d7..dbb17a8 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -325,19 +325,54 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
 {
+#if defined(RTEMS_SMP)
+  Chain_Node              *node;
+  const Chain_Node        *tail;
+  Scheduler_Node          *scheduler_node;
   const Scheduler_Control *scheduler;
   ISR_lock_Context         lock_context;
 
-  scheduler = _Scheduler_Get( the_thread );
+  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
   _Scheduler_Acquire_critical( scheduler, &lock_context );
+  ( *scheduler->Operations.block )(
+    scheduler,
+    the_thread,
+    scheduler_node
+  );
+  _Scheduler_Release_critical( scheduler, &lock_context );
+
+  node = _Chain_Next( node );
+
+  while ( node != tail ) {
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
 
+    _Scheduler_Acquire_critical( scheduler, &lock_context );
+    ( *scheduler->Operations.withdraw_node )(
+      scheduler,
+      the_thread,
+      scheduler_node,
+      THREAD_SCHEDULER_BLOCKED
+    );
+    _Scheduler_Release_critical( scheduler, &lock_context );
+
+    node = _Chain_Next( node );
+  }
+#else
+  const Scheduler_Control *scheduler;
+
+  scheduler = _Scheduler_Get( the_thread );
   ( *scheduler->Operations.block )(
     scheduler,
     the_thread,
     _Thread_Scheduler_get_home_node( the_thread )
   );
-
-  _Scheduler_Release_critical( scheduler, &lock_context );
+#endif
 }
 
 /**
@@ -352,33 +387,65 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
 {
+#if defined(RTEMS_SMP)
+  Chain_Node              *node;
+  const Chain_Node        *tail;
+  Scheduler_Node          *scheduler_node;
   const Scheduler_Control *scheduler;
   ISR_lock_Context         lock_context;
-#if defined(RTEMS_SMP)
   Thread_Control          *needs_help;
-#endif
 
-#if defined(RTEMS_SMP)
-  _Thread_Scheduler_process_requests( the_thread );
-#endif
+  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
 
-  scheduler = _Scheduler_Get( the_thread );
   _Scheduler_Acquire_critical( scheduler, &lock_context );
+  needs_help = ( *scheduler->Operations.unblock )(
+    scheduler,
+    the_thread,
+    scheduler_node
+  );
+  _Scheduler_Ask_for_help_if_necessary( needs_help );
+  _Scheduler_Release_critical( scheduler, &lock_context );
 
-#if defined(RTEMS_SMP)
-  needs_help =
-#endif
+  if ( needs_help != the_thread ) {
+    return;
+  }
+
+  node = _Chain_Next( node );
+
+  while ( node != tail ) {
+    bool success;
+
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+    _Scheduler_Acquire_critical( scheduler, &lock_context );
+    success = ( *scheduler->Operations.ask_for_help )(
+      scheduler,
+      the_thread,
+      scheduler_node
+    );
+    _Scheduler_Release_critical( scheduler, &lock_context );
+
+    if ( success ) {
+      break;
+    }
+
+    node = _Chain_Next( node );
+  }
+#else
+  const Scheduler_Control *scheduler;
+
+  scheduler = _Scheduler_Get( the_thread );
   ( *scheduler->Operations.unblock )(
     scheduler,
     the_thread,
     _Thread_Scheduler_get_home_node( the_thread )
   );
-
-#if defined(RTEMS_SMP)
-  _Scheduler_Ask_for_help_if_necessary( needs_help );
 #endif
-
-  _Scheduler_Release_critical( scheduler, &lock_context );
 }
 
 /**
@@ -397,33 +464,45 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
  */
 RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
 {
-  const Scheduler_Control *own_scheduler;
-  ISR_lock_Context         lock_context;
 #if defined(RTEMS_SMP)
-  Thread_Control          *needs_help;
-#endif
+  Chain_Node       *node;
+  const Chain_Node *tail;
 
-#if defined(RTEMS_SMP)
   _Thread_Scheduler_process_requests( the_thread );
-#endif
 
-  own_scheduler = _Scheduler_Get_own( the_thread );
-  _Scheduler_Acquire_critical( own_scheduler, &lock_context );
+  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
 
-#if defined(RTEMS_SMP)
-  needs_help =
-#endif
-  ( *own_scheduler->Operations.update_priority )(
-    own_scheduler,
+  do {
+    Scheduler_Node          *scheduler_node;
+    const Scheduler_Control *scheduler;
+    ISR_lock_Context         lock_context;
+    Thread_Control          *needs_help;
+
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+    _Scheduler_Acquire_critical( scheduler, &lock_context );
+    needs_help = ( *scheduler->Operations.update_priority )(
+      scheduler,
+      the_thread,
+      scheduler_node
+    );
+    _Scheduler_Ask_for_help_if_necessary( needs_help );
+    _Scheduler_Release_critical( scheduler, &lock_context );
+
+    node = _Chain_Next( node );
+  } while ( node != tail );
+#else
+  const Scheduler_Control *scheduler;
+
+  scheduler = _Scheduler_Get( the_thread );
+  ( *scheduler->Operations.update_priority )(
+    scheduler,
     the_thread,
     _Thread_Scheduler_get_home_node( the_thread )
   );
-
-#if defined(RTEMS_SMP)
-  _Scheduler_Ask_for_help_if_necessary( needs_help );
 #endif
-
-  _Scheduler_Release_critical( own_scheduler, &lock_context );
 }
 
 /**
@@ -1008,7 +1087,13 @@ _Scheduler_Try_to_schedule_node(
   _Thread_Scheduler_acquire_critical( user, &lock_context );
 
   if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
-    _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
+    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+      _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
+      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
+    } else {
+      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
+    }
+
     _Thread_Scheduler_release_critical( user, &lock_context );
     return action;
   }
@@ -1129,6 +1214,7 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
 
   _Thread_Scheduler_acquire_critical( thread, &lock_context );
   thread_cpu = _Thread_Get_CPU( thread );
+  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
   _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
   _Thread_Scheduler_release_critical( thread, &lock_context );
 
diff --git a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
index 359c369..ab83435 100644
--- a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -57,6 +57,9 @@ extern "C" {
     _Scheduler_priority_affinity_SMP_Update_priority, \
     _Scheduler_default_Map_priority, \
     _Scheduler_default_Unmap_priority, \
+    _Scheduler_priority_affinity_SMP_Ask_for_help, \
+    _Scheduler_priority_affinity_SMP_Reconsider_help_request, \
+    _Scheduler_priority_affinity_SMP_Withdraw_node, \
     _Scheduler_priority_affinity_SMP_Ask_for_help_X, \
     _Scheduler_priority_affinity_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
@@ -128,6 +131,25 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Update_priority(
   Scheduler_Node          *node
 );
 
+bool _Scheduler_priority_affinity_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_priority_affinity_SMP_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+);
+
 Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help_X(
   const Scheduler_Control *scheduler,
   Thread_Control          *offers_help,
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmp.h b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
index 9042243..4b3e577 100644
--- a/cpukit/score/include/rtems/score/schedulerprioritysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
@@ -7,7 +7,7 @@
  */
 
 /*
- * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
  *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -86,6 +86,9 @@ typedef struct {
     _Scheduler_priority_SMP_Update_priority, \
     _Scheduler_default_Map_priority, \
     _Scheduler_default_Unmap_priority, \
+    _Scheduler_priority_SMP_Ask_for_help, \
+    _Scheduler_priority_SMP_Reconsider_help_request, \
+    _Scheduler_priority_SMP_Withdraw_node, \
     _Scheduler_priority_SMP_Ask_for_help_X, \
     _Scheduler_priority_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
@@ -123,6 +126,25 @@ Thread_Control *_Scheduler_priority_SMP_Update_priority(
   Scheduler_Node          *node
 );
 
+bool _Scheduler_priority_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_priority_SMP_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_priority_SMP_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+);
+
 Thread_Control *_Scheduler_priority_SMP_Ask_for_help_X(
   const Scheduler_Control *scheduler,
   Thread_Control          *needs_help,
diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h
index 2275237..2afe10b 100644
--- a/cpukit/score/include/rtems/score/schedulersimplesmp.h
+++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h
@@ -9,7 +9,7 @@
 /*
  *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
  *
- *  Copyright (c) 2013 embedded brains GmbH.
+ *  Copyright (c) 2013, 2016 embedded brains GmbH.
  *
  *  The license and distribution terms for this file may be
  *  found in the file LICENSE in this distribution or at
@@ -69,6 +69,9 @@ typedef struct {
     _Scheduler_simple_SMP_Update_priority, \
     _Scheduler_default_Map_priority, \
     _Scheduler_default_Unmap_priority, \
+    _Scheduler_simple_SMP_Ask_for_help, \
+    _Scheduler_simple_SMP_Reconsider_help_request, \
+    _Scheduler_simple_SMP_Withdraw_node, \
     _Scheduler_simple_SMP_Ask_for_help_X, \
     _Scheduler_simple_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
@@ -106,6 +109,25 @@ Thread_Control *_Scheduler_simple_SMP_Update_priority(
   Scheduler_Node          *node
 );
 
+bool _Scheduler_simple_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_simple_SMP_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_simple_SMP_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+);
+
 Thread_Control *_Scheduler_simple_SMP_Ask_for_help_X(
   const Scheduler_Control *scheduler,
   Thread_Control          *offers_help,
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 6b1ccc3..8f9bf2f 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -7,7 +7,7 @@
  */
 
 /*
- * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
  *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -27,6 +27,7 @@
 #include <rtems/score/assert.h>
 #include <rtems/score/chainimpl.h>
 #include <rtems/score/schedulersimpleimpl.h>
+#include <rtems/bspIo.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -300,6 +301,12 @@ typedef void ( *Scheduler_SMP_Move )(
   Scheduler_Node    *node_to_move
 );
 
+typedef bool ( *Scheduler_SMP_Ask_for_help )(
+  Scheduler_Context *context,
+  Thread_Control    *thread,
+  Scheduler_Node    *node
+);
+
 typedef void ( *Scheduler_SMP_Update )(
   Scheduler_Context *context,
   Scheduler_Node    *node_to_update,
@@ -559,8 +566,22 @@ static inline Thread_Control *_Scheduler_SMP_Preempt(
   _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
 
   _Thread_Scheduler_acquire_critical( victim_thread, &lock_context );
+
   victim_cpu = _Thread_Get_CPU( victim_thread );
-  _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
+
+  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
+    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
+
+    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
+      _Per_CPU_Acquire( victim_cpu );
+      _Chain_Append_unprotected(
+        &victim_cpu->Threads_in_need_for_help,
+        &victim_thread->Scheduler.Help_node
+      );
+      _Per_CPU_Release( victim_cpu );
+    }
+  }
+
   _Thread_Scheduler_release_critical( victim_thread, &lock_context );
 
   _Scheduler_SMP_Allocate_processor(
@@ -589,6 +610,9 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
   (void) order;
 
   _Assert( &lowest_scheduled->Node != _Chain_Tail( scheduled ) );
+  _Assert(
+    _Chain_Next( &lowest_scheduled->Node ) == _Chain_Tail( scheduled )
+  );
 
   return lowest_scheduled;
 }
@@ -901,7 +925,6 @@ static inline void _Scheduler_SMP_Block(
   Per_CPU_Control          *thread_cpu;
 
   node_state = _Scheduler_SMP_Node_state( node );
-  _Assert( node_state != SCHEDULER_SMP_NODE_BLOCKED );
 
   thread_cpu = _Scheduler_Block_node(
     context,
@@ -910,6 +933,7 @@ static inline void _Scheduler_SMP_Block(
     node_state == SCHEDULER_SMP_NODE_SCHEDULED,
     _Scheduler_SMP_Get_idle_thread
   );
+
   if ( thread_cpu != NULL ) {
     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
 
@@ -924,7 +948,7 @@ static inline void _Scheduler_SMP_Block(
         move_from_ready_to_scheduled,
         allocate_processor
       );
-    } else {
+    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
       ( *extract_from_ready )( context, node );
     }
   }
@@ -996,7 +1020,8 @@ static inline Thread_Control *_Scheduler_SMP_Update_priority(
   Scheduler_SMP_Enqueue            enqueue_fifo,
   Scheduler_SMP_Enqueue            enqueue_lifo,
   Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_fifo,
-  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
+  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo,
+  Scheduler_SMP_Ask_for_help       ask_for_help
 )
 {
   Thread_Control          *needs_help;
@@ -1007,7 +1032,10 @@ static inline Thread_Control *_Scheduler_SMP_Update_priority(
   new_priority = _Scheduler_Node_get_priority( node, &prepend_it );
 
   if ( new_priority == _Scheduler_SMP_Node_priority( node ) ) {
-    /* Nothing to do */
+    if ( _Thread_Is_ready( thread ) ) {
+      ( *ask_for_help )( context, thread, node );
+    }
+
     return NULL;
   }
 
@@ -1036,6 +1064,10 @@ static inline Thread_Control *_Scheduler_SMP_Update_priority(
   } else {
     ( *update )( context, node, new_priority );
 
+    if ( _Thread_Is_ready( thread ) ) {
+      ( *ask_for_help )( context, thread, node );
+    }
+
     needs_help = NULL;
   }
 
@@ -1149,6 +1181,135 @@ static inline void _Scheduler_SMP_Insert_scheduled_fifo(
   );
 }
 
+static inline bool _Scheduler_SMP_Ask_for_help(
+  Scheduler_Context                  *context,
+  Thread_Control                     *thread,
+  Scheduler_Node                     *node,
+  Chain_Node_order                    order,
+  Scheduler_SMP_Insert                insert_ready,
+  Scheduler_SMP_Insert                insert_scheduled,
+  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
+  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
+  Scheduler_SMP_Allocate_processor    allocate_processor
+)
+{
+  Scheduler_Node   *lowest_scheduled;
+  ISR_lock_Context  lock_context;
+  bool              success;
+
+  lowest_scheduled = ( *get_lowest_scheduled )( context, node, order );
+
+  _Thread_Scheduler_acquire_critical( thread, &lock_context );
+
+  if (
+    thread->Scheduler.state == THREAD_SCHEDULER_READY
+      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED
+  ) {
+    if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
+      _Thread_Scheduler_cancel_need_for_help(
+        thread,
+        _Thread_Get_CPU( thread )
+      );
+      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+      _Thread_Scheduler_release_critical( thread, &lock_context );
+
+      _Scheduler_SMP_Preempt(
+        context,
+        node,
+        lowest_scheduled,
+        allocate_processor
+      );
+
+      ( *insert_scheduled )( context, node );
+      ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+      _Scheduler_Release_idle_thread(
+        context,
+        lowest_scheduled,
+        _Scheduler_SMP_Release_idle_thread
+      );
+      success = true;
+    } else {
+      _Thread_Scheduler_release_critical( thread, &lock_context );
+      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+      ( *insert_ready )( context, node );
+      success = false;
+    }
+  } else {
+    _Thread_Scheduler_release_critical( thread, &lock_context );
+    success = false;
+  }
+
+  return success;
+}
+
+static inline void _Scheduler_SMP_Reconsider_help_request(
+  Scheduler_Context     *context,
+  Thread_Control        *thread,
+  Scheduler_Node        *node,
+  Scheduler_SMP_Extract  extract_from_ready
+)
+{
+  ISR_lock_Context lock_context;
+
+  _Thread_Scheduler_acquire_critical( thread, &lock_context );
+
+  if (
+    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
+      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
+  ) {
+    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+    ( *extract_from_ready )( context, node );
+  }
+
+  _Thread_Scheduler_release_critical( thread, &lock_context );
+}
+
+static inline void _Scheduler_SMP_Withdraw_node(
+  Scheduler_Context                *context,
+  Thread_Control                   *thread,
+  Scheduler_Node                   *node,
+  Thread_Scheduler_state            next_state,
+  Scheduler_SMP_Extract             extract_from_ready,
+  Scheduler_SMP_Get_highest_ready   get_highest_ready,
+  Scheduler_SMP_Move                move_from_ready_to_scheduled,
+  Scheduler_SMP_Allocate_processor  allocate_processor
+)
+{
+  ISR_lock_Context         lock_context;
+  Scheduler_SMP_Node_state node_state;
+
+  _Thread_Scheduler_acquire_critical( thread, &lock_context );
+
+  node_state = _Scheduler_SMP_Node_state( node );
+  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+
+  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+    Per_CPU_Control *thread_cpu;
+
+    thread_cpu = _Thread_Get_CPU( thread );
+    _Scheduler_Thread_change_state( thread, next_state );
+    _Thread_Scheduler_release_critical( thread, &lock_context );
+
+    _Scheduler_SMP_Extract_from_scheduled( node );
+    _Scheduler_SMP_Schedule_highest_ready(
+      context,
+      node,
+      thread_cpu,
+      extract_from_ready,
+      get_highest_ready,
+      move_from_ready_to_scheduled,
+      allocate_processor
+    );
+  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+    _Thread_Scheduler_release_critical( thread, &lock_context );
+    ( *extract_from_ready )( context, node );
+  } else {
+    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
+    _Thread_Scheduler_release_critical( thread, &lock_context );
+  }
+}
+
 /** @} */
 
 #ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/schedulerstrongapa.h b/cpukit/score/include/rtems/score/schedulerstrongapa.h
index fd6d6ec..b8a5f2f 100644
--- a/cpukit/score/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/score/include/rtems/score/schedulerstrongapa.h
@@ -86,6 +86,9 @@ typedef struct {
     _Scheduler_strong_APA_Update_priority, \
     _Scheduler_default_Map_priority, \
     _Scheduler_default_Unmap_priority, \
+    _Scheduler_strong_APA_Ask_for_help, \
+    _Scheduler_strong_APA_Reconsider_help_request, \
+    _Scheduler_strong_APA_Withdraw_node, \
     _Scheduler_strong_APA_Ask_for_help_X, \
     _Scheduler_strong_APA_Node_initialize, \
     _Scheduler_default_Node_destroy, \
@@ -123,6 +126,25 @@ Thread_Control *_Scheduler_strong_APA_Update_priority(
   Scheduler_Node          *node
 );
 
+bool _Scheduler_strong_APA_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_strong_APA_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+);
+
+void _Scheduler_strong_APA_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+);
+
 Thread_Control *_Scheduler_strong_APA_Ask_for_help_X(
   const Scheduler_Control *scheduler,
   Thread_Control          *needs_help,
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 236eaed..304f904 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -310,6 +310,21 @@ typedef struct {
   Chain_Control Scheduler_nodes;
 
   /**
+   * @brief Node for the Per_CPU_Control::Threads_in_need_for_help chain.
+   *
+   * This chain is protected by the Per_CPU_Control::Lock lock of the assigned
+   * processor.
+   */
+  Chain_Node Help_node;
+
+  /**
+   * @brief Count of scheduler nodes minus one.
+   *
+   * This field is protected by the thread state lock.
+   */
+  size_t helping_nodes;
+
+  /**
    * @brief List of pending scheduler node requests.
    *
    * This list is protected by the thread scheduler lock.
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 957fd55..19fad0b 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -81,6 +81,9 @@ extern Thread_Control *_Thread_Allocated_fp;
 #if defined(RTEMS_SMP)
 #define THREAD_RESOURCE_NODE_TO_THREAD( node ) \
   RTEMS_CONTAINER_OF( node, Thread_Control, Resource_node )
+
+#define THREAD_OF_SCHEDULER_HELP_NODE( node ) \
+  RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node )
 #endif
 
 typedef bool ( *Thread_Visitor )( Thread_Control *the_thread, void *arg );
@@ -993,6 +996,23 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
   return owns_resources;
 }
 
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_Scheduler_cancel_need_for_help(
+  Thread_Control  *the_thread,
+  Per_CPU_Control *cpu
+)
+{
+  _Per_CPU_Acquire( cpu );
+
+  if ( !_Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
+    _Chain_Extract_unprotected( &the_thread->Scheduler.Help_node );
+    _Chain_Set_off_chain( &the_thread->Scheduler.Help_node );
+  }
+
+  _Per_CPU_Release( cpu );
+}
+#endif
+
 RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
   const Thread_Control *the_thread
 )
@@ -1051,7 +1071,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Scheduler_release_critical(
   _ISR_lock_Release( &the_thread->Scheduler.Lock, lock_context );
 }
 
+#if defined(RTEMS_SMP)
+void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread );
+
 void _Thread_Scheduler_process_requests( Thread_Control *the_thread );
+#endif
 
 RTEMS_INLINE_ROUTINE void _Thread_Scheduler_add_request(
   Thread_Control         *the_thread,
diff --git a/cpukit/score/src/schedulerdefaultaskforhelp.c b/cpukit/score/src/schedulerdefaultaskforhelp.c
index dd60d44..9219a02 100644
--- a/cpukit/score/src/schedulerdefaultaskforhelp.c
+++ b/cpukit/score/src/schedulerdefaultaskforhelp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 embedded brains GmbH
+ * Copyright (c) 2014, 2016 embedded brains GmbH
  *
  * The license and distribution terms for this file may be
  * found in the file LICENSE in this distribution or at
@@ -12,6 +12,43 @@
 
 #include <rtems/score/scheduler.h>
 
+bool _Scheduler_default_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  (void) scheduler;
+  (void) the_thread;
+  (void) node;
+
+  return false;
+}
+
+void _Scheduler_default_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  (void) scheduler;
+  (void) the_thread;
+  (void) node;
+}
+
+void _Scheduler_default_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+)
+{
+  (void) scheduler;
+  (void) the_thread;
+  (void) node;
+  (void) next_state;
+}
+
 Thread_Control *_Scheduler_default_Ask_for_help_X(
   const Scheduler_Control *scheduler,
   Thread_Control          *offers_help,
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index f684b1a..466c399 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -499,6 +499,25 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
   );
 }
 
+static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
+  Scheduler_Context *context,
+  Thread_Control    *the_thread,
+  Scheduler_Node    *node
+)
+{
+  return _Scheduler_SMP_Ask_for_help(
+    context,
+    the_thread,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
+    _Scheduler_priority_SMP_Insert_ready_lifo,
+    _Scheduler_SMP_Insert_scheduled_lifo,
+    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+    _Scheduler_SMP_Get_lowest_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
+  );
+}
+
 /*
  * This is the public scheduler specific Change Priority operation.
  */
@@ -520,7 +539,8 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Update_priority(
     _Scheduler_priority_affinity_SMP_Enqueue_fifo,
     _Scheduler_priority_affinity_SMP_Enqueue_lifo,
     _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
-    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
+    _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo,
+    _Scheduler_priority_affinity_SMP_Do_ask_for_help
   );
 
   /*
@@ -531,6 +551,54 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Update_priority(
   return displaced;
 }
 
+bool _Scheduler_priority_affinity_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  return _Scheduler_priority_affinity_SMP_Do_ask_for_help( context, the_thread, node );
+}
+
+void _Scheduler_priority_affinity_SMP_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Reconsider_help_request(
+    context,
+    the_thread,
+    node,
+    _Scheduler_priority_SMP_Extract_from_ready
+  );
+}
+
+void _Scheduler_priority_affinity_SMP_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Withdraw_node(
+    context,
+    the_thread,
+    node,
+    next_state,
+    _Scheduler_priority_SMP_Extract_from_ready,
+    _Scheduler_priority_affinity_SMP_Get_highest_ready,
+    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
+  );
+}
+
 Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help_X(
   const Scheduler_Control *scheduler,
   Thread_Control          *offers_help,
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index f80c3b8..7b498d3 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -229,6 +229,25 @@ Thread_Control *_Scheduler_priority_SMP_Unblock(
   );
 }
 
+static bool _Scheduler_priority_SMP_Do_ask_for_help(
+  Scheduler_Context *context,
+  Thread_Control    *the_thread,
+  Scheduler_Node    *node
+)
+{
+  return _Scheduler_SMP_Ask_for_help(
+    context,
+    the_thread,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
+    _Scheduler_priority_SMP_Insert_ready_lifo,
+    _Scheduler_SMP_Insert_scheduled_lifo,
+    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+    _Scheduler_SMP_Get_lowest_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
+  );
+}
+
 Thread_Control *_Scheduler_priority_SMP_Update_priority(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
@@ -246,7 +265,56 @@ Thread_Control *_Scheduler_priority_SMP_Update_priority(
     _Scheduler_priority_SMP_Enqueue_fifo,
     _Scheduler_priority_SMP_Enqueue_lifo,
     _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
-    _Scheduler_priority_SMP_Enqueue_scheduled_lifo
+    _Scheduler_priority_SMP_Enqueue_scheduled_lifo,
+    _Scheduler_priority_SMP_Do_ask_for_help
+  );
+}
+
+bool _Scheduler_priority_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  return _Scheduler_priority_SMP_Do_ask_for_help( context, the_thread, node );
+}
+
+void _Scheduler_priority_SMP_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Reconsider_help_request(
+    context,
+    the_thread,
+    node,
+    _Scheduler_priority_SMP_Extract_from_ready
+  );
+}
+
+void _Scheduler_priority_SMP_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Withdraw_node(
+    context,
+    the_thread,
+    node,
+    next_state,
+    _Scheduler_priority_SMP_Extract_from_ready,
+    _Scheduler_priority_SMP_Get_highest_ready,
+    _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 28410ea..d8f576f 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -7,7 +7,7 @@
  */
 
 /*
- * Copyright (c) 2013-2014 embedded brains GmbH.
+ * Copyright (c) 2013, 2016 embedded brains GmbH.
  *
  * The license and distribution terms for this file may be
  * found in the file LICENSE in this distribution or at
@@ -49,10 +49,10 @@ void _Scheduler_simple_SMP_Node_initialize(
   Priority_Control         priority
 )
 {
-  Scheduler_SMP_Node *the_node;
+  Scheduler_SMP_Node *smp_node;
 
-  the_node = _Scheduler_SMP_Node_downcast( node );
-  _Scheduler_SMP_Node_initialize( scheduler, the_node, the_thread, priority );
+  smp_node = _Scheduler_SMP_Node_downcast( node );
+  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
 }
 
 static void _Scheduler_simple_SMP_Do_update(
@@ -61,12 +61,12 @@ static void _Scheduler_simple_SMP_Do_update(
   Priority_Control   new_priority
 )
 {
-  Scheduler_SMP_Node *the_node;
+  Scheduler_SMP_Node *smp_node;
 
   (void) context;
 
-  the_node = _Scheduler_SMP_Node_downcast( node );
-  _Scheduler_SMP_Node_update_priority( the_node, new_priority );
+  smp_node = _Scheduler_SMP_Node_downcast( node );
+  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
 }
 
 static Scheduler_Node *_Scheduler_simple_SMP_Get_highest_ready(
@@ -296,6 +296,25 @@ Thread_Control *_Scheduler_simple_SMP_Unblock(
   );
 }
 
+static bool _Scheduler_simple_SMP_Do_ask_for_help(
+  Scheduler_Context *context,
+  Thread_Control    *the_thread,
+  Scheduler_Node    *node
+)
+{
+  return _Scheduler_SMP_Ask_for_help(
+    context,
+    the_thread,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
+    _Scheduler_simple_SMP_Insert_ready_lifo,
+    _Scheduler_SMP_Insert_scheduled_lifo,
+    _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
+    _Scheduler_SMP_Get_lowest_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
+  );
+}
+
 Thread_Control *_Scheduler_simple_SMP_Update_priority(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
@@ -313,7 +332,56 @@ Thread_Control *_Scheduler_simple_SMP_Update_priority(
     _Scheduler_simple_SMP_Enqueue_fifo,
     _Scheduler_simple_SMP_Enqueue_lifo,
     _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
-    _Scheduler_simple_SMP_Enqueue_scheduled_lifo
+    _Scheduler_simple_SMP_Enqueue_scheduled_lifo,
+    _Scheduler_simple_SMP_Do_ask_for_help
+  );
+}
+
+bool _Scheduler_simple_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  return _Scheduler_simple_SMP_Do_ask_for_help( context, the_thread, node );
+}
+
+void _Scheduler_simple_SMP_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Reconsider_help_request(
+    context,
+    the_thread,
+    node,
+    _Scheduler_simple_SMP_Extract_from_ready
+  );
+}
+
+void _Scheduler_simple_SMP_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Withdraw_node(
+    context,
+    the_thread,
+    node,
+    next_state,
+    _Scheduler_simple_SMP_Extract_from_ready,
+    _Scheduler_simple_SMP_Get_highest_ready,
+    _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
diff --git a/cpukit/score/src/schedulersmpdebug.c b/cpukit/score/src/schedulersmpdebug.c
index 4a45d20..1ccebba 100644
--- a/cpukit/score/src/schedulersmpdebug.c
+++ b/cpukit/score/src/schedulersmpdebug.c
@@ -35,8 +35,8 @@
  */
 const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ] = {
   /* FROM / TO       BLOCKED SCHEDULED READY */
-  /* BLOCKED    */ { false,  true,     true },
-  /* SCHEDULED  */ { true,   false,    true },
+  /* BLOCKED    */ { true,   true,     true },
+  /* SCHEDULED  */ { true,   true,     true },
   /* READY      */ { true,   true,     true }
 };
 
@@ -46,7 +46,7 @@ const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ] = {
  */
 const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
   /* FROM / TO       BLOCKED SCHEDULED READY */
-  /* BLOCKED    */ { false,  true,     true },
+  /* BLOCKED    */ { true,   true,     true },
   /* SCHEDULED  */ { true,   false,    true },
   /* READY      */ { true,   true,     false }
 };
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index 7dd65c0..dca5e26 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -355,6 +355,25 @@ Thread_Control *_Scheduler_strong_APA_Unblock(
   );
 }
 
+static bool _Scheduler_strong_APA_Do_ask_for_help(
+  Scheduler_Context *context,
+  Thread_Control    *the_thread,
+  Scheduler_Node    *node
+)
+{
+  return _Scheduler_SMP_Ask_for_help(
+    context,
+    the_thread,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
+    _Scheduler_strong_APA_Insert_ready_lifo,
+    _Scheduler_SMP_Insert_scheduled_lifo,
+    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
+    _Scheduler_SMP_Get_lowest_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
+  );
+}
+
 Thread_Control *_Scheduler_strong_APA_Update_priority(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread,
@@ -372,7 +391,56 @@ Thread_Control *_Scheduler_strong_APA_Update_priority(
     _Scheduler_strong_APA_Enqueue_fifo,
     _Scheduler_strong_APA_Enqueue_lifo,
     _Scheduler_strong_APA_Enqueue_scheduled_fifo,
-    _Scheduler_strong_APA_Enqueue_scheduled_lifo
+    _Scheduler_strong_APA_Enqueue_scheduled_lifo,
+    _Scheduler_strong_APA_Do_ask_for_help
+  );
+}
+
+bool _Scheduler_strong_APA_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node );
+}
+
+void _Scheduler_strong_APA_Reconsider_help_request(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Reconsider_help_request(
+    context,
+    the_thread,
+    node,
+    _Scheduler_strong_APA_Extract_from_ready
+  );
+}
+
+void _Scheduler_strong_APA_Withdraw_node(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *the_thread,
+  Scheduler_Node          *node,
+  Thread_Scheduler_state   next_state
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Withdraw_node(
+    context,
+    the_thread,
+    node,
+    next_state,
+    _Scheduler_strong_APA_Extract_from_ready,
+    _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+    _Scheduler_SMP_Allocate_processor_lazy
   );
 }
 
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index b8d9cb6..f383f6d 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -89,6 +89,7 @@ void _SMP_Handler_initialize( void )
     _ISR_lock_Initialize( &cpu->Watchdog.Lock, "Watchdog" );
     _SMP_ticket_lock_Initialize( &cpu->Lock );
     _SMP_lock_Stats_initialize( &cpu->Lock_stats, "Per-CPU" );
+    _Chain_Initialize_empty( &cpu->Threads_in_need_for_help );
   }
 
   /*
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index 08e96bc..36d2910 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -23,6 +23,7 @@
 #include <rtems/score/threaddispatch.h>
 #include <rtems/score/assert.h>
 #include <rtems/score/isr.h>
+#include <rtems/score/schedulerimpl.h>
 #include <rtems/score/threadimpl.h>
 #include <rtems/score/todimpl.h>
 #include <rtems/score/userextimpl.h>
@@ -39,6 +40,77 @@ Thread_Control *_Thread_Allocated_fp;
 
 CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );
 
+#if defined(RTEMS_SMP)
+static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
+{
+  return executing->Scheduler.helping_nodes > 0
+    && _Thread_Is_ready( executing );
+}
+#endif
+
+static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
+{
+#if defined(RTEMS_SMP)
+  _Per_CPU_Acquire( cpu_self );
+
+  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
+    Chain_Node       *node;
+    Thread_Control   *the_thread;
+    ISR_lock_Context  lock_context;
+
+    node = _Chain_Get_first_unprotected( &cpu_self->Threads_in_need_for_help );
+    _Chain_Set_off_chain( node );
+    the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
+
+    _Per_CPU_Release( cpu_self );
+    _Thread_State_acquire( the_thread, &lock_context );
+    _Thread_Scheduler_ask_for_help( the_thread );
+    _Thread_State_release( the_thread, &lock_context );
+    _Per_CPU_Acquire( cpu_self );
+  }
+
+  _Per_CPU_Release( cpu_self );
+#else
+  (void) cpu_self;
+#endif
+}
+
+static void _Thread_Post_switch_cleanup( Thread_Control *executing )
+{
+#if defined(RTEMS_SMP)
+  Chain_Node       *node;
+  const Chain_Node *tail;
+
+  if ( !_Thread_Can_ask_for_help( executing ) ) {
+    return;
+  }
+
+  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
+  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );
+
+  do {
+    Scheduler_Node          *scheduler_node;
+    const Scheduler_Control *scheduler;
+    ISR_lock_Context         lock_context;
+
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+    _Scheduler_Acquire_critical( scheduler, &lock_context );
+    ( *scheduler->Operations.reconsider_help_request )(
+      scheduler,
+      executing,
+      scheduler_node
+    );
+    _Scheduler_Release_critical( scheduler, &lock_context );
+
+    node = _Chain_Next( node );
+  } while ( node != tail );
+#else
+  (void) executing;
+#endif
+}
+
 static Thread_Action *_Thread_Get_post_switch_action(
   Thread_Control *executing
 )
@@ -54,6 +126,7 @@ static void _Thread_Run_post_switch_actions( Thread_Control *executing )
   Thread_Action    *action;
 
   _Thread_State_acquire( executing, &lock_context );
+  _Thread_Post_switch_cleanup( executing );
   action = _Thread_Get_post_switch_action( executing );
 
   while ( action != NULL ) {
@@ -77,7 +150,10 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
   executing = cpu_self->executing;
 
   do {
-    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
+    Thread_Control *heir;
+
+    _Thread_Preemption_intervention( cpu_self );
+    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
 
     /*
      *  When the heir and executing are the same, then we are being
diff --git a/cpukit/score/src/threadscheduler.c b/cpukit/score/src/threadscheduler.c
index b73598e..b0cf571 100644
--- a/cpukit/score/src/threadscheduler.c
+++ b/cpukit/score/src/threadscheduler.c
@@ -20,6 +20,39 @@
 #include <rtems/score/schedulerimpl.h>
 
 #if defined(RTEMS_SMP)
+void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread )
+{
+  Chain_Node       *node;
+  const Chain_Node *tail;
+
+  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
+  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+  do {
+    Scheduler_Node          *scheduler_node;
+    const Scheduler_Control *scheduler;
+    ISR_lock_Context         lock_context;
+    bool                     success;
+
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+    _Scheduler_Acquire_critical( scheduler, &lock_context );
+    success = ( *scheduler->Operations.ask_for_help )(
+      scheduler,
+      the_thread,
+      scheduler_node
+    );
+    _Scheduler_Release_critical( scheduler, &lock_context );
+
+    if ( success ) {
+      break;
+    }
+
+    node = _Chain_Next( node );
+  } while ( node != tail );
+}
+
 void _Thread_Scheduler_process_requests( Thread_Control *the_thread )
 {
   ISR_lock_Context  lock_context;
@@ -48,11 +81,13 @@ void _Thread_Scheduler_process_requests( Thread_Control *the_thread )
 #endif
 
       if ( request == SCHEDULER_NODE_REQUEST_ADD ) {
+        ++the_thread->Scheduler.helping_nodes;
         _Chain_Append_unprotected(
           &the_thread->Scheduler.Scheduler_nodes,
           &scheduler_node->Thread.Scheduler_node.Chain
         );
       } else if ( request == SCHEDULER_NODE_REQUEST_REMOVE ) {
+        --the_thread->Scheduler.helping_nodes;
         _Chain_Extract_unprotected(
           &scheduler_node->Thread.Scheduler_node.Chain
         );
@@ -70,11 +105,25 @@ void _Thread_Scheduler_process_requests( Thread_Control *the_thread )
     scheduler_node = remove;
 
     while ( scheduler_node != NULL ) {
+      const Scheduler_Control *scheduler;
+      ISR_lock_Context         lock_context;
+
       next = scheduler_node->Thread.Scheduler_node.next;
 #if defined(RTEMS_DEBUG)
       scheduler_node->Thread.Scheduler_node.next = NULL;
 #endif
 
+      scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+      _Scheduler_Acquire_critical( scheduler, &lock_context );
+      ( *scheduler->Operations.withdraw_node )(
+        scheduler,
+        the_thread,
+        scheduler_node,
+        THREAD_SCHEDULER_READY
+      );
+      _Scheduler_Release_critical( scheduler, &lock_context );
+
       scheduler_node = next;
     }
   } else {
diff --git a/testsuites/smptests/smpmutex01/init.c b/testsuites/smptests/smpmutex01/init.c
index f595755..8e499b4 100644
--- a/testsuites/smptests/smpmutex01/init.c
+++ b/testsuites/smptests/smpmutex01/init.c
@@ -42,7 +42,11 @@ typedef enum {
   REQ_MTX_2_OBTAIN = RTEMS_EVENT_5,
   REQ_MTX_2_RELEASE = RTEMS_EVENT_6,
   REQ_SEM_OBTAIN_RELEASE = RTEMS_EVENT_7,
-  REQ_SEM_RELEASE = RTEMS_EVENT_8
+  REQ_SEM_RELEASE = RTEMS_EVENT_8,
+  REQ_SET_DONE = RTEMS_EVENT_9,
+  REQ_WAIT_FOR_DONE = RTEMS_EVENT_10,
+  REQ_SEND_EVENT_2 = RTEMS_EVENT_11,
+  REQ_SEND_EVENT_3 = RTEMS_EVENT_12
 } request_id;
 
 typedef enum {
@@ -63,12 +67,22 @@ typedef struct {
   rtems_id mtx_2;
   rtems_id sem;
   rtems_id tasks[TASK_COUNT];
+  Atomic_Uint done;
+  task_id id_2;
+  rtems_event_set events_2;
+  task_id id_3;
+  rtems_event_set events_3;
   int generation[TASK_COUNT];
   int expected_generation[TASK_COUNT];
 } test_context;
 
 static test_context test_instance;
 
+static void assert_cpu(uint32_t expected_cpu)
+{
+  rtems_test_assert(rtems_get_current_processor() == expected_cpu);
+}
+
 static void test_task_get_priority_not_defined(test_context *ctx)
 {
   rtems_status_code sc;
@@ -123,6 +137,55 @@ static void send_event(test_context *ctx, task_id id, rtems_event_set events)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
 
+static void set_event_2(
+  test_context *ctx,
+  task_id id_2,
+  rtems_event_set events_2
+)
+{
+  ctx->id_2 = id_2;
+  ctx->events_2 = events_2;
+}
+
+static void set_event_3(
+  test_context *ctx,
+  task_id id_3,
+  rtems_event_set events_3
+)
+{
+  ctx->id_3 = id_3;
+  ctx->events_3 = events_3;
+}
+
+static void clear_done(test_context *ctx)
+{
+  _Atomic_Store_uint(&ctx->done, 0, ATOMIC_ORDER_RELAXED);
+}
+
+static void set_done(test_context *ctx)
+{
+  _Atomic_Store_uint(&ctx->done, 1, ATOMIC_ORDER_RELEASE);
+}
+
+static bool is_done(test_context *ctx)
+{
+  return _Atomic_Load_uint(&ctx->done, ATOMIC_ORDER_ACQUIRE) != 0;
+}
+
+static void wait_for_done(test_context *ctx)
+{
+  while (!is_done(ctx)) {
+    /* Wait */
+  }
+}
+
+static void request_pre_emption(test_context *ctx, task_id id)
+{
+  clear_done(ctx);
+  send_event(ctx, id, REQ_SET_DONE);
+  wait_for_done(ctx);
+}
+
 static rtems_event_set wait_for_events(void)
 {
   rtems_event_set events;
@@ -157,7 +220,16 @@ static void sync_with_helper(test_context *ctx)
 static void request(test_context *ctx, task_id id, request_id req)
 {
   send_event(ctx, id, req);
-  sync_with_helper(ctx);
+  clear_done(ctx);
+
+  if (rtems_get_current_processor() == 0) {
+    id = H_B;
+  } else {
+    id = H_A;
+  }
+
+  send_event(ctx, id, REQ_SET_DONE);
+  wait_for_done(ctx);
 }
 
 static void obtain(test_context *ctx)
@@ -241,6 +313,14 @@ static void check_generations(test_context *ctx, task_id a, task_id b)
   }
 }
 
+static void set_prio(test_context *ctx, task_id id, rtems_task_priority prio)
+{
+  rtems_status_code sc;
+
+  sc = rtems_task_set_priority(ctx->tasks[id], prio, &prio);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
 static void assert_prio(
   test_context *ctx,
   task_id id,
@@ -303,6 +383,10 @@ static void helper(rtems_task_argument arg)
     if ((events & REQ_SEM_RELEASE) != 0) {
       sem_release(ctx);
     }
+
+    if ((events & REQ_SET_DONE) != 0) {
+      set_done(ctx);
+    }
   }
 }
 
@@ -344,6 +428,22 @@ static void worker(rtems_task_argument arg)
       ++ctx->generation[id];
       sem_release(ctx);
     }
+
+    if ((events & REQ_SEND_EVENT_2) != 0) {
+      send_event(ctx, ctx->id_2, ctx->events_2);
+    }
+
+    if ((events & REQ_SEND_EVENT_3) != 0) {
+      send_event(ctx, ctx->id_3, ctx->events_3);
+    }
+
+    if ((events & REQ_SET_DONE) != 0) {
+      set_done(ctx);
+    }
+
+    if ((events & REQ_WAIT_FOR_DONE) != 0) {
+      wait_for_done(ctx);
+    }
   }
 }
 
@@ -672,6 +772,92 @@ static void test_dequeue_order_two_scheduler_instances(test_context *ctx)
   check_generations(ctx, B_5_1, NONE);
 }
 
+static void test_omip_pre_emption(test_context *ctx)
+{
+  assert_cpu(0);
+  obtain(ctx);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
+
+  request(ctx, B_5_0, REQ_MTX_OBTAIN);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
+  check_generations(ctx, NONE, NONE);
+
+  request_pre_emption(ctx, A_1);
+  assert_cpu(1);
+
+  request_pre_emption(ctx, B_4);
+  assert_cpu(0);
+
+  request_pre_emption(ctx, A_1);
+  assert_cpu(1);
+
+  release(ctx);
+  assert_cpu(0);
+  sync_with_helper(ctx);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
+  check_generations(ctx, B_5_0, NONE);
+
+  request(ctx, B_5_0, REQ_MTX_RELEASE);
+  assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
+  assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
+  check_generations(ctx, B_5_0, NONE);
+}
+
+static void test_omip_rescue(test_context *ctx)
+{
+  assert_cpu(0);
+  obtain(ctx);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
+
+  clear_done(ctx);
+  set_event_3(ctx, H_B, REQ_SET_DONE);
+  set_event_2(ctx, B_5_0, REQ_SEND_EVENT_3 | REQ_MTX_OBTAIN);
+  send_event(ctx, A_1, REQ_SEND_EVENT_2 | REQ_WAIT_FOR_DONE);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
+  assert_cpu(1);
+
+  release(ctx);
+  assert_cpu(0);
+  sync_with_helper(ctx);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
+  check_generations(ctx, B_5_0, NONE);
+
+  request(ctx, B_5_0, REQ_MTX_RELEASE);
+  assert_prio_by_scheduler(ctx, B_5_0, SCHED_A, PRIO_NONE);
+  assert_prio_by_scheduler(ctx, B_5_0, SCHED_B, 5);
+  check_generations(ctx, B_5_0, NONE);
+}
+
+static void test_omip_timeout(test_context *ctx)
+{
+  assert_cpu(0);
+  obtain(ctx);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
+
+  clear_done(ctx);
+  set_event_3(ctx, H_B, REQ_SET_DONE);
+  set_event_2(ctx, B_5_0, REQ_SEND_EVENT_3 | REQ_MTX_OBTAIN_TIMEOUT);
+  send_event(ctx, A_1, REQ_SEND_EVENT_2 | REQ_WAIT_FOR_DONE);
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, 5);
+  assert_cpu(1);
+
+  wait();
+  assert_prio_by_scheduler(ctx, M, SCHED_A, 3);
+  assert_prio_by_scheduler(ctx, M, SCHED_B, PRIO_NONE);
+  check_generations(ctx, B_5_0, NONE);
+  assert_cpu(0);
+
+  release(ctx);
+}
+
 static void test(void)
 {
   test_context *ctx = &test_instance;
@@ -685,6 +871,9 @@ static void test(void)
   test_simple_inheritance_two_scheduler_instances(ctx);
   test_nested_inheritance_two_scheduler_instances(ctx);
   test_dequeue_order_two_scheduler_instances(ctx);
+  test_omip_pre_emption(ctx);
+  test_omip_rescue(ctx);
+  test_omip_timeout(ctx);
 }
 
 static void Init(rtems_task_argument arg)




More information about the vc mailing list