[PATCH 1/2] score: Implement scheduler helping protocol

Sebastian Huber sebastian.huber at embedded-brains.de
Tue Jul 8 15:05:52 UTC 2014


The following scheduler operations return a thread in need of help:

    - unblock,
    - change priority, and
    - yield.

A thread in need of help is a thread that encounters a scheduler state
change from scheduled to ready, or a thread that cannot be scheduled
during an unblock operation.  Such a thread can ask the threads that
depend on resources it owns for help.
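
A minimal sketch (not part of this patch) of how a generic operation
wrapper on SMP configurations can feed such a return value into the
helping protocol; the wrapper name is hypothetical, the called routines
are the ones introduced or used by this patch:

    RTEMS_INLINE_ROUTINE void _Sketch_Scheduler_Unblock(
      Thread_Control *the_thread
    )
    {
      const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
      Thread_Control *needs_help;

      /* The operation returns a thread in need of help or NULL */
      needs_help = ( *scheduler->Operations.unblock )( scheduler, the_thread );

      /* Ask the threads depending on resources owned by this thread for
       * help, in case help is necessary at all */
      _Scheduler_Ask_for_help_if_necessary( needs_help );
    }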

Add a new ask-for-help scheduler operation.  This operation is used by
_Scheduler_Ask_for_help() to help the threads in need of help returned
by the operations mentioned above.  It is also used by
_Scheduler_Thread_change_resource_root() when the root of a resource
sub-tree changes, for example due to an ownership change of a resource.
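
For example, the MrsP changes in this patch route a resource ownership
change through this function: while the executing thread waits for
ownership, its resource sub-tree is rooted at the current owner's root,
and a thread that becomes the new owner is made its own root again:

    _Scheduler_Thread_change_resource_root(
      executing,
      _Thread_Resource_node_to_thread( _Resource_Node_get_root( owner ) )
    );

    /* ... and on the ownership change in _MRSP_Release() ... */
    _Scheduler_Thread_change_resource_root( new_owner, new_owner );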

If it is not possible to schedule a thread in need of help, then the
corresponding scheduler node is placed into the set of ready scheduler
nodes of the scheduler instance.  Once this scheduler node changes its
state from ready to scheduled, it may be used to schedule the thread in
need of help.
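
Restated as a sketch, the contract of the new operation uses the names
from the documentation added below; the caller context is illustrative
only:

    next_needs_help = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      offers_help,   /* the thread offering help */
      needs_help     /* the thread needing help */
    );

    /*
     * next_needs_help == NULL:       needs_help is scheduled and no other
     *                                thread needs help.
     * next_needs_help == needs_help: it was not possible to schedule
     *                                needs_help, so the search for help
     *                                continues.
     * otherwise:                     needs_help is scheduled, but another
     *                                thread was displaced and a new search
     *                                for help starts for it.
     */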
---
 cpukit/score/Makefile.am                           |    4 +-
 cpukit/score/include/rtems/score/mrspimpl.h        |   30 +-
 cpukit/score/include/rtems/score/scheduler.h       |   47 ++
 cpukit/score/include/rtems/score/schedulercbs.h    |    1 +
 cpukit/score/include/rtems/score/scheduleredf.h    |    1 +
 cpukit/score/include/rtems/score/schedulerimpl.h   |  578 +++++++++++++++++++-
 .../score/include/rtems/score/schedulerpriority.h  |    1 +
 .../rtems/score/schedulerpriorityaffinitysmp.h     |    7 +
 .../include/rtems/score/schedulerprioritysmp.h     |    7 +
 .../include/rtems/score/schedulerprioritysmpimpl.h |   22 +
 cpukit/score/include/rtems/score/schedulersimple.h |    1 +
 .../score/include/rtems/score/schedulersimplesmp.h |    7 +
 cpukit/score/include/rtems/score/schedulersmp.h    |    5 +
 .../score/include/rtems/score/schedulersmpimpl.h   |  369 ++++++++++---
 cpukit/score/include/rtems/score/thread.h          |   53 ++
 cpukit/score/include/rtems/score/threadimpl.h      |   10 +
 cpukit/score/src/schedulerchangeroot.c             |   85 +++
 cpukit/score/src/schedulerdefaultaskforhelp.c      |   26 +
 cpukit/score/src/schedulerpriorityaffinitysmp.c    |   38 ++-
 cpukit/score/src/schedulerprioritysmp.c            |   31 +-
 cpukit/score/src/schedulersimplesmp.c              |   53 ++-
 cpukit/score/src/schedulersmpdebug.c               |   54 ++
 cpukit/score/src/schedulersmpstartidle.c           |    1 +
 cpukit/score/src/schedulersmpvalidstatechanges.c   |   38 --
 cpukit/score/src/threadinitialize.c                |    3 +
 doc/user/smp.t                                     |   73 +++
 26 files changed, 1391 insertions(+), 154 deletions(-)
 create mode 100644 cpukit/score/src/schedulerchangeroot.c
 create mode 100644 cpukit/score/src/schedulerdefaultaskforhelp.c
 create mode 100644 cpukit/score/src/schedulersmpdebug.c
 delete mode 100644 cpukit/score/src/schedulersmpvalidstatechanges.c

diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 6caefb5..e4c373c 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -133,13 +133,15 @@ endif
 if HAS_SMP
 libscore_a_SOURCES += src/percpustatewait.c
 libscore_a_SOURCES += src/profilingsmplock.c
-libscore_a_SOURCES += src/schedulersmpvalidstatechanges.c
+libscore_a_SOURCES += src/schedulerchangeroot.c
 libscore_a_SOURCES += src/schedulerpriorityaffinitysmp.c
 libscore_a_SOURCES += src/schedulerprioritysmp.c
 libscore_a_SOURCES += src/schedulersimplesmp.c
+libscore_a_SOURCES += src/schedulersmpdebug.c
 libscore_a_SOURCES += src/smp.c
 libscore_a_SOURCES += src/cpuset.c
 libscore_a_SOURCES += src/cpusetprintsupport.c
+libscore_a_SOURCES += src/schedulerdefaultaskforhelp.c
 libscore_a_SOURCES += src/schedulerdefaultgetaffinity.c
 libscore_a_SOURCES += src/schedulerdefaultsetaffinity.c
 libscore_a_SOURCES += src/schedulersmpstartidle.c
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index 6aa45a8..4aaa50b 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -42,25 +42,6 @@ extern "C" {
 
 #define MRSP_RIVAL_STATE_TIMEOUT 0x2U
 
-RTEMS_INLINE_ROUTINE bool _MRSP_Set_root_visitor(
-  Resource_Node *node,
-  void *arg
-)
-{
-  _Resource_Node_set_root( node, arg );
-
-  return false;
-}
-
-RTEMS_INLINE_ROUTINE void _MRSP_Set_root(
-  Resource_Node *top,
-  Resource_Node *root
-)
-{
-  _Resource_Node_set_root( top, root );
-  _Resource_Iterate( top, _MRSP_Set_root_visitor, root );
-}
-
 RTEMS_INLINE_ROUTINE void _MRSP_Elevate_priority(
   MRSP_Control     *mrsp,
   Thread_Control   *new_owner,
@@ -197,9 +178,10 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
   _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
   previous_help_state =
     _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
-  _MRSP_Set_root(
-    &executing->Resource_node,
-    _Resource_Node_get_root( owner )
+
+  _Scheduler_Thread_change_resource_root(
+    executing,
+    _Thread_Resource_node_to_thread( _Resource_Node_get_root( owner ) )
   );
 
   if ( timeout > 0 ) {
@@ -241,7 +223,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
     _Resource_Node_extract( &executing->Resource_node );
     _Resource_Node_set_dependency( &executing->Resource_node, NULL );
     _Scheduler_Thread_change_help_state( executing, previous_help_state );
-    _MRSP_Set_root( &executing->Resource_node, &executing->Resource_node );
+    _Scheduler_Thread_change_resource_root( executing, executing );
     _MRSP_Restore_priority( mrsp, executing, initial_priority );
 
     status = MRSP_TIMEOUT;
@@ -334,7 +316,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
     _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
     _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
     _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
-    _MRSP_Set_root( &new_owner->Resource_node, &new_owner->Resource_node );
+    _Scheduler_Thread_change_resource_root( new_owner, new_owner );
     _MRSP_Add_state( rival, MRSP_RIVAL_STATE_NEW_OWNER );
   }
 
diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index 993ae55..8da988f 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -90,6 +90,31 @@ typedef struct {
     bool
   );
 
+#if defined(RTEMS_SMP)
+  /**
+   * Ask for help operation.
+   *
+   * @param[in] scheduler The scheduler of the thread offering help.
+   * @param[in] offers_help The thread offering help.
+   * @param[in] needs_help The thread needing help.
+   *
+   * @retval needs_help It was not possible to schedule the thread needing
+   *   help, so it is returned to continue the search for help.
+   * @retval next_needs_help It was possible to schedule the thread needing
+   *   help, but this displaced another thread eligible to ask for help.  So
+   *   this thread is returned to start a new search for help.
+   * @retval NULL It was possible to schedule the thread needing help, and no
+   *   other thread needs help as a result.
+   *
+   * @see _Scheduler_Ask_for_help().
+   */
+  Thread_Control *( *ask_for_help )(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *offers_help,
+    Thread_Control          *needs_help
+  );
+#endif
+
   /** @see _Scheduler_Node_initialize() */
   void ( *node_initialize )( const Scheduler_Control *, Thread_Control * );
 
@@ -375,6 +400,28 @@ extern const Scheduler_Control _Scheduler_Table[];
   extern const Scheduler_Assignment _Scheduler_Assignments[];
 #endif
 
+#if defined(RTEMS_SMP)
+  /**
+   * @brief Does nothing.
+   *
+   * @param[in] scheduler Unused.
+   * @param[in] offers_help Unused.
+   * @param[in] needs_help Unused.
+   *
+   * @retval NULL Always.
+   */
+  Thread_Control *_Scheduler_default_Ask_for_help(
+    const Scheduler_Control *scheduler,
+    Thread_Control          *offers_help,
+    Thread_Control          *needs_help
+  );
+
+  #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
+    _Scheduler_default_Ask_for_help,
+#else
+  #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP
+#endif
+
 /**
  * @brief Does nothing.
  *
diff --git a/cpukit/score/include/rtems/score/schedulercbs.h b/cpukit/score/include/rtems/score/schedulercbs.h
index 008cc91..b3381e0 100644
--- a/cpukit/score/include/rtems/score/schedulercbs.h
+++ b/cpukit/score/include/rtems/score/schedulercbs.h
@@ -53,6 +53,7 @@ extern "C" {
     _Scheduler_EDF_Block,            /* block entry point */ \
     _Scheduler_CBS_Unblock,          /* unblock entry point */ \
     _Scheduler_EDF_Change_priority,  /* change priority entry point */ \
+    SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
     _Scheduler_CBS_Node_initialize,  /* node initialize entry point */ \
     _Scheduler_default_Node_destroy, /* node destroy entry point */ \
     _Scheduler_EDF_Update_priority,  /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/scheduleredf.h b/cpukit/score/include/rtems/score/scheduleredf.h
index 1dda767..e695691 100644
--- a/cpukit/score/include/rtems/score/scheduleredf.h
+++ b/cpukit/score/include/rtems/score/scheduleredf.h
@@ -46,6 +46,7 @@ extern "C" {
     _Scheduler_EDF_Block,            /* block entry point */ \
     _Scheduler_EDF_Unblock,          /* unblock entry point */ \
     _Scheduler_EDF_Change_priority,  /* change priority entry point */ \
+    SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
     _Scheduler_EDF_Node_initialize,  /* node initialize entry point */ \
     _Scheduler_default_Node_destroy, /* node destroy entry point */ \
     _Scheduler_EDF_Update_priority,  /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 5e4e509..face5aa 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -10,6 +10,7 @@
 /*
  *  Copyright (C) 2010 Gedare Bloom.
  *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
+ *  Copyright (c) 2014 embedded brains GmbH
  *
  *  The license and distribution terms for this file may be
  *  found in the file LICENSE in this distribution or at
@@ -42,6 +43,13 @@ extern "C" {
  */
 void _Scheduler_Handler_initialization( void );
 
+RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
+  const Scheduler_Control *scheduler
+)
+{
+  return scheduler->context;
+}
+
 RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
   const Thread_Control *the_thread
 )
@@ -55,6 +63,19 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
 #endif
 }
 
+RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
+  const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+  return the_thread->Scheduler.own_control;
+#else
+  (void) the_thread;
+
+  return &_Scheduler_Table[ 0 ];
+#endif
+}
+
 RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
   uint32_t cpu_index
 )
@@ -78,6 +99,13 @@ RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
 }
 
 #if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
+  const Thread_Control *the_thread
+)
+{
+  return the_thread->Scheduler.own_node;
+}
+
 RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
   const Scheduler_Node *node
 )
@@ -117,6 +145,39 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
 }
 
 #if defined(RTEMS_SMP)
+typedef struct {
+  Thread_Control *needs_help;
+  Thread_Control *next_needs_help;
+} Scheduler_Ask_for_help_context ;
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
+  Resource_Node *resource_node,
+  void          *arg
+)
+{
+  bool done;
+  Scheduler_Ask_for_help_context *help_context = arg;
+  Thread_Control *previous_needs_help = help_context->needs_help;
+  Thread_Control *next_needs_help;
+  Thread_Control *offers_help =
+    _Thread_Resource_node_to_thread( resource_node );
+  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );
+
+  next_needs_help = ( *scheduler->Operations.ask_for_help )(
+    scheduler,
+    offers_help,
+    previous_needs_help
+  );
+
+  done = next_needs_help != previous_needs_help;
+
+  if ( done ) {
+    help_context->next_needs_help = next_needs_help;
+  }
+
+  return done;
+}
+
 /**
  * @brief Ask threads depending on resources owned by the thread for help.
  *
@@ -126,11 +187,50 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
  *
  * @param[in] needs_help The thread needing help.
  */
+RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
+  Thread_Control *needs_help
+)
+{
+  do {
+    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );
+
+    needs_help = ( *scheduler->Operations.ask_for_help )(
+      scheduler,
+      needs_help,
+      needs_help
+    );
+
+    if ( needs_help != NULL ) {
+      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };
+
+      _Resource_Iterate(
+        &needs_help->Resource_node,
+        _Scheduler_Ask_for_help_visitor,
+        &help_context
+      );
+
+      needs_help = help_context.next_needs_help;
+    }
+  } while ( needs_help != NULL );
+}
+
 RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
   Thread_Control *needs_help
 )
 {
-  (void) needs_help;
+  if (
+    needs_help != NULL
+      && _Resource_Node_owns_resources( &needs_help->Resource_node )
+  ) {
+    Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );
+
+    if (
+      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
+        || _Scheduler_Node_get_user( node ) != needs_help
+    ) {
+      _Scheduler_Ask_for_help( needs_help );
+    }
+  }
 }
 #endif
 
@@ -218,7 +318,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
   bool                     prepend_it
 )
 {
-  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+  const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread );
 #if defined(RTEMS_SMP)
   Thread_Control *needs_help;
 
@@ -426,6 +526,7 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Set(
   if ( current_scheduler != scheduler ) {
     _Thread_Set_state( the_thread, STATES_MIGRATING );
     _Scheduler_Node_destroy( current_scheduler, the_thread );
+    the_thread->Scheduler.own_control = scheduler;
     the_thread->Scheduler.control = scheduler;
     _Scheduler_Node_initialize( scheduler, the_thread );
     _Scheduler_Update_priority( the_thread, the_thread->current_priority );
@@ -628,13 +729,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority_if_higher(
   }
 }
 
-RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
-  const Scheduler_Control *scheduler
-)
-{
-  return scheduler->context;
-}
-
 RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
   const Scheduler_Control *scheduler
 )
@@ -721,6 +815,29 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
 }
 
 #if defined(RTEMS_SMP)
+/**
+ * @brief Gets an idle thread from the scheduler instance.
+ *
+ * @param[in] context The scheduler instance context.
+ *
+ * @retval idle An idle thread for use.  This function must always return an
+ * idle thread.  If none is available, then this is a fatal error.
+ */
+typedef Thread_Control *( *Scheduler_Get_idle_thread )(
+  Scheduler_Context *context
+);
+
+/**
+ * @brief Releases an idle thread to the scheduler instance for reuse.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] idle The idle thread to release
+ */
+typedef void ( *Scheduler_Release_idle_thread )(
+  Scheduler_Context *context,
+  Thread_Control    *idle
+);
+
 RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
   const Scheduler_Node *node
 )
@@ -735,6 +852,50 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
   return node->idle;
 }
 
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
+  Scheduler_Node *node,
+  Thread_Control *user
+)
+{
+  node->user = user;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
+  Thread_Control *the_thread,
+  Scheduler_Node *node
+)
+{
+  the_thread->Scheduler.node = node;
+}
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
+  Thread_Control       *the_thread,
+  Scheduler_Node       *node,
+  const Thread_Control *previous_user_of_node
+)
+{
+  const Scheduler_Control *scheduler =
+    _Scheduler_Get_own( previous_user_of_node );
+
+  the_thread->Scheduler.control = scheduler;
+  _Scheduler_Thread_set_node( the_thread, node );
+}
+
+extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
+
+RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
+  Thread_Control         *the_thread,
+  Thread_Scheduler_state  new_state
+)
+{
+  _Assert(
+    _Scheduler_Thread_state_valid_state_changes
+      [ the_thread->Scheduler.state ][ new_state ]
+  );
+
+  the_thread->Scheduler.state = new_state;
+}
+
 /**
  * @brief Changes the scheduler help state of a thread.
  *
@@ -748,13 +909,410 @@ RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
   Scheduler_Help_state  new_help_state
 )
 {
-  Scheduler_Node *node = _Scheduler_Thread_get_node( the_thread );
+  Scheduler_Node *node = _Scheduler_Thread_get_own_node( the_thread );
   Scheduler_Help_state previous_help_state = node->help_state;
 
   node->help_state = new_help_state;
 
   return previous_help_state;
 }
+
+/**
+ * @brief Changes the resource tree root of a thread.
+ *
+ * For each node of the resource sub-tree specified by the top thread the
+ * scheduler asks for help.  So the root thread gains access to all scheduler
+ * nodes corresponding to the resource sub-tree.  In case a thread previously
+ * granted help is displaced by this operation, then the scheduler asks for
+ * help using its remaining resource tree.
+ *
+ * @param[in] top The thread specifying the resource sub-tree top.
+ * @param[in] root The thread specifying the new resource sub-tree root.
+ */
+void _Scheduler_Thread_change_resource_root(
+  Thread_Control *top,
+  Thread_Control *root
+);
+
+/**
+ * @brief Use an idle thread for this scheduler node.
+ *
+ * A thread in the SCHEDULER_HELP_ACTIVE_OWNER owner state may use an idle
+ * thread for the scheduler node owned by itself in case it executes currently
+ * using another scheduler node or in case it is in a blocking state.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to use the idle thread.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
+  Scheduler_Context         *context,
+  Scheduler_Node            *node,
+  Scheduler_Get_idle_thread  get_idle_thread
+)
+{
+  Thread_Control *idle = ( *get_idle_thread )( context );
+
+  _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+  _Assert(
+    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
+  );
+
+  _Scheduler_Thread_set_node( idle, node );
+
+  _Scheduler_Node_set_user( node, idle );
+  node->idle = idle;
+
+  return idle;
+}
+
+/**
+ * @brief Try to schedule this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to get scheduled.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ *
+ * @retval true This node can be scheduled.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
+  Scheduler_Context         *context,
+  Scheduler_Node            *node,
+  Scheduler_Get_idle_thread  get_idle_thread
+)
+{
+  bool schedule;
+  Thread_Control *owner;
+  Thread_Control *user;
+
+  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
+    return true;
+  }
+
+  owner = _Scheduler_Node_get_owner( node );
+  user = _Scheduler_Node_get_user( node );
+
+  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
+    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+    } else {
+      _Scheduler_Node_set_user( node, owner );
+    }
+
+    schedule = true;
+  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+    } else {
+      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+    }
+
+    schedule = true;
+  } else {
+    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
+
+    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+      schedule = true;
+    } else {
+      schedule = false;
+    }
+  }
+
+  return schedule;
+}
+
+/**
+ * @brief Release an idle thread using this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which may have an idle thread as user.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval idle The idle thread which used this node.
+ * @retval NULL This node had no idle thread as an user.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
+  Scheduler_Context             *context,
+  Scheduler_Node                *node,
+  Scheduler_Release_idle_thread  release_idle_thread
+)
+{
+  Thread_Control *idle = _Scheduler_Node_get_idle( node );
+
+  if ( idle != NULL ) {
+    Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+    node->idle = NULL;
+    _Scheduler_Node_set_user( node, owner );
+    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
+    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );
+
+    ( *release_idle_thread )( context, idle );
+  }
+
+  return idle;
+}
+
+/**
+ * @brief Block this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The node which wants to get blocked.
+ * @param[in] is_scheduled This node is scheduled.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ *
+ * @retval true Continue with the blocking operation.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
+  Scheduler_Context         *context,
+  Scheduler_Node            *node,
+  bool                       is_scheduled,
+  Scheduler_Get_idle_thread  get_idle_thread
+)
+{
+  bool block;
+  Thread_Control *old_user = _Scheduler_Node_get_user( node );
+  Thread_Control *new_user;
+
+  _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
+
+  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+    new_user = _Scheduler_Node_get_owner( node );
+
+    _Assert( new_user != old_user );
+    _Scheduler_Node_set_user( node, new_user );
+  } else if (
+    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+      && is_scheduled
+  ) {
+    new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+  } else {
+    new_user = NULL;
+  }
+
+  if ( new_user != NULL && is_scheduled ) {
+    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+
+    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
+    _Thread_Set_CPU( new_user, cpu );
+    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
+
+    block = false;
+  } else {
+    block = true;
+  }
+
+  return block;
+}
+
+/**
+ * @brief Unblock this scheduler node.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] the_thread The thread which wants to get unblocked.
+ * @param[in] node The node which wants to get unblocked.
+ * @param[in] is_scheduled This node is scheduled.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval true Continue with the unblocking operation.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
+  Scheduler_Context             *context,
+  Thread_Control                *the_thread,
+  Scheduler_Node                *node,
+  bool                           is_scheduled,
+  Scheduler_Release_idle_thread  release_idle_thread
+)
+{
+  bool unblock;
+
+  if ( is_scheduled ) {
+    Thread_Control *old_user = _Scheduler_Node_get_user( node );
+    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+
+    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+      Thread_Control *idle = _Scheduler_Release_idle_thread(
+        context,
+        node,
+        release_idle_thread
+      );
+
+      _Assert( idle != NULL );
+      (void) idle;
+    } else {
+      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+
+      _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
+      _Scheduler_Node_set_user( node, the_thread );
+    }
+
+    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
+    _Thread_Set_CPU( the_thread, cpu );
+    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
+
+    unblock = false;
+  } else {
+    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
+
+    unblock = true;
+  }
+
+  return unblock;
+}
+
+/**
+ * @brief Asks a ready scheduler node for help.
+ *
+ * @param[in] node The ready node offering help.
+ * @param[in] needs_help The thread needing help.
+ *
+ * @retval needs_help The thread needing help.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
+  Scheduler_Node *node,
+  Thread_Control *needs_help
+)
+{
+  _Scheduler_Node_set_user( node, needs_help );
+
+  return needs_help;
+}
+
+/**
+ * @brief Asks a scheduled scheduler node for help.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The scheduled node offering help.
+ * @param[in] offers_help The thread offering help.
+ * @param[in] needs_help The thread needing help.
+ * @param[in] previous_accepts_help The previous thread accepting help by this
+ *   scheduler node.
+ * @param[in] release_idle_thread Function to release an idle thread.
+ *
+ * @retval needs_help The previous thread accepting help by this scheduler node
+ *   which was displaced by the thread needing help.
+ * @retval NULL There are no more threads needing help.
+ */
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
+  Scheduler_Context             *context,
+  Scheduler_Node                *node,
+  Thread_Control                *offers_help,
+  Thread_Control                *needs_help,
+  Thread_Control                *previous_accepts_help,
+  Scheduler_Release_idle_thread  release_idle_thread
+)
+{
+  Thread_Control *next_needs_help = NULL;
+  Thread_Control *old_user = NULL;
+  Thread_Control *new_user = NULL;
+
+  if (
+    previous_accepts_help != needs_help
+      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
+  ) {
+    Thread_Control *idle = _Scheduler_Release_idle_thread(
+      context,
+      node,
+      release_idle_thread
+    );
+
+    if ( idle != NULL ) {
+      old_user = idle;
+    } else {
+      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
+      old_user = previous_accepts_help;
+    }
+
+    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
+      new_user = needs_help;
+    } else {
+      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );
+
+      new_user = offers_help;
+    }
+
+    if ( previous_accepts_help != offers_help ) {
+      next_needs_help = previous_accepts_help;
+    }
+  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
+    Thread_Control *idle = _Scheduler_Release_idle_thread(
+      context,
+      node,
+      release_idle_thread
+    );
+
+    if ( idle != NULL ) {
+      old_user = idle;
+    } else {
+      old_user = _Scheduler_Node_get_user( node );
+    }
+
+    new_user = needs_help;
+  } else {
+    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
+  }
+
+  if ( new_user != old_user ) {
+    Per_CPU_Control *cpu_self = _Per_CPU_Get();
+    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+
+    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
+    _Scheduler_Thread_set_scheduler_and_node(
+      old_user,
+      _Scheduler_Thread_get_own_node( old_user ),
+      old_user
+    );
+
+    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
+    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );
+
+    _Scheduler_Node_set_user( node, new_user );
+    _Thread_Set_CPU( new_user, cpu );
+    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
+  }
+
+  return next_needs_help;
+}
+
+/**
+ * @brief Asks a blocked scheduler node for help.
+ *
+ * @param[in] context The scheduler instance context.
+ * @param[in] node The scheduled node offering help.
+ * @param[in] offers_help The thread offering help.
+ * @param[in] needs_help The thread needing help.
+ *
+ * @retval true Enqueue this scheduler node.
+ * @retval false Otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
+  Scheduler_Context *context,
+  Scheduler_Node    *node,
+  Thread_Control    *offers_help,
+  Thread_Control    *needs_help
+)
+{
+  bool enqueue;
+
+  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
+
+  if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
+    _Scheduler_Node_set_user( node, needs_help );
+    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
+
+    enqueue = true;
+  } else {
+    enqueue = false;
+  }
+
+  return enqueue;
+}
 #endif
 
 /** @} */
diff --git a/cpukit/score/include/rtems/score/schedulerpriority.h b/cpukit/score/include/rtems/score/schedulerpriority.h
index 805e302..de051a8 100644
--- a/cpukit/score/include/rtems/score/schedulerpriority.h
+++ b/cpukit/score/include/rtems/score/schedulerpriority.h
@@ -45,6 +45,7 @@ extern "C" {
     _Scheduler_priority_Block,            /* block entry point */ \
     _Scheduler_priority_Unblock,          /* unblock entry point */ \
     _Scheduler_priority_Change_priority,  /* change priority entry point */ \
+    SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
     _Scheduler_default_Node_initialize,   /* node initialize entry point */ \
     _Scheduler_default_Node_destroy,      /* node destroy entry point */ \
     _Scheduler_priority_Update_priority,  /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
index 3a23510..91ffcd2 100644
--- a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -55,6 +55,7 @@ extern "C" {
     _Scheduler_priority_affinity_SMP_Block, \
     _Scheduler_priority_affinity_SMP_Unblock, \
     _Scheduler_priority_affinity_SMP_Change_priority, \
+    _Scheduler_priority_affinity_SMP_Ask_for_help, \
     _Scheduler_priority_affinity_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
     _Scheduler_priority_SMP_Update_priority, \
@@ -139,6 +140,12 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
   bool                     prepend_it
 );
 
+Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *offers_help,
+  Thread_Control          *needs_help
+);
+
 /** 
  * @brief Set affinity for the priority affinity SMP scheduler.
  *
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmp.h b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
index a1a1481..d8ce7dc 100644
--- a/cpukit/score/include/rtems/score/schedulerprioritysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
@@ -84,6 +84,7 @@ typedef struct {
     _Scheduler_priority_SMP_Block, \
     _Scheduler_priority_SMP_Unblock, \
     _Scheduler_priority_SMP_Change_priority, \
+    _Scheduler_priority_SMP_Ask_for_help, \
     _Scheduler_priority_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
     _Scheduler_priority_SMP_Update_priority, \
@@ -118,6 +119,12 @@ Thread_Control *_Scheduler_priority_SMP_Change_priority(
   bool                     prepend_it
 );
 
+Thread_Control *_Scheduler_priority_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *needs_help,
+  Thread_Control          *offers_help
+);
+
 void _Scheduler_priority_SMP_Update_priority(
   const Scheduler_Control *scheduler,
   Thread_Control *thread,
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
index 9ae0103..bb200b4 100644
--- a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
@@ -148,6 +148,28 @@ static inline void _Scheduler_priority_SMP_Extract_from_ready(
   );
 }
 
+static inline Thread_Control *_Scheduler_priority_SMP_Get_idle_thread(
+  Scheduler_Context *context
+)
+{
+  return _Scheduler_SMP_Get_idle_thread(
+    context,
+    _Scheduler_priority_SMP_Extract_from_ready
+  );
+}
+
+static void _Scheduler_priority_SMP_Release_idle_thread(
+  Scheduler_Context *context,
+  Thread_Control    *idle
+)
+{
+  _Scheduler_SMP_Release_idle_thread(
+    context,
+    idle,
+    _Scheduler_priority_SMP_Insert_ready_fifo
+  );
+}
+
 static inline void _Scheduler_priority_SMP_Do_update(
   Scheduler_Context *context,
   Scheduler_Node *node_to_update,
diff --git a/cpukit/score/include/rtems/score/schedulersimple.h b/cpukit/score/include/rtems/score/schedulersimple.h
index c97ad2f..82b8c3d 100644
--- a/cpukit/score/include/rtems/score/schedulersimple.h
+++ b/cpukit/score/include/rtems/score/schedulersimple.h
@@ -43,6 +43,7 @@ extern "C" {
     _Scheduler_simple_Block,              /* block entry point */ \
     _Scheduler_simple_Unblock,            /* unblock entry point */ \
     _Scheduler_simple_Change_priority,    /* change priority entry point */ \
+    SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
     _Scheduler_default_Node_initialize,   /* node initialize entry point */ \
     _Scheduler_default_Node_destroy,      /* node destroy entry point */ \
     _Scheduler_default_Update_priority,   /* update priority entry point */ \
diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h
index de338ab..11310c6 100644
--- a/cpukit/score/include/rtems/score/schedulersimplesmp.h
+++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h
@@ -65,6 +65,7 @@ typedef struct {
     _Scheduler_simple_SMP_Block, \
     _Scheduler_simple_SMP_Unblock, \
     _Scheduler_simple_SMP_Change_priority, \
+    _Scheduler_simple_SMP_Ask_for_help, \
     _Scheduler_simple_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
     _Scheduler_simple_SMP_Update_priority, \
@@ -99,6 +100,12 @@ Thread_Control *_Scheduler_simple_SMP_Change_priority(
   bool                     prepend_it
 );
 
+Thread_Control *_Scheduler_simple_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *offers_help,
+  Thread_Control          *needs_help
+);
+
 void _Scheduler_simple_SMP_Update_priority(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
diff --git a/cpukit/score/include/rtems/score/schedulersmp.h b/cpukit/score/include/rtems/score/schedulersmp.h
index 0c51a14..a58417a 100644
--- a/cpukit/score/include/rtems/score/schedulersmp.h
+++ b/cpukit/score/include/rtems/score/schedulersmp.h
@@ -51,6 +51,11 @@ typedef struct {
    * @brief The chain of scheduled nodes.
    */
   Chain_Control Scheduled;
+
+  /**
+   * @brief Chain of the available idle threads.
+   */
+  Chain_Control Idle_threads;
 } Scheduler_SMP_Context;
 
 /**
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 55d0697..3cf7861 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -361,6 +361,7 @@ static inline void _Scheduler_SMP_Initialize(
 )
 {
   _Chain_Initialize_empty( &self->Scheduled );
+  _Chain_Initialize_empty( &self->Idle_threads );
 }
 
 static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
@@ -370,6 +371,13 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
   return (Scheduler_SMP_Node *) _Scheduler_Thread_get_node( thread );
 }
 
+static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
+  Thread_Control *thread
+)
+{
+  return (Scheduler_SMP_Node *) _Scheduler_Thread_get_own_node( thread );
+}
+
 static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
   Scheduler_Node *node
 )
@@ -416,6 +424,36 @@ static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
   return cpu->scheduler_context == context;
 }
 
+static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
+  Scheduler_Context     *context,
+  Scheduler_SMP_Extract  extract_from_ready
+)
+{
+  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+  Thread_Control *idle = (Thread_Control *)
+    _Chain_Get_first_unprotected( &self->Idle_threads );
+  Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle );
+
+  ( *extract_from_ready )( &self->Base, own_node );
+
+  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
+
+  return idle;
+}
+
+static inline void _Scheduler_SMP_Release_idle_thread(
+  Scheduler_Context    *context,
+  Thread_Control       *idle,
+  Scheduler_SMP_Insert  insert_ready
+)
+{
+  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+  Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( idle );
+
+  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
+  ( *insert_ready )( context, own_node );
+}
+
 static inline void _Scheduler_SMP_Allocate_processor_lazy(
   Scheduler_Context *context,
   Thread_Control    *scheduled_thread,
@@ -468,6 +506,7 @@ static inline void _Scheduler_SMP_Allocate_processor(
     _Scheduler_SMP_Node_downcast( scheduled ),
     SCHEDULER_SMP_NODE_SCHEDULED
   );
+  _Scheduler_Thread_change_state( scheduled_thread, THREAD_SCHEDULER_SCHEDULED );
 
   ( *allocate_processor )( context, scheduled_thread, victim_thread );
 }
@@ -491,6 +530,57 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
   return lowest_scheduled;
 }
 
+static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
+  Scheduler_Context                *context,
+  Scheduler_Node                   *node,
+  Scheduler_Node                   *lowest_scheduled,
+  Scheduler_SMP_Insert              insert_scheduled,
+  Scheduler_SMP_Move                move_from_scheduled_to_ready,
+  Scheduler_SMP_Allocate_processor  allocate_processor,
+  Scheduler_Release_idle_thread     release_idle_thread
+)
+{
+  Thread_Control *user = _Scheduler_Node_get_user( node );
+  Thread_Control *lowest_scheduled_user =
+    _Scheduler_Node_get_user( lowest_scheduled );
+  Thread_Control *needs_help;
+  Thread_Control *idle;
+
+  _Scheduler_SMP_Node_change_state(
+    _Scheduler_SMP_Node_downcast( lowest_scheduled ),
+    SCHEDULER_SMP_NODE_READY
+  );
+  _Scheduler_Thread_change_state(
+    lowest_scheduled_user,
+    THREAD_SCHEDULER_READY
+  );
+
+  _Scheduler_Thread_set_node( user, node );
+
+  _Scheduler_SMP_Allocate_processor(
+    context,
+    node,
+    lowest_scheduled,
+    allocate_processor
+  );
+
+  ( *insert_scheduled )( context, node );
+  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+  idle = _Scheduler_Release_idle_thread(
+    context,
+    lowest_scheduled,
+    release_idle_thread
+  );
+  if ( idle == NULL ) {
+    needs_help = lowest_scheduled_user;
+  } else {
+    needs_help = NULL;
+  }
+
+  return needs_help;
+}
+
 /**
  * @brief Enqueues a node according to the specified order function.
  *
@@ -513,6 +603,7 @@ static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  *   if this pointer is passed as the second argument to the order function.
  * @param[in] allocate_processor Function to allocate a processor to a node
  *   based on the rules of the scheduler.
+ * @param[in] release_idle_thread Function to release an idle thread.
  */
 static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
   Scheduler_Context                  *context,
@@ -523,29 +614,23 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
   Scheduler_SMP_Insert                insert_scheduled,
   Scheduler_SMP_Move                  move_from_scheduled_to_ready,
   Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
-  Scheduler_SMP_Allocate_processor    allocate_processor
+  Scheduler_SMP_Allocate_processor    allocate_processor,
+  Scheduler_Release_idle_thread       release_idle_thread
 )
 {
   Scheduler_Node *lowest_scheduled =
     ( *get_lowest_scheduled )( context, node, order );
 
   if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
-    _Scheduler_SMP_Node_change_state(
-      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
-      SCHEDULER_SMP_NODE_READY
-    );
-
-    _Scheduler_SMP_Allocate_processor(
+    needs_help = _Scheduler_SMP_Enqueue_to_scheduled(
       context,
       node,
       lowest_scheduled,
-      allocate_processor
+      insert_scheduled,
+      move_from_scheduled_to_ready,
+      allocate_processor,
+      release_idle_thread
     );
-
-    ( *insert_scheduled )( context, node );
-    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
-
-    needs_help = _Scheduler_Node_get_user( lowest_scheduled );
   } else {
     ( *insert_ready )( context, node );
   }
@@ -560,6 +645,8 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  * @param[in] context The scheduler instance context.
  * @param[in] node The node to enqueue.
  * @param[in] order The order function.
+ * @param[in] extract_from_ready Function to extract a node from the set of
+ *   ready nodes.
  * @param[in] get_highest_ready Function to get the highest ready node.
  * @param[in] insert_ready Function to insert a node into the set of ready
  *   nodes.
@@ -569,48 +656,86 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_ordered(
  *   of ready nodes to the set of scheduled nodes.
  * @param[in] allocate_processor Function to allocate a processor to a node
  *   based on the rules of the scheduler.
+ * @param[in] get_idle_thread Function to get an idle thread.
+ * @param[in] release_idle_thread Function to release an idle thread.
  */
 static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
   Scheduler_Context                *context,
   Scheduler_Node                   *node,
   Chain_Node_order                  order,
+  Scheduler_SMP_Extract             extract_from_ready,
   Scheduler_SMP_Get_highest_ready   get_highest_ready,
   Scheduler_SMP_Insert              insert_ready,
   Scheduler_SMP_Insert              insert_scheduled,
   Scheduler_SMP_Move                move_from_ready_to_scheduled,
-  Scheduler_SMP_Allocate_processor  allocate_processor
+  Scheduler_SMP_Allocate_processor  allocate_processor,
+  Scheduler_Get_idle_thread         get_idle_thread,
+  Scheduler_Release_idle_thread     release_idle_thread
 )
 {
-  Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
   Thread_Control *needs_help;
 
-  _Assert( highest_ready != NULL );
+  while ( true ) {
+    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
 
-  /*
-   * The node has been extracted from the scheduled chain.  We have to place
-   * it now on the scheduled or ready set.
-   */
-  if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
-    ( *insert_scheduled )( context, node );
+    _Assert( highest_ready != NULL );
 
-    needs_help = NULL;
-  } else {
-    _Scheduler_SMP_Node_change_state(
-      _Scheduler_SMP_Node_downcast( node ),
-      SCHEDULER_SMP_NODE_READY
-    );
+    /*
+     * The node has been extracted from the scheduled chain.  We have to place
+     * it now on the scheduled or ready set.
+     */
+    if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
+      ( *insert_scheduled )( context, node );
 
-    _Scheduler_SMP_Allocate_processor(
-      context,
-      highest_ready,
-      node,
-      allocate_processor
-    );
+      needs_help = NULL;
 
-    ( *insert_ready )( context, node );
-    ( *move_from_ready_to_scheduled )( context, highest_ready );
+      break;
+    } else if (
+      _Scheduler_Try_to_schedule_node(
+        context,
+        highest_ready,
+        get_idle_thread
+      )
+    ) {
+      Thread_Control *user = _Scheduler_Node_get_user( node );
+      Thread_Control *idle;
+
+      _Scheduler_SMP_Node_change_state(
+        _Scheduler_SMP_Node_downcast( node ),
+        SCHEDULER_SMP_NODE_READY
+      );
+      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
 
-    needs_help = _Scheduler_Node_get_user( node );
+      _Scheduler_SMP_Allocate_processor(
+        context,
+        highest_ready,
+        node,
+        allocate_processor
+      );
+
+      ( *insert_ready )( context, node );
+      ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+      idle = _Scheduler_Release_idle_thread(
+        context,
+        node,
+        release_idle_thread
+      );
+      if ( idle == NULL ) {
+        needs_help = user;
+      } else {
+        needs_help = NULL;
+      }
+
+      break;
+    } else {
+      _Scheduler_SMP_Node_change_state(
+        _Scheduler_SMP_Node_downcast( highest_ready ),
+        SCHEDULER_SMP_NODE_BLOCKED
+      );
+
+      ( *extract_from_ready )( context, highest_ready );
+    }
   }
 
   return needs_help;
@@ -626,21 +751,44 @@ static inline void _Scheduler_SMP_Extract_from_scheduled(
 static inline void _Scheduler_SMP_Schedule_highest_ready(
   Scheduler_Context                *context,
   Scheduler_Node                   *victim,
+  Scheduler_SMP_Extract             extract_from_ready,
   Scheduler_SMP_Get_highest_ready   get_highest_ready,
   Scheduler_SMP_Move                move_from_ready_to_scheduled,
-  Scheduler_SMP_Allocate_processor  allocate_processor
+  Scheduler_SMP_Allocate_processor  allocate_processor,
+  Scheduler_Get_idle_thread         get_idle_thread
 )
 {
-  Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+  while ( true ) {
+    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
 
-  _Scheduler_SMP_Allocate_processor(
-    context,
-    highest_ready,
-    victim,
-    allocate_processor
-  );
+    _Assert( highest_ready != NULL );
+
+    if (
+      _Scheduler_Try_to_schedule_node(
+        context,
+        highest_ready,
+        get_idle_thread
+      )
+    ) {
+      _Scheduler_SMP_Allocate_processor(
+        context,
+        highest_ready,
+        victim,
+        allocate_processor
+      );
+
+      ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+      break;
+    } else {
+      _Scheduler_SMP_Node_change_state(
+        _Scheduler_SMP_Node_downcast( highest_ready ),
+        SCHEDULER_SMP_NODE_BLOCKED
+      );
 
-  ( *move_from_ready_to_scheduled )( context, highest_ready );
+      ( *extract_from_ready )( context, highest_ready );
+    }
+  }
 }
 
 /**
@@ -649,10 +797,11 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
  * @param[in] context The scheduler instance context.
  * @param[in] thread The thread of the scheduling operation.
  * @param[in] extract_from_ready Function to extract a node from the set of
- * ready nodes.
+ *   ready nodes.
  * @param[in] get_highest_ready Function to get the highest ready node.
  * @param[in] move_from_ready_to_scheduled Function to move a node from the set
- * of ready nodes to the set of scheduled nodes.
+ *   of ready nodes to the set of scheduled nodes.
+ * @param[in] get_idle_thread Function to get an idle thread.
  */
 static inline void _Scheduler_SMP_Block(
   Scheduler_Context                *context,
@@ -660,40 +809,67 @@ static inline void _Scheduler_SMP_Block(
   Scheduler_SMP_Extract             extract_from_ready,
   Scheduler_SMP_Get_highest_ready   get_highest_ready,
   Scheduler_SMP_Move                move_from_ready_to_scheduled,
-  Scheduler_SMP_Allocate_processor  allocate_processor
+  Scheduler_SMP_Allocate_processor  allocate_processor,
+  Scheduler_Get_idle_thread         get_idle_thread
 )
 {
   Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
   bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
+  bool block = _Scheduler_Block_node(
+    context,
+    &node->Base,
+    is_scheduled,
+    get_idle_thread
+  );
 
-  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
+  if ( block ) {
+    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
 
-  if ( is_scheduled ) {
-    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
+    if ( is_scheduled ) {
+      _Scheduler_SMP_Extract_from_scheduled( &node->Base );
 
-    _Scheduler_SMP_Schedule_highest_ready(
-      context,
-      &node->Base,
-      get_highest_ready,
-      move_from_ready_to_scheduled,
-      allocate_processor
-    );
-  } else {
-    ( *extract_from_ready )( context, &node->Base );
+      _Scheduler_SMP_Schedule_highest_ready(
+        context,
+        &node->Base,
+        extract_from_ready,
+        get_highest_ready,
+        move_from_ready_to_scheduled,
+        allocate_processor,
+        get_idle_thread
+      );
+    } else {
+      ( *extract_from_ready )( context, &node->Base );
+    }
   }
 }
 
 static inline Thread_Control *_Scheduler_SMP_Unblock(
-  Scheduler_Context     *context,
-  Thread_Control        *thread,
-  Scheduler_SMP_Enqueue  enqueue_fifo
+  Scheduler_Context             *context,
+  Thread_Control                *thread,
+  Scheduler_SMP_Enqueue          enqueue_fifo,
+  Scheduler_Release_idle_thread  release_idle_thread
 )
 {
   Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
+  bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
+  bool unblock = _Scheduler_Unblock_node(
+    context,
+    thread,
+    &node->Base,
+    is_scheduled,
+    release_idle_thread
+  );
+  Thread_Control *needs_help;
 
-  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+  if ( unblock ) {
+    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
 
-  return ( *enqueue_fifo )( context, &node->Base, thread );
+    needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+  } else {
+    needs_help = NULL;
+  }
+
+  return needs_help;
 }
 
 static inline Thread_Control *_Scheduler_SMP_Change_priority(
@@ -709,7 +885,7 @@ static inline Thread_Control *_Scheduler_SMP_Change_priority(
   Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled_lifo
 )
 {
-  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
+  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( thread );
   Thread_Control *needs_help;
 
   if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
@@ -722,7 +898,7 @@ static inline Thread_Control *_Scheduler_SMP_Change_priority(
     } else {
       needs_help = ( *enqueue_scheduled_fifo )( context, &node->Base );
     }
-  } else {
+  } else if ( node->state == SCHEDULER_SMP_NODE_READY ) {
     ( *extract_from_ready )( context, &node->Base );
 
     ( *update )( context, &node->Base, new_priority );
@@ -732,11 +908,68 @@ static inline Thread_Control *_Scheduler_SMP_Change_priority(
     } else {
       needs_help = ( *enqueue_fifo )( context, &node->Base, NULL );
     }
+  } else {
+    ( *update )( context, &node->Base, new_priority );
+
+    needs_help = NULL;
   }
 
   return needs_help;
 }
 
+static inline Thread_Control *_Scheduler_SMP_Ask_for_help(
+  Scheduler_Context                  *context,
+  Thread_Control                     *offers_help,
+  Thread_Control                     *needs_help,
+  Scheduler_SMP_Enqueue               enqueue_fifo,
+  Scheduler_Release_idle_thread       release_idle_thread
+)
+{
+  Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_own_node( offers_help );
+  Thread_Control *next_needs_help = NULL;
+  Thread_Control *previous_accepts_help;
+
+  previous_accepts_help = node->Base.accepts_help;
+  node->Base.accepts_help = needs_help;
+
+  switch ( node->state ) {
+    case SCHEDULER_SMP_NODE_READY:
+      next_needs_help =
+        _Scheduler_Ask_ready_node_for_help( &node->Base, needs_help );
+      break;
+    case SCHEDULER_SMP_NODE_SCHEDULED:
+      next_needs_help = _Scheduler_Ask_scheduled_node_for_help(
+        context,
+        &node->Base,
+        offers_help,
+        needs_help,
+        previous_accepts_help,
+        release_idle_thread
+      );
+      break;
+    case SCHEDULER_SMP_NODE_BLOCKED:
+      if (
+        _Scheduler_Ask_blocked_node_for_help(
+          context,
+          &node->Base,
+          offers_help,
+          needs_help
+        )
+      ) {
+        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+
+        next_needs_help = ( *enqueue_fifo )(
+          context,
+          &node->Base,
+          needs_help
+        );
+      }
+      break;
+  }
+
+  return next_needs_help;
+}
+
 static inline Thread_Control *_Scheduler_SMP_Yield(
   Scheduler_Context               *context,
   Thread_Control                  *thread,
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index a9a3f9f..4d758fb 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -461,19 +461,72 @@ typedef struct {
   Thread_Control    *terminator;
 } Thread_Life_control;
 
+#if defined(RTEMS_SMP)
+/**
+ * @brief The thread state with respect to the scheduler.
+ */
+typedef enum {
+  /**
+   * @brief This thread is blocked with respect to the scheduler.
+   *
+   * This thread uses no scheduler nodes.
+   */
+  THREAD_SCHEDULER_BLOCKED,
+
+  /**
+   * @brief This thread is scheduled with respect to the scheduler.
+   *
+   * This thread executes using one of its scheduler nodes.  This could be its
+   * own scheduler node or in case it owns resources taking part in the
+   * scheduler helping protocol a scheduler node of another thread.
+   */
+  THREAD_SCHEDULER_SCHEDULED,
+
+  /**
+   * @brief This thread is ready with respect to the scheduler.
+   *
+   * None of the scheduler nodes of this thread is scheduled.
+   */
+  THREAD_SCHEDULER_READY
+} Thread_Scheduler_state;
+#endif
+
 /**
  * @brief Thread scheduler control.
  */
 typedef struct {
 #if defined(RTEMS_SMP)
   /**
+   * @brief The current scheduler state of this thread.
+   */
+  Thread_Scheduler_state state;
+
+  /**
+   * @brief The own scheduler control of this thread.
+   *
+   * This field is constant after initialization.
+   */
+  const struct Scheduler_Control *own_control;
+
+  /**
    * @brief The current scheduler control of this thread.
+   *
+   * The scheduler helping protocol may change this field.
    */
   const struct Scheduler_Control *control;
+
+  /**
+   * @brief The own scheduler node of this thread.
+   *
+   * This field is constant after initialization.
+   */
+  struct Scheduler_Node *own_node;
 #endif
 
   /**
    * @brief The current scheduler node of this thread.
+   *
+   * The scheduler helping protocol may change this field.
    */
   struct Scheduler_Node *node;
 
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 4971e9d..cb7d5fe 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -828,6 +828,16 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
   return owns_resources;
 }
 
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Resource_node_to_thread(
+  Resource_Node *node
+)
+{
+  return (Thread_Control *)
+    ( (char *) node - offsetof( Thread_Control, Resource_node ) );
+}
+#endif
+
 RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
   Thread_Control  *the_thread,
   Per_CPU_Control *cpu
diff --git a/cpukit/score/src/schedulerchangeroot.c b/cpukit/score/src/schedulerchangeroot.c
new file mode 100644
index 0000000..bdb7b30
--- /dev/null
+++ b/cpukit/score/src/schedulerchangeroot.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Dornierstr. 4
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems/score/schedulerimpl.h>
+
+typedef struct {
+  Thread_Control *root;
+  Thread_Control *needs_help;
+} Scheduler_Set_root_context;
+
+RTEMS_INLINE_ROUTINE bool _Scheduler_Set_root_visitor(
+  Resource_Node *resource_node,
+  void          *arg
+)
+{
+  Scheduler_Set_root_context *ctx = arg;
+  Thread_Control *root = ctx->root;
+  Thread_Control *needs_help = root;
+  Thread_Control *offers_help =
+    _Thread_Resource_node_to_thread( resource_node );
+  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );
+  Thread_Control *needs_also_help;
+
+  _Resource_Node_set_root( resource_node, &root->Resource_node );
+
+  needs_also_help = ( *scheduler->Operations.ask_for_help )(
+    scheduler,
+    offers_help,
+    needs_help
+  );
+
+  if ( needs_also_help != needs_help && needs_also_help != NULL ) {
+    _Assert( ctx->needs_help == NULL );
+    ctx->needs_help = needs_also_help;
+  }
+
+  return false;
+}
+
+void _Scheduler_Thread_change_resource_root(
+  Thread_Control *top,
+  Thread_Control *root
+)
+{
+  Scheduler_Set_root_context ctx = { root, NULL };
+  Thread_Control *offers_help = top;
+  Scheduler_Node *offers_help_node;
+  Thread_Control *offers_also_help;
+  ISR_Level level;
+
+  _ISR_Disable( level );
+
+  offers_help_node = _Scheduler_Thread_get_node( offers_help );
+  offers_also_help = _Scheduler_Node_get_owner( offers_help_node );
+
+  if ( offers_help != offers_also_help ) {
+    _Scheduler_Set_root_visitor( &offers_also_help->Resource_node, &ctx );
+    _Assert( ctx.needs_help == offers_help );
+    ctx.needs_help = NULL;
+  }
+
+  _Scheduler_Set_root_visitor( &top->Resource_node, &ctx );
+  _Resource_Iterate( &top->Resource_node, _Scheduler_Set_root_visitor, &ctx );
+
+  if ( ctx.needs_help != NULL ) {
+    _Scheduler_Ask_for_help( ctx.needs_help );
+  }
+
+  _ISR_Enable( level );
+}
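_Scheduler_Thread_change_resource_root() re-roots the resource sub-tree below
the thread top: with interrupts disabled it first visits the owner of top's
currently used scheduler node in case top executes on a borrowed node, then
visits top itself and every resource node reachable via _Resource_Iterate(),
setting the new root and asking the scheduler of each visited thread for
help; a thread still in need for help afterwards is handed to
_Scheduler_Ask_for_help().  A minimal sketch of a call site, with hypothetical
names only (the real call sites belong to the MrsP implementation, which is
not part of this excerpt):

    #include <rtems/score/schedulerimpl.h>

    #if defined(RTEMS_SMP)
    /*
     * Hypothetical wrapper: make new_root the root of the resource sub-tree
     * whose top is sub_tree_top, for example after the ownership of a
     * resource changed.
     */
    static void _Example_change_root(
      Thread_Control *sub_tree_top,
      Thread_Control *new_root
    )
    {
      _Scheduler_Thread_change_resource_root( sub_tree_top, new_root );
    }
    #endif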
diff --git a/cpukit/score/src/schedulerdefaultaskforhelp.c b/cpukit/score/src/schedulerdefaultaskforhelp.c
new file mode 100644
index 0000000..b695248
--- /dev/null
+++ b/cpukit/score/src/schedulerdefaultaskforhelp.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems/score/scheduler.h>
+
+Thread_Control *_Scheduler_default_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *offers_help,
+  Thread_Control          *needs_help
+)
+{
+  (void) scheduler;
+  (void) offers_help;
+  (void) needs_help;
+
+  return NULL;
+}
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 14a022e..5c80213 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -221,7 +221,8 @@ void _Scheduler_priority_affinity_SMP_Block(
     _Scheduler_priority_SMP_Extract_from_ready,
     _Scheduler_priority_affinity_SMP_Get_highest_ready,
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_SMP_Allocate_processor_exact,
+    _Scheduler_priority_SMP_Get_idle_thread
   );
 
   /*
@@ -303,7 +304,8 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_fifo(
     _Scheduler_SMP_Insert_scheduled_fifo,
     _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_SMP_Allocate_processor_exact,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 }
 
@@ -387,7 +389,8 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Unblock(
   needs_help = _Scheduler_SMP_Unblock(
     context,
     thread,
-    _Scheduler_priority_affinity_SMP_Enqueue_fifo
+    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 
   /*
@@ -420,7 +423,8 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Enqueue_ordered(
     insert_scheduled,
     _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_SMP_Allocate_processor_exact,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 }
 
@@ -463,11 +467,14 @@ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
     context,
     node,
     order,
+    _Scheduler_priority_SMP_Extract_from_ready,
     _Scheduler_priority_affinity_SMP_Get_highest_ready,
     insert_ready,
     insert_scheduled,
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_SMP_Allocate_processor_exact,
+    _Scheduler_priority_SMP_Get_idle_thread,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 }
 
@@ -543,6 +550,27 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Change_priority(
   return displaced;
 }
 
+Thread_Control *_Scheduler_priority_affinity_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *offers_help,
+  Thread_Control          *needs_help
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  needs_help = _Scheduler_SMP_Ask_for_help(
+    context,
+    offers_help,
+    needs_help,
+    _Scheduler_priority_affinity_SMP_Enqueue_fifo,
+    _Scheduler_priority_SMP_Release_idle_thread
+  );
+
+  _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
+
+  return needs_help;
+}
+
 /*
  * This is the public scheduler specific Change Priority operation.
  */
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index b642c5d..6d6f055 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -93,7 +93,8 @@ void _Scheduler_priority_SMP_Block(
     _Scheduler_priority_SMP_Extract_from_ready,
     _Scheduler_priority_SMP_Get_highest_ready,
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_SMP_Allocate_processor_lazy,
+    _Scheduler_priority_SMP_Get_idle_thread
   );
 }
 
@@ -115,7 +116,8 @@ static Thread_Control *_Scheduler_priority_SMP_Enqueue_ordered(
     insert_scheduled,
     _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
     _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_SMP_Allocate_processor_lazy,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 }
 
@@ -163,11 +165,14 @@ static Thread_Control *_Scheduler_priority_SMP_Enqueue_scheduled_ordered(
     context,
     node,
     order,
+    _Scheduler_priority_SMP_Extract_from_ready,
     _Scheduler_priority_SMP_Get_highest_ready,
     insert_ready,
     insert_scheduled,
     _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_SMP_Allocate_processor_lazy,
+    _Scheduler_priority_SMP_Get_idle_thread,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 }
 
@@ -209,7 +214,8 @@ Thread_Control *_Scheduler_priority_SMP_Unblock(
   return _Scheduler_SMP_Unblock(
     context,
     thread,
-    _Scheduler_priority_SMP_Enqueue_fifo
+    _Scheduler_priority_SMP_Enqueue_fifo,
+    _Scheduler_priority_SMP_Release_idle_thread
   );
 }
 
@@ -236,6 +242,23 @@ Thread_Control *_Scheduler_priority_SMP_Change_priority(
   );
 }
 
+Thread_Control *_Scheduler_priority_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *offers_help,
+  Thread_Control          *needs_help
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  return _Scheduler_SMP_Ask_for_help(
+    context,
+    offers_help,
+    needs_help,
+    _Scheduler_priority_SMP_Enqueue_fifo,
+    _Scheduler_priority_SMP_Release_idle_thread
+  );
+}
+
 Thread_Control *_Scheduler_priority_SMP_Yield(
   const Scheduler_Control *scheduler,
   Thread_Control *thread
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index ee540be..99dbbef 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -162,6 +162,28 @@ static void _Scheduler_simple_SMP_Extract_from_ready(
   _Chain_Extract_unprotected( &node_to_extract->Node );
 }
 
+static Thread_Control *_Scheduler_simple_SMP_Get_idle_thread(
+  Scheduler_Context *context
+)
+{
+  return _Scheduler_SMP_Get_idle_thread(
+    context,
+    _Scheduler_simple_SMP_Extract_from_ready
+  );
+}
+
+static void _Scheduler_simple_SMP_Release_idle_thread(
+  Scheduler_Context *context,
+  Thread_Control    *idle
+)
+{
+  _Scheduler_SMP_Release_idle_thread(
+    context,
+    idle,
+    _Scheduler_simple_SMP_Insert_ready_fifo
+  );
+}
+
 void _Scheduler_simple_SMP_Block(
   const Scheduler_Control *scheduler,
   Thread_Control *thread
@@ -175,7 +197,8 @@ void _Scheduler_simple_SMP_Block(
     _Scheduler_simple_SMP_Extract_from_ready,
     _Scheduler_simple_SMP_Get_highest_ready,
     _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_SMP_Allocate_processor_lazy,
+    _Scheduler_simple_SMP_Get_idle_thread
   );
 }
 
@@ -197,7 +220,8 @@ static Thread_Control *_Scheduler_simple_SMP_Enqueue_ordered(
     insert_scheduled,
     _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
     _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_SMP_Allocate_processor_lazy,
+    _Scheduler_simple_SMP_Release_idle_thread
   );
 }
 
@@ -245,11 +269,14 @@ static Thread_Control *_Scheduler_simple_SMP_Enqueue_scheduled_ordered(
     context,
     node,
     order,
+    _Scheduler_simple_SMP_Extract_from_ready,
     _Scheduler_simple_SMP_Get_highest_ready,
     insert_ready,
     insert_scheduled,
     _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_SMP_Allocate_processor_lazy,
+    _Scheduler_simple_SMP_Get_idle_thread,
+    _Scheduler_simple_SMP_Release_idle_thread
   );
 }
 
@@ -291,7 +318,8 @@ Thread_Control *_Scheduler_simple_SMP_Unblock(
   return _Scheduler_SMP_Unblock(
     context,
     thread,
-    _Scheduler_simple_SMP_Enqueue_fifo
+    _Scheduler_simple_SMP_Enqueue_fifo,
+    _Scheduler_simple_SMP_Release_idle_thread
   );
 }
 
@@ -318,6 +346,23 @@ Thread_Control *_Scheduler_simple_SMP_Change_priority(
   );
 }
 
+Thread_Control *_Scheduler_simple_SMP_Ask_for_help(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *offers_help,
+  Thread_Control          *needs_help
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  return _Scheduler_SMP_Ask_for_help(
+    context,
+    offers_help,
+    needs_help,
+    _Scheduler_simple_SMP_Enqueue_fifo,
+    _Scheduler_simple_SMP_Release_idle_thread
+  );
+}
+
 Thread_Control *_Scheduler_simple_SMP_Yield(
   const Scheduler_Control *scheduler,
   Thread_Control *thread
diff --git a/cpukit/score/src/schedulersmpdebug.c b/cpukit/score/src/schedulersmpdebug.c
new file mode 100644
index 0000000..4a45d20
--- /dev/null
+++ b/cpukit/score/src/schedulersmpdebug.c
@@ -0,0 +1,54 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreScheduler
+ *
+ * @brief Scheduler SMP Debug Implementation
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Dornierstr. 4
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/schedulerpriorityimpl.h>
+
+#if defined(RTEMS_DEBUG)
+
+/*
+ * Table with all valid state transitions for _Scheduler_Thread_change_state()
+ * in case RTEMS_DEBUG is defined.
+ */
+const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ] = {
+  /* FROM / TO       BLOCKED SCHEDULED READY */
+  /* BLOCKED    */ { false,  true,     true },
+  /* SCHEDULED  */ { true,   false,    true },
+  /* READY      */ { true,   true,     true }
+};
+
+/*
+ * Table with all valid state transitions for
+ * _Scheduler_SMP_Node_change_state() in case RTEMS_DEBUG is defined.
+ */
+const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
+  /* FROM / TO       BLOCKED SCHEDULED READY */
+  /* BLOCKED    */ { false,  true,     true },
+  /* SCHEDULED  */ { true,   false,    true },
+  /* READY      */ { true,   true,     false }
+};
+
+#endif
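Per the comments above, the first table backs _Scheduler_Thread_change_state()
and the second backs _Scheduler_SMP_Node_change_state(); neither function body
appears in this excerpt.  A minimal sketch of how such a table-driven check
could look, assuming a hypothetical helper name and using only the enum and
the table added by this patch:

    #include <rtems/score/assert.h>
    #include <rtems/score/thread.h>

    #if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
    extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

    /*
     * Hypothetical helper: reject transitions the first table marks as
     * invalid, for example BLOCKED to BLOCKED or SCHEDULED to SCHEDULED.
     */
    static void _Example_change_thread_state(
      Thread_Control         *the_thread,
      Thread_Scheduler_state  new_state
    )
    {
      _Assert(
        _Scheduler_Thread_state_valid_state_changes
          [ the_thread->Scheduler.state ][ new_state ]
      );
      the_thread->Scheduler.state = new_state;
    }
    #endif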
diff --git a/cpukit/score/src/schedulersmpstartidle.c b/cpukit/score/src/schedulersmpstartidle.c
index 6809fd8..de125d3 100644
--- a/cpukit/score/src/schedulersmpstartidle.c
+++ b/cpukit/score/src/schedulersmpstartidle.c
@@ -26,4 +26,5 @@ void _Scheduler_SMP_Start_idle(
 
   _Thread_Set_CPU( thread, cpu );
   _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node );
+  _Chain_Prepend_unprotected( &self->Idle_threads, &thread->Object.Node );
 }
diff --git a/cpukit/score/src/schedulersmpvalidstatechanges.c b/cpukit/score/src/schedulersmpvalidstatechanges.c
deleted file mode 100644
index 6a5dcc6..0000000
--- a/cpukit/score/src/schedulersmpvalidstatechanges.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * @file
- *
- * @ingroup ScoreSchedulerSMP
- *
- * @brief SMP Scheduler Implementation
- */
-
-/*
- * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
- *
- *  embedded brains GmbH
- *  Dornierstr. 4
- *  82178 Puchheim
- *  Germany
- *  <rtems at embedded-brains.de>
- *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
- */
-
-#if HAVE_CONFIG_H
-  #include "config.h"
-#endif
-
-#include <rtems/score/schedulerpriorityimpl.h>
-
-/*
- * Table with all valid state transitions.  It is used in
- * _Scheduler_SMP_Node_change_state() in case RTEMS_DEBUG is defined.
- */
-const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ] = {
-  /* FROM / TO       BLOCKED SCHEDULED READY */
-  /* BLOCKED    */ { false,  true,     true },
-  /* SCHEDULED  */ { true,   false,    true },
-  /* READY      */ { true,   true,     false }
-};
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index e6c4985..e56e4e6 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -181,7 +181,10 @@ bool _Thread_Initialize(
   }
 
 #if defined(RTEMS_SMP)
+  the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
+  the_thread->Scheduler.own_control = scheduler;
   the_thread->Scheduler.control = scheduler;
+  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
   _Resource_Node_initialize( &the_thread->Resource_node );
   _CPU_Context_Set_is_executing( &the_thread->Registers, false );
 #endif
diff --git a/doc/user/smp.t b/doc/user/smp.t
index dd84c37..239a544 100644
--- a/doc/user/smp.t
+++ b/doc/user/smp.t
@@ -147,6 +147,79 @@ another processor.  So if we enable interrupts during this transition we have
 to provide an alternative task independent stack for this time frame.  This
 issue needs further investigation.
 
+@subsection Scheduler Helping Protocol
+
+The scheduler provides a helping protocol to support locking protocols like
+@cite{Migratory Priority Inheritance} or the @cite{Multiprocessor Resource
+Sharing Protocol}.  Each ready task uses at least one scheduler node at a time
+to gain access to a processor.  Each scheduler node has an owner, a user and
+an optional idle task.  The owner of a scheduler node is determined at task
+creation and never changes during the lifetime of a scheduler node.  The user
+of a scheduler node may change due to the scheduler helping protocol.  A
+scheduler node is in one of the four scheduler help states:
+
+@table @dfn
+
+@item help yourself
+
+This scheduler node is solely used by the owner task.  This task owns no
+resources that use a helping protocol and thus does not take part in the
+scheduler helping protocol.  No help will be provided to other tasks.
+
+@item help active owner
+
+This scheduler node is owned by a task actively owning a resource and can be
+used to help out other tasks.
+
+In case this scheduler node changes its state from ready to scheduled and the
+task executes using another node, then an idle task will be provided as a user
+of this node to temporarily execute on behalf of the owner task.  Thus
+lower-priority tasks are denied access to the processors of this scheduler
+instance.
+
+In case a task actively owning a resource performs a blocking operation, then
+an idle task will be used for this node even if it is in the scheduled state.
+
+@item help active rival
+
+This scheduler node is owned by a task actively obtaining a resource currently
+owned by another task and can be used to help out other tasks.
+
+The task owning this node is ready and will give away its processor in case the
+task owning the resource asks for help.
+
+@item help passive
+
+This scheduler node is owned by a task obtaining a resource currently owned by
+another task and can be used to help out other tasks.
+
+The task owning this node is blocked.
+
+@end table
+
+The following scheduler operations return a task in need for help
+
+@itemize @bullet
+@item unblock,
+@item change priority,
+@item yield, and
+@item ask for help.
+@end itemize
+
+A task in need for help is a task that encounters a scheduler state change from
+scheduled to ready or a task that cannot be scheduled in an unblock operation.
+Such a task can ask tasks which depend on resources owned by this task for
+help.
+
+In case it is not possible to schedule a task in need for help, then
+the corresponding scheduler node will be placed into the set of ready
+scheduler nodes of the scheduler instance.  Once a state change from
+ready to scheduled happens for this scheduler node it may be used to
+schedule the task in need for help.
+
+The ask for help scheduler operation is used to help tasks in need for help
+returned by the operations mentioned above.  This operation is also used in
+case the root of a resource sub-tree owned by a task changes.
+
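+The helping protocol becomes active through the locking protocols mentioned
+above.  As an illustrative sketch only, a @cite{Multiprocessor Resource
+Sharing Protocol} semaphore may be created as follows; the variable names are
+placeholders and the semaphore manager documentation is the authoritative
+reference for the attribute set:
+
+@example
+rtems_status_code   sc;
+rtems_id            semaphore_id;
+rtems_task_priority ceiling_priority = 1;
+
+sc = rtems_semaphore_create(
+  rtems_build_name( 'M', 'R', 'S', 'P' ),
+  1,
+  RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+    | RTEMS_BINARY_SEMAPHORE,
+  ceiling_priority,
+  &semaphore_id
+);
+@end example
+
+A task that owns such a semaphore and is preempted in its scheduler instance
+can then be helped by the tasks waiting for this semaphore, as described
+above.
+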
 @subsection Critical Section Techniques and SMP
 
 As discussed earlier, SMP systems have opportunities for true parallelism
-- 
1.7.7


