[rtems commit] score: Decouple thread and scheduler nodes on SMP

Sebastian Huber sebh at rtems.org
Mon Jun 23 08:27:13 UTC 2014


Module:    rtems
Branch:    master
Commit:    8f0c7a46ed1edc5b2489bfd248942d6918836e3f
Changeset: http://git.rtems.org/rtems/commit/?id=8f0c7a46ed1edc5b2489bfd248942d6918836e3f

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Jun 10 16:13:37 2014 +0200

score: Decouple thread and scheduler nodes on SMP

Add a chain node to the scheduler node to decouple the thread and
scheduler nodes.  It is now possible to enqueue a thread in a thread
wait queue and use its scheduler node at the same time for other
threads, e.g. a resource owner.

---

 cpukit/score/include/rtems/score/scheduler.h       |   19 ++-
 cpukit/score/include/rtems/score/schedulerimpl.h   |   22 ++
 .../include/rtems/score/schedulerprioritysmpimpl.h |   48 ++--
 .../score/include/rtems/score/schedulersimplesmp.h |    8 +-
 cpukit/score/include/rtems/score/schedulersmp.h    |    5 +
 .../score/include/rtems/score/schedulersmpimpl.h   |  253 +++++++++++--------
 cpukit/score/src/schedulerpriorityaffinitysmp.c    |  175 +++++++-------
 cpukit/score/src/schedulerprioritysmp.c            |   43 ++--
 cpukit/score/src/schedulersimplesmp.c              |   98 +++++----
 cpukit/score/src/schedulersmpstartidle.c           |    2 +-
 10 files changed, 394 insertions(+), 279 deletions(-)
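
As an aside before the diff itself: the core of the change is that, on
SMP, a Scheduler_Node now carries its own Chain_Node plus an owner
pointer, and the SMP ordering helpers recover the scheduler node from a
Chain_Node by a plain downcast, which relies on the chain node being the
first member.  The sketch below is a minimal, self-contained
illustration of that layout only; Chain_Node, Thread_Control and
Priority_Control are simplified stand-ins for the real RTEMS score
types, and the harness around them is hypothetical, not part of the
commit.

  #include <stdio.h>

  /* Simplified stand-ins for the corresponding RTEMS score types. */
  typedef struct Chain_Node { struct Chain_Node *next, *previous; } Chain_Node;
  typedef struct { const char *name; } Thread_Control;
  typedef unsigned long Priority_Control;

  typedef struct {
    Chain_Node      Node;   /* first member, so a Chain_Node * aliases the node */
    Thread_Control *owner;  /* owning thread, cf. _Scheduler_Node_get_owner() */
  } Scheduler_Node;

  typedef struct {
    Scheduler_Node   Base;
    Priority_Control priority;  /* cached priority of the owning thread */
  } Scheduler_SMP_Node;

  /* Mirrors _Scheduler_SMP_Insert_priority_fifo_order() from the patch;
   * a lower Priority_Control value means more important in RTEMS. */
  static int insert_priority_fifo_order(
    const Chain_Node *to_insert,
    const Chain_Node *next
  )
  {
    const Scheduler_SMP_Node *a = (const Scheduler_SMP_Node *) to_insert;
    const Scheduler_SMP_Node *b = (const Scheduler_SMP_Node *) next;

    return a->priority < b->priority;
  }

  int main( void )
  {
    Thread_Control owner = { "resource-owner" };
    Scheduler_SMP_Node low  = { { { NULL, NULL }, &owner }, 10 };
    Scheduler_SMP_Node high = { { { NULL, NULL }, &owner }, 1 };

    /* The scheduler chains now link Scheduler_Node::Node, not the TCB,
     * and the owning thread is reachable through the node itself. */
    printf(
      "insert high before low: %d, owner of low: %s\n",
      insert_priority_fifo_order( &high.Base.Node, &low.Base.Node ),
      low.Base.owner->name
    );

    return 0;
  }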

diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
index 5be2c98..831accb 100644
--- a/cpukit/score/include/rtems/score/scheduler.h
+++ b/cpukit/score/include/rtems/score/scheduler.h
@@ -165,7 +165,24 @@ struct Scheduler_Control {
  * @brief Scheduler node for per-thread data.
  */
 struct Scheduler_Node {
-  /* No fields yet */
+#if defined(RTEMS_SMP)
+  /**
+   * @brief Chain node for usage in various scheduler data structures.
+   *
+   * Strictly this is the wrong place for this field since the data structures
+   * to manage scheduler nodes belong to the particular scheduler
+   * implementation.  Currently all SMP scheduler implementations use chains.
+   * The node is here to simplify things, just like the object node in the
+   * thread control block.  It may be replaced with a union to add a red-black
+   * tree node in the future.
+   */
+  Chain_Node Node;
+
+  /**
+   * @brief The thread owning this node.
+   */
+  Thread_Control *owner;
+#endif
 };
 
 /**
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 364c658..391a8d7 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -652,6 +652,28 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Node_get(
   return the_thread->Scheduler.node;
 }
 
+RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
+  Scheduler_Node *node,
+  Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+  node->owner = the_thread;
+#else
+  (void) node;
+  (void) the_thread;
+#endif
+}
+
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
+  const Scheduler_Node *node
+)
+{
+  return node->owner;
+}
+#endif
+
 /** @} */
 
 #ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
index d3e2106..8671035 100644
--- a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
@@ -26,6 +26,7 @@
 #include <rtems/score/schedulerprioritysmp.h>
 #include <rtems/score/schedulerpriorityimpl.h>
 #include <rtems/score/schedulersimpleimpl.h>
+#include <rtems/score/schedulersmpimpl.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -50,26 +51,25 @@ static inline Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
   return (Scheduler_priority_SMP_Node *) _Scheduler_Node_get( thread );
 }
 
-static Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_downcast(
-  Scheduler_Node *node
-)
+static inline Scheduler_priority_SMP_Node *
+_Scheduler_priority_SMP_Node_downcast( Scheduler_Node *node )
 {
   return (Scheduler_priority_SMP_Node *) node;
 }
 
 static inline void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
   Scheduler_Context *context,
-  Thread_Control *scheduled_to_ready
+  Scheduler_Node    *scheduled_to_ready
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
   Scheduler_priority_SMP_Node *node =
-    _Scheduler_priority_SMP_Node_get( scheduled_to_ready );
+    _Scheduler_priority_SMP_Node_downcast( scheduled_to_ready );
 
-  _Chain_Extract_unprotected( &scheduled_to_ready->Object.Node );
+  _Chain_Extract_unprotected( &node->Base.Base.Node );
   _Scheduler_priority_Ready_queue_enqueue_first(
-    &scheduled_to_ready->Object.Node,
+    &node->Base.Base.Node,
     &node->Ready_queue,
     &self->Bit_map
   );
@@ -77,37 +77,38 @@ static inline void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
 
 static inline void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
   Scheduler_Context *context,
-  Thread_Control *ready_to_scheduled
+  Scheduler_Node    *ready_to_scheduled
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
   Scheduler_priority_SMP_Node *node =
-    _Scheduler_priority_SMP_Node_get( ready_to_scheduled );
+    _Scheduler_priority_SMP_Node_downcast( ready_to_scheduled );
 
   _Scheduler_priority_Ready_queue_extract(
-    &ready_to_scheduled->Object.Node,
+    &node->Base.Base.Node,
     &node->Ready_queue,
     &self->Bit_map
   );
-  _Scheduler_simple_Insert_priority_fifo(
+  _Chain_Insert_ordered_unprotected(
     &self->Base.Scheduled,
-    &ready_to_scheduled->Object.Node
+    &node->Base.Base.Node,
+    _Scheduler_SMP_Insert_priority_fifo_order
   );
 }
 
 static inline void _Scheduler_priority_SMP_Insert_ready_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *thread
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
   Scheduler_priority_SMP_Node *node =
-    _Scheduler_priority_SMP_Node_get( thread );
+    _Scheduler_priority_SMP_Node_downcast( thread );
 
   _Scheduler_priority_Ready_queue_enqueue(
-    &thread->Object.Node,
+    &node->Base.Base.Node,
     &node->Ready_queue,
     &self->Bit_map
   );
@@ -115,16 +116,16 @@ static inline void _Scheduler_priority_SMP_Insert_ready_lifo(
 
 static inline void _Scheduler_priority_SMP_Insert_ready_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *thread
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
   Scheduler_priority_SMP_Node *node =
-    _Scheduler_priority_SMP_Node_get( thread );
+    _Scheduler_priority_SMP_Node_downcast( thread );
 
   _Scheduler_priority_Ready_queue_enqueue_first(
-    &thread->Object.Node,
+    &node->Base.Base.Node,
     &node->Ready_queue,
     &self->Bit_map
   );
@@ -132,16 +133,16 @@ static inline void _Scheduler_priority_SMP_Insert_ready_fifo(
 
 static inline void _Scheduler_priority_SMP_Extract_from_ready(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *thread
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
   Scheduler_priority_SMP_Node *node =
-    _Scheduler_priority_SMP_Node_get( thread );
+    _Scheduler_priority_SMP_Node_downcast( thread );
 
   _Scheduler_priority_Ready_queue_extract(
-    &thread->Object.Node,
+    &node->Base.Base.Node,
     &node->Ready_queue,
     &self->Bit_map
   );
@@ -149,15 +150,16 @@ static inline void _Scheduler_priority_SMP_Extract_from_ready(
 
 static inline void _Scheduler_priority_SMP_Do_update(
   Scheduler_Context *context,
-  Scheduler_Node *base_node,
+  Scheduler_Node *node_to_update,
   Priority_Control new_priority
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
   Scheduler_priority_SMP_Node *node =
-    _Scheduler_priority_SMP_Node_downcast( base_node );
+    _Scheduler_priority_SMP_Node_downcast( node_to_update );
 
+  _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
   _Scheduler_priority_Ready_queue_update(
     &node->Ready_queue,
     new_priority,
diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h
index 29c1b3a..790cedd 100644
--- a/cpukit/score/include/rtems/score/schedulersimplesmp.h
+++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h
@@ -67,7 +67,7 @@ typedef struct {
     _Scheduler_simple_SMP_Change_priority, \
     _Scheduler_simple_SMP_Node_initialize, \
     _Scheduler_default_Node_destroy, \
-    _Scheduler_default_Update_priority, \
+    _Scheduler_simple_SMP_Update_priority, \
     _Scheduler_priority_Priority_compare, \
     _Scheduler_default_Release_job, \
     _Scheduler_default_Tick, \
@@ -99,6 +99,12 @@ void _Scheduler_simple_SMP_Change_priority(
   bool                     prepend_it
 );
 
+void _Scheduler_simple_SMP_Update_priority(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Priority_Control         new_priority
+);
+
 void _Scheduler_simple_SMP_Yield(
   const Scheduler_Control *scheduler,
   Thread_Control *thread
diff --git a/cpukit/score/include/rtems/score/schedulersmp.h b/cpukit/score/include/rtems/score/schedulersmp.h
index c71cc86..0c51a14 100644
--- a/cpukit/score/include/rtems/score/schedulersmp.h
+++ b/cpukit/score/include/rtems/score/schedulersmp.h
@@ -96,6 +96,11 @@ typedef struct {
    * @brief The state of this node.
    */
   Scheduler_SMP_Node_state state;
+
+  /**
+   * @brief The current priority of thread owning this node.
+   */
+  Priority_Control priority;
 } Scheduler_SMP_Node;
 
 void _Scheduler_SMP_Start_idle(
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index fd42f5a..bb6cfb2 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -274,49 +274,75 @@ extern "C" {
  * @{
  */
 
-typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
+typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
   Scheduler_Context *context,
-  Thread_Control    *blocking
+  Scheduler_Node    *node
 );
 
-typedef Thread_Control *( *Scheduler_SMP_Get_lowest_scheduled )(
+typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
   Scheduler_Context *context,
-  Thread_Control    *thread,
+  Scheduler_Node    *filter,
   Chain_Node_order   order
 );
 
 typedef void ( *Scheduler_SMP_Extract )(
   Scheduler_Context *context,
-  Thread_Control    *thread
+  Scheduler_Node    *node_to_extract
 );
 
 typedef void ( *Scheduler_SMP_Insert )(
   Scheduler_Context *context,
-  Thread_Control    *thread_to_insert
+  Scheduler_Node    *node_to_insert
 );
 
 typedef void ( *Scheduler_SMP_Move )(
   Scheduler_Context *context,
-  Thread_Control    *thread_to_move
+  Scheduler_Node    *node_to_move
 );
 
 typedef void ( *Scheduler_SMP_Update )(
   Scheduler_Context *context,
-  Scheduler_Node    *node,
+  Scheduler_Node    *node_to_update,
   Priority_Control   new_priority
 );
 
 typedef void ( *Scheduler_SMP_Enqueue )(
   Scheduler_Context *context,
-  Thread_Control    *thread_to_enqueue
+  Scheduler_Node    *node_to_enqueue
 );
 
 typedef void ( *Scheduler_SMP_Allocate_processor )(
-  Scheduler_SMP_Context *self,
-  Thread_Control        *scheduled,
-  Thread_Control        *victim
+  Scheduler_Context *context,
+  Scheduler_Node    *scheduled,
+  Scheduler_Node    *victim
 );
 
+static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
+  const Chain_Node *to_insert,
+  const Chain_Node *next
+)
+{
+  const Scheduler_SMP_Node *node_to_insert =
+    (const Scheduler_SMP_Node *) to_insert;
+  const Scheduler_SMP_Node *node_next =
+    (const Scheduler_SMP_Node *) next;
+
+  return node_to_insert->priority <= node_next->priority;
+}
+
+static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
+  const Chain_Node *to_insert,
+  const Chain_Node *next
+)
+{
+  const Scheduler_SMP_Node *node_to_insert =
+    (const Scheduler_SMP_Node *) to_insert;
+  const Scheduler_SMP_Node *node_next =
+    (const Scheduler_SMP_Node *) next;
+
+  return node_to_insert->priority < node_next->priority;
+}
+
 static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
   Scheduler_Context *context
 )
@@ -338,13 +364,30 @@ static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_get(
   return (Scheduler_SMP_Node *) _Scheduler_Node_get( thread );
 }
 
+static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
+  Scheduler_Node *node
+)
+{
+  return (Scheduler_SMP_Node *) node;
+}
+
 static inline void _Scheduler_SMP_Node_initialize(
-  Scheduler_SMP_Node *node
+  Scheduler_SMP_Node *node,
+  Thread_Control     *thread
 )
 {
+  _Scheduler_Node_do_initialize( &node->Base, thread );
   node->state = SCHEDULER_SMP_NODE_BLOCKED;
 }
 
+static inline void _Scheduler_SMP_Node_update_priority(
+  Scheduler_SMP_Node *node,
+  Priority_Control    new_priority
+)
+{
+  node->priority = new_priority;
+}
+
 extern const bool _Scheduler_SMP_Node_valid_state_changes[ 3 ][ 3 ];
 
 static inline void _Scheduler_SMP_Node_change_state(
@@ -360,11 +403,11 @@ static inline void _Scheduler_SMP_Node_change_state(
 }
 
 static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
-  const Scheduler_SMP_Context *self,
-  const Per_CPU_Control       *cpu
+  const Scheduler_Context *context,
+  const Per_CPU_Control   *cpu
 )
 {
-  return cpu->scheduler_context == &self->Base;
+  return cpu->scheduler_context == context;
 }
 
 static inline void _Scheduler_SMP_Update_heir(
@@ -396,73 +439,76 @@ static inline void _Scheduler_SMP_Update_heir(
 }
 
 static inline void _Scheduler_SMP_Allocate_processor(
-  Scheduler_SMP_Context *self,
-  Thread_Control        *scheduled,
-  Thread_Control        *victim
+  Scheduler_Context *context,
+  Scheduler_Node    *scheduled,
+  Scheduler_Node    *victim
 )
 {
-  Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
-  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
-  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
+  Thread_Control *scheduled_thread = _Scheduler_Node_get_owner( scheduled );
+  Thread_Control *victim_thread = _Scheduler_Node_get_owner( victim );
+  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
+  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
   Per_CPU_Control *cpu_self = _Per_CPU_Get();
   Thread_Control *heir;
 
   _Scheduler_SMP_Node_change_state(
-    scheduled_node,
+    _Scheduler_SMP_Node_downcast( scheduled ),
     SCHEDULER_SMP_NODE_SCHEDULED
   );
 
   _Assert( _ISR_Get_level() != 0 );
 
-  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
-    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
-      heir = cpu_of_scheduled->heir;
-      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
+  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
+    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
+      heir = scheduled_cpu->heir;
+      _Scheduler_SMP_Update_heir(
+        cpu_self,
+        scheduled_cpu,
+        scheduled_thread
+      );
     } else {
       /* We have to force a migration to our processor set */
-      _Assert( scheduled->Scheduler.debug_real_cpu->heir != scheduled );
-      heir = scheduled;
+      _Assert(
+        scheduled_thread->Scheduler.debug_real_cpu->heir != scheduled_thread
+      );
+      heir = scheduled_thread;
     }
   } else {
-    heir = scheduled;
+    heir = scheduled_thread;
   }
 
-  if ( heir != victim ) {
-    _Thread_Set_CPU( heir, cpu_of_victim );
-    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
+  if ( heir != victim_thread ) {
+    _Thread_Set_CPU( heir, victim_cpu );
+    _Scheduler_SMP_Update_heir( cpu_self, victim_cpu, heir );
   }
 }
 
-static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
+static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
   Scheduler_Context *context,
-  Thread_Control    *filter,
+  Scheduler_Node    *filter,
   Chain_Node_order   order
 )
 {
   Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
-  Thread_Control *lowest_ready = NULL;
   Chain_Control *scheduled = &self->Scheduled;
+  Scheduler_Node *lowest_scheduled =
+    (Scheduler_Node *) _Chain_Last( scheduled );
 
-  if ( !_Chain_Is_empty( scheduled ) ) {
-    lowest_ready = (Thread_Control *) _Chain_Last( scheduled );
-  }
+  (void) filter;
+  (void) order;
 
-  /*
-   * _Scheduler_SMP_Enqueue_ordered() assumes that get_lowest_scheduled
-   * helpers may return NULL. But this method never should.
-   */
-  _Assert( lowest_ready != NULL );
+  _Assert( lowest_scheduled != _Chain_Tail( scheduled ) );
 
-  return lowest_ready;
+  return lowest_scheduled;
 }
 
 /**
- * @brief Enqueues a thread according to the specified order function.
+ * @brief Enqueues a node according to the specified order function.
  *
- * The thread must not be in the scheduled state.
+ * The node must not be in the scheduled state.
  *
  * @param[in] context The scheduler instance context.
- * @param[in] thread The thread to enqueue.
+ * @param[in] node The node to enqueue.
  * @param[in] order The order function.
  * @param[in] insert_ready Function to insert a node into the set of ready
  *   nodes.
@@ -470,16 +516,16 @@ static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
  *   scheduled nodes.
  * @param[in] move_from_scheduled_to_ready Function to move a node from the set
  *   of scheduled nodes to the set of ready nodes.
- * @param[in] get_lowest_scheduled Function to select the thread from the
+ * @param[in] get_lowest_scheduled Function to select the node from the
  *   scheduled nodes to replace.  It may not be possible to find one, in this
  *   case a pointer must be returned so that the order functions returns false
  *   if this pointer is passed as the second argument to the order function.
- * @param[in] allocate_processor Function to allocate a processor to a thread
+ * @param[in] allocate_processor Function to allocate a processor to a node
  *   based on the rules of the scheduler.
  */
 static inline void _Scheduler_SMP_Enqueue_ordered(
   Scheduler_Context                  *context,
-  Thread_Control                     *thread,
+  Scheduler_Node                     *node,
   Chain_Node_order                    order,
   Scheduler_SMP_Insert                insert_ready,
   Scheduler_SMP_Insert                insert_scheduled,
@@ -488,32 +534,28 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
   Scheduler_SMP_Allocate_processor    allocate_processor
 )
 {
-  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
-  Thread_Control *lowest_scheduled =
-    ( *get_lowest_scheduled )( context, thread, order );
-
-  if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
-    Scheduler_SMP_Node *lowest_scheduled_node =
-      _Scheduler_SMP_Node_get( lowest_scheduled );
+  Scheduler_Node *lowest_scheduled =
+    ( *get_lowest_scheduled )( context, node, order );
 
+  if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
     _Scheduler_SMP_Node_change_state(
-      lowest_scheduled_node,
+      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
       SCHEDULER_SMP_NODE_READY
     );
-    ( *allocate_processor )( self, thread, lowest_scheduled );
-    ( *insert_scheduled )( &self->Base, thread );
-    ( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
+    ( *allocate_processor )( context, node, lowest_scheduled );
+    ( *insert_scheduled )( context, node );
+    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
   } else {
-    ( *insert_ready )( &self->Base, thread );
+    ( *insert_ready )( context, node );
   }
 }
 
 /**
- * @brief Enqueues a scheduled thread according to the specified order
+ * @brief Enqueues a scheduled node according to the specified order
  * function.
  *
  * @param[in] context The scheduler instance context.
- * @param[in] thread The thread to enqueue.
+ * @param[in] node The node to enqueue.
  * @param[in] order The order function.
  * @param[in] get_highest_ready Function to get the highest ready node.
  * @param[in] insert_ready Function to insert a node into the set of ready
@@ -522,12 +564,12 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
  *   scheduled nodes.
  * @param[in] move_from_ready_to_scheduled Function to move a node from the set
  *   of ready nodes to the set of scheduled nodes.
- * @param[in] allocate_processor Function to allocate a processor to a thread
+ * @param[in] allocate_processor Function to allocate a processor to a node
  *   based on the rules of the scheduler.
  */
 static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
   Scheduler_Context                *context,
-  Thread_Control                   *thread,
+  Scheduler_Node                   *node,
   Chain_Node_order                  order,
   Scheduler_SMP_Get_highest_ready   get_highest_ready,
   Scheduler_SMP_Insert              insert_ready,
@@ -536,49 +578,46 @@ static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
   Scheduler_SMP_Allocate_processor  allocate_processor
 )
 {
-  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
-  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
-  Thread_Control *highest_ready =
-    ( *get_highest_ready )( &self->Base, thread );
+  Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
 
   _Assert( highest_ready != NULL );
 
   /*
-   * The thread has been extracted from the scheduled chain.  We have to place
+   * The node has been extracted from the scheduled chain.  We have to place
    * it now on the scheduled or ready set.
    */
-  if ( ( *order )( &thread->Object.Node, &highest_ready->Object.Node ) ) {
-    ( *insert_scheduled )( &self->Base, thread );
+  if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
+    ( *insert_scheduled )( context, node );
   } else {
-    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
-    ( *allocate_processor) ( self, highest_ready, thread );
-    ( *insert_ready )( &self->Base, thread );
-    ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
+    _Scheduler_SMP_Node_change_state(
+      _Scheduler_SMP_Node_downcast( node ),
+      SCHEDULER_SMP_NODE_READY
+    );
+    ( *allocate_processor) ( context, highest_ready, node );
+    ( *insert_ready )( context, node );
+    ( *move_from_ready_to_scheduled )( context, highest_ready );
   }
 }
 
 static inline void _Scheduler_SMP_Extract_from_scheduled(
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
-  _Chain_Extract_unprotected( &thread->Object.Node );
+  _Chain_Extract_unprotected( &node->Node );
 }
 
 static inline void _Scheduler_SMP_Schedule_highest_ready(
   Scheduler_Context                *context,
-  Thread_Control                   *victim,
+  Scheduler_Node                   *victim,
   Scheduler_SMP_Get_highest_ready   get_highest_ready,
   Scheduler_SMP_Move                move_from_ready_to_scheduled,
   Scheduler_SMP_Allocate_processor  allocate_processor
 )
 {
-  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
-  Thread_Control *highest_ready =
-    ( *get_highest_ready )( &self->Base, victim );
-
-  ( *allocate_processor )( self, highest_ready, victim );
+  Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
 
-  ( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
+  ( *allocate_processor )( context, highest_ready, victim );
+  ( *move_from_ready_to_scheduled )( context, highest_ready );
 }
 
 /**
@@ -607,17 +646,17 @@ static inline void _Scheduler_SMP_Block(
   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
 
   if ( is_scheduled ) {
-    _Scheduler_SMP_Extract_from_scheduled( thread );
+    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
 
     _Scheduler_SMP_Schedule_highest_ready(
       context,
-      thread,
+      &node->Base,
       get_highest_ready,
       move_from_ready_to_scheduled,
       allocate_processor
     );
   } else {
-    ( *extract_from_ready )( context, thread );
+    ( *extract_from_ready )( context, &node->Base );
   }
 }
 
@@ -631,7 +670,7 @@ static inline void _Scheduler_SMP_Unblock(
 
   _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
 
-  ( *enqueue_fifo )( context, thread );
+  ( *enqueue_fifo )( context, &node->Base );
 }
 
 static inline void _Scheduler_SMP_Change_priority(
@@ -650,24 +689,24 @@ static inline void _Scheduler_SMP_Change_priority(
   Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
 
   if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
-    _Scheduler_SMP_Extract_from_scheduled( thread );
+    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
 
     ( *update )( context, &node->Base, new_priority );
 
     if ( prepend_it ) {
-      ( *enqueue_scheduled_lifo )( context, thread );
+      ( *enqueue_scheduled_lifo )( context, &node->Base );
     } else {
-      ( *enqueue_scheduled_fifo )( context, thread );
+      ( *enqueue_scheduled_fifo )( context, &node->Base );
     }
   } else {
-    ( *extract_from_ready )( context, thread );
+    ( *extract_from_ready )( context, &node->Base );
 
     ( *update )( context, &node->Base, new_priority );
 
     if ( prepend_it ) {
-      ( *enqueue_lifo )( context, thread );
+      ( *enqueue_lifo )( context, &node->Base );
     } else {
-      ( *enqueue_fifo )( context, thread );
+      ( *enqueue_fifo )( context, &node->Base );
     }
   }
 }
@@ -683,41 +722,41 @@ static inline void _Scheduler_SMP_Yield(
   Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
 
   if ( node->state == SCHEDULER_SMP_NODE_SCHEDULED ) {
-    _Scheduler_SMP_Extract_from_scheduled( thread );
+    _Scheduler_SMP_Extract_from_scheduled( &node->Base );
 
-    ( *enqueue_scheduled_fifo )( context, thread );
+    ( *enqueue_scheduled_fifo )( context, &node->Base );
   } else {
-    ( *extract_from_ready )( context, thread );
+    ( *extract_from_ready )( context, &node->Base );
 
-    ( *enqueue_fifo )( context, thread );
+    ( *enqueue_fifo )( context, &node->Base );
   }
 }
 
 static inline void _Scheduler_SMP_Insert_scheduled_lifo(
   Scheduler_Context *context,
-  Thread_Control    *thread
+  Scheduler_Node    *node_to_insert
 )
 {
   Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
 
   _Chain_Insert_ordered_unprotected(
     &self->Scheduled,
-    &thread->Object.Node,
-    _Scheduler_simple_Insert_priority_lifo_order
+    &node_to_insert->Node,
+    _Scheduler_SMP_Insert_priority_lifo_order
   );
 }
 
 static inline void _Scheduler_SMP_Insert_scheduled_fifo(
   Scheduler_Context *context,
-  Thread_Control    *thread
+  Scheduler_Node    *node_to_insert
 )
 {
   Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
 
   _Chain_Insert_ordered_unprotected(
     &self->Scheduled,
-    &thread->Object.Node,
-    _Scheduler_simple_Insert_priority_fifo_order
+    &node_to_insert->Node,
+    _Scheduler_SMP_Insert_priority_fifo_order
   );
 }
 
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index f5ab8cf..bc24054 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -47,11 +47,8 @@ static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
   const Chain_Node *next
 )
 {
-  const Thread_Control *thread_to_insert = (const Thread_Control *) to_insert;
-  const Thread_Control *thread_next = (const Thread_Control *) next;
-
   return next != NULL
-    && thread_to_insert->current_priority <= thread_next->current_priority;
+    && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
 }
 
 static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
@@ -59,11 +56,8 @@ static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
   const Chain_Node *next
 )
 {
-  const Thread_Control *thread_to_insert = (const Thread_Control *) to_insert;
-  const Thread_Control *thread_next = (const Thread_Control *) next;
-
   return next != NULL
-    && thread_to_insert->current_priority < thread_next->current_priority;
+    && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
 }
 
 /*
@@ -78,6 +72,14 @@ _Scheduler_priority_affinity_SMP_Node_get(
   return (Scheduler_priority_affinity_SMP_Node *) _Scheduler_Node_get( thread );
 }
 
+static Scheduler_priority_affinity_SMP_Node *
+_Scheduler_priority_affinity_SMP_Node_downcast(
+  Scheduler_Node *node
+)
+{
+  return (Scheduler_priority_affinity_SMP_Node *) node;
+}
+
 /*
  * This method initializes the scheduler control information for
  * this scheduler instance.
@@ -87,18 +89,16 @@ void _Scheduler_priority_affinity_SMP_Node_initialize(
   Thread_Control          *thread
 )
 {
-  Scheduler_SMP_Node *smp_node = _Scheduler_SMP_Node_get( thread );
-
   Scheduler_priority_affinity_SMP_Node *node =
     _Scheduler_priority_affinity_SMP_Node_get( thread );
 
   (void) scheduler;
 
+  _Scheduler_SMP_Node_initialize( &node->Base.Base, thread );
+
   /*
    *  All we add is affinity information to the basic SMP node.
    */
-  _Scheduler_SMP_Node_initialize( smp_node );
-
   node->Affinity     = *_CPU_set_Default();
   node->Affinity.set = &node->Affinity.preallocated;
 }
@@ -109,22 +109,25 @@ void _Scheduler_priority_affinity_SMP_Node_initialize(
  * attempts to prevent migrations but does not take into account affinity
  */
 static inline void _Scheduler_SMP_Allocate_processor_exact(
-   Scheduler_SMP_Context *self,
-   Thread_Control        *scheduled,
-   Thread_Control        *victim
+   Scheduler_Context *context,
+   Scheduler_Node    *scheduled,
+   Scheduler_Node    *victim
 )
 {
-   Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
-   Per_CPU_Control    *cpu_of_victim = _Thread_Get_CPU( victim );
-   Per_CPU_Control    *cpu_self = _Per_CPU_Get();
+  Thread_Control  *victim_thread = _Scheduler_Node_get_owner( victim );
+  Thread_Control  *scheduled_thread = _Scheduler_Node_get_owner( scheduled );
+  Per_CPU_Control *victim_cpu = _Thread_Get_CPU( victim_thread );
+  Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+  (void) context;
 
-   _Scheduler_SMP_Node_change_state(
-     scheduled_node,
-     SCHEDULER_SMP_NODE_SCHEDULED
-   );
+  _Scheduler_SMP_Node_change_state(
+    _Scheduler_SMP_Node_downcast( scheduled ),
+    SCHEDULER_SMP_NODE_SCHEDULED
+  );
 
-   _Thread_Set_CPU( scheduled, cpu_of_victim );
-   _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, scheduled );
+  _Thread_Set_CPU( scheduled_thread, victim_cpu );
+  _Scheduler_SMP_Update_heir( cpu_self, victim_cpu, scheduled_thread );
 }
 
 /*
@@ -134,28 +137,34 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
  * the highest ready thread must have affinity such that it can
  * be executed on the victim's processor.
  */
-static Thread_Control *_Scheduler_priority_affinity_SMP_Get_highest_ready(
+static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
   Scheduler_Context *context,
-  Thread_Control    *victim
+  Scheduler_Node    *victim
 )
 {
-  Scheduler_priority_SMP_Context *self =
+  Scheduler_priority_SMP_Context       *self =
     _Scheduler_priority_SMP_Get_self( context );
-  Priority_Control                index;
-  Thread_Control                 *highest = NULL;
-  int                             victim_cpu;
+  Priority_Control                      index;
+  Scheduler_Node                       *highest = NULL;
+  Thread_Control                       *victim_thread;
+  uint32_t                              victim_cpu_index;
+  Scheduler_priority_affinity_SMP_Node *node;
 
   /*
    * This is done when we need to check if reevaluations are needed.
    */
   if ( victim == NULL ) {
-    return _Scheduler_priority_Ready_queue_first(
+    node = (Scheduler_priority_affinity_SMP_Node *)
+      _Scheduler_priority_Ready_queue_first(
         &self->Bit_map,
         &self->Ready[ 0 ]
       );
+
+    return &node->Base.Base.Base;
   }
 
-  victim_cpu = _Per_CPU_Get_index( _Thread_Get_CPU( victim ) );
+  victim_thread = _Scheduler_Node_get_owner( victim );
+  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );
 
   /**
    * @todo The deterministic priority scheduler structure is optimized
@@ -181,17 +190,13 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Get_highest_ready(
           chain_node != _Chain_Immutable_tail( chain ) ;
           chain_node = _Chain_Next( chain_node ) )
     {
-      Thread_Control                       *thread;
-      Scheduler_priority_affinity_SMP_Node *node;
-
-      thread = (Thread_Control *) chain_node;
-      node = _Scheduler_priority_affinity_SMP_Node_get( thread );
+      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;
 
       /*
        * Can this thread run on this CPU?
        */
-      if ( CPU_ISSET( victim_cpu, node->Affinity.set ) ) {
-        highest = thread;
+      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
+        highest = &node->Base.Base.Base;
         break;
       }
     }
@@ -240,37 +245,42 @@ void _Scheduler_priority_affinity_SMP_Block(
  * thread because the potential victim thread does not have affinity
  * for that processor.
  */
-static Thread_Control *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
+static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
   Scheduler_Context *context,
-  Thread_Control    *filter,
+  Scheduler_Node    *filter_base,
   Chain_Node_order   order
 )
 {
   Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
-  Thread_Control  *lowest_scheduled = NULL;
-  Thread_Control  *thread = NULL;
+  Scheduler_Node *lowest_scheduled = NULL;
   Chain_Control   *scheduled = &self->Scheduled;
-  Scheduler_priority_affinity_SMP_Node *node =
-    _Scheduler_priority_affinity_SMP_Node_get( filter );
+  Chain_Node      *chain_node;
+  Scheduler_priority_affinity_SMP_Node *filter =
+    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );
+
+  for ( chain_node = _Chain_Last( scheduled );
+        chain_node != _Chain_Immutable_head( scheduled ) ;
+        chain_node = _Chain_Previous( chain_node ) ) {
+    Scheduler_priority_affinity_SMP_Node *node;
+    Thread_Control                       *thread;
+    uint32_t                              cpu_index;
 
-  for ( thread =  (Thread_Control *) _Chain_Last( scheduled );
-        (Chain_Node *) thread != _Chain_Immutable_head( scheduled ) ;
-        thread = (Thread_Control *) _Chain_Previous( &thread->Object.Node ) ) {
-    int   cpu_index;
+    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;
 
     /*
      * If we didn't find a thread which is of equal or lower importance
      * than filter thread is, then we can't schedule the filter thread
      * to execute.
      */
-    if ( (*order)(&thread->Object.Node, &filter->Object.Node) )
+    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
       break;
 
     /* cpu_index is the processor number thread is executing on */
+    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
     cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );
 
-    if ( CPU_ISSET( cpu_index, node->Affinity.set ) ) {
-      lowest_scheduled = thread;
+    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
+      lowest_scheduled = &node->Base.Base.Base;
       break;
     }
 
@@ -286,12 +296,12 @@ static Thread_Control *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  */
 static void _Scheduler_priority_affinity_SMP_Enqueue_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node
 )
 {
   _Scheduler_SMP_Enqueue_ordered(
     context,
-    thread,
+    node,
     _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
     _Scheduler_priority_SMP_Insert_ready_fifo,
     _Scheduler_SMP_Insert_scheduled_fifo,
@@ -312,19 +322,19 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
   Scheduler_Context *context
 )
 {
-  Thread_Control        *lowest_scheduled;
-  Thread_Control        *highest_ready;
-  Scheduler_SMP_Node    *lowest_scheduled_node;
-  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+  Scheduler_Node        *lowest_scheduled;
+  Scheduler_Node        *highest_ready;
 
   while (1) {
     highest_ready =
       _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );
-    lowest_scheduled = _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
-      context,
-      highest_ready,
-      _Scheduler_simple_Insert_priority_lifo_order
-    );
+
+    lowest_scheduled =
+      _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
+        context,
+        highest_ready,
+        _Scheduler_SMP_Insert_priority_lifo_order
+      );
 
     /*
      * If we can't find a thread to displace from the scheduled set,
@@ -342,15 +352,14 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
      * But if we found a thread which is lower priority than one
      * in the ready set, then we need to swap them out.
      */
-    lowest_scheduled_node = _Scheduler_SMP_Node_get( lowest_scheduled );
 
     _Scheduler_SMP_Node_change_state(
-      lowest_scheduled_node,
+      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
       SCHEDULER_SMP_NODE_READY
     );
 
     _Scheduler_SMP_Allocate_processor_exact(
-      self,
+      context,
       highest_ready,
       lowest_scheduled
     );
@@ -361,7 +370,7 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
     );
 
     _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
-      &self->Base,
+      context,
       lowest_scheduled
     );
   }
@@ -395,7 +404,7 @@ void _Scheduler_priority_affinity_SMP_Unblock(
  */
 static void _Scheduler_priority_affinity_SMP_Enqueue_ordered(
   Scheduler_Context     *context,
-  Thread_Control        *thread,
+  Scheduler_Node        *node,
   Chain_Node_order       order,
   Scheduler_SMP_Insert   insert_ready,
   Scheduler_SMP_Insert   insert_scheduled
@@ -403,7 +412,7 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_ordered(
 {
   _Scheduler_SMP_Enqueue_ordered(
     context,
-    thread,
+    node,
     order,
     insert_ready,
     insert_scheduled,
@@ -420,12 +429,12 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_ordered(
  */
 static void _Scheduler_priority_affinity_SMP_Enqueue_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node
 )
 {
   _Scheduler_priority_affinity_SMP_Enqueue_ordered(
     context,
-    thread,
+    node,
     _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
     _Scheduler_priority_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo
@@ -438,16 +447,16 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_lifo(
  * this scheduler's get_highest_ready() helper.
  */
 static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
-  Scheduler_Context *context,
-  Thread_Control *thread,
-  Chain_Node_order order,
-  Scheduler_SMP_Insert insert_ready,
-  Scheduler_SMP_Insert insert_scheduled
+  Scheduler_Context    *context,
+  Scheduler_Node       *node,
+  Chain_Node_order      order,
+  Scheduler_SMP_Insert  insert_ready,
+  Scheduler_SMP_Insert  insert_scheduled
 )
 {
   _Scheduler_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
+    node,
     order,
     _Scheduler_priority_affinity_SMP_Get_highest_ready,
     insert_ready,
@@ -464,13 +473,13 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
  */
 static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node
 )
 {
   _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_lifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
     _Scheduler_priority_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo
   );
@@ -483,13 +492,13 @@ static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
  */
 static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node
 )
 {
   _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_fifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_fifo_order,
     _Scheduler_priority_SMP_Insert_ready_fifo,
     _Scheduler_SMP_Insert_scheduled_fifo
   );
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index b6b5ff4..f340b83 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -25,7 +25,6 @@
 #endif
 
 #include <rtems/score/schedulerprioritysmpimpl.h>
-#include <rtems/score/schedulersmpimpl.h>
 
 static Scheduler_priority_SMP_Context *
 _Scheduler_priority_SMP_Get_context( const Scheduler_Control *scheduler )
@@ -50,7 +49,7 @@ void _Scheduler_priority_SMP_Node_initialize(
 {
   Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
 
-  _Scheduler_SMP_Node_initialize( node );
+  _Scheduler_SMP_Node_initialize( node, thread );
 }
 
 void _Scheduler_priority_SMP_Update_priority(
@@ -65,17 +64,17 @@ void _Scheduler_priority_SMP_Update_priority(
   _Scheduler_priority_SMP_Do_update( context, node, new_priority );
 }
 
-static Thread_Control *_Scheduler_priority_SMP_Get_highest_ready(
+static Scheduler_Node *_Scheduler_priority_SMP_Get_highest_ready(
   Scheduler_Context *context,
-  Thread_Control    *thread
+  Scheduler_Node    *node
 )
 {
   Scheduler_priority_SMP_Context *self =
     _Scheduler_priority_SMP_Get_self( context );
 
-  (void) thread;
+  (void) node;
 
-  return (Thread_Control *) _Scheduler_priority_Ready_queue_first(
+  return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first(
     &self->Bit_map,
     &self->Ready[ 0 ]
   );
@@ -100,7 +99,7 @@ void _Scheduler_priority_SMP_Block(
 
 static void _Scheduler_priority_SMP_Enqueue_ordered(
   Scheduler_Context *context,
-  Thread_Control *thread,
+  Scheduler_Node *node,
   Chain_Node_order order,
   Scheduler_SMP_Insert insert_ready,
   Scheduler_SMP_Insert insert_scheduled
@@ -108,7 +107,7 @@ static void _Scheduler_priority_SMP_Enqueue_ordered(
 {
   _Scheduler_SMP_Enqueue_ordered(
     context,
-    thread,
+    node,
     order,
     insert_ready,
     insert_scheduled,
@@ -120,13 +119,13 @@ static void _Scheduler_priority_SMP_Enqueue_ordered(
 
 static void _Scheduler_priority_SMP_Enqueue_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_priority_SMP_Enqueue_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_lifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
     _Scheduler_priority_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo
   );
@@ -134,13 +133,13 @@ static void _Scheduler_priority_SMP_Enqueue_lifo(
 
 static void _Scheduler_priority_SMP_Enqueue_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_priority_SMP_Enqueue_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_fifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_fifo_order,
     _Scheduler_priority_SMP_Insert_ready_fifo,
     _Scheduler_SMP_Insert_scheduled_fifo
   );
@@ -148,7 +147,7 @@ static void _Scheduler_priority_SMP_Enqueue_fifo(
 
 static void _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
   Scheduler_Context *context,
-  Thread_Control *thread,
+  Scheduler_Node *node,
   Chain_Node_order order,
   Scheduler_SMP_Insert insert_ready,
   Scheduler_SMP_Insert insert_scheduled
@@ -156,7 +155,7 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
 {
   _Scheduler_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
+    node,
     order,
     _Scheduler_priority_SMP_Get_highest_ready,
     insert_ready,
@@ -168,13 +167,13 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
 
 static void _Scheduler_priority_SMP_Enqueue_scheduled_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_lifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
     _Scheduler_priority_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo
   );
@@ -182,13 +181,13 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled_lifo(
 
 static void _Scheduler_priority_SMP_Enqueue_scheduled_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_fifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_fifo_order,
     _Scheduler_priority_SMP_Insert_ready_fifo,
     _Scheduler_SMP_Insert_scheduled_fifo
   );
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 37458d6..4b0ce0a 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -49,66 +49,82 @@ void _Scheduler_simple_SMP_Node_initialize(
 {
   Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( the_thread );
 
-  _Scheduler_SMP_Node_initialize( node );
+  _Scheduler_SMP_Node_initialize( node, the_thread );
 }
 
 static void _Scheduler_simple_SMP_Do_update(
   Scheduler_Context *context,
-  Scheduler_Node *node,
-  Priority_Control new_priority
+  Scheduler_Node    *node_to_update,
+  Priority_Control   new_priority
 )
 {
+  Scheduler_SMP_Node *node = _Scheduler_SMP_Node_downcast( node_to_update );
+
   (void) context;
-  (void) node;
-  (void) new_priority;
+
+  _Scheduler_SMP_Node_update_priority( node, new_priority );
 }
 
-static Thread_Control *_Scheduler_simple_SMP_Get_highest_ready(
+void _Scheduler_simple_SMP_Update_priority(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Priority_Control         new_priority
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+  Scheduler_Node *node = _Scheduler_Node_get( thread );
+
+  _Scheduler_simple_SMP_Do_update( context, node, new_priority );
+}
+
+static Scheduler_Node *_Scheduler_simple_SMP_Get_highest_ready(
   Scheduler_Context *context,
-  Thread_Control    *thread
+  Scheduler_Node    *node
 )
 {
   Scheduler_simple_SMP_Context *self =
     _Scheduler_simple_SMP_Get_self( context );
 
-  (void) thread;
+  (void) node;
 
-  return (Thread_Control *) _Chain_First( &self->Ready );
+  return (Scheduler_Node *) _Chain_First( &self->Ready );
 }
 
 static void _Scheduler_simple_SMP_Move_from_scheduled_to_ready(
   Scheduler_Context *context,
-  Thread_Control *scheduled_to_ready
+  Scheduler_Node    *scheduled_to_ready
 )
 {
   Scheduler_simple_SMP_Context *self =
     _Scheduler_simple_SMP_Get_self( context );
 
-  _Chain_Extract_unprotected( &scheduled_to_ready->Object.Node );
-  _Scheduler_simple_Insert_priority_lifo(
+  _Chain_Extract_unprotected( &scheduled_to_ready->Node );
+  _Chain_Insert_ordered_unprotected(
     &self->Ready,
-    scheduled_to_ready
+    &scheduled_to_ready->Node,
+    _Scheduler_SMP_Insert_priority_lifo_order
   );
 }
 
 static void _Scheduler_simple_SMP_Move_from_ready_to_scheduled(
   Scheduler_Context *context,
-  Thread_Control *ready_to_scheduled
+  Scheduler_Node    *ready_to_scheduled
 )
 {
   Scheduler_simple_SMP_Context *self =
     _Scheduler_simple_SMP_Get_self( context );
 
-  _Chain_Extract_unprotected( &ready_to_scheduled->Object.Node );
-  _Scheduler_simple_Insert_priority_fifo(
+  _Chain_Extract_unprotected( &ready_to_scheduled->Node );
+  _Chain_Insert_ordered_unprotected(
     &self->Base.Scheduled,
-    ready_to_scheduled
+    &ready_to_scheduled->Node,
+    _Scheduler_SMP_Insert_priority_fifo_order
   );
 }
 
 static void _Scheduler_simple_SMP_Insert_ready_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node_to_insert
 )
 {
   Scheduler_simple_SMP_Context *self =
@@ -116,14 +132,14 @@ static void _Scheduler_simple_SMP_Insert_ready_lifo(
 
   _Chain_Insert_ordered_unprotected(
     &self->Ready,
-    &thread->Object.Node,
-    _Scheduler_simple_Insert_priority_lifo_order
+    &node_to_insert->Node,
+    _Scheduler_SMP_Insert_priority_lifo_order
   );
 }
 
 static void _Scheduler_simple_SMP_Insert_ready_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node_to_insert
 )
 {
   Scheduler_simple_SMP_Context *self =
@@ -131,19 +147,19 @@ static void _Scheduler_simple_SMP_Insert_ready_fifo(
 
   _Chain_Insert_ordered_unprotected(
     &self->Ready,
-    &thread->Object.Node,
-    _Scheduler_simple_Insert_priority_fifo_order
+    &node_to_insert->Node,
+    _Scheduler_SMP_Insert_priority_fifo_order
   );
 }
 
 static void _Scheduler_simple_SMP_Extract_from_ready(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node    *node_to_extract
 )
 {
   (void) context;
 
-  _Chain_Extract_unprotected( &thread->Object.Node );
+  _Chain_Extract_unprotected( &node_to_extract->Node );
 }
 
 void _Scheduler_simple_SMP_Block(
@@ -165,7 +181,7 @@ void _Scheduler_simple_SMP_Block(
 
 static void _Scheduler_simple_SMP_Enqueue_ordered(
   Scheduler_Context *context,
-  Thread_Control *thread,
+  Scheduler_Node *node,
   Chain_Node_order order,
   Scheduler_SMP_Insert insert_ready,
   Scheduler_SMP_Insert insert_scheduled
@@ -173,7 +189,7 @@ static void _Scheduler_simple_SMP_Enqueue_ordered(
 {
   _Scheduler_SMP_Enqueue_ordered(
     context,
-    thread,
+    node,
     order,
     insert_ready,
     insert_scheduled,
@@ -185,13 +201,13 @@ static void _Scheduler_simple_SMP_Enqueue_ordered(
 
 static void _Scheduler_simple_SMP_Enqueue_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_simple_SMP_Enqueue_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_lifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
     _Scheduler_simple_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo
   );
@@ -199,13 +215,13 @@ static void _Scheduler_simple_SMP_Enqueue_lifo(
 
 static void _Scheduler_simple_SMP_Enqueue_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_simple_SMP_Enqueue_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_fifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_fifo_order,
     _Scheduler_simple_SMP_Insert_ready_fifo,
     _Scheduler_SMP_Insert_scheduled_fifo
   );
@@ -213,7 +229,7 @@ static void _Scheduler_simple_SMP_Enqueue_fifo(
 
 static void _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
   Scheduler_Context *context,
-  Thread_Control *thread,
+  Scheduler_Node *node,
   Chain_Node_order order,
   Scheduler_SMP_Insert insert_ready,
   Scheduler_SMP_Insert insert_scheduled
@@ -221,7 +237,7 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
 {
   _Scheduler_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
+    node,
     order,
     _Scheduler_simple_SMP_Get_highest_ready,
     insert_ready,
@@ -233,13 +249,13 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
 
 static void _Scheduler_simple_SMP_Enqueue_scheduled_lifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_lifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_lifo_order,
     _Scheduler_simple_SMP_Insert_ready_lifo,
     _Scheduler_SMP_Insert_scheduled_lifo
   );
@@ -247,13 +263,13 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled_lifo(
 
 static void _Scheduler_simple_SMP_Enqueue_scheduled_fifo(
   Scheduler_Context *context,
-  Thread_Control *thread
+  Scheduler_Node *node
 )
 {
   _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
     context,
-    thread,
-    _Scheduler_simple_Insert_priority_fifo_order,
+    node,
+    _Scheduler_SMP_Insert_priority_fifo_order,
     _Scheduler_simple_SMP_Insert_ready_fifo,
     _Scheduler_SMP_Insert_scheduled_fifo
   );
diff --git a/cpukit/score/src/schedulersmpstartidle.c b/cpukit/score/src/schedulersmpstartidle.c
index 420bcc0..cb0c2ab 100644
--- a/cpukit/score/src/schedulersmpstartidle.c
+++ b/cpukit/score/src/schedulersmpstartidle.c
@@ -25,5 +25,5 @@ void _Scheduler_SMP_Start_idle(
   node->state = SCHEDULER_SMP_NODE_SCHEDULED;
 
   _Thread_Set_CPU( thread, cpu );
-  _Chain_Append_unprotected( &self->Scheduled, &thread->Object.Node );
+  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node );
 }
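
The mechanical pattern repeated throughout the diff is that the ready
and scheduled sets now link Scheduler_Node::Node and are kept sorted by
_Chain_Insert_ordered_unprotected() together with the new
_Scheduler_SMP_Insert_priority_{fifo,lifo}_order comparators.  The toy
list below models that ordering only; insert_ordered() is a rough
stand-in for what the chain helper does (insert before the first node
for which the order function returns true), and the node type, labels
and print helper are invented for the illustration.

  #include <stdio.h>

  /* Toy node: a priority plus a label, so that FIFO vs. LIFO ordering
   * among equal priorities is visible. */
  typedef struct Node {
    struct Node  *next;
    unsigned long priority;
    const char   *label;
  } Node;

  typedef int ( *Order )( const Node *to_insert, const Node *next );

  /* Mirrors _Scheduler_SMP_Insert_priority_fifo_order(): a new node goes
   * behind nodes of equal priority. */
  static int fifo_order( const Node *a, const Node *b )
  {
    return a->priority < b->priority;
  }

  /* Mirrors _Scheduler_SMP_Insert_priority_lifo_order(): a new node goes
   * in front of nodes of equal priority. */
  static int lifo_order( const Node *a, const Node *b )
  {
    return a->priority <= b->priority;
  }

  /* Rough stand-in for _Chain_Insert_ordered_unprotected(): insert
   * before the first node for which the order function returns true. */
  static void insert_ordered( Node **head, Node *to_insert, Order order )
  {
    while ( *head != NULL && !( *order )( to_insert, *head ) ) {
      head = &( *head )->next;
    }

    to_insert->next = *head;
    *head = to_insert;
  }

  static void print_list( const char *what, const Node *n )
  {
    printf( "%s:", what );
    for ( ; n != NULL; n = n->next ) {
      printf( " %s(%lu)", n->label, n->priority );
    }
    printf( "\n" );
  }

  int main( void )
  {
    Node a = { NULL, 1, "a" }, b = { NULL, 1, "b" }, c = { NULL, 2, "c" };
    Node x = { NULL, 1, "x" }, y = { NULL, 1, "y" };
    Node *ready = NULL;
    Node *scheduled = NULL;

    insert_ordered( &ready, &c, fifo_order );
    insert_ordered( &ready, &a, fifo_order );
    insert_ordered( &ready, &b, fifo_order );      /* behind a: FIFO */

    insert_ordered( &scheduled, &x, lifo_order );
    insert_ordered( &scheduled, &y, lifo_order );  /* in front of x: LIFO */

    print_list( "fifo", ready );      /* fifo: a(1) b(1) c(2) */
    print_list( "lifo", scheduled );  /* lifo: y(1) x(1) */

    return 0;
  }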


