[PATCH] Pre Release: Strong APA

Richi Dubey richidubey at gmail.com
Sat Aug 22 21:54:48 UTC 2020


---
 cpukit/include/rtems/scheduler.h              |   6 +-
 .../include/rtems/score/schedulerstrongapa.h  | 284 ++++--
 cpukit/score/src/schedulerstrongapa.c         | 920 ++++++++++++++----
 3 files changed, 908 insertions(+), 302 deletions(-)

diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h
index 955a83cfb4..b101842ba7 100644
--- a/cpukit/include/rtems/scheduler.h
+++ b/cpukit/include/rtems/scheduler.h
@@ -257,16 +257,14 @@
   #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \
     static struct { \
       Scheduler_strong_APA_Context Base; \
-      Chain_Control                Ready[ ( prio_count ) ]; \
+      Scheduler_strong_APA_Struct Struct[ CONFIGURE_MAXIMUM_PROCESSORS ]; \
     } SCHEDULER_STRONG_APA_CONTEXT_NAME( name )
 
   #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \
     { \
       &SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Base.Base.Base, \
       SCHEDULER_STRONG_APA_ENTRY_POINTS, \
-      RTEMS_ARRAY_SIZE( \
-        SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Ready \
-      ) - 1, \
+      SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY, \
       ( obj_name ) \
       SCHEDULER_CONTROL_IS_NON_PREEMPT_MODE_SUPPORTED( false ) \
     }
diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
index 0ac28cb439..86d91688e8 100644
--- a/cpukit/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -6,31 +6,38 @@
  * @brief Strong APA Scheduler API
  */
 
 /*
- * Copyright (c) 2013, 2018 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2020 Richi Dubey
  *
- *  embedded brains GmbH
- *  Dornierstr. 4
- *  82178 Puchheim
- *  Germany
- *  <rtems at embedded-brains.de>
+ *  <richidubey at gmail.com>
  *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
+ * Copyright (c) 2013, 2018 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Dornierstr. 4
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
  * http://www.rtems.org/license/LICENSE.
  */
 
 #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
 #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
 
 #include <rtems/score/scheduler.h>
-#include <rtems/score/schedulerpriority.h>
 #include <rtems/score/schedulersmp.h>
+#include <rtems/score/percpu.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
 
+#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
+  RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Chain )
+
 /**
  * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler
  *
@@ -38,43 +45,98 @@ extern "C" {
  *
  * @brief Strong APA Scheduler
  *
- * This is an implementation of the global fixed priority scheduler (G-FP).  It
- * uses one ready chain per priority to ensure constant time insert operations.
- * The scheduled chain uses linear insert operations and has at most processor
- * count entries.  Since the processor and priority count are constants all
- * scheduler operations complete in a bounded execution time.
+ * This is an implementation of the Strong APA scheduler defined by
+ * Cerqueira et al. in "Linux's Processor Affinity API, Refined:
+ * Shifting Real-Time Tasks Towards Higher Schedulability".
  *
- * The the_thread preempt mode will be ignored.
  *
  * @{
  */
 
 /**
- * @brief Scheduler context specialization for Strong APA
- * schedulers.
- */
-typedef struct {
-  Scheduler_SMP_Context    Base;
-  Priority_bit_map_Control Bit_map;
-  Chain_Control            Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
-} Scheduler_strong_APA_Context;
-
-/**
- * @brief Scheduler node specialization for Strong APA
- * schedulers.
+ * @brief Scheduler node specialization for Strong APA schedulers.
  */
 typedef struct {
   /**
    * @brief SMP scheduler node.
    */
   Scheduler_SMP_Node Base;
+
+  /**
+   * @brief Chain node for Scheduler_strong_APA_Context::All_nodes.
+   */
+  Chain_Node Chain;
+
+  /**
+   * @brief CPU that this node would preempt in the backtracking part of
+   * _Scheduler_strong_APA_Get_highest_ready() and
+   * _Scheduler_strong_APA_Do_enqueue().
+   */
+  Per_CPU_Control *invoker;
 
   /**
-   * @brief The associated ready queue of this node.
+   * @brief The associated affinity set of this node.
    */
-  Scheduler_priority_Ready_queue Ready_queue;
+  Processor_mask Affinity;
 } Scheduler_strong_APA_Node;
 
+
+/**
+ * @brief Per-CPU bookkeeping used by the BFS queue of the Strong APA
+ * scheduler.  One entry exists for each index, i.e. for each processor.
+ */
+typedef struct {
+  /**
+   * @brief The node whose affinity set caused the CPU at this index of
+   * Scheduler_strong_APA_Context::Struct to be added to the queue.
+   */
+  Scheduler_Node *caller;
+
+  /**
+   * @brief The CPU at this index of Scheduler_strong_APA_Context::Struct
+   * in the queue implementation.
+   */
+  Per_CPU_Control *cpu;
+
+  /**
+   * @brief Indicates whether the CPU at this index of
+   * Scheduler_strong_APA_Context::Struct was already added to the queue.
+   */
+  bool visited;
+} Scheduler_strong_APA_Struct;
+
+/**
+ * @brief Scheduler context specialization for Strong APA schedulers.
+ *
+ * Contains the SMP scheduler context and the node bookkeeping of the
+ * Strong APA scheduler.
+ */
+typedef struct {
+  /**
+   * @brief SMP scheduler context.
+   */
+  Scheduler_SMP_Context Base;
+
+  /**
+   * @brief Chain of all the nodes present in the system, covering both
+   * ready and scheduled nodes.
+   */
+  Chain_Control All_nodes;
+
+  /**
+   * @brief Per-CPU bookkeeping, one entry for each processor.
+   */
+  Scheduler_strong_APA_Struct Struct[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_strong_APA_Context;
+
+#define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255
+
 /**
  * @brief Entry points for the Strong APA Scheduler.
  */
@@ -100,81 +162,90 @@ typedef struct {
     _Scheduler_default_Release_job, \
     _Scheduler_default_Cancel_job, \
     _Scheduler_default_Tick, \
-    _Scheduler_SMP_Start_idle \
-    SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+    _Scheduler_strong_APA_Start_idle, \
+    _Scheduler_strong_APA_Set_affinity \
   }
 
 /**
- * @brief Initializes the scheduler.
+ * @brief Initializes the Strong APA scheduler.
  *
- * @param scheduler The scheduler to initialize.
- */
-void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler );
-
+ * Sets the chain containing all the nodes to empty and initializes the
+ * SMP scheduler.
+ *
+ * @param scheduler The scheduler control instance.
+ *
+ * @see _Scheduler_strong_APA_Node_initialize()
+ */
+void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler );
+
 /**
- * @brief Initializes the node with the given priority.
+ * @brief Called when a thread yields the processor.
  *
  * @param scheduler The scheduler control instance.
- * @param[out] node The node to initialize.
- * @param the_thread The thread of the node to initialize.
- * @param priority The priority for @a node.
+ * @param thread The thread corresponding to @a node.
+ * @param node The node that yields the processor.
  */
-void _Scheduler_strong_APA_Node_initialize(
+void _Scheduler_strong_APA_Yield(
   const Scheduler_Control *scheduler,
-  Scheduler_Node          *node,
-  Thread_Control          *the_thread,
-  Priority_Control         priority
+  Thread_Control          *thread,
+  Scheduler_Node          *node
 );
 
 /**
- * @brief Blocks the thread.
+ * @brief Blocks a node.
  *
- * @param scheduler The scheduler control instance.
- * @param[in, out] the_thread The thread to block.
- * @param[in, out] node The node of the thread to block.
- */
+ * Changes the state of the node, extracts it from the queue, and calls
+ * _Scheduler_SMP_Block().
+ *
+ * @param scheduler The scheduler control instance.
+ * @param thread The thread corresponding to @a node.
+ * @param node The node to block.
+ */
 void _Scheduler_strong_APA_Block(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 );
 
 /**
- * @brief Unblocks the thread.
+ * @brief Unblocks a node.
+ *
+ * Changes the state of the node and calls _Scheduler_SMP_Unblock().
  *
  * @param scheduler The scheduler control instance.
- * @param[in, out] the_thread The thread to unblock.
- * @param[in, out] node The node of the thread to unblock.
- */
+ * @param thread The thread corresponding to @a node.
+ * @param node The node to unblock.
+ *
+ * @see _Scheduler_strong_APA_Enqueue()
+ * @see _Scheduler_strong_APA_Do_update()
+ */
 void _Scheduler_strong_APA_Unblock(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 );
 
 /**
- * @brief Updates the priority of the node.
+ * @brief Updates the priority of the node.
  *
  * @param scheduler The scheduler control instance.
- * @param the_thread The thread for the operation.
- * @param[in, out] node The node to update the priority of.
- */
+ * @param thread The thread corresponding to @a node.
+ * @param node The node whose priority has to be updated.
+ */
 void _Scheduler_strong_APA_Update_priority(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 );
 
 /**
- * @brief Asks for help.
+ * @brief Asks for help through the SMP scheduler implementation.
  *
- * @param  scheduler The scheduler control instance.
- * @param the_thread The thread that asks for help.
- * @param node The node of @a the_thread.
- *
- * @retval true The request for help was successful.
- * @retval false The request for help was not successful.
- */
+ * @param scheduler The scheduler control instance.
+ * @param the_thread The thread that asks for help.
+ * @param node The node associated with @a the_thread.
+ *
+ * @retval true The request for help was successful.
+ * @retval false The request for help was not successful.
+ */
 bool _Scheduler_strong_APA_Ask_for_help(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread,
@@ -182,12 +253,13 @@ bool _Scheduler_strong_APA_Ask_for_help(
 );
 
 /**
- * @brief Reconsiders help request.
+ * @brief Reconsiders the help request.
  *
  * @param scheduler The scheduler control instance.
- * @param the_thread The thread to reconsider the help request of.
- * @param[in, out] node The node of @a the_thread
- */
+ * @param the_thread The thread corresponding to @a node.
+ * @param node The node which asks for reconsideration of the help
+ * request.
+ */
 void _Scheduler_strong_APA_Reconsider_help_request(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread,
@@ -195,13 +267,13 @@ void _Scheduler_strong_APA_Reconsider_help_request(
 );
 
 /**
- * @brief Withdraws the node.
+ * @brief Withdraws a node.
  *
  * @param scheduler The scheduler control instance.
- * @param[in, out] the_thread The thread to change the state to @a next_state.
- * @param[in, out] node The node to withdraw.
- * @param next_state The next state for @a the_thread.
- */
+ * @param the_thread The thread corresponding to @a node.
+ * @param node The node to withdraw.
+ * @param next_state The next state of @a the_thread.
+ */
 void _Scheduler_strong_APA_Withdraw_node(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread,
@@ -210,42 +282,66 @@ void _Scheduler_strong_APA_Withdraw_node(
 );
 
 /**
- * @brief Adds the idle thread to a processor.
+ * @brief Adds a processor to the scheduler instance and allocates an idle
+ * thread to it.
  *
  * @param scheduler The scheduler control instance.
- * @param[in, out] The idle thread to add to the processor.
- */
+ * @param idle The idle thread to allocate to the processor.
+ */
 void _Scheduler_strong_APA_Add_processor(
   const Scheduler_Control *scheduler,
   Thread_Control          *idle
 );
 
 /**
- * @brief Removes an idle thread from the given cpu.
- *
- * @param scheduler The scheduler instance.
- * @param cpu The cpu control to remove from @a scheduler.
+ * @brief Removes a processor from the scheduler instance.
  *
- * @return The idle thread of the processor.
- */
+ * @param scheduler The scheduler control instance.
+ * @param cpu The processor to remove.
+ *
+ * @return The idle thread of the removed processor.
+ */
 Thread_Control *_Scheduler_strong_APA_Remove_processor(
   const Scheduler_Control *scheduler,
-  struct Per_CPU_Control  *cpu
+  Per_CPU_Control         *cpu
 );
 
 /**
- * @brief Performs a yield operation.
+ * @brief Initializes the node with the given priority.
  *
  * @param scheduler The scheduler control instance.
- * @param the_thread The thread to yield.
- * @param[in, out] node The node of @a the_thread.
+ * @param[out] node The node to initialize.
+ * @param the_thread The thread of the node to initialize.
+ * @param priority The priority for @a node.
  */
-void _Scheduler_strong_APA_Yield(
+void _Scheduler_strong_APA_Node_initialize(
   const Scheduler_Control *scheduler,
+  Scheduler_Node          *node,
   Thread_Control          *the_thread,
-  Scheduler_Node          *node
+  Priority_Control         priority
 );
 
+/**
+ * @brief Starts an idle thread on a processor.
+ *
+ * @param scheduler The scheduler control instance.
+ * @param idle The idle thread to start.
+ * @param cpu The processor that gets the idle thread.
+ */
+void _Scheduler_strong_APA_Start_idle(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *idle,
+  Per_CPU_Control         *cpu
+);
+
+/**
+ * @brief Sets the affinity of @a node_base to @a affinity.
+ *
+ * @param scheduler The scheduler control instance.
+ * @param thread The thread corresponding to @a node_base.
+ * @param node_base The node whose affinity is set.
+ * @param affinity The new processor affinity set.
+ *
+ * @retval true The affinity was set.
+ * @retval false @a affinity does not contain any online processor owned
+ * by the scheduler instance.
+ */
+bool _Scheduler_strong_APA_Set_affinity(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node_base,
+  const Processor_mask    *affinity
+);
+
 /** @} */
 
 #ifdef __cplusplus
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index 924cd86412..8ccaa4e4a3 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -6,320 +6,765 @@
  * @brief Strong APA Scheduler Implementation
  */
 
 /*
- * Copyright (c) 2013, 2016 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2020 Richi Dubey
  *
- *  embedded brains GmbH
- *  Dornierstr. 4
- *  82178 Puchheim
- *  Germany
- *  <rtems at embedded-brains.de>
+ * <richidubey at gmail.com>
  *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
+ * Copyright (c) 2013, 2018 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Dornierstr. 4
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
  * http://www.rtems.org/license/LICENSE.
  */
 
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #endif
 
 #include <rtems/score/schedulerstrongapa.h>
-#include <rtems/score/schedulerpriorityimpl.h>
 #include <rtems/score/schedulersmpimpl.h>
+#include <rtems/score/assert.h>
+#include <rtems/malloc.h>
 
-static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self(
-  Scheduler_Context *context
-)
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
+{
+  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+}
+
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
 {
   return (Scheduler_strong_APA_Context *) context;
 }
 
-static Scheduler_strong_APA_Node *
+static inline Scheduler_strong_APA_Node *
 _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
 {
   return (Scheduler_strong_APA_Node *) node;
 }
 
-static void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+static inline void _Scheduler_strong_APA_Do_update(
   Scheduler_Context *context,
-  Scheduler_Node    *scheduled_to_ready
+  Scheduler_Node    *node,
+  Priority_Control   new_priority
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-  Scheduler_strong_APA_Node *node =
-    _Scheduler_strong_APA_Node_downcast( scheduled_to_ready );
-
-  _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
-  _Scheduler_priority_Ready_queue_enqueue_first(
-    &node->Base.Base.Node.Chain,
-    &node->Ready_queue,
-    &self->Bit_map
+  Scheduler_SMP_Node *smp_node;
+  (void) context;
+
+  smp_node = _Scheduler_SMP_Node_downcast( node );
+  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
+}
+
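+/*
+ * Scans the All_nodes chain linearly and reports whether at least one
+ * node is in the ready state.
+ */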
+static inline bool _Scheduler_strong_APA_Has_ready(
+  Scheduler_Context *context
+)
+{
+  Scheduler_strong_APA_Context *self;
+  const Chain_Node             *tail;
+  Chain_Node                   *next;
+  Scheduler_strong_APA_Node    *node;
+  bool                          ret;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  tail = _Chain_Immutable_tail( &self->All_nodes );
+  next = _Chain_First( &self->All_nodes );
+
+  ret = false;
+
+  while ( next != tail ) {
+    node = STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+
+    if (
+      _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_READY
+    ) {
+      ret = true;
+      break;
+    }
+
+    next = _Chain_Next( next );
+  }
+
+  return ret;
+}
+
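+/*
+ * Allocates the victim CPU to the scheduled node.  Only the victim CPU is
+ * needed here, so NULL is passed in place of the victim node.
+ */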
+static inline void _Scheduler_strong_APA_Allocate_processor(
+  Scheduler_Context *context,
+  Scheduler_Node    *scheduled_base,
+  Scheduler_Node    *victim_base,
+  Per_CPU_Control   *victim_cpu
+)
+{
+  Scheduler_strong_APA_Node *scheduled;
+
+  (void) victim_base;
+  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
+
+  _Scheduler_SMP_Allocate_processor_exact(
+    context,
+    &scheduled->Base.Base,
+    NULL,
+    victim_cpu
   );
 }
 
-static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
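+/*
+ * Dequeues the CPUs queued between front and rear and scans the All_nodes
+ * chain once per dequeued CPU: a scheduled node with the CPU in its
+ * affinity set enqueues its assigned CPU for a later visit, while the
+ * ready node with the smallest priority number (highest priority) seen so
+ * far is recorded as the highest ready node.
+ */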
+static inline Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready(
+  Scheduler_strong_APA_Context *self,
+  uint32_t                      front, 
+  uint32_t                      rear
+)
+{
+  Scheduler_Node              *highest_ready;
+  Scheduler_strong_APA_Struct *Struct;
+  const Chain_Node            *tail;
+  Chain_Node                  *next;
+  uint32_t                     index_assigned_cpu;
+  Scheduler_strong_APA_Node   *node;
+  Priority_Control             min_priority_num;
+  Priority_Control             curr_priority;
+  Per_CPU_Control             *assigned_cpu;
+  Scheduler_SMP_Node_state     curr_state;
+  Per_CPU_Control             *curr_CPU;
+  bool                         first_task;
+  
+  Struct = self->Struct;
+
+  /*
+   * The first ready task that is encountered has nothing to compare its
+   * priority against, so it is the highest priority task witnessed so far.
+   */
+  first_task = true;
+    
+  while ( front <= rear ) {
+    curr_CPU = Struct[ front ].cpu;
+    front = front + 1;
+
+    tail = _Chain_Immutable_tail( &self->All_nodes );
+    next = _Chain_First( &self->All_nodes );
+  
+    while ( next != tail ) {
+      node = STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+
+      /* Check if curr_CPU is in the affinity set of the node */
+      if (
+        _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
+      ) {
+        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
+
+        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
+          index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu );
+
+          if ( Struct[ index_assigned_cpu ].visited == false ) {
+            rear = rear + 1;
+            Struct[ rear ].cpu = assigned_cpu;
+            Struct[ index_assigned_cpu ].visited = true;
+            /*
+             * The curr_CPU of the queue invoked this node to add the CPU
+             * that it is executing on to the queue.  So this node might get
+             * preempted because of the invoker curr_CPU, and curr_CPU is
+             * the CPU that this node should preempt in case it gets
+             * preempted.
+             */
+            node->invoker = curr_CPU;
+          }
+        } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
+          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
+          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+          if ( first_task == true || curr_priority < min_priority_num ) {
+            min_priority_num = curr_priority;
+            highest_ready = &node->Base.Base;
+            first_task = false;
+            /* In case this task is directly reachable from thread_CPU */
+            node->invoker = curr_CPU;
+          }
+        }
+      }
+
+      next = _Chain_Next( next );
+    }
+  }
+
+  return highest_ready;
+}
+
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
   Scheduler_Context *context,
-  Scheduler_Node    *ready_to_scheduled
+  Scheduler_Node    *filter
 )
 {
+  /*
+   * BFS algorithm for task departure: finds the highest ready task to run
+   * on a particular CPU and returns its Scheduler_Node.  The filter node
+   * points to the victim node, i.e. the node that blocked, which is why
+   * this function is called.
+   */
   Scheduler_strong_APA_Context *self;
-  Scheduler_strong_APA_Node    *node;
-  Priority_Control              insert_priority;
-
-  self = _Scheduler_strong_APA_Get_self( context );
-  node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
+  Per_CPU_Control              *filter_cpu;
+  Scheduler_strong_APA_Node    *node;
+  Scheduler_Node               *highest_ready;
+  Scheduler_Node               *curr_node;
+  Scheduler_Node               *next_node;
+  Scheduler_strong_APA_Struct  *Struct;
+  uint32_t                      front; /* Front of the BFS queue */
+  uint32_t                      rear;  /* Rear of the BFS queue */
+  uint32_t                      cpu_max;
+  uint32_t                      cpu_index;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+
+  front = 0;
+  rear = -1;
+
+  filter_cpu = _Thread_Get_CPU( filter->user );
+  Struct = self->Struct;
+  cpu_max = _SMP_Get_processor_maximum();
+
+  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+    Struct[ cpu_index ].visited = false;
+  }
+
+  rear = rear + 1;
+  Struct[ rear ].cpu = filter_cpu;
+  Struct[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
+
+  highest_ready = _Scheduler_strong_APA_Find_highest_ready(
+                    self,
+                    front,
+                    rear
+                  );
+  
+  if ( highest_ready != filter ) {
+    /*
+     * Backtrack on the path from filter_cpu to highest_ready, shifting
+     * along every task.
+     */
+    node = _Scheduler_strong_APA_Node_downcast( highest_ready );
+
+    if ( node->invoker != filter_cpu ) {
+      /*
+       * The highest ready node is not directly reachable from the victim
+       * CPU, so tasks need to be shifted along the backtracking path.
+       */
+      curr_node = &node->Base.Base;
+      next_node = _Thread_Scheduler_get_home_node( node->invoker->heir );
+
+      _Scheduler_SMP_Preempt(
+        context,
+        curr_node,
+        next_node,
+        _Scheduler_strong_APA_Allocate_processor
+      );
+
+      node = _Scheduler_strong_APA_Node_downcast( next_node );
+
+      while ( node->invoker != filter_cpu ) {
+        curr_node = &node->Base.Base;
+        next_node = _Thread_Scheduler_get_home_node( node->invoker->heir );
+
+        _Scheduler_SMP_Preempt(
+          context,
+          curr_node,
+          next_node,
+          _Scheduler_strong_APA_Allocate_processor
+        );
+
+        node = _Scheduler_strong_APA_Node_downcast( next_node );
+      }
+
+      /*
+       * Save the last node so that the calling SMP_* function can do the
+       * processor allocation.
+       */
+      curr_node = &node->Base.Base;
+      highest_ready = curr_node;
+    }
+  }
+
+  return highest_ready;
+}
 
-  _Scheduler_priority_Ready_queue_extract(
-    &node->Base.Base.Node.Chain,
-    &node->Ready_queue,
-    &self->Bit_map
-  );
-  insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
-  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
-  _Chain_Insert_ordered_unprotected(
-    &self->Base.Scheduled,
-    &node->Base.Base.Node.Chain,
-    &insert_priority,
-    _Scheduler_SMP_Priority_less_equal
-  );
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
+  Scheduler_Context *context,
+  Scheduler_Node    *filter_base
+)
+{
+  /* Find the lowest priority scheduled task directly reachable from filter */
+  uint32_t                   cpu_max;
+  uint32_t                   cpu_index;
+  Thread_Control            *curr_thread;
+  Scheduler_Node            *curr_node;
+  Scheduler_Node            *lowest_scheduled;
+  Priority_Control           max_priority_num;
+  Priority_Control           curr_priority;
+  Scheduler_strong_APA_Node *filter_strong_node;
+
+  lowest_scheduled = NULL; /* To remove compiler warning */
+  max_priority_num = 0;    /* Lowest priority encountered so far */
+  filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );
+
+  /* lowest_scheduled stays NULL only if the affinity of the node is empty */
+  _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
+  cpu_max = _SMP_Get_processor_maximum();
+  
+  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+    /* Check if the CPU is in the affinity set of filter_strong_node */
+    if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
+      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+      if ( _Per_CPU_Is_processor_online( cpu ) ) {
+        curr_thread = cpu->heir;
+        curr_node = _Thread_Scheduler_get_home_node( curr_thread );
+        curr_priority = _Scheduler_Node_get_priority( curr_node );
+        curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+        if ( curr_priority > max_priority_num ) {
+          lowest_scheduled = curr_node;
+          max_priority_num = curr_priority;
+        }
+      }
+    }
+  }
+  
+  return lowest_scheduled;
 }
 
-static void _Scheduler_strong_APA_Insert_ready(
+static inline void _Scheduler_strong_APA_Extract_from_scheduled(
   Scheduler_Context *context,
-  Scheduler_Node    *node_base,
-  Priority_Control   insert_priority
+  Scheduler_Node    *node_to_extract
 )
 {
   Scheduler_strong_APA_Context *self;
   Scheduler_strong_APA_Node    *node;
 
   self = _Scheduler_strong_APA_Get_self( context );
-  node = _Scheduler_strong_APA_Node_downcast( node_base );
+  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
 
-  if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
-    _Scheduler_priority_Ready_queue_enqueue(
-      &node->Base.Base.Node.Chain,
-      &node->Ready_queue,
-      &self->Bit_map
-    );
-  } else {
-    _Scheduler_priority_Ready_queue_enqueue_first(
-      &node->Base.Base.Node.Chain,
-      &node->Ready_queue,
-      &self->Bit_map
-    );
-  }
+  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
+  /* Keep the node on All_nodes: it may go to the ready state */
 }
 
-static void _Scheduler_strong_APA_Extract_from_ready(
+static inline void _Scheduler_strong_APA_Extract_from_ready(
   Scheduler_Context *context,
-  Scheduler_Node    *the_thread
+  Scheduler_Node    *node_to_extract
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-  Scheduler_strong_APA_Node *node =
-    _Scheduler_strong_APA_Node_downcast( the_thread );
-
-  _Scheduler_priority_Ready_queue_extract(
-    &node->Base.Base.Node.Chain,
-    &node->Ready_queue,
-    &self->Bit_map
-  );
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_Node    *node;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
+ 
+  _Assert( !_Chain_Is_empty( &self->All_nodes ) );
+  _Assert( !_Chain_Is_node_off_chain( &node->Chain ) );
+
+  _Chain_Extract_unprotected( &node->Chain ); /* Removed from All_nodes */
+  _Chain_Set_off_chain( &node->Chain );
 }
 
-static void _Scheduler_strong_APA_Do_update(
+static inline void _Scheduler_strong_APA_Insert_ready(
   Scheduler_Context *context,
-  Scheduler_Node *node_to_update,
-  Priority_Control new_priority
+  Scheduler_Node    *node_base,
+  Priority_Control   insert_priority
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-  Scheduler_strong_APA_Node *node =
-    _Scheduler_strong_APA_Node_downcast( node_to_update );
-
-  _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
-  _Scheduler_priority_Ready_queue_update(
-    &node->Ready_queue,
-    SCHEDULER_PRIORITY_UNMAP( new_priority ),
-    &self->Bit_map,
-    &self->Ready[ 0 ]
-  );
-}
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_Node    *node;
 
-static Scheduler_strong_APA_Context *
-_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
-{
-  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+  self = _Scheduler_strong_APA_Get_self( context );
+  node = _Scheduler_strong_APA_Node_downcast( node_base );
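+
+  /*
+   * Append to All_nodes only if the node is not already on the chain: a
+   * node keeps its All_nodes membership while it moves between the ready
+   * and scheduled states.
+   */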
+  if ( _Chain_Is_node_off_chain( &node->Chain ) ) {
+    _Chain_Append_unprotected( &self->All_nodes, &node->Chain );
+  }
 }
 
-void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
+static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+  Scheduler_Context *context,
+  Scheduler_Node    *scheduled_to_ready
+)
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_context( scheduler );
+  Priority_Control insert_priority;
 
-  _Scheduler_SMP_Initialize( &self->Base );
-  _Priority_bit_map_Initialize( &self->Bit_map );
-  _Scheduler_priority_Ready_queue_initialize(
-    &self->Ready[ 0 ],
-    scheduler->maximum_priority
+  _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
+  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
+  
+  _Scheduler_strong_APA_Insert_ready(
+    context,
+    scheduled_to_ready,
+    insert_priority
   );
 }
 
-void _Scheduler_strong_APA_Node_initialize(
-  const Scheduler_Control *scheduler,
-  Scheduler_Node          *node,
-  Thread_Control          *the_thread,
-  Priority_Control         priority
+static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
+  Scheduler_Context *context,
+  Scheduler_Node    *ready_to_scheduled
 )
 {
-  Scheduler_Context            *context;
-  Scheduler_strong_APA_Context *self;
-  Scheduler_strong_APA_Node    *the_node;
-
-  the_node = _Scheduler_strong_APA_Node_downcast( node );
-  _Scheduler_SMP_Node_initialize(
-    scheduler,
-    &the_node->Base,
-    the_thread,
-    priority
-  );
+  Priority_Control insert_priority;
 
-  context = _Scheduler_Get_context( scheduler );
-  self = _Scheduler_strong_APA_Get_self( context );
-  _Scheduler_priority_Ready_queue_update(
-    &the_node->Ready_queue,
-    SCHEDULER_PRIORITY_UNMAP( priority ),
-    &self->Bit_map,
-    &self->Ready[ 0 ]
+  _Scheduler_strong_APA_Extract_from_ready( context, ready_to_scheduled );
+  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
+  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+  _Scheduler_SMP_Insert_scheduled(
+    context,
+    ready_to_scheduled,
+    insert_priority
   );
+  /* Note: The node still stays on the All_nodes chain */
 }
 
-static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable(
+  Scheduler_strong_APA_Context *self,
+  uint32_t                      front, 
+  uint32_t                      rear,
+  Per_CPU_Control             **cpu_to_preempt
+)
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-
-  return !_Priority_bit_map_Is_empty( &self->Bit_map );
+  Scheduler_Node              *lowest_reachable;
+  Priority_Control             max_priority_num;
+  uint32_t                     cpu_max;
+  uint32_t                     cpu_index;
+  Thread_Control              *curr_thread;
+  Per_CPU_Control             *curr_CPU;
+  Priority_Control             curr_priority;
+  Scheduler_Node              *curr_node;
+  Scheduler_strong_APA_Node   *curr_strong_node;
+  Scheduler_strong_APA_Struct *Struct;
+
+  max_priority_num = 0; /* Lowest priority encountered so far */
+  Struct = self->Struct;
+  cpu_max = _SMP_Get_processor_maximum();
+  
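+  /*
+   * Breadth-first traversal: dequeue a CPU from the front of the queue,
+   * inspect its heir, and enqueue every not yet visited online CPU from
+   * the heir's affinity set.
+   */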
+  while ( front <= rear ) {
+    curr_CPU = Struct[ front ].cpu;
+    front = front + 1;
+    
+    curr_thread = curr_CPU->heir;
+    curr_node = _Thread_Scheduler_get_home_node( curr_thread );
+  
+    curr_priority = _Scheduler_Node_get_priority( curr_node );
+    curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); 
+    
+    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );  
+      
+    if ( curr_priority > max_priority_num ) {
+      lowest_reachable = curr_node;
+      max_priority_num = curr_priority;
+      *cpu_to_preempt = curr_CPU;
+    }
+    
+    if ( !curr_thread->is_idle ) {
+      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+        /* Check if the CPU is in the affinity set of the node */
+        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
+          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+          if (
+            _Per_CPU_Is_processor_online( cpu ) &&
+            Struct[ cpu_index ].visited == false
+          ) {
+            rear = rear + 1;
+            Struct[ rear ].cpu = cpu;
+            Struct[ cpu_index ].visited = true;
+            Struct[ cpu_index ].caller = curr_node;
+          }
+        }  
+      }
+    }
+  }
+  
+  return lowest_reachable;
 }
-
-static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
+
+static inline bool _Scheduler_strong_APA_Do_enqueue(
   Scheduler_Context *context,
-  Scheduler_Node    *node
+  Scheduler_Node    *lowest_reachable,
+  Scheduler_Node    *node,
+  Priority_Control   insert_priority,
+  Per_CPU_Control   *cpu_to_preempt
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-
-  (void) node;
-
-  return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first(
-    &self->Bit_map,
-    &self->Ready[ 0 ]
-  );
+  bool                          needs_help;
+  Priority_Control              node_priority;
+  Priority_Control              lowest_priority;
+  Scheduler_strong_APA_Struct  *Struct;
+  Scheduler_Node               *curr_node;
+  Scheduler_strong_APA_Node    *curr_strong_node;
+  Per_CPU_Control              *curr_CPU;
+  Thread_Control               *next_thread;
+  Scheduler_strong_APA_Context *self;
+  Scheduler_Node               *next_node;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  Struct = self->Struct;
+  
+  node_priority = _Scheduler_Node_get_priority( node );
+  node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); 
+  
+  lowest_priority =  _Scheduler_Node_get_priority( lowest_reachable );
+  lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority ); 
+  
+  if ( lowest_priority > node_priority ) {
+    /*
+     * Backtrack on the path from _Thread_Get_CPU( lowest_reachable->user )
+     * to lowest_reachable, shifting along every task.
+     */
+    curr_node = Struct[ _Per_CPU_Get_index( cpu_to_preempt ) ].caller;
+    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+    curr_strong_node->invoker = cpu_to_preempt;
+
+    /* Save which CPU to preempt in the invoker value of the node */
+    while ( curr_node != node ) {
+      curr_CPU = _Thread_Get_CPU( curr_node->user );
+      curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller;
+      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+      curr_strong_node->invoker = curr_CPU;
+    }
+   
+    next_thread = curr_strong_node->invoker->heir;
+    next_node = _Thread_Scheduler_get_home_node( next_thread );
+      
+    node_priority = _Scheduler_Node_get_priority( curr_node );
+    node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); 
+  
+    _Scheduler_SMP_Enqueue_to_scheduled(
+      context,
+      curr_node,
+      node_priority,
+      next_node,
+      _Scheduler_SMP_Insert_scheduled,
+      _Scheduler_strong_APA_Move_from_scheduled_to_ready,
+      _Scheduler_strong_APA_Allocate_processor
+    );
+    
+    curr_node = next_node;
+    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+      
+    while ( curr_node != lowest_reachable ) {
+      next_thread = curr_strong_node->invoker->heir;
+      next_node = _Thread_Scheduler_get_home_node( next_thread );
+
+      /* curr_node preempts the next_node */
+      _Scheduler_SMP_Preempt(
+        context,
+        curr_node,
+        next_node,
+        _Scheduler_strong_APA_Allocate_processor
+      );
+
+      curr_node = next_node;
+      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+    }
+
+    _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+      context,
+      lowest_reachable
+    );
+    
+    needs_help = false;
+  } else {
+    needs_help = true;
+  }
+  
+  /* Add the node to the All_nodes chain: it is now either scheduled or ready */
+  _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );
+  
+  return needs_help;
 }
 
-void _Scheduler_strong_APA_Block(
-  const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
-  Scheduler_Node          *node
+static inline bool _Scheduler_strong_APA_Enqueue(
+  Scheduler_Context *context,
+  Scheduler_Node    *node,
+  Priority_Control   insert_priority
 )
 {
-  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+  /*
+   * Idea: BFS algorithm for task arrival.  Enqueue the node either in the
+   * scheduled chain or in the ready chain.  The node is the newly arrived
+   * node and is not scheduled.
+   */
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_Struct  *Struct;
+  uint32_t                      cpu_max;
+  uint32_t                      cpu_index;
+  Per_CPU_Control              *cpu_to_preempt;
+  Scheduler_Node               *lowest_reachable;
+  Scheduler_strong_APA_Node    *strong_node;
+  uint32_t                      front; /* Front of the BFS queue */
+  uint32_t                      rear;  /* Rear of the BFS queue */
+
+  front = 0;
+  rear = -1;
 
-  _Scheduler_SMP_Block(
+  self = _Scheduler_strong_APA_Get_self( context );
+  Struct = self->Struct;
+       
+  strong_node = _Scheduler_strong_APA_Node_downcast( node );
+  cpu_max = _SMP_Get_processor_maximum();
+  
+  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+    Struct[ cpu_index ].visited = false;
+
+    /* Check if the CPU is in the affinity set of the node */
+    if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
+      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+      if ( _Per_CPU_Is_processor_online( cpu ) ) {
+        rear = rear + 1;
+        Struct[ rear ].cpu = cpu;
+        Struct[ cpu_index ].visited = true;
+        Struct[ cpu_index ].caller = node;
+      }
+    }
+  }
+  
+  /*
+   * This assert makes sure that there always exists an element in the
+   * queue when we start the traversal.
+   */
+  _Assert( !_Processor_mask_Is_zero( &strong_node->Affinity ) );
+  
+  lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
+                       self,
+                       front,
+                       rear,
+                       &cpu_to_preempt
+                     );
+  
+  return _Scheduler_strong_APA_Do_enqueue(
+    context,
+    lowest_reachable,
+    node,
+    insert_priority,
+    cpu_to_preempt
+  );
+}
+
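+/*
+ * Enqueues a node that is already scheduled: the SMP helper decides whether
+ * the node keeps a processor or moves back to the ready set.
+ */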
+static inline bool _Scheduler_strong_APA_Enqueue_scheduled(
+  Scheduler_Context *context,
+  Scheduler_Node    *node,
+  Priority_Control   insert_priority
+)
+{
+  return _Scheduler_SMP_Enqueue_scheduled(
     context,
-    the_thread,
     node,
-    _Scheduler_SMP_Extract_from_scheduled,
+    insert_priority,
+    _Scheduler_SMP_Priority_less_equal,
     _Scheduler_strong_APA_Extract_from_ready,
     _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Insert_ready,
+    _Scheduler_SMP_Insert_scheduled,
     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
-static bool _Scheduler_strong_APA_Enqueue(
+static inline bool _Scheduler_strong_APA_Do_ask_for_help(
   Scheduler_Context *context,
-  Scheduler_Node    *node,
-  Priority_Control   insert_priority
+  Thread_Control    *the_thread,
+  Scheduler_Node    *node
 )
 {
-  return _Scheduler_SMP_Enqueue(
+  return _Scheduler_SMP_Ask_for_help(
     context,
+    the_thread,
     node,
-    insert_priority,
     _Scheduler_SMP_Priority_less_equal,
     _Scheduler_strong_APA_Insert_ready,
     _Scheduler_SMP_Insert_scheduled,
     _Scheduler_strong_APA_Move_from_scheduled_to_ready,
-    _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_strong_APA_Get_lowest_scheduled,
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
-static bool _Scheduler_strong_APA_Enqueue_scheduled(
+static inline void _Scheduler_strong_APA_Register_idle(
   Scheduler_Context *context,
-  Scheduler_Node    *node,
-  Priority_Control  insert_priority
+  Scheduler_Node    *idle_base,
+  Per_CPU_Control   *cpu
 )
 {
-  return _Scheduler_SMP_Enqueue_scheduled(
+  (void) context;
+  (void) idle_base;
+  (void) cpu;
+  /*
+   * We do not maintain a variable to access the scheduled node for a CPU,
+   * so this function does nothing.
+   */
+}
+
+static inline void _Scheduler_strong_APA_Do_set_affinity(
+  Scheduler_Context *context,
+  Scheduler_Node    *node_base,
+  void              *arg
+)
+{
+  Scheduler_strong_APA_Node *node;
+  const Processor_mask      *affinity;
+
+  node = _Scheduler_strong_APA_Node_downcast( node_base );
+  affinity = arg;
+  node->Affinity = *affinity;
+}
+
+void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
+{
+  Scheduler_strong_APA_Context *self =
+    _Scheduler_strong_APA_Get_context( scheduler );
+
+  _Scheduler_SMP_Initialize( &self->Base );
+  _Chain_Initialize_empty( &self->All_nodes );
+}
+
+void _Scheduler_strong_APA_Yield(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Yield(
     context,
+    thread,
     node,
-    insert_priority,
-    _Scheduler_SMP_Priority_less_equal,
     _Scheduler_strong_APA_Extract_from_ready,
-    _Scheduler_strong_APA_Get_highest_ready,
-    _Scheduler_strong_APA_Insert_ready,
-    _Scheduler_SMP_Insert_scheduled,
-    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_strong_APA_Enqueue,
+    _Scheduler_strong_APA_Enqueue_scheduled
   );
 }
 
-void _Scheduler_strong_APA_Unblock(
+void _Scheduler_strong_APA_Block(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 )
 {
   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
-
-  _Scheduler_SMP_Unblock(
+
+  /* The extract from ready automatically removes the node from All_nodes */
+  _Scheduler_SMP_Block(
     context,
-    the_thread,
+    thread,
     node,
-    _Scheduler_strong_APA_Do_update,
-    _Scheduler_strong_APA_Enqueue
+    _Scheduler_strong_APA_Extract_from_scheduled,
+    _Scheduler_strong_APA_Extract_from_ready,
+    _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
-static bool _Scheduler_strong_APA_Do_ask_for_help(
-  Scheduler_Context *context,
-  Thread_Control    *the_thread,
-  Scheduler_Node    *node
+void _Scheduler_strong_APA_Unblock(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node
 )
 {
-  return _Scheduler_SMP_Ask_for_help(
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Unblock(
     context,
-    the_thread,
+    thread,
     node,
-    _Scheduler_SMP_Priority_less_equal,
-    _Scheduler_strong_APA_Insert_ready,
-    _Scheduler_SMP_Insert_scheduled,
-    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
-    _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_strong_APA_Do_update,
+    _Scheduler_strong_APA_Enqueue
   );
 }
 
 void _Scheduler_strong_APA_Update_priority(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 )
 {
@@ -327,7 +772,7 @@ void _Scheduler_strong_APA_Update_priority(
 
   _Scheduler_SMP_Update_priority(
     context,
-    the_thread,
+    thread,
     node,
     _Scheduler_strong_APA_Extract_from_ready,
     _Scheduler_strong_APA_Do_update,
@@ -345,7 +790,11 @@ bool _Scheduler_strong_APA_Ask_for_help(
 {
   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 
-  return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node );
+  return _Scheduler_strong_APA_Do_ask_for_help(
+    context,
+    the_thread,
+    node
+  );
 }
 
 void _Scheduler_strong_APA_Reconsider_help_request(
@@ -381,7 +830,7 @@ void _Scheduler_strong_APA_Withdraw_node(
     _Scheduler_strong_APA_Extract_from_ready,
     _Scheduler_strong_APA_Get_highest_ready,
     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
@@ -397,7 +846,7 @@ void _Scheduler_strong_APA_Add_processor(
     idle,
     _Scheduler_strong_APA_Has_ready,
     _Scheduler_strong_APA_Enqueue_scheduled,
-    _Scheduler_SMP_Do_nothing_register_idle
+    _Scheduler_strong_APA_Register_idle
   );
 }
 
@@ -416,20 +865,83 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor(
   );
 }
 
-void _Scheduler_strong_APA_Yield(
+void _Scheduler_strong_APA_Node_initialize(
   const Scheduler_Control *scheduler,
+  Scheduler_Node          *node,
   Thread_Control          *the_thread,
-  Scheduler_Node          *node
+  Priority_Control         priority
 )
 {
-  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+  Scheduler_SMP_Node        *smp_node;
+  Scheduler_strong_APA_Node *strong_node;
+
+  smp_node = _Scheduler_SMP_Node_downcast( node );
+  strong_node = _Scheduler_strong_APA_Node_downcast( node );
+
+  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
+
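+  /* By default a new node may run on every online processor */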
+  _Processor_mask_Assign(
+    &strong_node->Affinity,
+    _SMP_Get_online_processors()
+  );
+}
 
-  _Scheduler_SMP_Yield(
+void _Scheduler_strong_APA_Start_idle(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *idle,
+  Per_CPU_Control         *cpu
+)
+{
+  Scheduler_Context *context;
+
+  context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Do_start_idle(
     context,
-    the_thread,
-    node,
-    _Scheduler_strong_APA_Extract_from_ready,
-    _Scheduler_strong_APA_Enqueue,
-    _Scheduler_strong_APA_Enqueue_scheduled
+    idle,
+    cpu,
+    _Scheduler_strong_APA_Register_idle
   );
 }
+
+bool _Scheduler_strong_APA_Set_affinity(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node_base,
+  const Processor_mask    *affinity
+)
+{
+  Scheduler_Context         *context;
+  Scheduler_strong_APA_Node *node;
+  Processor_mask             local_affinity;
+ 
+  context = _Scheduler_Get_context( scheduler );
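+  /* Only the processors owned by this scheduler instance may be used */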
+  _Processor_mask_And( &local_affinity, &context->Processors, affinity );
+
+  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
+    return false;
+  }
+
+  node = _Scheduler_strong_APA_Node_downcast( node_base );
+
+  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) ) {
+    return true; /* Nothing to do */
+  }
+
+  _Processor_mask_Assign( &node->Affinity, &local_affinity );
+
+  _Scheduler_SMP_Set_affinity(
+    context,
+    thread,
+    node_base,
+    &local_affinity,
+    _Scheduler_strong_APA_Do_set_affinity,
+    _Scheduler_strong_APA_Extract_from_ready,
+    _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+    _Scheduler_strong_APA_Enqueue,
+    _Scheduler_strong_APA_Allocate_processor
+  );
+
+  return true;
+}
-- 
2.17.1


