Hi,

Thanks for your reviews so far. This patch implements the changes suggested in the following reviews:

https://lists.rtems.org/pipermail/devel/2020-August/061578.html
https://github.com/richidubey/rtems/pull/7/files

My comments on resolving the issues raised there are here: https://lists.rtems.org/pipermail/devel/2020-August/061614.html

Please let me know how to improve this patch.
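Since most of the new logic is the breadth-first search over processors, here is a small standalone sketch of that idea, in case it makes the review easier. This is illustrative code only: the array-based task model and every name in it are made up for this example and do not appear in the patch. Lower priority values mean higher priority, as usual in RTEMS.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define CPU_COUNT 3
  #define TASK_COUNT 4

  typedef struct {
    uint32_t priority;     /* Lower value means higher priority */
    uint32_t affinity;     /* Bit i set: the task may run on CPU i */
    int      assigned_cpu; /* CPU index if scheduled, -1 if ready */
  } Task;

  /* Tasks 0..2 are scheduled on CPUs 0..2; task 3 is ready. */
  static Task tasks[ TASK_COUNT ] = {
    { 4, 0x1, 0 },  /* pinned to CPU 0 */
    { 2, 0x3, 1 },  /* may run on CPU 0 or 1 */
    { 3, 0x7, 2 },  /* may run anywhere */
    { 1, 0x2, -1 }  /* ready, highest priority, only CPU 1 */
  };

  /*
   * Breadth-first search over CPUs, starting at the CPU freed by a
   * departing task (the victim).  A ready task whose affinity set
   * contains a visited CPU is a candidate.  A scheduled task whose
   * affinity set contains a visited CPU adds its own CPU to the queue,
   * because it could be shifted to the visited CPU, freeing its own one.
   */
  static int find_highest_ready( int victim_cpu )
  {
    int  queue[ CPU_COUNT ];
    bool visited[ CPU_COUNT ] = { false };
    int  front = 0;
    int  rear = 0;
    int  best = -1;

    queue[ rear++ ] = victim_cpu;
    visited[ victim_cpu ] = true;

    while ( front < rear ) {
      int cpu = queue[ front++ ];

      for ( int t = 0; t < TASK_COUNT; ++t ) {
        if ( ( tasks[ t ].affinity & ( 1u << cpu ) ) == 0 ) {
          continue; /* cpu is not in the affinity set of this task */
        }

        if ( tasks[ t ].assigned_cpu < 0 ) {
          /* Ready task reachable from the victim CPU: candidate */
          if ( best < 0 || tasks[ t ].priority < tasks[ best ].priority ) {
            best = t;
          }
        } else if ( !visited[ tasks[ t ].assigned_cpu ] ) {
          /* Scheduled task could shift here, so search its CPU as well */
          visited[ tasks[ t ].assigned_cpu ] = true;
          queue[ rear++ ] = tasks[ t ].assigned_cpu;
        }
      }
    }

    return best; /* -1 if no ready task is reachable */
  }

  int main( void )
  {
    /* The task on CPU 0 blocks: task 3 becomes reachable via task 1. */
    printf( "highest ready task: %d\n", find_highest_ready( 0 ) );
    return 0;
  }

The patch performs the same traversal over Scheduler_strong_APA_Context::Ready and the per-CPU table instead of these arrays, and afterwards backtracks along the recorded cpu_to_preempt chain to shift tasks toward the freed CPU.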
Thanks,
Richi.

On Thu, Aug 27, 2020 at 7:10 PM Richi Dubey <richidubey@gmail.com> wrote:

From 18982f8084f8d603ea0492e809d759d3c09ea263 Mon Sep 17 00:00:00 2001
From: Richi Dubey <richidubey@gmail.com>
Date: Thu, 27 Aug 2020 19:07:46 +0530
Subject: [PATCH v5] Pre-Release Strong-APA

---
 cpukit/include/rtems/scheduler.h              |   6 +-
 .../include/rtems/score/schedulerstrongapa.h  | 152 ++-
 cpukit/score/src/schedulerstrongapa.c         | 893 ++++++++++++++----
 3 files changed, 820 insertions(+), 231 deletions(-)

diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h
index 955a83cfb4..6a05c2798a 100644
--- a/cpukit/include/rtems/scheduler.h
+++ b/cpukit/include/rtems/scheduler.h
@@ -257,16 +257,14 @@
 #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \
   static struct { \
     Scheduler_strong_APA_Context Base; \
-    Chain_Control Ready[ ( prio_count ) ]; \
+    Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; \
   } SCHEDULER_STRONG_APA_CONTEXT_NAME( name )
 
 #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \
   { \
     &SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Base.Base.Base, \
     SCHEDULER_STRONG_APA_ENTRY_POINTS, \
-    RTEMS_ARRAY_SIZE( \
-      SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Ready \
-    ) - 1, \
+    SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY, \
     ( obj_name ) \
     SCHEDULER_CONTROL_IS_NON_PREEMPT_MODE_SUPPORTED( false ) \
   }
diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
index 0ac28cb439..cfd79e932a 100644
--- a/cpukit/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -6,31 +6,47 @@
  * @brief Strong APA Scheduler API
  */
 
-/*
- * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved.
+/* SPDX-License-Identifier: BSD-2-Clause
  *
- * embedded brains GmbH
- * Dornierstr. 4
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
+ * Copyright (C) 2020 Richi Dubey
+ * Copyright (c) 2013, 2018 embedded brains GmbH
  *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
 #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H
 
 #include <rtems/score/scheduler.h>
-#include <rtems/score/schedulerpriority.h>
 #include <rtems/score/schedulersmp.h>
+#include <rtems/score/percpu.h>
 
 #ifdef __cplusplus
extern "C" {
 #endif /* __cplusplus */
 
+#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
+  RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node )
+
 /**
  * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler
  *
@@ -38,42 +54,96 @@ extern "C" {
  *
  * @brief Strong APA Scheduler
  *
- * This is an implementation of the global fixed priority scheduler (G-FP). It
- * uses one ready chain per priority to ensure constant time insert operations.
- * The scheduled chain uses linear insert operations and has at most processor
- * count entries. Since the processor and priority count are constants all
- * scheduler operations complete in a bounded execution time.
- *
- * The the_thread preempt mode will be ignored.
+ * This is an implementation of the Strong APA scheduler defined by
+ * Cerqueira et al. in "Linux's Processor Affinity API, Refined:
+ * Shifting Real-Time Tasks Towards Higher Schedulability".
  *
+ * The scheduled and ready nodes are accessed via the
+ * Scheduler_strong_APA_Context::Ready chain, which helps in backtracking
+ * when a node that is executing on a CPU gets blocked. A new node is
+ * allocated to the CPU by checking the nodes executing on processors in
+ * the affinity set of the node and, transitively, the nodes executing on
+ * the processors in their affinity sets.
 * @{
 */
 
 /**
- * @brief Scheduler context specialization for Strong APA
- *   schedulers.
+ * @brief Scheduler node specialization for Strong APA schedulers.
 */
 typedef struct {
-  Scheduler_SMP_Context    Base;
-  Priority_bit_map_Control Bit_map;
-  Chain_Control            Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
-} Scheduler_strong_APA_Context;
+  /**
+   * @brief SMP scheduler node.
+   */
+  Scheduler_SMP_Node Base;
+
+  /**
+   * @brief Chain node for Scheduler_strong_APA_Context::Ready.
+   */
+  Chain_Node Ready_node;
+
+  /**
+   * @brief CPU that this node would preempt in the backtracking part of
+   *   _Scheduler_strong_APA_Get_highest_ready and
+   *   _Scheduler_strong_APA_Do_Enqueue.
+   */
+  Per_CPU_Control *cpu_to_preempt;
+
+  /**
+   * @brief The associated affinity set of this node.
+   */
+  Processor_mask Affinity;
+} Scheduler_strong_APA_Node;
+
 
 /**
- * @brief Scheduler node specialization for Strong APA
- *   schedulers.
+ * @brief Per-CPU bookkeeping used to implement the BFS over processors.
+ */
+typedef struct
+{
+  /**
+   * @brief CPU in a queue.
+   */
+  Per_CPU_Control *cpu;
+
+  /**
+   * @brief The node that would preempt the node executing on this CPU.
+   */
+  Scheduler_Node *preempting_node;
+
+  /**
+   * @brief Whether or not this CPU has been added to the queue
+   *   (visited in BFS).
+   */
+  bool visited;
+
+  /**
+   * @brief The node currently executing on this CPU.
+   */
+  Scheduler_Node *executing;
+} Scheduler_strong_APA_CPU;
+
+/**
+ * @brief Scheduler context definition for the Strong APA scheduler.
 */
 typedef struct {
+  /**
+   * @brief @see Scheduler_SMP_Context.
+   */
+  Scheduler_SMP_Context Base;
+
  /**
-  * @brief SMP scheduler node.
+  * @brief Chain of all the ready and scheduled nodes present in
+  *   the Strong APA scheduler.
  */
-  Scheduler_SMP_Node Base;
+  Chain_Control Ready;
 
  /**
-  * @brief The associated ready queue of this node.
+  * @brief The per-CPU scheduler state for each processor.
  */
-  Scheduler_priority_Ready_queue Ready_queue;
-} Scheduler_strong_APA_Node;
+  Scheduler_strong_APA_CPU CPU[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_strong_APA_Context;
+
+#define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255
 
 /**
 * @brief Entry points for the Strong APA Scheduler.
@@ -100,8 +170,8 @@ typedef struct {
     _Scheduler_default_Release_job, \
     _Scheduler_default_Cancel_job, \
     _Scheduler_default_Tick, \
-    _Scheduler_SMP_Start_idle \
-    SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+    _Scheduler_SMP_Start_idle, \
+    _Scheduler_strong_APA_Set_affinity \
   }
 
 /**
@@ -168,7 +238,7 @@ void _Scheduler_strong_APA_Update_priority(
 /**
 * @brief Asks for help.
 *
- * @param scheduler The scheduler control instance.
+ * @param  scheduler The scheduler control instance.
 * @param the_thread The thread that asks for help.
 * @param node The node of @a the_thread.
 *
@@ -246,6 +316,20 @@ void _Scheduler_strong_APA_Yield(
   Scheduler_Node          *node
 );
 
+/**
+ * @brief Sets the processor affinity of a scheduler node.
+ *
+ * @param scheduler The scheduler control instance.
+ * @param thread The thread associated with @a node_base.
+ * @param[in, out] node_base The node of @a thread.
+ * @param affinity The new processor affinity set for the node.
+ */
+bool _Scheduler_strong_APA_Set_affinity(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node_base,
+  const Processor_mask    *affinity
+);
+
 /** @} */
 
 #ifdef __cplusplus
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index 924cd86412..4d4e38bf0d 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -6,18 +6,31 @@
 * @brief Strong APA Scheduler Implementation
 */
 
-/*
- * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+/* SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2020 Richi Dubey
+ * Copyright (c) 2013, 2018 embedded brains GmbH
 *
- * embedded brains GmbH
- * Dornierstr. 4
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
 *
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
 */
 
 #ifdef HAVE_CONFIG_H
@@ -25,301 +38,746 @@
 #endif
 
 #include <rtems/score/schedulerstrongapa.h>
-#include <rtems/score/schedulerpriorityimpl.h>
 #include <rtems/score/schedulersmpimpl.h>
+#include <rtems/score/assert.h>
 
-static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self(
-  Scheduler_Context *context
-)
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
+{
+  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+}
+
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
 {
   return (Scheduler_strong_APA_Context *) context;
 }
 
-static Scheduler_strong_APA_Node *
+static inline Scheduler_strong_APA_Node *
 _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
 {
   return (Scheduler_strong_APA_Node *) node;
 }
 
-static void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+static inline void _Scheduler_strong_APA_Do_update(
   Scheduler_Context *context,
-  Scheduler_Node    *scheduled_to_ready
+  Scheduler_Node    *node,
+  Priority_Control   new_priority
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-  Scheduler_strong_APA_Node *node =
-    _Scheduler_strong_APA_Node_downcast( scheduled_to_ready );
-
-  _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
-  _Scheduler_priority_Ready_queue_enqueue_first(
-    &node->Base.Base.Node.Chain,
-    &node->Ready_queue,
-    &self->Bit_map
-  );
+  Scheduler_SMP_Node *smp_node;
+  (void) context;
+
+  smp_node = _Scheduler_SMP_Node_downcast( node );
+  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
 }
 
-static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
+/*
+ * Returns true if the Strong APA scheduler has ready nodes
+ * available for scheduling.
+ */
+static inline bool
+_Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
+{
+  Scheduler_strong_APA_Context *self;
+  const Chain_Node             *tail;
+  Chain_Node                   *next;
+  Scheduler_strong_APA_Node    *node;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  tail = _Chain_Immutable_tail( &self->Ready );
+  next = _Chain_First( &self->Ready );
+
+  while ( next != tail ) {
+    node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+
+    if (
+      _Scheduler_SMP_Node_state( &node->Base.Base ) ==
+      SCHEDULER_SMP_NODE_READY
+    ) {
+      return true;
+    }
+
+    next = _Chain_Next( next );
+  }
+
+  return false;
+}
+
+static inline void _Scheduler_strong_APA_Allocate_processor(
   Scheduler_Context *context,
-  Scheduler_Node    *ready_to_scheduled
+  Scheduler_Node    *scheduled_base,
+  Scheduler_Node    *victim_base,
+  Per_CPU_Control   *victim_cpu
 )
 {
+  Scheduler_strong_APA_Node    *scheduled;
   Scheduler_strong_APA_Context *self;
-  Scheduler_strong_APA_Node    *node;
-  Priority_Control              insert_priority;
 
+  (void) victim_base;
+
+  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
   self = _Scheduler_strong_APA_Get_self( context );
-  node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
 
-  _Scheduler_priority_Ready_queue_extract(
-    &node->Base.Base.Node.Chain,
-    &node->Ready_queue,
-    &self->Bit_map
+  self->CPU[ _Per_CPU_Get_index( victim_cpu ) ].executing = scheduled_base;
+
+  _Scheduler_SMP_Allocate_processor_exact(
+    context,
+    &( scheduled->Base.Base ),
+    NULL,
+    victim_cpu
   );
-  insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
+}
+
+/*
+ * Finds and returns the highest-priority ready node by traversing the
+ * queue of CPUs in self->CPU between the front and rear indices.
+ */
+static inline Scheduler_Node *_Scheduler_strong_APA_Find_highest_ready(
+  Scheduler_strong_APA_Context *self,
+  uint32_t                      front,
+  uint32_t                      rear
+)
+{
+  Scheduler_Node            *highest_ready;
+  Scheduler_strong_APA_CPU  *CPU;
+  const Chain_Node          *tail;
+  Chain_Node                *next;
+  uint32_t                   index_assigned_cpu;
+  uint32_t                   index_curr_cpu;
+  Scheduler_strong_APA_Node *node;
+  Priority_Control           min_priority_num;
+  Priority_Control           curr_priority;
+  Per_CPU_Control           *assigned_cpu;
+  Scheduler_SMP_Node_state   curr_state;
+  Per_CPU_Control           *curr_CPU;
+  bool                       first_task;
+
+  CPU = self->CPU;
+  /*
+   * The first task accessed has nothing to compare its priority against,
+   * so it is the task with the highest priority witnessed so far.
+   */
+  first_task = true;
+
+  _Assert( rear < CONFIGURE_MAXIMUM_PROCESSORS );
+
+  while ( front <= rear ) {
+    curr_CPU = CPU[ front ].cpu;
+    front = front + 1;
+
+    tail = _Chain_Immutable_tail( &self->Ready );
+    next = _Chain_First( &self->Ready );
+
+    while ( next != tail ) {
+      node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+      /* Check if the curr_CPU is in the affinity set of the node. */
+      index_curr_cpu = _Per_CPU_Get_index( curr_CPU );
+      if (
+        _Processor_mask_Is_set( &node->Affinity, index_curr_cpu )
+      ) {
+        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
+
+        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
+          index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu );
+
+          if ( CPU[ index_assigned_cpu ].visited == false ) {
+            rear = rear + 1;
+            CPU[ rear ].cpu = assigned_cpu;
+            CPU[ index_assigned_cpu ].visited = true;
+            /*
+             * The curr_CPU at the front of the queue caused this node's
+             * assigned CPU to be added to the queue. Hence, if this node
+             * gets shifted during backtracking, curr_CPU is the CPU that
+             * this node should preempt.
+             */
+            node->cpu_to_preempt = curr_CPU;
+          }
+        }
+        else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
+          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
+          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+          if ( first_task == true || curr_priority < min_priority_num ) {
+            min_priority_num = curr_priority;
+            highest_ready = &node->Base.Base;
+            first_task = false;
+            /*
+             * In case curr_CPU is filter_CPU, we need to store the
+             * cpu_to_preempt value so that we go back to the calling
+             * SMP_* function, rather than preempting the node ourselves.
+             */
+            node->cpu_to_preempt = curr_CPU;
+          }
+        }
+      }
+      next = _Chain_Next( next );
+    }
+  }
+
+  return highest_ready;
+}
+
+static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
+  Scheduler_Context *context,
+  Scheduler_Node    *ready_to_scheduled
+)
+{
+  Priority_Control insert_priority;
+
+  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
   insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
-  _Chain_Insert_ordered_unprotected(
-    &self->Base.Scheduled,
-    &node->Base.Base.Node.Chain,
-    &insert_priority,
-    _Scheduler_SMP_Priority_less_equal
+  _Scheduler_SMP_Insert_scheduled(
+    context,
+    ready_to_scheduled,
+    insert_priority
   );
 }
 
-static void _Scheduler_strong_APA_Insert_ready(
+/*
+ * Implements the BFS algorithm for task departure to get the
+ * highest-priority ready task for a particular CPU; returns the
+ * highest-priority ready Scheduler_Node. The filter parameter points to
+ * the victim node whose blocking caused this function to be called.
+ */
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
   Scheduler_Context *context,
-  Scheduler_Node    *node_base,
-  Priority_Control   insert_priority
+  Scheduler_Node    *filter
 )
 {
   Scheduler_strong_APA_Context *self;
+  Per_CPU_Control              *filter_cpu;
   Scheduler_strong_APA_Node    *node;
+  Scheduler_Node               *highest_ready;
+  Scheduler_Node               *curr_node;
+  Scheduler_Node               *next_node;
+  Scheduler_strong_APA_CPU     *CPU;
+  uint32_t                      front;
+  uint32_t                      rear;
+  uint32_t                      cpu_max;
+  uint32_t                      cpu_index;
 
   self = _Scheduler_strong_APA_Get_self( context );
-  node = _Scheduler_strong_APA_Node_downcast( node_base );
+  /* Denotes front and rear of the queue */
+  front = 0;
+  rear = -1;
 
-  if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
-    _Scheduler_priority_Ready_queue_enqueue(
-      &node->Base.Base.Node.Chain,
-      &node->Ready_queue,
-      &self->Bit_map
-    );
-  } else {
-    _Scheduler_priority_Ready_queue_enqueue_first(
-      &node->Base.Base.Node.Chain,
-      &node->Ready_queue,
-      &self->Bit_map
-    );
+  filter_cpu = _Thread_Get_CPU( filter->user );
+  CPU = self->CPU;
+  cpu_max = _SMP_Get_processor_maximum();
+
+  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+    CPU[ cpu_index ].visited = false;
   }
+
+  rear = rear + 1;
+  CPU[ rear ].cpu = filter_cpu;
+  CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
+
+  highest_ready = _Scheduler_strong_APA_Find_highest_ready(
+    self,
+    front,
+    rear
+  );
+
+  if ( highest_ready != filter ) {
+    /*
+     * Backtrack on the path from filter_cpu to highest_ready,
+     * shifting along every task.
+     */
+    node = _Scheduler_strong_APA_Node_downcast( highest_ready );
+    /*
+     * The highest-priority ready node is not directly reachable from the
+     * victim CPU, so task shifting is needed.
+     */
+    while ( node->cpu_to_preempt != filter_cpu ) {
+      curr_node = &node->Base.Base;
+      next_node = CPU[ _Per_CPU_Get_index( node->cpu_to_preempt ) ].executing;
+
+      _Scheduler_SMP_Preempt(
+        context,
+        curr_node,
+        next_node,
+        _Scheduler_strong_APA_Allocate_processor
+      );
+
+      if ( curr_node == highest_ready ) {
+        _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
+      }
+
+      node = _Scheduler_strong_APA_Node_downcast( next_node );
+    }
+    /*
+     * Save the last node so that the calling SMP_* function
+     * can do the allocation.
+     */
+    curr_node = &node->Base.Base;
+    highest_ready = curr_node;
+  }
+
+  return highest_ready;
 }
 
-static void _Scheduler_strong_APA_Extract_from_ready(
+/*
+ * Returns the lowest-priority scheduled node among the directly reachable
+ * tasks, i.e. those executing on a processor in the affinity set of
+ * filter_base.
+ */
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
   Scheduler_Context *context,
-  Scheduler_Node    *the_thread
+  Scheduler_Node    *filter_base
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-  Scheduler_strong_APA_Node *node =
-    _Scheduler_strong_APA_Node_downcast( the_thread );
-
-  _Scheduler_priority_Ready_queue_extract(
-    &node->Base.Base.Node.Chain,
-    &node->Ready_queue,
-    &self->Bit_map
-  );
+  uint32_t                      cpu_max;
+  uint32_t                      cpu_index;
+  Scheduler_Node               *curr_node;
+  Scheduler_Node               *lowest_scheduled;
+  Priority_Control              max_priority_num;
+  Priority_Control              curr_priority;
+  Scheduler_strong_APA_Node    *filter_strong_node;
+  Scheduler_strong_APA_Context *self;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  lowest_scheduled = NULL; /* To remove compiler warning */
+  max_priority_num = 0; /* Max (lowest) priority encountered so far */
+  filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );
+
+  /* lowest_scheduled stays NULL only if the affinity set is empty */
+  _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
+  cpu_max = _SMP_Get_processor_maximum();
+
+  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+    /* Checks if the CPU is in the affinity set of filter_strong_node */
+    if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
+      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+      if ( _Per_CPU_Is_processor_online( cpu ) ) {
+        curr_node = self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
+        curr_priority = _Scheduler_Node_get_priority( curr_node );
+        curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+        if ( curr_priority > max_priority_num ) {
+          lowest_scheduled = curr_node;
+          max_priority_num = curr_priority;
+        }
+      }
+    }
+  }
+
+  _Assert( lowest_scheduled != NULL );
+  return lowest_scheduled;
 }
 
-static void _Scheduler_strong_APA_Do_update(
+static inline void _Scheduler_strong_APA_Extract_from_scheduled(
   Scheduler_Context *context,
-  Scheduler_Node    *node_to_update,
-  Priority_Control   new_priority
+  Scheduler_Node    *node_to_extract
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
-  Scheduler_strong_APA_Node *node =
-    _Scheduler_strong_APA_Node_downcast( node_to_update );
-
-  _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
-  _Scheduler_priority_Ready_queue_update(
-    &node->Ready_queue,
-    SCHEDULER_PRIORITY_UNMAP( new_priority ),
-    &self->Bit_map,
-    &self->Ready[ 0 ]
-  );
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_Node    *node;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
+
+  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
+  /* Not removing it from Ready since the node could go into the READY state */
 }
 
-static Scheduler_strong_APA_Context *
-_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
+static inline void _Scheduler_strong_APA_Extract_from_ready(
+  Scheduler_Context *context,
+  Scheduler_Node    *node_to_extract
+)
 {
-  return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_Node    *node;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
+
+  _Assert( !_Chain_Is_empty( &self->Ready ) );
+  _Assert( !_Chain_Is_node_off_chain( &node->Ready_node ) );
+
+  _Chain_Extract_unprotected( &node->Ready_node );
+  _Chain_Set_off_chain( &node->Ready_node );
 }
 
-void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
+static inline void _Scheduler_strong_APA_Insert_ready(
+  Scheduler_Context *context,
+  Scheduler_Node    *node_base,
+  Priority_Control   insert_priority
+)
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_context( scheduler );
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_Node    *node;
 
-  _Scheduler_SMP_Initialize( &self->Base );
-  _Priority_bit_map_Initialize( &self->Bit_map );
-  _Scheduler_priority_Ready_queue_initialize(
-    &self->Ready[ 0 ],
-    scheduler->maximum_priority
-  );
+  self = _Scheduler_strong_APA_Get_self( context );
+  node = _Scheduler_strong_APA_Node_downcast( node_base );
+
+  if ( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
+    _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
+  }
 }
 
-void _Scheduler_strong_APA_Node_initialize(
-  const Scheduler_Control *scheduler,
-  Scheduler_Node          *node,
-  Thread_Control          *the_thread,
-  Priority_Control         priority
+static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+  Scheduler_Context *context,
+  Scheduler_Node    *scheduled_to_ready
 )
 {
-  Scheduler_Context            *context;
-  Scheduler_strong_APA_Context *self;
-  Scheduler_strong_APA_Node    *the_node;
+  Priority_Control insert_priority;
 
-  the_node = _Scheduler_strong_APA_Node_downcast( node );
-  _Scheduler_SMP_Node_initialize(
-    scheduler,
-    &the_node->Base,
-    the_thread,
-    priority
-  );
+  _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
+  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
 
-  context = _Scheduler_Get_context( scheduler );
-  self = _Scheduler_strong_APA_Get_self( context );
-  _Scheduler_priority_Ready_queue_update(
-    &the_node->Ready_queue,
-    SCHEDULER_PRIORITY_UNMAP( priority ),
-    &self->Bit_map,
-    &self->Ready[ 0 ]
+  _Scheduler_strong_APA_Insert_ready(
+    context,
+    scheduled_to_ready,
+    insert_priority
   );
 }
 
-static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_reachable(
+  Scheduler_strong_APA_Context *self,
+  uint32_t                      front,
+  uint32_t                      rear,
+  Per_CPU_Control             **cpu_to_preempt
+)
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
+  Scheduler_Node            *lowest_reachable;
+  Priority_Control           max_priority_num;
+  uint32_t                   cpu_max;
+  uint32_t                   cpu_index;
+  Thread_Control            *curr_thread;
+  Per_CPU_Control           *curr_CPU;
+  Priority_Control           curr_priority;
+  Scheduler_Node            *curr_node;
+  Scheduler_strong_APA_Node *curr_strong_node; /* Current Strong_APA_Node */
+  Scheduler_strong_APA_CPU  *CPU;
+
+  max_priority_num = 0; /* Max (lowest) priority encountered so far */
+  CPU = self->CPU;
+  cpu_max = _SMP_Get_processor_maximum();
+
+  while ( front <= rear ) {
+    curr_CPU = CPU[ front ].cpu;
+    front = front + 1;
+
+    curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing;
+    curr_thread = curr_node->user;
+
+    curr_priority = _Scheduler_Node_get_priority( curr_node );
+    curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+
+    if ( curr_priority > max_priority_num ) {
+      lowest_reachable = curr_node;
+      max_priority_num = curr_priority;
+      *cpu_to_preempt = curr_CPU;
+    }
+
+    if ( !curr_thread->is_idle ) {
+      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
+          /* Checks if the CPU is in the affinity set of the node */
+          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+          if (
+            _Per_CPU_Is_processor_online( cpu ) &&
+            CPU[ cpu_index ].visited == false
+          ) {
+            rear = rear + 1;
+            CPU[ rear ].cpu = cpu;
+            CPU[ cpu_index ].visited = true;
+            CPU[ cpu_index ].preempting_node = curr_node;
+          }
+        }
+      }
+    }
+  }
 
-  return !_Priority_bit_map_Is_empty( &self->Bit_map );
+  return lowest_reachable;
}
 
-static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
+static inline bool _Scheduler_strong_APA_Do_enqueue(
   Scheduler_Context *context,
-  Scheduler_Node    *node
+  Scheduler_Node    *lowest_reachable,
+  Scheduler_Node    *node,
+  Priority_Control   insert_priority,
+  Per_CPU_Control   *cpu_to_preempt
 )
 {
-  Scheduler_strong_APA_Context *self =
-    _Scheduler_strong_APA_Get_self( context );
+  bool                          needs_help;
+  Priority_Control              node_priority;
+  Priority_Control              lowest_priority;
+  Scheduler_strong_APA_CPU     *CPU;
+  Scheduler_Node               *curr_node;
+  Scheduler_strong_APA_Node    *curr_strong_node; /* Current Strong_APA_Node */
+  Per_CPU_Control              *curr_CPU;
+  Scheduler_strong_APA_Context *self;
+  Scheduler_Node               *next_node;
 
-  (void) node;
+  self = _Scheduler_strong_APA_Get_self( context );
+  CPU = self->CPU;
+
+  node_priority = _Scheduler_Node_get_priority( node );
+  node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
+
+  lowest_priority = _Scheduler_Node_get_priority( lowest_reachable );
+  lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );
+
+  if ( lowest_priority > node_priority ) {
+    /*
+     * Backtrack on the path from the CPU of the arriving node to
+     * lowest_reachable, shifting along every task.
+     */
+    curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
+    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+    curr_strong_node->cpu_to_preempt = cpu_to_preempt;
+
+    /* Save which CPU to preempt in the cpu_to_preempt value of the node */
+    while ( curr_node != node ) {
+      curr_CPU = _Thread_Get_CPU( curr_node->user );
+      curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
+      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+      curr_strong_node->cpu_to_preempt = curr_CPU;
+    }
+
+    curr_CPU = curr_strong_node->cpu_to_preempt;
+    next_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing;
+
+    node_priority = _Scheduler_Node_get_priority( curr_node );
+    node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
+
+    _Scheduler_SMP_Enqueue_to_scheduled(
+      context,
+      curr_node,
+      node_priority,
+      next_node,
+      _Scheduler_SMP_Insert_scheduled,
+      _Scheduler_strong_APA_Move_from_scheduled_to_ready,
+      _Scheduler_strong_APA_Allocate_processor
+    );
 
-  return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first(
-    &self->Bit_map,
-    &self->Ready[ 0 ]
-  );
+    curr_node = next_node;
+    curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+
+    while ( curr_node != lowest_reachable ) {
+      curr_CPU = curr_strong_node->cpu_to_preempt;
+      next_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing;
+      /* curr_node preempts the next_node */
+      _Scheduler_SMP_Preempt(
+        context,
+        curr_node,
+        next_node,
+        _Scheduler_strong_APA_Allocate_processor
+      );
+
+      curr_node = next_node;
+      curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+    }
+
+    _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );
+
+    needs_help = false;
+  } else {
+    needs_help = true;
+  }
+
+  /* Add it to the Ready chain since it is now either scheduled or just ready */
+  _Scheduler_strong_APA_Insert_ready( context, node, insert_priority );
+
+  return needs_help;
 }
 
-void _Scheduler_strong_APA_Block(
-  const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
-  Scheduler_Node          *node
+/*
+ * Implements the BFS algorithm for task arrival and enqueues the node
+ * either in the scheduled chain or in the ready chain.
+ * The node is newly arrived and currently not scheduled.
+ */
+static inline bool _Scheduler_strong_APA_Enqueue(
+  Scheduler_Context *context,
+  Scheduler_Node    *node,
+  Priority_Control   insert_priority
 )
 {
-  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+  Scheduler_strong_APA_Context *self;
+  Scheduler_strong_APA_CPU     *CPU;
+  uint32_t                      cpu_max;
+  uint32_t                      cpu_index;
+  Per_CPU_Control              *cpu_to_preempt;
+  Scheduler_Node               *lowest_reachable;
+  Scheduler_strong_APA_Node    *strong_node;
 
-  _Scheduler_SMP_Block(
+  /* Denotes front and rear of the queue */
+  uint32_t front;
+  uint32_t rear;
+
+  front = 0;
+  rear = -1;
+
+  self = _Scheduler_strong_APA_Get_self( context );
+  strong_node = _Scheduler_strong_APA_Node_downcast( node );
+  cpu_max = _SMP_Get_processor_maximum();
+  CPU = self->CPU;
+
+  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+    CPU[ cpu_index ].visited = false;
+
+    /* Checks if the CPU is in the affinity set of the node */
+    if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
+      Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+      if ( _Per_CPU_Is_processor_online( cpu ) ) {
+        rear = rear + 1;
+        CPU[ rear ].cpu = cpu;
+        CPU[ cpu_index ].visited = true;
+        CPU[ cpu_index ].preempting_node = node;
+      }
+    }
+  }
+
+  /*
+   * This assert makes sure that there always exists an element in the
+   * queue when we start the queue traversal.
+   */
+  _Assert( !_Processor_mask_Is_zero( &strong_node->Affinity ) );
+
+  lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
+    self,
+    front,
+    rear,
+    &cpu_to_preempt
+  );
+
+  return _Scheduler_strong_APA_Do_enqueue(
+    context,
+    lowest_reachable,
+    node,
+    insert_priority,
+    cpu_to_preempt
+  );
+}
+
+static inline bool _Scheduler_strong_APA_Enqueue_scheduled(
+  Scheduler_Context *context,
+  Scheduler_Node    *node,
+  Priority_Control   insert_priority
+)
+{
+  return _Scheduler_SMP_Enqueue_scheduled(
     context,
-    the_thread,
     node,
-    _Scheduler_SMP_Extract_from_scheduled,
+    insert_priority,
+    _Scheduler_SMP_Priority_less_equal,
     _Scheduler_strong_APA_Extract_from_ready,
     _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Insert_ready,
+    _Scheduler_SMP_Insert_scheduled,
     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
-static bool _Scheduler_strong_APA_Enqueue(
+static inline bool _Scheduler_strong_APA_Do_ask_for_help(
   Scheduler_Context *context,
-  Scheduler_Node    *node,
-  Priority_Control   insert_priority
+  Thread_Control    *the_thread,
+  Scheduler_Node    *node
 )
 {
-  return _Scheduler_SMP_Enqueue(
+  return _Scheduler_SMP_Ask_for_help(
     context,
+    the_thread,
     node,
-    insert_priority,
     _Scheduler_SMP_Priority_less_equal,
     _Scheduler_strong_APA_Insert_ready,
    _Scheduler_SMP_Insert_scheduled,
    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
-    _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_strong_APA_Get_lowest_scheduled,
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
-static bool _Scheduler_strong_APA_Enqueue_scheduled(
+static inline void _Scheduler_strong_APA_Do_set_affinity(
   Scheduler_Context *context,
-  Scheduler_Node    *node,
-  Priority_Control   insert_priority
+  Scheduler_Node    *node_base,
+  void              *arg
 )
 {
-  return _Scheduler_SMP_Enqueue_scheduled(
-    context,
-    node,
-    insert_priority,
-    _Scheduler_SMP_Priority_less_equal,
-    _Scheduler_strong_APA_Extract_from_ready,
-    _Scheduler_strong_APA_Get_highest_ready,
-    _Scheduler_strong_APA_Insert_ready,
-    _Scheduler_SMP_Insert_scheduled,
-    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+  Scheduler_strong_APA_Node *node;
+
+  node = _Scheduler_strong_APA_Node_downcast( node_base );
+  node->Affinity = *( (const Processor_mask *) arg );
+}
+
+void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
+{
+  Scheduler_strong_APA_Context *self =
+    _Scheduler_strong_APA_Get_context( scheduler );
+
+  _Scheduler_SMP_Initialize( &self->Base );
+  _Chain_Initialize_empty( &self->Ready );
+}
+
+void _Scheduler_strong_APA_Yield(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node
+)
+{
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Yield(
    context,
+    thread,
    node,
-    insert_priority,
-    _Scheduler_SMP_Priority_less_equal,
    _Scheduler_strong_APA_Extract_from_ready,
-    _Scheduler_strong_APA_Get_highest_ready,
-    _Scheduler_strong_APA_Insert_ready,
-    _Scheduler_SMP_Insert_scheduled,
-    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_exact
+    _Scheduler_strong_APA_Enqueue,
+    _Scheduler_strong_APA_Enqueue_scheduled
   );
 }
 
-void _Scheduler_strong_APA_Unblock(
+void _Scheduler_strong_APA_Block(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 )
 {
   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
-
-  _Scheduler_SMP_Unblock(
+  /* The extract from ready automatically removes the node from the Ready chain */
+  _Scheduler_SMP_Block(
     context,
-    the_thread,
+    thread,
    node,
-    _Scheduler_strong_APA_Do_update,
-    _Scheduler_strong_APA_Enqueue
+    _Scheduler_strong_APA_Extract_from_scheduled,
+    _Scheduler_strong_APA_Extract_from_ready,
+    _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
-static bool _Scheduler_strong_APA_Do_ask_for_help(
-  Scheduler_Context *context,
-  Thread_Control    *the_thread,
-  Scheduler_Node    *node
+void _Scheduler_strong_APA_Unblock(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node
 )
 {
-  return _Scheduler_SMP_Ask_for_help(
+  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+  _Scheduler_SMP_Unblock(
    context,
-    the_thread,
+    thread,
    node,
-    _Scheduler_SMP_Priority_less_equal,
-    _Scheduler_strong_APA_Insert_ready,
-    _Scheduler_SMP_Insert_scheduled,
-    _Scheduler_strong_APA_Move_from_scheduled_to_ready,
-    _Scheduler_SMP_Get_lowest_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_strong_APA_Do_update,
+    _Scheduler_strong_APA_Enqueue
   );
 }
 
 void _Scheduler_strong_APA_Update_priority(
   const Scheduler_Control *scheduler,
-  Thread_Control          *the_thread,
+  Thread_Control          *thread,
   Scheduler_Node          *node
 )
 {
@@ -327,7 +785,7 @@ void _Scheduler_strong_APA_Update_priority(
 
   _Scheduler_SMP_Update_priority(
     context,
-    the_thread,
+    thread,
     node,
     _Scheduler_strong_APA_Extract_from_ready,
     _Scheduler_strong_APA_Do_update,
@@ -345,7 +803,11 @@ bool _Scheduler_strong_APA_Ask_for_help(
 {
   Scheduler_Context *context = _Scheduler_Get_context( scheduler );
 
-  return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node );
+  return _Scheduler_strong_APA_Do_ask_for_help(
+    context,
+    the_thread,
+    node
+  );
 }
 
 void _Scheduler_strong_APA_Reconsider_help_request(
@@ -381,7 +843,7 @@ void _Scheduler_strong_APA_Withdraw_node(
     _Scheduler_strong_APA_Extract_from_ready,
     _Scheduler_strong_APA_Get_highest_ready,
     _Scheduler_strong_APA_Move_from_ready_to_scheduled,
-    _Scheduler_SMP_Allocate_processor_lazy
+    _Scheduler_strong_APA_Allocate_processor
   );
 }
 
@@ -416,20 +878,65 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor(
   );
 }
 
-void _Scheduler_strong_APA_Yield(
+void _Scheduler_strong_APA_Node_initialize(
   const Scheduler_Control *scheduler,
+  Scheduler_Node          *node,
   Thread_Control          *the_thread,
-  Scheduler_Node          *node
+  Priority_Control         priority
 )
 {
-  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+  Scheduler_SMP_Node        *smp_node;
+  Scheduler_strong_APA_Node *strong_node;
 
-  _Scheduler_SMP_Yield(
-    context,
-    the_thread,
-    node,
-    _Scheduler_strong_APA_Extract_from_ready,
-    _Scheduler_strong_APA_Enqueue,
-    _Scheduler_strong_APA_Enqueue_scheduled
+  smp_node = _Scheduler_SMP_Node_downcast( node );
+  strong_node = _Scheduler_strong_APA_Node_downcast( node );
+
+  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
+
+  _Processor_mask_Assign(
+    &strong_node->Affinity,
+    _SMP_Get_online_processors()
   );
 }
+
+bool _Scheduler_strong_APA_Set_affinity(
+  const Scheduler_Control *scheduler,
+  Thread_Control          *thread,
+  Scheduler_Node          *node_base,
+  const Processor_mask    *affinity
+)
+{
+  Scheduler_Context         *context;
+  Scheduler_strong_APA_Node *node;
+  Processor_mask             local_affinity;
+
+  context = _Scheduler_Get_context( scheduler );
+  _Processor_mask_And( &local_affinity, &context->Processors, affinity );
+
+  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
+    return false;
+  }
+
+  node = _Scheduler_strong_APA_Node_downcast( node_base );
+
+  if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
+    return true; /* Nothing to do. Return true. */
+
+  _Processor_mask_Assign( &node->Affinity, &local_affinity );
+
+  _Scheduler_SMP_Set_affinity(
+    context,
+    thread,
+    node_base,
+    &local_affinity,
+    _Scheduler_strong_APA_Do_set_affinity,
+    _Scheduler_strong_APA_Extract_from_ready,
+    _Scheduler_strong_APA_Get_highest_ready,
+    _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+    _Scheduler_strong_APA_Enqueue,
+    _Scheduler_strong_APA_Allocate_processor
+  );
+
+  return true;
+}
+
-- 
2.17.1
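P.S. In case it helps with testing: below is roughly how the scheduler can be exercised from an application once the patch is applied. This is a sketch rather than a test case; the configuration follows the usual confdefs.h pattern and the processor count has to match your BSP.

  #include <rtems.h>
  #include <assert.h>
  #include <sys/cpuset.h> /* cpu_set_t, CPU_ZERO(), CPU_SET() */

  static rtems_task Init( rtems_task_argument arg )
  {
    cpu_set_t         cpuset;
    rtems_status_code sc;

    (void) arg;

    /* Restrict the init task to processors 0 and 1 */
    CPU_ZERO( &cpuset );
    CPU_SET( 0, &cpuset );
    CPU_SET( 1, &cpuset );

    /* With this patch the request ends up in
       _Scheduler_strong_APA_Set_affinity() */
    sc = rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
    assert( sc == RTEMS_SUCCESSFUL );

    rtems_task_exit();
  }

  #define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
  #define CONFIGURE_MAXIMUM_PROCESSORS 4
  #define CONFIGURE_SCHEDULER_STRONG_APA /* select this scheduler */
  #define CONFIGURE_MAXIMUM_TASKS 1
  #define CONFIGURE_RTEMS_INIT_TASKS_TABLE
  #define CONFIGURE_INIT
  #include <rtems/confdefs.h>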