[rtems commit] rtems: Fix rtems_scheduler_remove_processor()

Sebastian Huber sebh at rtems.org
Tue Nov 23 13:34:54 UTC 2021


Module:    rtems
Branch:    master
Commit:    c69a70a597ec5df75a51bfa39c14198a5c5fb22e
Changeset: http://git.rtems.org/rtems/commit/?id=c69a70a597ec5df75a51bfa39c14198a5c5fb22e

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Mon Nov  8 11:08:24 2021 +0100

rtems: Fix rtems_scheduler_remove_processor()

Return an error status for the following error condition in
rtems_scheduler_remove_processor():

While an attempt is made to remove a processor from a scheduler, if the
processor is the only processor owned by the scheduler and a thread
exists which uses the scheduler as a helping scheduler, then the
processor shall not be removed.

The reason is that ask-for-help requests and withdraw-node requests are
processed asynchronously in any order.  An ask-for-help request carried
out on a scheduler without a processor is undefined behaviour.

Update error status description.

Update #4544.
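
For illustration only (not part of this change; the scheduler name
"APP ", the processor index, and the error handling are made up), a
caller of rtems_scheduler_remove_processor() might now distinguish the
two RTEMS_RESOURCE_IN_USE cases like this:

    #include <rtems.h>

    static void remove_processor_from_app_scheduler( void )
    {
      rtems_id          scheduler_id;
      rtems_status_code sc;

      /* Look up a hypothetical application-defined scheduler */
      sc = rtems_scheduler_ident(
        rtems_build_name( 'A', 'P', 'P', ' ' ),
        &scheduler_id
      );

      if ( sc != RTEMS_SUCCESSFUL ) {
        return;
      }

      /* Try to remove a hypothetical processor index */
      sc = rtems_scheduler_remove_processor( scheduler_id, 1 );

      if ( sc == RTEMS_RESOURCE_IN_USE ) {
        /*
         * Either a non-idle task with this scheduler as its home
         * scheduler still needs the processor, or this is the last
         * processor owned by the scheduler and at least one task
         * uses it as a helping scheduler.  Move or delete the
         * affected tasks first, then retry.
         */
      }
    }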

---

 cpukit/include/rtems/rtems/tasks.h            |  9 +++--
 cpukit/include/rtems/score/schedulersmpimpl.h | 16 ++++-----
 cpukit/rtems/src/schedulerremoveprocessor.c   | 50 +++++++++++++++++++++++++--
 3 files changed, 61 insertions(+), 14 deletions(-)

diff --git a/cpukit/include/rtems/rtems/tasks.h b/cpukit/include/rtems/rtems/tasks.h
index 0392586..8e87bfd 100644
--- a/cpukit/include/rtems/rtems/tasks.h
+++ b/cpukit/include/rtems/rtems/tasks.h
@@ -867,9 +867,12 @@ rtems_status_code rtems_scheduler_add_processor(
  *
  * @retval ::RTEMS_INVALID_NUMBER The processor was not owned by the scheduler.
  *
- * @retval ::RTEMS_RESOURCE_IN_USE The set of processors owned by the scheduler
- *   would have been empty after the processor removal and there was at least
- *   one non-idle task that used this scheduler as its home scheduler.
+ * @retval ::RTEMS_RESOURCE_IN_USE The processor was required by at least one
+ *   non-idle task that used the scheduler as its home scheduler.
+ *
+ * @retval ::RTEMS_RESOURCE_IN_USE The processor was the last processor owned
+ *   by the scheduler and there was at least one task that used the scheduler
+ *   as a helping scheduler.
  *
  * @par Notes
  * Removing a processor from a scheduler is a complex operation that involves
diff --git a/cpukit/include/rtems/score/schedulersmpimpl.h b/cpukit/include/rtems/score/schedulersmpimpl.h
index 97a7712..c37f53c 100644
--- a/cpukit/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/include/rtems/score/schedulersmpimpl.h
@@ -1978,8 +1978,9 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
   victim_owner = _Scheduler_Node_get_owner( victim_node );
 
   if ( !victim_owner->is_idle ) {
-    Thread_Control *victim_idle;
-    Scheduler_Node *idle_node;
+    Thread_Control  *victim_idle;
+    Scheduler_Node  *idle_node;
+    Priority_Control insert_priority;
 
     victim_idle = _Scheduler_Release_idle_thread_if_necessary(
       victim_node,
@@ -1996,13 +1997,10 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
       _Scheduler_SMP_Allocate_processor_exact
     );
 
-    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
-      Priority_Control insert_priority;
-
-      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
-      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
-      ( *enqueue )( &self->Base, victim_node, insert_priority );
-    }
+    _Assert( !_Chain_Is_empty( &self->Scheduled ) );
+    insert_priority = _Scheduler_SMP_Node_priority( victim_node );
+    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+    ( *enqueue )( &self->Base, victim_node, insert_priority );
   } else {
     _Assert( victim_owner == victim_user );
     _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
diff --git a/cpukit/rtems/src/schedulerremoveprocessor.c b/cpukit/rtems/src/schedulerremoveprocessor.c
index 79c17bd..3136a8e 100644
--- a/cpukit/rtems/src/schedulerremoveprocessor.c
+++ b/cpukit/rtems/src/schedulerremoveprocessor.c
@@ -35,7 +35,7 @@ typedef struct {
   rtems_status_code        status;
 } Scheduler_Processor_removal_context;
 
-static bool _Scheduler_Check_processor_removal(
+static bool _Scheduler_Check_processor_not_required(
   Thread_Control *the_thread,
   void           *arg
 )
@@ -68,6 +68,45 @@ static bool _Scheduler_Check_processor_removal(
   _Thread_Wait_release( the_thread, &queue_context );
   return iter_context->status != RTEMS_SUCCESSFUL;
 }
+
+static bool _Scheduler_Check_no_helping(
+  Thread_Control *the_thread,
+  void           *arg
+)
+{
+  Scheduler_Processor_removal_context *iter_context;
+  ISR_lock_Context                     lock_context;
+  const Chain_Node                    *node;
+  const Chain_Node                    *tail;
+
+  if ( the_thread->is_idle ) {
+    return false;
+  }
+
+  iter_context = arg;
+
+  _Thread_State_acquire( the_thread, &lock_context );
+  node = _Chain_Immutable_first( &the_thread->Scheduler.Scheduler_nodes );
+  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+  do {
+    const Scheduler_Node    *scheduler_node;
+    const Scheduler_Control *scheduler;
+
+    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+    if ( scheduler == iter_context->scheduler ) {
+      iter_context->status = RTEMS_RESOURCE_IN_USE;
+      break;
+    }
+
+    node = _Chain_Immutable_next( node );
+  } while ( node != tail );
+
+  _Thread_State_release( the_thread, &lock_context );
+  return iter_context->status != RTEMS_SUCCESSFUL;
+}
 #endif
 
 rtems_status_code rtems_scheduler_remove_processor(
@@ -116,7 +155,14 @@ rtems_status_code rtems_scheduler_remove_processor(
   _Scheduler_Release_critical( scheduler, &lock_context );
   _ISR_lock_ISR_enable( &lock_context );
 
-  _Thread_Iterate( _Scheduler_Check_processor_removal, &iter_context );
+  _Thread_Iterate( _Scheduler_Check_processor_not_required, &iter_context );
+
+  if (
+    _Processor_mask_Is_zero( &scheduler_context->Processors ) &&
+    iter_context.status == RTEMS_SUCCESSFUL
+  ) {
+    _Thread_Iterate( _Scheduler_Check_no_helping, &iter_context );
+  }
 
   _ISR_lock_ISR_disable( &lock_context );
   _Scheduler_Acquire_critical( scheduler, &lock_context );
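
A task uses a scheduler as a helping scheduler if it takes part in
resource sharing across scheduler instances, for example through a MrsP
semaphore.  A minimal sketch (assumptions: the semaphore name and
ceiling priority are made up; error handling is omitted) of how such a
situation can arise:

    #include <rtems.h>

    static rtems_id create_cross_scheduler_mutex( void )
    {
      rtems_id          sem_id;
      rtems_status_code sc;

      /*
       * A MrsP semaphore: while a task of another scheduler instance
       * waits for the semaphore, the owner can gain that scheduler as
       * a helping scheduler.  This is the state which the new
       * _Scheduler_Check_no_helping() detects.
       */
      sc = rtems_semaphore_create(
        rtems_build_name( 'M', 'R', 'S', 'P' ),
        1,
        RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY
          | RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
        1, /* hypothetical ceiling priority */
        &sem_id
      );
      (void) sc;
      return sem_id;
    }

While such a task exists, removing the last processor owned by an
involved scheduler now fails with RTEMS_RESOURCE_IN_USE instead of
leaving the scheduler exposed to ask-for-help requests without a
processor.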


