[rtems commit] score: Fix priority discipline handling

Sebastian Huber sebh at rtems.org
Thu Sep 2 05:47:59 UTC 2021


Module:    rtems
Branch:    master
Commit:    9c0591f12d450401746bc0bf7cd7a0e0b14a5f3b
Changeset: http://git.rtems.org/rtems/commit/?id=9c0591f12d450401746bc0bf7cd7a0e0b14a5f3b

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Thu Mar 25 09:11:26 2021 +0100

score: Fix priority discipline handling

The priority queues in clustered scheduling configurations use a per
scheduler priority queue rotation to ensure FIFO fairness across
schedulers.  This mechanism is implemented in the thread queue surrender
operation.  Unfortunately, some semaphore and message queue directives
wrongly used the thread queue extract operation.  Fix this through the
use of _Thread_queue_Surrender().

Update #4358.

---

 cpukit/include/rtems/posix/muteximpl.h     |  48 ++-----------
 cpukit/include/rtems/score/coremuteximpl.h |  51 ++------------
 cpukit/include/rtems/score/coresemimpl.h   |  20 +++---
 cpukit/include/rtems/score/threadqimpl.h   |  43 ++++++++++++
 cpukit/posix/src/condsignalsupp.c          |  31 ++++----
 cpukit/posix/src/sempost.c                 |  14 ++--
 cpukit/score/src/semaphore.c               |  28 +++-----
 cpukit/score/src/threadqenqueue.c          | 109 +++++++++++++++++++++++++++++
 8 files changed, 196 insertions(+), 148 deletions(-)

diff --git a/cpukit/include/rtems/posix/muteximpl.h b/cpukit/include/rtems/posix/muteximpl.h
index 5d20bc1..3decb6f 100644
--- a/cpukit/include/rtems/posix/muteximpl.h
+++ b/cpukit/include/rtems/posix/muteximpl.h
@@ -382,10 +382,7 @@ RTEMS_INLINE_ROUTINE Status_Control _POSIX_Mutex_Ceiling_surrender(
   Thread_queue_Context *queue_context
 )
 {
-  unsigned int        nest_level;
-  ISR_lock_Context    lock_context;
-  Per_CPU_Control    *cpu_self;
-  Thread_queue_Heads *heads;
+  unsigned int nest_level;
 
   if ( !_POSIX_Mutex_Is_owner( the_mutex, executing ) ) {
     _POSIX_Mutex_Release( the_mutex, queue_context );
@@ -400,48 +397,13 @@ RTEMS_INLINE_ROUTINE Status_Control _POSIX_Mutex_Ceiling_surrender(
     return STATUS_SUCCESSFUL;
   }
 
-  _Thread_Resource_count_decrement( executing );
-
-  _Thread_queue_Context_clear_priority_updates( queue_context );
-  _Thread_Wait_acquire_default_critical( executing, &lock_context );
-  _Thread_Priority_remove(
+  return _Thread_queue_Surrender_priority_ceiling(
+    &the_mutex->Recursive.Mutex.Queue.Queue,
     executing,
     &the_mutex->Priority_ceiling,
-    queue_context
+    queue_context,
+    POSIX_MUTEX_PRIORITY_CEILING_TQ_OPERATIONS
   );
-  _Thread_Wait_release_default_critical( executing, &lock_context );
-
-  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
-
-  heads = the_mutex->Recursive.Mutex.Queue.Queue.heads;
-
-  if ( heads != NULL ) {
-    const Thread_queue_Operations *operations;
-    Thread_Control                *new_owner;
-
-    operations = POSIX_MUTEX_PRIORITY_CEILING_TQ_OPERATIONS;
-    new_owner = ( *operations->first )( heads );
-    _POSIX_Mutex_Set_owner( the_mutex, new_owner );
-    _Thread_Resource_count_increment( new_owner );
-    _Thread_Priority_add(
-      new_owner,
-      &the_mutex->Priority_ceiling,
-      queue_context
-    );
-    _Thread_queue_Extract_critical(
-      &the_mutex->Recursive.Mutex.Queue.Queue,
-      operations,
-      new_owner,
-      queue_context
-    );
-  } else {
-    _POSIX_Mutex_Set_owner( the_mutex, NULL );
-    _POSIX_Mutex_Release( the_mutex, queue_context );
-  }
-
-  _Thread_Priority_update( queue_context );
-  _Thread_Dispatch_enable( cpu_self );
-  return STATUS_SUCCESSFUL;
 }
 
 #define POSIX_MUTEX_ABSTIME_TRY_LOCK ((uintptr_t) 1)
diff --git a/cpukit/include/rtems/score/coremuteximpl.h b/cpukit/include/rtems/score/coremuteximpl.h
index 426c4c5..757efbd 100644
--- a/cpukit/include/rtems/score/coremuteximpl.h
+++ b/cpukit/include/rtems/score/coremuteximpl.h
@@ -529,10 +529,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
   Thread_queue_Context       *queue_context
 )
 {
-  unsigned int      nest_level;
-  ISR_lock_Context  lock_context;
-  Per_CPU_Control  *cpu_self;
-  Thread_Control   *new_owner;
+  unsigned int nest_level;
 
   _CORE_mutex_Acquire_critical( &the_mutex->Recursive.Mutex, queue_context );
 
@@ -549,53 +546,13 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_ceiling_mutex_Surrender(
     return STATUS_SUCCESSFUL;
   }
 
-  _Thread_Resource_count_decrement( executing );
-
-  _Thread_queue_Context_clear_priority_updates( queue_context );
-  _Thread_Wait_acquire_default_critical( executing, &lock_context );
-  _Thread_Priority_remove(
+  return _Thread_queue_Surrender_priority_ceiling(
+    &the_mutex->Recursive.Mutex.Wait_queue.Queue,
     executing,
     &the_mutex->Priority_ceiling,
-    queue_context
-  );
-  _Thread_Wait_release_default_critical( executing, &lock_context );
-
-  new_owner = _Thread_queue_First_locked(
-    &the_mutex->Recursive.Mutex.Wait_queue,
+    queue_context,
     CORE_MUTEX_TQ_OPERATIONS
   );
-  _CORE_mutex_Set_owner( &the_mutex->Recursive.Mutex, new_owner );
-
-  cpu_self = _Thread_Dispatch_disable_critical(
-    &queue_context->Lock_context.Lock_context
-  );
-
-  if ( new_owner != NULL ) {
-#if defined(RTEMS_MULTIPROCESSING)
-    if ( _Objects_Is_local_id( new_owner->Object.id ) )
-#endif
-    {
-      _Thread_Resource_count_increment( new_owner );
-      _Thread_Priority_add(
-        new_owner,
-        &the_mutex->Priority_ceiling,
-        queue_context
-      );
-    }
-
-    _Thread_queue_Extract_critical(
-      &the_mutex->Recursive.Mutex.Wait_queue.Queue,
-      CORE_MUTEX_TQ_OPERATIONS,
-      new_owner,
-      queue_context
-    );
-  } else {
-    _CORE_mutex_Release( &the_mutex->Recursive.Mutex, queue_context );
-  }
-
-  _Thread_Priority_update( queue_context );
-  _Thread_Dispatch_enable( cpu_self );
-  return STATUS_SUCCESSFUL;
 }
 
 /** @} */
diff --git a/cpukit/include/rtems/score/coresemimpl.h b/cpukit/include/rtems/score/coresemimpl.h
index ba4825b..40b58cb 100644
--- a/cpukit/include/rtems/score/coresemimpl.h
+++ b/cpukit/include/rtems/score/coresemimpl.h
@@ -133,23 +133,21 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Surrender(
   Thread_queue_Context          *queue_context
 )
 {
-  Thread_Control *the_thread;
-  Status_Control  status;
+  Status_Control      status;
+  Thread_queue_Heads *heads;
 
   status = STATUS_SUCCESSFUL;
 
   _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
 
-  the_thread = _Thread_queue_First_locked(
-    &the_semaphore->Wait_queue,
-    operations
-  );
-  if ( the_thread != NULL ) {
-    _Thread_queue_Extract_critical(
+  heads = the_semaphore->Wait_queue.Queue.heads;
+
+  if ( heads != NULL ) {
+    _Thread_queue_Surrender_no_priority(
       &the_semaphore->Wait_queue.Queue,
-      operations,
-      the_thread,
-      queue_context
+      heads,
+      queue_context,
+      operations
     );
   } else {
     if ( the_semaphore->count < maximum_count )
diff --git a/cpukit/include/rtems/score/threadqimpl.h b/cpukit/include/rtems/score/threadqimpl.h
index 44efc1f..2465fc4 100644
--- a/cpukit/include/rtems/score/threadqimpl.h
+++ b/cpukit/include/rtems/score/threadqimpl.h
@@ -1080,6 +1080,49 @@ void _Thread_queue_Surrender(
   const Thread_queue_Operations *operations
 );
 
+/**
+ * @brief Surrenders the thread queue previously owned by the thread to the
+ * first enqueued thread.
+ *
+ * The owner of the thread queue must be set to NULL by the caller.
+ *
+ * This function releases the thread queue lock.  In addition it performs a
+ * thread dispatch if necessary.
+ *
+ * @param[in, out] queue The actual thread queue.
+ * @param heads The thread queue heads.  It must not be NULL.
+ * @param queue_context The thread queue context of the lock acquire.
+ * @param operations The thread queue operations.
+ */
+void _Thread_queue_Surrender_no_priority(
+  Thread_queue_Queue            *queue,
+  Thread_queue_Heads            *heads,
+  Thread_queue_Context          *queue_context,
+  const Thread_queue_Operations *operations
+);
+
+/**
+ * @brief Surrenders the thread queue previously owned by the thread to the
+ * first enqueued thread.
+ *
+ * The ceiling priority is removed from the executing thread and added to the
+ * new owner, if present.  This function releases the thread queue lock.  In
+ * addition it performs a thread dispatch if necessary.
+ *
+ * @param[in, out] queue The actual thread queue.
+ * @param executing The executing thread, which surrenders the thread queue.
+ * @param ceiling_priority The ceiling priority node of the thread queue.
+ * @param queue_context The thread queue context of the lock acquire.
+ * @param operations The thread queue operations.
+ */
+Status_Control _Thread_queue_Surrender_priority_ceiling(
+  Thread_queue_Queue            *queue,
+  Thread_Control                *executing,
+  Priority_Node                 *ceiling_priority,
+  Thread_queue_Context          *queue_context,
+  const Thread_queue_Operations *operations
+);
+
 #if defined(RTEMS_SMP)
 /**
  * @brief Surrenders the thread queue previously owned by the thread to the
diff --git a/cpukit/posix/src/condsignalsupp.c b/cpukit/posix/src/condsignalsupp.c
index dd3886a..188e61e 100644
--- a/cpukit/posix/src/condsignalsupp.c
+++ b/cpukit/posix/src/condsignalsupp.c
@@ -35,36 +35,33 @@ int _POSIX_Condition_variables_Signal_support(
 {
   POSIX_Condition_variables_Control *the_cond;
   unsigned long                      flags;
-  const Thread_queue_Operations     *operations;
-  Thread_queue_Heads                *heads;
+  Thread_queue_Context               queue_context;
 
   the_cond = _POSIX_Condition_variables_Get( cond );
   POSIX_CONDITION_VARIABLES_VALIDATE_OBJECT( the_cond, flags );
-  operations = POSIX_CONDITION_VARIABLES_TQ_OPERATIONS;
+  _Thread_queue_Context_initialize( &queue_context );
 
   do {
-    Thread_queue_Context queue_context;
+    Thread_queue_Heads *heads;
 
-    _Thread_queue_Context_initialize( &queue_context );
     _POSIX_Condition_variables_Acquire( the_cond, &queue_context );
 
     heads = the_cond->Queue.Queue.heads;
 
-    if ( heads != NULL ) {
-      Thread_Control *the_thread;
-
-      the_thread = ( *operations->first )( heads );
-      _Thread_queue_Extract_critical(
-        &the_cond->Queue.Queue,
-        operations,
-        the_thread,
-        &queue_context
-      );
-    } else {
+    if ( heads == NULL ) {
       the_cond->mutex = POSIX_CONDITION_VARIABLES_NO_MUTEX;
       _POSIX_Condition_variables_Release( the_cond, &queue_context );
+
+      return 0;
     }
-  } while ( is_broadcast && heads != NULL );
+
+    _Thread_queue_Surrender_no_priority(
+      &the_cond->Queue.Queue,
+      heads,
+      &queue_context,
+      POSIX_CONDITION_VARIABLES_TQ_OPERATIONS
+    );
+  } while ( is_broadcast );
 
   return 0;
 }
diff --git a/cpukit/posix/src/sempost.c b/cpukit/posix/src/sempost.c
index f1fb7fa..49142b2 100644
--- a/cpukit/posix/src/sempost.c
+++ b/cpukit/posix/src/sempost.c
@@ -47,18 +47,12 @@ int sem_post( sem_t *_sem )
   }
 
   if ( RTEMS_PREDICT_TRUE( heads != NULL ) ) {
-    const Thread_queue_Operations *operations;
-    Thread_Control *first;
-
     _Thread_queue_Context_set_ISR_level( &queue_context, level );
-    operations = SEMAPHORE_TQ_OPERATIONS;
-    first = ( *operations->first )( heads );
-
-    _Thread_queue_Extract_critical(
+    _Thread_queue_Surrender_no_priority(
       &sem->Queue.Queue,
-      operations,
-      first,
-      &queue_context
+      heads,
+      &queue_context,
+      SEMAPHORE_TQ_OPERATIONS
     );
     return 0;
   }
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index db3c3a9..7743939 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -159,18 +159,12 @@ void _Semaphore_Post( struct _Semaphore_Control *_sem )
     ++sem->count;
     _Sem_Queue_release( sem, level, &queue_context );
   } else {
-    const Thread_queue_Operations *operations;
-    Thread_Control *first;
-
     _Thread_queue_Context_set_ISR_level( &queue_context, level );
-    operations = SEMAPHORE_TQ_OPERATIONS;
-    first = ( *operations->first )( heads );
-
-    _Thread_queue_Extract_critical(
+    _Thread_queue_Surrender_no_priority(
       &sem->Queue.Queue,
-      operations,
-      first,
-      &queue_context
+      heads,
+      &queue_context,
+      SEMAPHORE_TQ_OPERATIONS
     );
   }
 }
@@ -192,18 +186,12 @@ void _Semaphore_Post_binary( struct _Semaphore_Control *_sem )
     sem->count = 1;
     _Sem_Queue_release( sem, level, &queue_context );
   } else {
-    const Thread_queue_Operations *operations;
-    Thread_Control *first;
-
     _Thread_queue_Context_set_ISR_level( &queue_context, level );
-    operations = SEMAPHORE_TQ_OPERATIONS;
-    first = ( *operations->first )( heads );
-
-    _Thread_queue_Extract_critical(
+    _Thread_queue_Surrender_no_priority(
       &sem->Queue.Queue,
-      operations,
-      first,
-      &queue_context
+      heads,
+      &queue_context,
+      SEMAPHORE_TQ_OPERATIONS
     );
   }
 }
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index d187e32..d165e30 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -711,6 +711,115 @@ void _Thread_queue_Surrender(
   _Thread_Dispatch_enable( cpu_self );
 }
 
+void _Thread_queue_Surrender_no_priority(
+  Thread_queue_Queue            *queue,
+  Thread_queue_Heads            *heads,
+  Thread_queue_Context          *queue_context,
+  const Thread_queue_Operations *operations
+)
+{
+  Thread_Control  *the_thread;
+  bool             unblock;
+  Per_CPU_Control *cpu_self;
+
+  _Assert( heads != NULL );
+  _Assert( queue->owner == NULL );
+
+  the_thread = ( *operations->surrender )( queue, heads, NULL, queue_context );
+
+#if defined(RTEMS_MULTIPROCESSING)
+  _Thread_queue_MP_set_callout( the_thread, queue_context );
+#endif
+
+  unblock = _Thread_queue_Make_ready_again( the_thread );
+
+  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+  _Thread_queue_Queue_release(
+    queue,
+    &queue_context->Lock_context.Lock_context
+  );
+
+  if ( unblock ) {
+    _Thread_Remove_timer_and_unblock( the_thread, queue );
+  }
+
+  _Thread_Dispatch_enable( cpu_self );
+}
+
+Status_Control _Thread_queue_Surrender_priority_ceiling(
+  Thread_queue_Queue            *queue,
+  Thread_Control                *executing,
+  Priority_Node                 *priority_ceiling,
+  Thread_queue_Context          *queue_context,
+  const Thread_queue_Operations *operations
+)
+{
+  ISR_lock_Context    lock_context;
+  Thread_queue_Heads *heads;
+  Thread_Control     *new_owner;
+  bool                unblock;
+  Per_CPU_Control    *cpu_self;
+
+  _Thread_Resource_count_decrement( executing );
+
+  _Thread_queue_Context_clear_priority_updates( queue_context );
+  _Thread_Wait_acquire_default_critical( executing, &lock_context );
+  _Thread_Priority_remove( executing, priority_ceiling, queue_context );
+  _Thread_Wait_release_default_critical( executing, &lock_context );
+
+  heads = queue->heads;
+  queue->owner = NULL;
+
+  if ( heads == NULL ) {
+    cpu_self = _Thread_Dispatch_disable_critical(
+      &queue_context->Lock_context.Lock_context
+    );
+    _Thread_queue_Queue_release(
+      queue,
+      &queue_context->Lock_context.Lock_context
+    );
+    _Thread_Priority_update( queue_context );
+    _Thread_Dispatch_direct( cpu_self );
+    return STATUS_SUCCESSFUL;
+  }
+
+  new_owner = ( *operations->surrender )(
+    queue,
+    heads,
+    NULL,
+    queue_context
+  );
+
+  queue->owner = new_owner;
+
+  unblock = _Thread_queue_Make_ready_again( new_owner );
+
+#if defined(RTEMS_MULTIPROCESSING)
+  if ( _Objects_Is_local_id( new_owner->Object.id ) )
+#endif
+  {
+    _Thread_Resource_count_increment( new_owner );
+    _Thread_Wait_acquire_default_critical( new_owner, &lock_context );
+    _Thread_Priority_add( new_owner, priority_ceiling, queue_context );
+    _Thread_Wait_release_default_critical( new_owner, &lock_context );
+  }
+
+  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
+  _Thread_queue_Queue_release(
+    queue,
+    &queue_context->Lock_context.Lock_context
+  );
+
+  _Thread_Priority_update( queue_context );
+
+  if ( unblock ) {
+    _Thread_Remove_timer_and_unblock( new_owner, queue );
+  }
+
+  _Thread_Dispatch_direct( cpu_self );
+  return STATUS_SUCCESSFUL;
+}
+
 #if defined(RTEMS_SMP)
 void _Thread_queue_Surrender_sticky(
   Thread_queue_Queue            *queue,



More information about the vc mailing list