[rtems commit] score: Add thread queue enqueue callout

Sebastian Huber sebh at rtems.org
Wed Nov 23 11:54:15 UTC 2016


Module:    rtems
Branch:    master
Commit:    125f248231c173a038ed9fc00832e0b3d221ad43
Changeset: http://git.rtems.org/rtems/commit/?id=125f248231c173a038ed9fc00832e0b3d221ad43

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed Nov 16 16:39:43 2016 +0100

score: Add thread queue enqueue callout

Replace the expected thread dispatch disable level with a thread queue
enqueue callout.  This enables the use of _Thread_Dispatch_direct() in
the thread queue enqueue procedure.  This avoids impossible execution
paths, e.g. Per_CPU_Control::dispatch_necessary is always true.
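
A minimal sketch of the new pattern, condensed from the hunks below
(My_object_Control, MY_OBJECT_TQ_OPERATIONS and the chosen wait state
are hypothetical placeholders; the thread queue calls are the ones used
by this commit):

static void _My_object_Enqueue_callout(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /*
   * Invoked after the thread queue lock is released, with thread
   * dispatching disabled, just before the thread blocks.  Release an
   * object-specific lock here if necessary, see for example
   * _Region_Enqueue_callout() and _Condition_Enqueue_callout() below.
   */
}

static void _My_object_Wait( My_object_Control *the_object )
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_enqueue_callout(
    &queue_context,
    _My_object_Enqueue_callout
  );
  _Thread_queue_Context_set_no_timeout( &queue_context );
  _Thread_queue_Acquire( &the_object->Wait_queue, &queue_context );
  _Thread_queue_Enqueue_critical(
    &the_object->Wait_queue.Queue,
    MY_OBJECT_TQ_OPERATIONS,
    _Thread_Executing,
    STATES_WAITING_FOR_EVENT,
    &queue_context
  );
}

Callers that need no post-release action use the provided
_Thread_queue_Enqueue_do_nothing() callout, installed via
_Thread_queue_Context_set_do_nothing_enqueue_callout().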

---

 cpukit/posix/include/rtems/posix/condimpl.h    |   4 +
 cpukit/posix/src/condwaitsupp.c                |  75 ++++++------
 cpukit/posix/src/nanosleep.c                   |  31 +++--
 cpukit/posix/src/pthreadjoin.c                 |   2 +-
 cpukit/posix/src/sigtimedwait.c                |   2 +-
 cpukit/rtems/include/rtems/rtems/regionimpl.h  |   3 +
 cpukit/rtems/src/regiongetsegment.c            |  46 ++++----
 cpukit/rtems/src/taskdelete.c                  |  26 +++--
 cpukit/sapi/src/interrtext.c                   |   2 +-
 cpukit/score/include/rtems/score/coresemimpl.h |   2 +-
 cpukit/score/include/rtems/score/interr.h      |   2 +-
 cpukit/score/include/rtems/score/threadimpl.h  |  11 +-
 cpukit/score/include/rtems/score/threadq.h     |  32 ++++--
 cpukit/score/include/rtems/score/threadqimpl.h |  45 ++++++--
 cpukit/score/src/condition.c                   | 151 +++++++++++++------------
 cpukit/score/src/corebarrierwait.c             |   2 +-
 cpukit/score/src/coremsgseize.c                |   2 +-
 cpukit/score/src/coremsgsubmit.c               |   2 +-
 cpukit/score/src/coremutexseize.c              |   2 +-
 cpukit/score/src/corerwlockobtainread.c        |   2 +-
 cpukit/score/src/corerwlockobtainwrite.c       |   2 +-
 cpukit/score/src/futex.c                       |   2 +-
 cpukit/score/src/mpci.c                        |  66 ++++++-----
 cpukit/score/src/mutex.c                       |   2 +-
 cpukit/score/src/semaphore.c                   |   2 +-
 cpukit/score/src/threadqenqueue.c              |  22 ++--
 cpukit/score/src/threadrestart.c               |  35 ++++--
 testsuites/sptests/spfatal03/testcase.h        |   2 +-
 28 files changed, 351 insertions(+), 226 deletions(-)

diff --git a/cpukit/posix/include/rtems/posix/condimpl.h b/cpukit/posix/include/rtems/posix/condimpl.h
index 2ae0a6a..f508519 100644
--- a/cpukit/posix/include/rtems/posix/condimpl.h
+++ b/cpukit/posix/include/rtems/posix/condimpl.h
@@ -35,6 +35,10 @@ extern "C" {
 
 #define POSIX_CONDITION_VARIABLES_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
 
+#define POSIX_CONDITION_VARIABLE_OF_THREAD_QUEUE_QUEUE( queue ) \
+  RTEMS_CONTAINER_OF( \
+    queue, POSIX_Condition_variables_Control, Wait_queue.Queue )
+
 /**
  *  The following defines the information control block used to manage
  *  this class of objects.
diff --git a/cpukit/posix/src/condwaitsupp.c b/cpukit/posix/src/condwaitsupp.c
index 52367f6..4935238 100644
--- a/cpukit/posix/src/condwaitsupp.c
+++ b/cpukit/posix/src/condwaitsupp.c
@@ -27,6 +27,30 @@
 
 THREAD_QUEUE_OBJECT_ASSERT( POSIX_Condition_variables_Control, Wait_queue );
 
+static void _POSIX_Condition_variables_Enqueue_callout(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+)
+{
+  POSIX_Condition_variables_Control *the_cond;
+  int                                mutex_error;
+
+  the_cond = POSIX_CONDITION_VARIABLE_OF_THREAD_QUEUE_QUEUE( queue );
+
+  mutex_error = pthread_mutex_unlock( &the_cond->mutex );
+  if ( mutex_error != 0 ) {
+    /*
+     *  Historically, we ignored the unlock status since the behavior
+     *  is undefined by POSIX. But GNU/Linux returns EPERM in this
+     *  case, so we follow their lead.
+     */
+    _Assert( mutex_error == EINVAL || mutex_error == EPERM );
+    _Thread_queue_Extract( the_thread );
+    the_thread->Wait.return_code = STATUS_NOT_OWNER;
+  }
+}
+
 int _POSIX_Condition_variables_Wait_support(
   pthread_cond_t            *cond,
   pthread_mutex_t           *mutex,
@@ -37,7 +61,6 @@ int _POSIX_Condition_variables_Wait_support(
   Thread_queue_Context               queue_context;
   int                                error;
   int                                mutex_error;
-  Per_CPU_Control                   *cpu_self;
   Thread_Control                    *executing;
   Watchdog_Interval                  timeout;
   bool                               already_timedout;
@@ -91,14 +114,13 @@ int _POSIX_Condition_variables_Wait_support(
   }
 
   the_cond->mutex = *mutex;
-
-  cpu_self = _Thread_Dispatch_disable_critical(
-    &queue_context.Lock_context.Lock_context
-  );
-  executing = _Per_CPU_Get_executing( cpu_self );
+  executing = _Thread_Executing;
 
   if ( !already_timedout ) {
-    _Thread_queue_Context_set_expected_level( &queue_context, 2 );
+    _Thread_queue_Context_set_enqueue_callout(
+      &queue_context,
+      _POSIX_Condition_variables_Enqueue_callout
+    );
     _Thread_queue_Enqueue_critical(
       &the_cond->Wait_queue.Queue,
       POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
@@ -106,34 +128,19 @@ int _POSIX_Condition_variables_Wait_support(
       STATES_WAITING_FOR_CONDITION_VARIABLE,
       &queue_context
     );
+    error = _POSIX_Get_error_after_wait( executing );
   } else {
     _POSIX_Condition_variables_Release( the_cond, &queue_context );
-    executing->Wait.return_code = STATUS_TIMEOUT;
-  }
 
-  mutex_error = pthread_mutex_unlock( mutex );
-  if ( mutex_error != 0 ) {
-    /*
-     *  Historically, we ignored the unlock status since the behavior
-     *  is undefined by POSIX. But GNU/Linux returns EPERM in this
-     *  case, so we follow their lead.
-     */
-    _Assert( mutex_error == EINVAL || mutex_error == EPERM );
-    _Thread_queue_Extract( executing );
-    _Thread_Dispatch_enable( cpu_self );
-    return EPERM;
+    mutex_error = pthread_mutex_unlock( &the_cond->mutex );
+    if ( mutex_error != 0 ) {
+      error = EPERM;
+    } else {
+      error = ETIMEDOUT;
+    }
   }
 
   /*
-   *  Switch ourself out because we blocked as a result of the
-   *  _Thread_queue_Enqueue_critical().
-   */
-
-  _Thread_Dispatch_enable( cpu_self );
-
-  error = _POSIX_Get_error_after_wait( executing );
-
-  /*
    *  If the thread is interrupted, while in the thread queue, by
    *  a POSIX signal, then pthread_cond_wait returns spuriously,
    *  according to the POSIX standard. It means that pthread_cond_wait
@@ -149,10 +156,12 @@ int _POSIX_Condition_variables_Wait_support(
    *  When we get here the dispatch disable level is 0.
    */
 
-  mutex_error = pthread_mutex_lock( mutex );
-  if ( mutex_error != 0 ) {
-    _Assert( mutex_error == EINVAL );
-    return EINVAL;
+  if ( error != EPERM ) {
+    mutex_error = pthread_mutex_lock( mutex );
+    if ( mutex_error != 0 ) {
+      _Assert( mutex_error == EINVAL );
+      error = EINVAL;
+    }
   }
 
   return error;
diff --git a/cpukit/posix/src/nanosleep.c b/cpukit/posix/src/nanosleep.c
index 0fec1e4..8addc87 100644
--- a/cpukit/posix/src/nanosleep.c
+++ b/cpukit/posix/src/nanosleep.c
@@ -40,23 +40,34 @@ static inline int nanosleep_helper(
   Watchdog_Discipline   discipline
 )
 {
-  Thread_Control  *executing;
-  struct timespec stop;
-  int err = 0;
+  Thread_queue_Context queue_context;
+  struct timespec      stop;
+  int                  err;
 
-  executing = _Thread_Get_executing();
+  err = 0;
+
+  _Thread_queue_Context_initialize( &queue_context );
+  _Thread_queue_Context_set_enqueue_callout(
+    &queue_context,
+    _Thread_queue_Enqueue_do_nothing
+  );
+
+  if ( discipline == WATCHDOG_ABSOLUTE ) {
+    _Thread_queue_Context_set_absolute_timeout( &queue_context, ticks );
+  } else {
+    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
+  }
 
   /*
    *  Block for the desired amount of time
    */
-  _Thread_queue_Enqueue(
-    &_Nanosleep_Pseudo_queue,
+  _Thread_queue_Acquire( &_Nanosleep_Pseudo_queue, &queue_context );
+  _Thread_queue_Enqueue_critical(
+    &_Nanosleep_Pseudo_queue.Queue,
     &_Thread_queue_Operations_FIFO,
-    executing,
+    _Thread_Executing,
     STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL,
-    ticks,
-    discipline,
-    1
+    &queue_context
   );
 
   clock_gettime( clock_id, &stop );
diff --git a/cpukit/posix/src/pthreadjoin.c b/cpukit/posix/src/pthreadjoin.c
index 5ed0118..12f49e5 100644
--- a/cpukit/posix/src/pthreadjoin.c
+++ b/cpukit/posix/src/pthreadjoin.c
@@ -39,7 +39,7 @@ static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
   void                 *value;
 
   _Thread_queue_Context_initialize( &queue_context );
-  _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
   _Thread_queue_Context_set_no_timeout( &queue_context );
   the_thread = _Thread_Get( thread, &queue_context.Lock_context.Lock_context );
 
diff --git a/cpukit/posix/src/sigtimedwait.c b/cpukit/posix/src/sigtimedwait.c
index b85d48e..7853dc0 100644
--- a/cpukit/posix/src/sigtimedwait.c
+++ b/cpukit/posix/src/sigtimedwait.c
@@ -156,7 +156,7 @@ int sigtimedwait(
 
   executing->Wait.option          = *set;
   executing->Wait.return_argument = the_info;
-  _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
   _Thread_queue_Enqueue_critical(
     &_POSIX_signals_Wait_queue.Queue,
     POSIX_SIGNALS_TQ_OPERATIONS,
diff --git a/cpukit/rtems/include/rtems/rtems/regionimpl.h b/cpukit/rtems/include/rtems/rtems/regionimpl.h
index 1f31ad4..178b7ea 100644
--- a/cpukit/rtems/include/rtems/rtems/regionimpl.h
+++ b/cpukit/rtems/include/rtems/rtems/regionimpl.h
@@ -35,6 +35,9 @@ extern "C" {
  * @{
  */
 
+#define REGION_OF_THREAD_QUEUE_QUEUE( queue ) \
+  RTEMS_CONTAINER_OF( queue, Region_Control, Wait_queue.Queue )
+
 /**
  *  The following defines the information control block used to
  *  manage this class of objects.
diff --git a/cpukit/rtems/src/regiongetsegment.c b/cpukit/rtems/src/regiongetsegment.c
index ec0c525..dc17b21 100644
--- a/cpukit/rtems/src/regiongetsegment.c
+++ b/cpukit/rtems/src/regiongetsegment.c
@@ -24,6 +24,18 @@
 #include <rtems/score/threadqimpl.h>
 #include <rtems/score/statesimpl.h>
 
+static void _Region_Enqueue_callout(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+)
+{
+  Region_Control *the_region;
+
+  the_region = REGION_OF_THREAD_QUEUE_QUEUE( queue );
+  _Region_Unlock( the_region );
+}
+
 rtems_status_code rtems_region_get_segment(
   rtems_id           id,
   uintptr_t          size,
@@ -64,35 +76,29 @@ rtems_status_code rtems_region_get_segment(
     } else if ( _Options_Is_no_wait( option_set ) ) {
       status = RTEMS_UNSATISFIED;
     } else {
-      Per_CPU_Control *cpu_self;
-      Thread_Control  *executing;
-
-      /*
-       *  Switch from using the memory allocation mutex to using a
-       *  dispatching disabled critical section.  We have to do this
-       *  because this thread is going to block.
-       */
-      /* FIXME: This is a home grown condition variable */
-      cpu_self = _Thread_Dispatch_disable();
-      _Region_Unlock( the_region );
+      Thread_queue_Context  queue_context;
+      Thread_Control       *executing;
 
-      executing  = _Per_CPU_Get_executing( cpu_self );
+      _Thread_queue_Context_initialize( &queue_context );
+      _Thread_queue_Acquire( &the_region->Wait_queue, &queue_context );
 
+      executing  = _Thread_Executing;
       executing->Wait.count           = size;
       executing->Wait.return_argument = segment;
 
-      _Thread_queue_Enqueue(
-        &the_region->Wait_queue,
+      /* FIXME: This is a home grown condition variable */
+      _Thread_queue_Context_set_enqueue_callout(
+        &queue_context,
+        _Region_Enqueue_callout
+      );
+      _Thread_queue_Context_set_relative_timeout( &queue_context, timeout );
+      _Thread_queue_Enqueue_critical(
+        &the_region->Wait_queue.Queue,
         the_region->wait_operations,
         executing,
         STATES_WAITING_FOR_SEGMENT,
-        timeout,
-        WATCHDOG_RELATIVE,
-        2
+        &queue_context
       );
-
-      _Thread_Dispatch_enable( cpu_self );
-
       return _Status_Get_after_wait( executing );
     }
   }
diff --git a/cpukit/rtems/src/taskdelete.c b/cpukit/rtems/src/taskdelete.c
index c3ddc77..de57fb3 100644
--- a/cpukit/rtems/src/taskdelete.c
+++ b/cpukit/rtems/src/taskdelete.c
@@ -25,12 +25,12 @@ rtems_status_code rtems_task_delete(
   rtems_id id
 )
 {
-  Thread_Control   *the_thread;
-  ISR_lock_Context  lock_context;
-  Thread_Control   *executing;
-  Per_CPU_Control  *cpu_self;
+  Thread_Control       *the_thread;
+  Thread_Close_context  context;
+  Thread_Control       *executing;
 
-  the_thread = _Thread_Get( id, &lock_context );
+  _Thread_queue_Context_initialize( &context.Base );
+  the_thread = _Thread_Get( id, &context.Base.Lock_context.Lock_context );
 
   if ( the_thread == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -42,12 +42,16 @@ rtems_status_code rtems_task_delete(
     return RTEMS_INVALID_ID;
   }
 
-  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
-  _ISR_lock_ISR_enable( &lock_context );
-
-  executing = _Per_CPU_Get_executing( cpu_self );
+  executing = _Thread_Executing;
 
   if ( the_thread == executing ) {
+    Per_CPU_Control *cpu_self;
+
+    cpu_self = _Thread_Dispatch_disable_critical(
+      &context.Base.Lock_context.Lock_context
+    );
+    _ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
+
     /*
      * The Classic tasks are neither detached nor joinable.  In case of
      * self deletion, they are detached, otherwise joinable by default.
@@ -57,10 +61,10 @@ rtems_status_code rtems_task_delete(
       THREAD_LIFE_TERMINATING | THREAD_LIFE_DETACHED,
       NULL
     );
+    _Thread_Dispatch_enable( cpu_self );
   } else {
-    _Thread_Close( the_thread, executing );
+    _Thread_Close( the_thread, executing, &context );
   }
 
-  _Thread_Dispatch_enable( cpu_self );
   return RTEMS_SUCCESSFUL;
 }
diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c
index ce69461..902493b 100644
--- a/cpukit/sapi/src/interrtext.c
+++ b/cpukit/sapi/src/interrtext.c
@@ -45,7 +45,7 @@ static const char *const internal_error_text[] = {
   "INTERNAL_ERROR_BAD_ATTRIBUTES",
   "INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY",
   "OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL",
-  "INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE",
+  "OBSOLETE_INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE",
   "INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0",
   "OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP",
   "INTERNAL_ERROR_GXX_KEY_ADD_FAILED",
diff --git a/cpukit/score/include/rtems/score/coresemimpl.h b/cpukit/score/include/rtems/score/coresemimpl.h
index 44e1672..d097b3d 100644
--- a/cpukit/score/include/rtems/score/coresemimpl.h
+++ b/cpukit/score/include/rtems/score/coresemimpl.h
@@ -184,7 +184,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
     return STATUS_UNSATISFIED;
   }
 
-  _Thread_queue_Context_set_expected_level( queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
   _Thread_queue_Enqueue_critical(
     &the_semaphore->Wait_queue.Queue,
     operations,
diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h
index 4e499d8..33c0000 100644
--- a/cpukit/score/include/rtems/score/interr.h
+++ b/cpukit/score/include/rtems/score/interr.h
@@ -154,7 +154,7 @@ typedef enum {
   INTERNAL_ERROR_BAD_ATTRIBUTES,
   INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY,
   OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL,
-  INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE,
+  OBSOLETE_INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE,
   INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0,
   OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP,
   INTERNAL_ERROR_GXX_KEY_ADD_FAILED,
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 6169446..a38b23c 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -245,6 +245,11 @@ void _Thread_Cancel(
   void           *exit_value
 );
 
+typedef struct {
+  Thread_queue_Context  Base;
+  Thread_Control       *cancel;
+} Thread_Close_context;
+
 /**
  * @brief Closes the thread.
  *
@@ -252,7 +257,11 @@ void _Thread_Cancel(
  * case the executing thread is not terminated, then this function waits until
  * the terminating thread reached the zombie state.
  */
-void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing );
+void _Thread_Close(
+  Thread_Control       *the_thread,
+  Thread_Control       *executing,
+  Thread_Close_context *context
+);
 
 RTEMS_INLINE_ROUTINE bool _Thread_Is_ready( const Thread_Control *the_thread )
 {
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index 084161c..df03cfb 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -45,11 +45,28 @@ struct Scheduler_Node;
 
 typedef struct _Thread_Control Thread_Control;
 
+typedef struct Thread_queue_Context Thread_queue_Context;
+
 typedef struct Thread_queue_Queue Thread_queue_Queue;
 
 typedef struct Thread_queue_Operations Thread_queue_Operations;
 
 /**
+ * @brief Thread queue enqueue callout.
+ *
+ * @param[in] queue The actual thread queue.
+ * @param[in] the_thread The thread to enqueue.
+ * @param[in] queue_context The thread queue context of the lock acquire.
+ *
+ * @see _Thread_queue_Context_set_enqueue_callout().
+ */
+typedef void ( *Thread_queue_Enqueue_callout )(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+);
+
+/**
  * @brief Thread queue deadlock callout.
  *
  * @param the_thread The thread that detected the deadlock.
@@ -168,7 +185,7 @@ typedef struct {
  *
  * @see _Thread_queue_Context_initialize().
  */
-typedef struct {
+struct Thread_queue_Context {
   /**
    * @brief The lock context for the thread queue acquire and release
    * operations.
@@ -176,13 +193,14 @@ typedef struct {
   Thread_queue_Lock_context Lock_context;
 
   /**
-   * @brief The expected thread dispatch disable level for
-   * _Thread_queue_Enqueue_critical().
+   * @brief The enqueue callout for _Thread_queue_Enqueue_critical().
+   *
+   * The callout is invoked after the release of the thread queue lock with
+   * thread dispatching disabled.  Afterwards the thread is blocked.
    *
-   * In case the actual thread dispatch disable level is not equal to the
-   * expected level, then a fatal error occurs.
+   * @see _Thread_queue_Enqueue_do_nothing().
    */
-  uint32_t expected_thread_dispatch_disable_level;
+  Thread_queue_Enqueue_callout enqueue_callout;
 
   /**
    * @brief The clock discipline for the interval timeout.
@@ -274,7 +292,7 @@ typedef struct {
    */
   Thread_queue_MP_callout mp_callout;
 #endif
-} Thread_queue_Context;
+};
 
 /**
  * @brief Thread priority queue.
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 45f552a..232e960 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -61,6 +61,12 @@ typedef struct {
   Thread_queue_Queue Queue;
 } Thread_queue_Syslock_queue;
 
+void _Thread_queue_Enqueue_do_nothing(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+);
+
 /**
  * @brief Sets the thread wait return code to STATUS_DEADLOCK.
  */
@@ -82,7 +88,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
 {
 #if defined(RTEMS_DEBUG)
   memset( queue_context, 0, sizeof( *queue_context ) );
-  queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef;
+  queue_context->enqueue_callout = _Thread_queue_Enqueue_do_nothing;
   queue_context->deadlock_callout = _Thread_queue_Deadlock_fatal;
 #else
   (void) queue_context;
@@ -90,21 +96,35 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
 }
 
 /**
- * @brief Sets the expected thread dispatch disable level in the thread queue
- * context.
+ * @brief Sets the enqueue callout in the thread queue context.
  *
  * @param queue_context The thread queue context.
- * @param expected_level The expected thread dispatch disable level.
+ * @param enqueue_callout The enqueue callout.
  *
  * @see _Thread_queue_Enqueue_critical().
  */
 RTEMS_INLINE_ROUTINE void
-_Thread_queue_Context_set_expected_level(
-  Thread_queue_Context *queue_context,
-  uint32_t              expected_level
+_Thread_queue_Context_set_enqueue_callout(
+  Thread_queue_Context         *queue_context,
+  Thread_queue_Enqueue_callout  enqueue_callout
+)
+{
+  queue_context->enqueue_callout = enqueue_callout;
+}
+
+/**
+ * @brief Sets the do nothing enqueue callout in the thread queue context.
+ *
+ * @param queue_context The thread queue context.
+ *
+ * @see _Thread_queue_Enqueue_critical().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_do_nothing_enqueue_callout(
+  Thread_queue_Context *queue_context
 )
 {
-  queue_context->expected_thread_dispatch_disable_level = expected_level;
+  queue_context->enqueue_callout = _Thread_queue_Enqueue_do_nothing;
 }
 
 /**
@@ -562,7 +582,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
  *     mutex->owner = executing;
  *     _Thread_queue_Release( &mutex->Queue, queue_context );
  *   } else {
- *     _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+ *     _Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
  *     _Thread_queue_Enqueue_critical(
  *       &mutex->Queue.Queue,
  *       MUTEX_TQ_OPERATIONS,
@@ -638,12 +658,17 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(
 
   _Thread_queue_Context_initialize( &queue_context );
   _Thread_queue_Acquire( the_thread_queue, &queue_context );
-  _Thread_queue_Context_set_expected_level( &queue_context, expected_level );
+  _Thread_queue_Context_set_enqueue_callout(
+    &queue_context,
+    _Thread_queue_Enqueue_do_nothing
+  );
+
   if ( discipline == WATCHDOG_ABSOLUTE ) {
     _Thread_queue_Context_set_absolute_timeout( &queue_context, timeout );
   } else {
     _Thread_queue_Context_set_relative_timeout( &queue_context, timeout );
   }
+
   _Thread_queue_Enqueue_critical(
     &the_thread_queue->Queue,
     operations,
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
index 36ef989..917f119 100644
--- a/cpukit/score/src/condition.c
+++ b/cpukit/score/src/condition.c
@@ -80,31 +80,48 @@ static void _Condition_Queue_release(
   );
 }
 
-static Per_CPU_Control *_Condition_Do_wait(
+typedef struct {
+  Thread_queue_Context   Base;
+  struct _Mutex_Control *mutex;
+} Condition_Enqueue_context;
+
+static void _Condition_Enqueue_callout(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+)
+{
+  Condition_Enqueue_context *context;
+
+  context = (Condition_Enqueue_context *) queue_context;
+  _Mutex_Release( context->mutex );
+}
+
+static Thread_Control *_Condition_Do_wait(
   struct _Condition_Control *_condition,
-  Thread_queue_Context      *queue_context
+  struct _Mutex_Control     *_mutex,
+  Condition_Enqueue_context *context
 )
 {
   Condition_Control *condition;
   Thread_Control    *executing;
-  Per_CPU_Control   *cpu_self;
 
+  context->mutex = _mutex;
   condition = _Condition_Get( _condition );
-  executing = _Condition_Queue_acquire_critical( condition, queue_context );
-  cpu_self = _Thread_Dispatch_disable_critical(
-    &queue_context->Lock_context.Lock_context
+  executing = _Condition_Queue_acquire_critical( condition, &context->Base );
+  _Thread_queue_Context_set_enqueue_callout(
+    &context->Base,
+    _Condition_Enqueue_callout
   );
-
-  _Thread_queue_Context_set_expected_level( queue_context, 2 );
   _Thread_queue_Enqueue_critical(
     &condition->Queue.Queue,
     CONDITION_TQ_OPERATIONS,
     executing,
     STATES_WAITING_FOR_SYS_LOCK_CONDITION,
-    queue_context
+    &context->Base
   );
 
-  return cpu_self;
+  return executing;
 }
 
 void _Condition_Wait(
@@ -112,19 +129,12 @@ void _Condition_Wait(
   struct _Mutex_Control     *_mutex
 )
 {
-  Thread_queue_Context  queue_context;
-  Per_CPU_Control      *cpu_self;
-
-  _Thread_queue_Context_initialize( &queue_context );
-  _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
-  _Thread_queue_Context_set_no_timeout( &queue_context );
-  cpu_self = _Condition_Do_wait(
-    _condition,
-    &queue_context
-  );
+  Condition_Enqueue_context context;
 
-  _Mutex_Release( _mutex );
-  _Thread_Dispatch_enable( cpu_self );
+  _Thread_queue_Context_initialize( &context.Base );
+  _ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );
+  _Thread_queue_Context_set_no_timeout( &context.Base );
+  _Condition_Do_wait( _condition, _mutex, &context );
   _Mutex_Acquire( _mutex );
 }
 
@@ -134,57 +144,59 @@ int _Condition_Wait_timed(
   const struct timespec     *abstime
 )
 {
-  Thread_queue_Context  queue_context;
-  Per_CPU_Control      *cpu_self;
-  Thread_Control       *executing;
-  int                   eno;
-  Watchdog_Interval     ticks;
+  Condition_Enqueue_context  context;
+  Thread_Control            *executing;
+  int                        eno;
+  Watchdog_Interval          ticks;
 
-  _Thread_queue_Context_initialize( &queue_context );
-  _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
+  _Thread_queue_Context_initialize( &context.Base );
+  _ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );
 
   switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
     case TOD_ABSOLUTE_TIMEOUT_INVALID:
-      _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+      _ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
       return EINVAL;
     case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
     case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
-      _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+      _ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
       return ETIMEDOUT;
     default:
       break;
   }
 
-  _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
-  cpu_self = _Condition_Do_wait( _condition, &queue_context );
-
-  _Mutex_Release( _mutex );
-  executing = cpu_self->executing;
-  _Thread_Dispatch_enable( cpu_self );
+  _Thread_queue_Context_set_relative_timeout( &context.Base, ticks );
+  executing = _Condition_Do_wait( _condition, _mutex, &context );
   eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
   _Mutex_Acquire( _mutex );
 
   return eno;
 }
 
-void _Condition_Wait_recursive(
-  struct _Condition_Control       *_condition,
+static unsigned int _Condition_Unnest_mutex(
   struct _Mutex_recursive_Control *_mutex
 )
 {
-  Thread_queue_Context  queue_context;
-  Per_CPU_Control      *cpu_self;
-  unsigned int          nest_level;
-
-  _Thread_queue_Context_initialize( &queue_context );
-  _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
-  _Thread_queue_Context_set_no_timeout( &queue_context );
-  cpu_self = _Condition_Do_wait( _condition, &queue_context );
+  unsigned int nest_level;
 
   nest_level = _mutex->_nest_level;
   _mutex->_nest_level = 0;
-  _Mutex_recursive_Release( _mutex );
-  _Thread_Dispatch_enable( cpu_self );
+
+  return nest_level;
+}
+
+void _Condition_Wait_recursive(
+  struct _Condition_Control       *_condition,
+  struct _Mutex_recursive_Control *_mutex
+)
+{
+  Condition_Enqueue_context context;
+  unsigned int              nest_level;
+
+  _Thread_queue_Context_initialize( &context.Base );
+  _ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );
+  _Thread_queue_Context_set_no_timeout( &context.Base );
+  nest_level = _Condition_Unnest_mutex( _mutex );
+  _Condition_Do_wait( _condition, &_mutex->_Mutex, &context );
   _Mutex_recursive_Acquire( _mutex );
   _mutex->_nest_level = nest_level;
 }
@@ -195,36 +207,29 @@ int _Condition_Wait_recursive_timed(
   const struct timespec           *abstime
 )
 {
-  Thread_queue_Context   queue_context;
-  Per_CPU_Control       *cpu_self;
-  Thread_Control        *executing;
-  int                    eno;
-  unsigned int           nest_level;
-  Watchdog_Interval      ticks;
+  Condition_Enqueue_context  context;
+  Thread_Control            *executing;
+  int                        eno;
+  unsigned int               nest_level;
+  Watchdog_Interval          ticks;
 
-  _Thread_queue_Context_initialize( &queue_context );
-  _ISR_lock_ISR_disable( &queue_context.Lock_context.Lock_context );
+  _Thread_queue_Context_initialize( &context.Base );
+  _ISR_lock_ISR_disable( &context.Base.Lock_context.Lock_context );
 
   switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
     case TOD_ABSOLUTE_TIMEOUT_INVALID:
-      _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+      _ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
       return EINVAL;
     case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
     case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
-      _ISR_lock_ISR_enable( &queue_context.Lock_context.Lock_context );
+      _ISR_lock_ISR_enable( &context.Base.Lock_context.Lock_context );
       return ETIMEDOUT;
     default:
       break;
   }
-
-  _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
-  cpu_self = _Condition_Do_wait( _condition, &queue_context );
-
-  nest_level = _mutex->_nest_level;
-  _mutex->_nest_level = 0;
-  _Mutex_recursive_Release( _mutex );
-  executing = cpu_self->executing;
-  _Thread_Dispatch_enable( cpu_self );
+  _Thread_queue_Context_set_relative_timeout( &context.Base, ticks );
+  nest_level = _Condition_Unnest_mutex( _mutex );
+  executing = _Condition_Do_wait( _condition, &_mutex->_Mutex, &context );
   eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
   _Mutex_recursive_Acquire( _mutex );
   _mutex->_nest_level = nest_level;
@@ -235,7 +240,7 @@ int _Condition_Wait_recursive_timed(
 typedef struct {
   Thread_queue_Context Base;
   int                  count;
-} Condition_Context;
+} Condition_Flush_context;
 
 static Thread_Control *_Condition_Flush_filter(
   Thread_Control       *the_thread,
@@ -243,9 +248,9 @@ static Thread_Control *_Condition_Flush_filter(
   Thread_queue_Context *queue_context
 )
 {
-  Condition_Context *context;
+  Condition_Flush_context *context;
 
-  context = (Condition_Context *) queue_context;
+  context = (Condition_Flush_context *) queue_context;
 
   if ( context->count <= 0 ) {
     return NULL;
@@ -258,8 +263,8 @@ static Thread_Control *_Condition_Flush_filter(
 
 static void _Condition_Wake( struct _Condition_Control *_condition, int count )
 {
-  Condition_Control *condition;
-  Condition_Context  context;
+  Condition_Control       *condition;
+  Condition_Flush_context  context;
 
   condition = _Condition_Get( _condition );
   _Thread_queue_Context_initialize( &context.Base );
diff --git a/cpukit/score/src/corebarrierwait.c b/cpukit/score/src/corebarrierwait.c
index 7e46c93..05f7876 100644
--- a/cpukit/score/src/corebarrierwait.c
+++ b/cpukit/score/src/corebarrierwait.c
@@ -44,7 +44,7 @@ Status_Control _CORE_barrier_Seize(
     return STATUS_BARRIER_AUTOMATICALLY_RELEASED;
   } else {
     the_barrier->number_of_waiting_threads = number_of_waiting_threads;
-    _Thread_queue_Context_set_expected_level( queue_context, 1 );
+    _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
     _Thread_queue_Enqueue_critical(
       &the_barrier->Wait_queue.Queue,
       CORE_BARRIER_TQ_OPERATIONS,
diff --git a/cpukit/score/src/coremsgseize.c b/cpukit/score/src/coremsgseize.c
index d86afd0..6c98b95 100644
--- a/cpukit/score/src/coremsgseize.c
+++ b/cpukit/score/src/coremsgseize.c
@@ -113,7 +113,7 @@ Status_Control _CORE_message_queue_Seize(
   executing->Wait.return_argument = size_p;
   /* Wait.count will be filled in with the message priority */
 
-  _Thread_queue_Context_set_expected_level( queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
   _Thread_queue_Enqueue_critical(
     &the_message_queue->Wait_queue.Queue,
     the_message_queue->operations,
diff --git a/cpukit/score/src/coremsgsubmit.c b/cpukit/score/src/coremsgsubmit.c
index 97b4382..330f43c 100644
--- a/cpukit/score/src/coremsgsubmit.c
+++ b/cpukit/score/src/coremsgsubmit.c
@@ -131,7 +131,7 @@ Status_Control _CORE_message_queue_Submit(
     executing->Wait.option = (uint32_t) size;
     executing->Wait.count = submit_type;
 
-    _Thread_queue_Context_set_expected_level( queue_context, 1 );
+    _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
     _Thread_queue_Enqueue_critical(
       &the_message_queue->Wait_queue.Queue,
       the_message_queue->operations,
diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c
index 01a5ffb..7e54381 100644
--- a/cpukit/score/src/coremutexseize.c
+++ b/cpukit/score/src/coremutexseize.c
@@ -32,7 +32,7 @@ Status_Control _CORE_mutex_Seize_slow(
 )
 {
   if ( wait ) {
-    _Thread_queue_Context_set_expected_level( queue_context, 1 );
+    _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
     _Thread_queue_Context_set_deadlock_callout(
       queue_context,
       _Thread_queue_Deadlock_status
diff --git a/cpukit/score/src/corerwlockobtainread.c b/cpukit/score/src/corerwlockobtainread.c
index 09b26af..4444afc 100644
--- a/cpukit/score/src/corerwlockobtainread.c
+++ b/cpukit/score/src/corerwlockobtainread.c
@@ -78,7 +78,7 @@ Status_Control _CORE_RWLock_Seize_for_reading(
 
   executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ;
 
-  _Thread_queue_Context_set_expected_level( queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
   _Thread_queue_Enqueue_critical(
      &the_rwlock->Wait_queue.Queue,
      CORE_RWLOCK_TQ_OPERATIONS,
diff --git a/cpukit/score/src/corerwlockobtainwrite.c b/cpukit/score/src/corerwlockobtainwrite.c
index 9aac5e7..0e4290e 100644
--- a/cpukit/score/src/corerwlockobtainwrite.c
+++ b/cpukit/score/src/corerwlockobtainwrite.c
@@ -66,7 +66,7 @@ Status_Control _CORE_RWLock_Seize_for_writing(
 
   executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
 
-  _Thread_queue_Context_set_expected_level( queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
   _Thread_queue_Enqueue_critical(
      &the_rwlock->Wait_queue.Queue,
      CORE_RWLOCK_TQ_OPERATIONS,
diff --git a/cpukit/score/src/futex.c b/cpukit/score/src/futex.c
index 38c3be4..d99accb 100644
--- a/cpukit/score/src/futex.c
+++ b/cpukit/score/src/futex.c
@@ -92,7 +92,7 @@ int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
   executing = _Futex_Queue_acquire_critical( futex, &queue_context );
 
   if ( *uaddr == val ) {
-    _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+    _Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
     _Thread_queue_Context_set_no_timeout( &queue_context );
     _Thread_queue_Context_set_ISR_level( &queue_context, level );
     _Thread_queue_Enqueue_critical(
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index 451592c..5330500 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -226,47 +226,59 @@ void _MPCI_Send_process_packet (
   (*_MPCI_table->send_packet)( destination, the_packet );
 }
 
+static void _MPCI_Enqueue_callout(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+)
+{
+  _Thread_Dispatch_unnest( _Per_CPU_Get() );
+}
+
 Status_Control _MPCI_Send_request_packet(
   uint32_t          destination,
   MP_packet_Prefix *the_packet,
   States_Control    extra_state
 )
 {
-  Per_CPU_Control *cpu_self;
-  Thread_Control  *executing;
-
-  cpu_self = _Thread_Dispatch_disable();
-
-    executing = _Per_CPU_Get_executing( cpu_self );
+  Per_CPU_Control      *cpu_self;
+  Thread_queue_Context  queue_context;
+  Thread_Control       *executing;
 
-    the_packet->source_tid      = executing->Object.id;
-    the_packet->source_priority = _Thread_Get_priority( executing );
-    the_packet->to_convert =
-       ( the_packet->to_convert - sizeof(MP_packet_Prefix) ) / sizeof(uint32_t);
+  /*
+   *  See if we need a default timeout
+   */
 
-    executing->Wait.remote_id = the_packet->id;
+  if (the_packet->timeout == MPCI_DEFAULT_TIMEOUT)
+      the_packet->timeout = _MPCI_table->default_timeout;
 
-    (*_MPCI_table->send_packet)( destination, the_packet );
+  _Thread_queue_Context_initialize( &queue_context );
+  _Thread_queue_Context_set_enqueue_callout(
+    &queue_context,
+    _MPCI_Enqueue_callout
+  );
+  _Thread_queue_Context_set_relative_timeout( &queue_context, the_packet->timeout );
 
-    /*
-     *  See if we need a default timeout
-     */
+  cpu_self = _Thread_Dispatch_disable();
 
-    if (the_packet->timeout == MPCI_DEFAULT_TIMEOUT)
-        the_packet->timeout = _MPCI_table->default_timeout;
+  executing = _Per_CPU_Get_executing( cpu_self );
+  executing->Wait.remote_id = the_packet->id;
 
-    _Thread_queue_Enqueue(
-      &_MPCI_Remote_blocked_threads,
-      &_Thread_queue_Operations_FIFO,
-      executing,
-      STATES_WAITING_FOR_RPC_REPLY | extra_state,
-      the_packet->timeout,
-      WATCHDOG_RELATIVE,
-      2
-    );
+  the_packet->source_tid      = executing->Object.id;
+  the_packet->source_priority = _Thread_Get_priority( executing );
+  the_packet->to_convert =
+     ( the_packet->to_convert - sizeof(MP_packet_Prefix) ) / sizeof(uint32_t);
 
-  _Thread_Dispatch_enable( cpu_self );
+  (*_MPCI_table->send_packet)( destination, the_packet );
 
+  _Thread_queue_Acquire( &_MPCI_Remote_blocked_threads, &queue_context );
+  _Thread_queue_Enqueue_critical(
+    &_MPCI_Remote_blocked_threads.Queue,
+    &_Thread_queue_Operations_FIFO,
+    executing,
+    STATES_WAITING_FOR_RPC_REPLY | extra_state,
+    &queue_context
+  );
   return _Thread_Wait_get_status( executing );
 }
 
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 4e1d9ce..344d787 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -109,7 +109,7 @@ static void _Mutex_Acquire_slow(
   Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Context_set_expected_level( queue_context, 1 );
+  _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
   _Thread_queue_Context_set_deadlock_callout(
     queue_context,
     _Thread_queue_Deadlock_fatal
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index 29acef4..db5e079 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -103,7 +103,7 @@ void _Semaphore_Wait( struct _Semaphore_Control *_sem )
     sem->count = count - 1;
     _Semaphore_Queue_release( sem, level, &queue_context );
   } else {
-    _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+    _Thread_queue_Context_set_do_nothing_enqueue_callout( &queue_context );
     _Thread_queue_Context_set_no_timeout( &queue_context );
     _Thread_queue_Context_set_ISR_level( &queue_context, level );
     _Thread_queue_Enqueue_critical(
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 84d1765..ce0e80c 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -356,6 +356,15 @@ bool _Thread_queue_Path_acquire_critical(
   return true;
 }
 
+void _Thread_queue_Enqueue_do_nothing(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+)
+{
+  /* Do nothing */
+}
+
 void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
 {
   the_thread->Wait.return_code = STATUS_DEADLOCK;
@@ -442,16 +451,7 @@ void _Thread_queue_Enqueue_critical(
   );
   _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
 
-  if (
-    cpu_self->thread_dispatch_disable_level
-      != queue_context->expected_thread_dispatch_disable_level
-  ) {
-    _Terminate(
-      INTERNAL_ERROR_CORE,
-      false,
-      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
-    );
-  }
+  ( *queue_context->enqueue_callout )( queue, the_thread, queue_context );
 
   /*
    *  Set the blocking state for this thread queue in the thread.
@@ -482,7 +482,7 @@ void _Thread_queue_Enqueue_critical(
   }
 
   _Thread_Priority_update( queue_context );
-  _Thread_Dispatch_enable( cpu_self );
+  _Thread_Dispatch_direct( cpu_self );
 }
 
 #if defined(RTEMS_SMP)
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index a5ed837..46c8e4d 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -514,21 +514,40 @@ void _Thread_Cancel(
   _Thread_Dispatch_enable( cpu_self );
 }
 
-void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
+static void _Thread_Close_enqueue_callout(
+  Thread_queue_Queue   *queue,
+  Thread_Control       *the_thread,
+  Thread_queue_Context *queue_context
+)
 {
-  Thread_queue_Context queue_context;
+  Thread_Close_context *context;
+
+  context = (Thread_Close_context *) queue_context;
+  _Thread_Cancel( context->cancel, the_thread, NULL );
+}
 
-  _Thread_queue_Context_initialize( &queue_context );
-  _Thread_queue_Context_set_expected_level( &queue_context, 2 );
-  _Thread_queue_Context_set_no_timeout( &queue_context );
-  _Thread_State_acquire( the_thread, &queue_context.Lock_context.Lock_context );
+void _Thread_Close(
+  Thread_Control       *the_thread,
+  Thread_Control       *executing,
+  Thread_Close_context *context
+)
+{
+  context->cancel = the_thread;
+  _Thread_queue_Context_set_enqueue_callout(
+    &context->Base,
+    _Thread_Close_enqueue_callout
+  );
+  _Thread_queue_Context_set_no_timeout( &context->Base );
+  _Thread_State_acquire_critical(
+    the_thread,
+    &context->Base.Lock_context.Lock_context
+  );
   _Thread_Join(
     the_thread,
     STATES_WAITING_FOR_JOIN,
     executing,
-    &queue_context
+    &context->Base
   );
-  _Thread_Cancel( the_thread, executing, NULL );
 }
 
 void _Thread_Exit(
diff --git a/testsuites/sptests/spfatal03/testcase.h b/testsuites/sptests/spfatal03/testcase.h
index 34a20f6..7b98cc3 100644
--- a/testsuites/sptests/spfatal03/testcase.h
+++ b/testsuites/sptests/spfatal03/testcase.h
@@ -16,7 +16,7 @@
 #define FATAL_ERROR_EXPECTED_SOURCE      INTERNAL_ERROR_CORE
 #define FATAL_ERROR_EXPECTED_IS_INTERNAL FALSE
 #define FATAL_ERROR_EXPECTED_ERROR       \
-          INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
+          INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL
 
 void force_error(void)
 {


