[rtems commit] score: Move thread queue MP callout to context

Sebastian Huber sebh at rtems.org
Wed May 25 10:45:37 UTC 2016


Module:    rtems
Branch:    master
Commit:    631b3c8967a329cdd53e54365e4e4c0aa93a4251
Changeset: http://git.rtems.org/rtems/commit/?id=631b3c8967a329cdd53e54365e4e4c0aa93a4251

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Mon May 23 11:40:18 2016 +0200

score: Move thread queue MP callout to context

Drop the multiprocessing (MP) dependent callout parameter from the
thread queue extract, dequeue, flush and unblock methods.  Merge this
parameter with the lock context into the new structure
Thread_queue_Context.  This helps to get rid of the conditionally
compiled method call helpers.

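For readers skimming the diff below, here is a minimal sketch of what the new
Thread_queue_Context bundles together, inferred from how the patch uses it
(callers reach the ISR lock via queue_context.Lock_context and pass the MP
callout to _Thread_queue_Context_initialize()).  The authoritative definition
is the one added to cpukit/score/include/rtems/score/threadq.h; the member
name mp_callout and the exact conditional layout shown here are assumptions
for illustration only.

    /* Sketch only -- see threadq.h in this commit for the real definition. */
    typedef struct {
      /* ISR lock context used while acquiring/releasing the thread queue */
      ISR_lock_Context Lock_context;
    #if defined(RTEMS_MULTIPROCESSING)
      /* MP callout that was previously passed as a separate parameter */
      Thread_queue_MP_callout mp_callout;
    #endif
    } Thread_queue_Context;

    /* Sketch of the initializer used throughout the diff: it records the
     * callout (if MP is enabled) so extract/dequeue/flush/unblock no longer
     * need their own callout argument.
     */
    RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
      Thread_queue_Context    *queue_context,
      Thread_queue_MP_callout  mp_callout
    )
    {
    #if defined(RTEMS_MULTIPROCESSING)
      queue_context->mp_callout = mp_callout;
    #else
      (void) queue_context;
      (void) mp_callout;
    #endif
    }

The typical call sequence visible in the hunks is: initialize the context,
disable interrupts via &queue_context.Lock_context, then hand &queue_context
to the core operation (e.g. _CORE_mutex_Seize() or _CORE_mutex_Surrender()),
which releases the lock and, on MP configurations, invokes the stored callout.
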
---

 cpukit/libnetworking/rtems/rtems_glue.c            |  15 +-
 cpukit/posix/include/rtems/posix/barrierimpl.h     |   7 +-
 cpukit/posix/include/rtems/posix/condimpl.h        |  15 +-
 cpukit/posix/include/rtems/posix/mqueueimpl.h      |  14 +-
 cpukit/posix/include/rtems/posix/muteximpl.h       |   4 +-
 cpukit/posix/include/rtems/posix/posixapi.h        |   8 +-
 cpukit/posix/include/rtems/posix/rwlockimpl.h      |   4 +-
 cpukit/posix/include/rtems/posix/semaphoreimpl.h   |   9 +-
 cpukit/posix/src/conddestroy.c                     |  10 +-
 cpukit/posix/src/condget.c                         |   6 +-
 cpukit/posix/src/condsignalsupp.c                  |  11 +-
 cpukit/posix/src/condwaitsupp.c                    |  21 +-
 cpukit/posix/src/mqueueclose.c                     |  10 +-
 cpukit/posix/src/mqueuedeletesupp.c                |  10 +-
 cpukit/posix/src/mqueuegetattr.c                   |  10 +-
 cpukit/posix/src/mqueuenotify.c                    |  16 +-
 cpukit/posix/src/mqueuerecvsupp.c                  |  14 +-
 cpukit/posix/src/mqueuesendsupp.c                  |  13 +-
 cpukit/posix/src/mqueuesetattr.c                   |  10 +-
 cpukit/posix/src/mqueueunlink.c                    |   6 +-
 cpukit/posix/src/mutexdestroy.c                    |  14 +-
 cpukit/posix/src/mutexget.c                        |   6 +-
 cpukit/posix/src/mutexgetprioceiling.c             |  10 +-
 cpukit/posix/src/mutexlocksupp.c                   |  10 +-
 cpukit/posix/src/mutexsetprioceiling.c             |   7 +-
 cpukit/posix/src/mutexunlock.c                     |  14 +-
 cpukit/posix/src/pbarrierdestroy.c                 |  10 +-
 cpukit/posix/src/pbarrierwait.c                    |   7 +-
 cpukit/posix/src/prwlockdestroy.c                  |  10 +-
 cpukit/posix/src/prwlockinit.c                     |   6 +-
 cpukit/posix/src/prwlockrdlock.c                   |   6 +-
 cpukit/posix/src/prwlocktimedrdlock.c              |   6 +-
 cpukit/posix/src/prwlocktimedwrlock.c              |   6 +-
 cpukit/posix/src/prwlocktryrdlock.c                |   6 +-
 cpukit/posix/src/prwlocktrywrlock.c                |   6 +-
 cpukit/posix/src/prwlockunlock.c                   |   6 +-
 cpukit/posix/src/prwlockwrlock.c                   |   6 +-
 cpukit/posix/src/semaphoredeletesupp.c             |  10 +-
 cpukit/posix/src/semaphorewaitsupp.c               |   6 +-
 cpukit/posix/src/semclose.c                        |   8 +-
 cpukit/posix/src/semdestroy.c                      |  10 +-
 cpukit/posix/src/semgetvalue.c                     |  15 +-
 cpukit/posix/src/sempost.c                         |   7 +-
 cpukit/posix/src/semunlink.c                       |   8 +-
 cpukit/rtems/include/rtems/rtems/barrierimpl.h     |   7 +-
 cpukit/rtems/include/rtems/rtems/messageimpl.h     |  21 +-
 cpukit/rtems/include/rtems/rtems/semimpl.h         |  21 +-
 cpukit/rtems/src/barrierdelete.c                   |  10 +-
 cpukit/rtems/src/barrierrelease.c                  |  11 +-
 cpukit/rtems/src/barrierwait.c                     |  11 +-
 cpukit/rtems/src/msgmp.c                           |  12 +-
 cpukit/rtems/src/msgqbroadcast.c                   |  11 +-
 cpukit/rtems/src/msgqdelete.c                      |  13 +-
 cpukit/rtems/src/msgqflush.c                       |   6 +-
 cpukit/rtems/src/msgqgetnumberpending.c            |   8 +-
 cpukit/rtems/src/msgqreceive.c                     |   8 +-
 cpukit/rtems/src/msgqsend.c                        |  13 +-
 cpukit/rtems/src/msgqurgent.c                      |  13 +-
 cpukit/rtems/src/semdelete.c                       |  30 +-
 cpukit/rtems/src/semflush.c                        |  24 +-
 cpukit/rtems/src/semobtain.c                       |  18 +-
 cpukit/rtems/src/semrelease.c                      |  16 +-
 cpukit/rtems/src/semsetpriority.c                  |  28 +-
 cpukit/score/include/rtems/score/corebarrierimpl.h | 150 +++------
 cpukit/score/include/rtems/score/coremsg.h         |   2 +-
 cpukit/score/include/rtems/score/coremsgimpl.h     | 341 ++++++---------------
 cpukit/score/include/rtems/score/coremuteximpl.h   | 135 ++++----
 cpukit/score/include/rtems/score/corerwlockimpl.h  |  42 +--
 cpukit/score/include/rtems/score/coresemimpl.h     | 166 +++++-----
 cpukit/score/include/rtems/score/mrspimpl.h        |  97 +++---
 cpukit/score/include/rtems/score/threadq.h         |  12 +
 cpukit/score/include/rtems/score/threadqimpl.h     | 191 +++++-------
 cpukit/score/src/apimutexlock.c                    |   9 +-
 cpukit/score/src/apimutexunlock.c                  |  15 +-
 cpukit/score/src/condition.c                       |  36 +--
 cpukit/score/src/corebarrier.c                     |   6 +-
 cpukit/score/src/corebarrierrelease.c              |  10 +-
 cpukit/score/src/corebarrierwait.c                 |  21 +-
 cpukit/score/src/coremsgbroadcast.c                |  18 +-
 cpukit/score/src/coremsgclose.c                    |  16 +-
 cpukit/score/src/coremsgflush.c                    |   6 +-
 cpukit/score/src/coremsgseize.c                    |  14 +-
 cpukit/score/src/coremsgsubmit.c                   |  26 +-
 cpukit/score/src/coremutex.c                       |  12 +-
 cpukit/score/src/coremutexsurrender.c              |  27 +-
 cpukit/score/src/corerwlockobtainread.c            |  20 +-
 cpukit/score/src/corerwlockobtainwrite.c           |  18 +-
 cpukit/score/src/corerwlockrelease.c               |  19 +-
 cpukit/score/src/coresem.c                         |  12 +-
 cpukit/score/src/futex.c                           |  31 +-
 cpukit/score/src/mpci.c                            |  19 +-
 cpukit/score/src/mutex.c                           |  49 +--
 cpukit/score/src/semaphore.c                       |  14 +-
 cpukit/score/src/threadqenqueue.c                  |  51 +--
 cpukit/score/src/threadqflush.c                    |  27 +-
 cpukit/score/src/threadrestart.c                   |  31 +-
 testsuites/sptests/spintrcritical22/init.c         |   6 +-
 testsuites/tmtests/tm26/task1.c                    |  11 +-
 98 files changed, 1048 insertions(+), 1306 deletions(-)

diff --git a/cpukit/libnetworking/rtems/rtems_glue.c b/cpukit/libnetworking/rtems/rtems_glue.c
index 9ece2b1..edf551e 100644
--- a/cpukit/libnetworking/rtems/rtems_glue.c
+++ b/cpukit/libnetworking/rtems/rtems_glue.c
@@ -371,18 +371,19 @@ void
 rtems_bsdnet_semaphore_obtain (void)
 {
 #ifdef RTEMS_FAST_MUTEX
-	ISR_lock_Context lock_context;
+	Thread_queue_Context queue_context;
 	Thread_Control *executing;
-	_ISR_lock_ISR_disable(&lock_context);
 	if (!the_networkSemaphore)
 		rtems_panic ("rtems-net: network sema obtain: network not initialised\n");
+	_Thread_queue_Context_initialize(&queue_context, NULL);
+	_ISR_lock_ISR_disable(&queue_context.Lock_context);
 	executing = _Thread_Executing;
 	_CORE_mutex_Seize (
 		&the_networkSemaphore->Core_control.mutex,
 		executing,
 		1,		/* wait */
 		0,		/* forever */
-		&lock_context
+		&queue_context
 		);
 	if (executing->Wait.return_code)
 		rtems_panic ("rtems-net: can't obtain network sema: %d\n",
@@ -404,16 +405,16 @@ void
 rtems_bsdnet_semaphore_release (void)
 {
 #ifdef RTEMS_FAST_MUTEX
-        ISR_lock_Context lock_context;
+	Thread_queue_Context queue_context;
 	CORE_mutex_Status status;
 
 	if (!the_networkSemaphore)
 		rtems_panic ("rtems-net: network sema obtain: network not initialised\n");
-        _ISR_lock_ISR_disable(&lock_context);
+	_Thread_queue_Context_initialize(&queue_context, NULL);
+	_ISR_lock_ISR_disable(&queue_context.Lock_context);
 	status = _CORE_mutex_Surrender (
 		&the_networkSemaphore->Core_control.mutex,
-		NULL,
-                &lock_context
+		&queue_context
 		);
 	if (status != CORE_MUTEX_STATUS_SUCCESSFUL)
 		rtems_panic ("rtems-net: can't release network sema: %i\n");
diff --git a/cpukit/posix/include/rtems/posix/barrierimpl.h b/cpukit/posix/include/rtems/posix/barrierimpl.h
index 5173531..e04f135 100644
--- a/cpukit/posix/include/rtems/posix/barrierimpl.h
+++ b/cpukit/posix/include/rtems/posix/barrierimpl.h
@@ -77,14 +77,15 @@ RTEMS_INLINE_ROUTINE void _POSIX_Barrier_Free (
   _Objects_Free( &_POSIX_Barrier_Information, &the_barrier->Object );
 }
 
-RTEMS_INLINE_ROUTINE POSIX_Barrier_Control *_POSIX_Barrier_Get (
+RTEMS_INLINE_ROUTINE POSIX_Barrier_Control *_POSIX_Barrier_Get(
   const pthread_barrier_t *barrier,
-  ISR_lock_Context        *lock_context
+  Thread_queue_Context    *queue_context
 )
 {
+  _Thread_queue_Context_initialize( queue_context, NULL );
   return (POSIX_Barrier_Control *) _Objects_Get(
     (Objects_Id) *barrier,
-    lock_context,
+    &queue_context->Lock_context,
     &_POSIX_Barrier_Information
   );
 }
diff --git a/cpukit/posix/include/rtems/posix/condimpl.h b/cpukit/posix/include/rtems/posix/condimpl.h
index b178869..736f06b 100644
--- a/cpukit/posix/include/rtems/posix/condimpl.h
+++ b/cpukit/posix/include/rtems/posix/condimpl.h
@@ -63,18 +63,21 @@ RTEMS_INLINE_ROUTINE void _POSIX_Condition_variables_Destroy(
 
 RTEMS_INLINE_ROUTINE void _POSIX_Condition_variables_Acquire_critical(
   POSIX_Condition_variables_Control *the_cond,
-  ISR_lock_Context                  *lock_context
+  Thread_queue_Context              *queue_context
 )
 {
-  _Thread_queue_Acquire_critical( &the_cond->Wait_queue, lock_context );
+  _Thread_queue_Acquire_critical(
+    &the_cond->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _POSIX_Condition_variables_Release(
   POSIX_Condition_variables_Control *the_cond,
-  ISR_lock_Context                  *lock_context
+  Thread_queue_Context              *queue_context
 )
 {
-  _Thread_queue_Release( &the_cond->Wait_queue, lock_context );
+  _Thread_queue_Release( &the_cond->Wait_queue, &queue_context->Lock_context );
 }
 
 /**
@@ -107,8 +110,8 @@ RTEMS_INLINE_ROUTINE void _POSIX_Condition_variables_Free (
 }
 
 POSIX_Condition_variables_Control *_POSIX_Condition_variables_Get(
-  pthread_cond_t   *cond,
-  ISR_lock_Context *lock_context
+  pthread_cond_t       *cond,
+  Thread_queue_Context *queue_context
 );
 
 /**
diff --git a/cpukit/posix/include/rtems/posix/mqueueimpl.h b/cpukit/posix/include/rtems/posix/mqueueimpl.h
index 3185970..2317358 100644
--- a/cpukit/posix/include/rtems/posix/mqueueimpl.h
+++ b/cpukit/posix/include/rtems/posix/mqueueimpl.h
@@ -43,7 +43,7 @@ extern Objects_Information _POSIX_Message_queue_Information;
  */
 void _POSIX_Message_queue_Delete(
   POSIX_Message_queue_Control *the_mq,
-  ISR_lock_Context            *lock_context
+  Thread_queue_Context        *queue_context
 );
 
 /*@
@@ -107,12 +107,16 @@ RTEMS_INLINE_ROUTINE void _POSIX_Message_queue_Free(
 
 
 RTEMS_INLINE_ROUTINE POSIX_Message_queue_Control *_POSIX_Message_queue_Get(
-  Objects_Id        id,
-  ISR_lock_Context *lock_context
+  Objects_Id            id,
+  Thread_queue_Context *queue_context
 )
 {
-  return (POSIX_Message_queue_Control *)
-    _Objects_Get( id, lock_context, &_POSIX_Message_queue_Information );
+  _Thread_queue_Context_initialize( queue_context, NULL );
+  return (POSIX_Message_queue_Control *) _Objects_Get(
+    id,
+    &queue_context->Lock_context,
+    &_POSIX_Message_queue_Information
+  );
 }
 
 /*
diff --git a/cpukit/posix/include/rtems/posix/muteximpl.h b/cpukit/posix/include/rtems/posix/muteximpl.h
index 7627d3d..4c7852b 100644
--- a/cpukit/posix/include/rtems/posix/muteximpl.h
+++ b/cpukit/posix/include/rtems/posix/muteximpl.h
@@ -127,8 +127,8 @@ RTEMS_INLINE_ROUTINE int _POSIX_Mutex_Translate_core_mutex_return_code(
  *  @note: This version of the method uses an interrupt critical section.
  */
 POSIX_Mutex_Control *_POSIX_Mutex_Get(
-  pthread_mutex_t  *mutex,
-  ISR_lock_Context *lock_context
+  pthread_mutex_t      *mutex,
+  Thread_queue_Context *queue_context
 );
 
 RTEMS_INLINE_ROUTINE POSIX_Mutex_Control *_POSIX_Mutex_Get_no_protection(
diff --git a/cpukit/posix/include/rtems/posix/posixapi.h b/cpukit/posix/include/rtems/posix/posixapi.h
index a16b6ad..a5afe92 100644
--- a/cpukit/posix/include/rtems/posix/posixapi.h
+++ b/cpukit/posix/include/rtems/posix/posixapi.h
@@ -71,7 +71,7 @@ RTEMS_INLINE_ROUTINE int _POSIX_Get_by_name_error(
 #define _POSIX_Get_object_body( \
   type, \
   id, \
-  lock_context, \
+  queue_context, \
   info, \
   initializer, \
   init \
@@ -80,14 +80,16 @@ RTEMS_INLINE_ROUTINE int _POSIX_Get_by_name_error(
   if ( id == NULL ) { \
     return NULL; \
   } \
-  the_object = _Objects_Get( (Objects_Id) *id, lock_context, info ); \
+  the_object = \
+    _Objects_Get( (Objects_Id) *id, &queue_context->Lock_context, info ); \
   if ( the_object == NULL ) { \
     _Once_Lock(); \
     if ( *id == initializer ) { \
       init( id, NULL ); \
     } \
     _Once_Unlock(); \
-    the_object = _Objects_Get( (Objects_Id) *id, lock_context, info ); \
+    the_object = \
+      _Objects_Get( (Objects_Id) *id, &queue_context->Lock_context, info ); \
   } \
   return (type *) the_object
 
diff --git a/cpukit/posix/include/rtems/posix/rwlockimpl.h b/cpukit/posix/include/rtems/posix/rwlockimpl.h
index 6f3088e..4ab9395 100644
--- a/cpukit/posix/include/rtems/posix/rwlockimpl.h
+++ b/cpukit/posix/include/rtems/posix/rwlockimpl.h
@@ -87,8 +87,8 @@ RTEMS_INLINE_ROUTINE void _POSIX_RWLock_Free (
 }
 
 POSIX_RWLock_Control *_POSIX_RWLock_Get(
-  pthread_rwlock_t *rwlock,
-  ISR_lock_Context *lock_context
+  pthread_rwlock_t     *rwlock,
+  Thread_queue_Context *queue_context
 );
 
 #ifdef __cplusplus
diff --git a/cpukit/posix/include/rtems/posix/semaphoreimpl.h b/cpukit/posix/include/rtems/posix/semaphoreimpl.h
index 402329a..10d7cee 100644
--- a/cpukit/posix/include/rtems/posix/semaphoreimpl.h
+++ b/cpukit/posix/include/rtems/posix/semaphoreimpl.h
@@ -61,13 +61,14 @@ RTEMS_INLINE_ROUTINE void _POSIX_Semaphore_Free (
 }
 
 RTEMS_INLINE_ROUTINE POSIX_Semaphore_Control *_POSIX_Semaphore_Get(
-  const sem_t       *id,
-  ISR_lock_Context  *lock_context
+  const sem_t          *id,
+  Thread_queue_Context *queue_context
 )
 {
+  _Thread_queue_Context_initialize( queue_context, NULL );
   return (POSIX_Semaphore_Control *) _Objects_Get(
     (Objects_Id) *id,
-    lock_context,
+    &queue_context->Lock_context,
     &_POSIX_Semaphore_Information
   );
 }
@@ -93,7 +94,7 @@ int _POSIX_Semaphore_Create_support(
  */
 void _POSIX_Semaphore_Delete(
   POSIX_Semaphore_Control *the_semaphore,
-  ISR_lock_Context        *lock_context
+  Thread_queue_Context    *queue_context
 );
 
 /**
diff --git a/cpukit/posix/src/conddestroy.c b/cpukit/posix/src/conddestroy.c
index d47c6b2..a2c8dc1 100644
--- a/cpukit/posix/src/conddestroy.c
+++ b/cpukit/posix/src/conddestroy.c
@@ -29,20 +29,20 @@ int pthread_cond_destroy(
 )
 {
   POSIX_Condition_variables_Control *the_cond;
-  ISR_lock_Context                   lock_context;
+  Thread_queue_Context               queue_context;
 
   _Objects_Allocator_lock();
-  the_cond = _POSIX_Condition_variables_Get( cond, &lock_context );
+  the_cond = _POSIX_Condition_variables_Get( cond, &queue_context );
 
   if ( the_cond == NULL ) {
     _Objects_Allocator_unlock();
     return EINVAL;
   }
 
-  _POSIX_Condition_variables_Acquire_critical( the_cond, &lock_context );
+  _POSIX_Condition_variables_Acquire_critical( the_cond, &queue_context );
 
   if ( !_Thread_queue_Is_empty( &the_cond->Wait_queue.Queue ) ) {
-    _POSIX_Condition_variables_Release( the_cond, &lock_context );
+    _POSIX_Condition_variables_Release( the_cond, &queue_context );
     _Objects_Allocator_unlock();
     return EBUSY;
   }
@@ -51,7 +51,7 @@ int pthread_cond_destroy(
     &_POSIX_Condition_variables_Information,
     &the_cond->Object
   );
-  _POSIX_Condition_variables_Release( the_cond, &lock_context );
+  _POSIX_Condition_variables_Release( the_cond, &queue_context );
   _POSIX_Condition_variables_Destroy( the_cond );
   _POSIX_Condition_variables_Free( the_cond );
   _Objects_Allocator_unlock();
diff --git a/cpukit/posix/src/condget.c b/cpukit/posix/src/condget.c
index 5676de8..5b086ef 100644
--- a/cpukit/posix/src/condget.c
+++ b/cpukit/posix/src/condget.c
@@ -15,14 +15,14 @@
 #include <rtems/posix/posixapi.h>
 
 POSIX_Condition_variables_Control *_POSIX_Condition_variables_Get(
-  pthread_cond_t   *cond,
-  ISR_lock_Context *lock_context
+  pthread_cond_t       *cond,
+  Thread_queue_Context *queue_context
 )
 {
   _POSIX_Get_object_body(
     POSIX_Condition_variables_Control,
     cond,
-    lock_context,
+    queue_context,
     &_POSIX_Condition_variables_Information,
     PTHREAD_COND_INITIALIZER,
     pthread_cond_init
diff --git a/cpukit/posix/src/condsignalsupp.c b/cpukit/posix/src/condsignalsupp.c
index 38aa053..e42476e 100644
--- a/cpukit/posix/src/condsignalsupp.c
+++ b/cpukit/posix/src/condsignalsupp.c
@@ -36,15 +36,15 @@ int _POSIX_Condition_variables_Signal_support(
 
   do {
     POSIX_Condition_variables_Control *the_cond;
-    ISR_lock_Context                   lock_context;
+    Thread_queue_Context               queue_context;
 
-    the_cond = _POSIX_Condition_variables_Get( cond, &lock_context );
+    the_cond = _POSIX_Condition_variables_Get( cond, &queue_context );
 
     if ( the_cond == NULL ) {
       return EINVAL;
     }
 
-    _POSIX_Condition_variables_Acquire_critical( the_cond, &lock_context );
+    _POSIX_Condition_variables_Acquire_critical( the_cond, &queue_context );
 
     the_thread = _Thread_queue_First_locked(
       &the_cond->Wait_queue,
@@ -56,12 +56,11 @@ int _POSIX_Condition_variables_Signal_support(
         &the_cond->Wait_queue.Queue,
         POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
         the_thread,
-        NULL,
-        &lock_context
+        &queue_context
       );
     } else {
       the_cond->mutex = POSIX_CONDITION_VARIABLES_NO_MUTEX;
-      _POSIX_Condition_variables_Release( the_cond, &lock_context );
+      _POSIX_Condition_variables_Release( the_cond, &queue_context );
     }
   } while ( is_broadcast && the_thread != NULL );
 
diff --git a/cpukit/posix/src/condwaitsupp.c b/cpukit/posix/src/condwaitsupp.c
index 11b0587..4e89ef0 100644
--- a/cpukit/posix/src/condwaitsupp.c
+++ b/cpukit/posix/src/condwaitsupp.c
@@ -35,7 +35,7 @@ int _POSIX_Condition_variables_Wait_support(
 {
   POSIX_Condition_variables_Control *the_cond;
   POSIX_Mutex_Control               *the_mutex;
-  ISR_lock_Context                   lock_context;
+  Thread_queue_Context               queue_context;
   int                                status;
   int                                mutex_status;
   CORE_mutex_Status                  core_mutex_status;
@@ -46,25 +46,25 @@ int _POSIX_Condition_variables_Wait_support(
     return EINVAL;
   }
 
-  the_cond = _POSIX_Condition_variables_Get( cond, &lock_context );
+  the_cond = _POSIX_Condition_variables_Get( cond, &queue_context );
 
   if ( the_cond == NULL ) {
     return EINVAL;
   }
 
-  _POSIX_Condition_variables_Acquire_critical( the_cond, &lock_context );
+  _POSIX_Condition_variables_Acquire_critical( the_cond, &queue_context );
 
   if (
     the_cond->mutex != POSIX_CONDITION_VARIABLES_NO_MUTEX
       && the_cond->mutex != *mutex
   ) {
-    _POSIX_Condition_variables_Release( the_cond, &lock_context );
+    _POSIX_Condition_variables_Release( the_cond, &queue_context );
     return EINVAL;
   }
 
   the_cond->mutex = *mutex;
 
-  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
+  cpu_self = _Thread_Dispatch_disable_critical( &queue_context.Lock_context );
   executing = _Per_CPU_Get_executing( cpu_self );
 
   /*
@@ -78,7 +78,7 @@ int _POSIX_Condition_variables_Wait_support(
     the_mutex == NULL
       || !_CORE_mutex_Is_owner( &the_mutex->Mutex, executing )
   ) {
-    _POSIX_Condition_variables_Release( the_cond, &lock_context );
+    _POSIX_Condition_variables_Release( the_cond, &queue_context );
     _Thread_Dispatch_enable( cpu_self );
     return EPERM;
   }
@@ -92,18 +92,17 @@ int _POSIX_Condition_variables_Wait_support(
       STATES_WAITING_FOR_CONDITION_VARIABLE,
       timeout,
       ETIMEDOUT,
-      &lock_context
+      &queue_context.Lock_context
     );
   } else {
-    _POSIX_Condition_variables_Release( the_cond, &lock_context );
+    _POSIX_Condition_variables_Release( the_cond, &queue_context );
     executing->Wait.return_code = ETIMEDOUT;
   }
 
-  _ISR_lock_ISR_disable( &lock_context );
+  _ISR_lock_ISR_disable( &queue_context.Lock_context );
   core_mutex_status = _CORE_mutex_Surrender(
     &the_mutex->Mutex,
-    NULL,
-    &lock_context
+    &queue_context
   );
   _Assert( core_mutex_status == CORE_MUTEX_STATUS_SUCCESSFUL );
   (void) core_mutex_status;
diff --git a/cpukit/posix/src/mqueueclose.c b/cpukit/posix/src/mqueueclose.c
index 28a7d16..60d3ce3 100644
--- a/cpukit/posix/src/mqueueclose.c
+++ b/cpukit/posix/src/mqueueclose.c
@@ -42,10 +42,10 @@ int mq_close(
 )
 {
   POSIX_Message_queue_Control *the_mq;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
 
   _Objects_Allocator_lock();
-  the_mq = _POSIX_Message_queue_Get( mqdes, &lock_context );
+  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
 
   if ( the_mq == NULL ) {
     _Objects_Allocator_unlock();
@@ -54,17 +54,17 @@ int mq_close(
 
   _CORE_message_queue_Acquire_critical(
     &the_mq->Message_queue,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mq->open_count == 0 ) {
-    _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
     _Objects_Allocator_unlock();
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
   the_mq->open_count -= 1;
-  _POSIX_Message_queue_Delete( the_mq, &lock_context );
+  _POSIX_Message_queue_Delete( the_mq, &queue_context );
 
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/posix/src/mqueuedeletesupp.c b/cpukit/posix/src/mqueuedeletesupp.c
index 485136c..850707a 100644
--- a/cpukit/posix/src/mqueuedeletesupp.c
+++ b/cpukit/posix/src/mqueuedeletesupp.c
@@ -22,17 +22,13 @@
 
 void _POSIX_Message_queue_Delete(
   POSIX_Message_queue_Control *the_mq,
-  ISR_lock_Context            *lock_context
+  Thread_queue_Context        *queue_context
 )
 {
   if ( !the_mq->linked && the_mq->open_count == 0 ) {
-    _CORE_message_queue_Close(
-      &the_mq->Message_queue,
-      NULL,        /* no MP support */
-      lock_context
-    );
+    _CORE_message_queue_Close( &the_mq->Message_queue, queue_context );
     _POSIX_Message_queue_Free( the_mq );
   } else {
-    _CORE_message_queue_Release( &the_mq->Message_queue, lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, queue_context );
   }
 }
diff --git a/cpukit/posix/src/mqueuegetattr.c b/cpukit/posix/src/mqueuegetattr.c
index dae14e3..5efcc0b 100644
--- a/cpukit/posix/src/mqueuegetattr.c
+++ b/cpukit/posix/src/mqueuegetattr.c
@@ -42,13 +42,13 @@ int mq_getattr(
 )
 {
   POSIX_Message_queue_Control *the_mq;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
 
   if ( mqstat == NULL ) {
     rtems_set_errno_and_return_minus_one( EINVAL );
   }
 
-  the_mq = _POSIX_Message_queue_Get( mqdes, &lock_context );
+  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
 
   if ( the_mq == NULL ) {
     rtems_set_errno_and_return_minus_one( EBADF );
@@ -56,11 +56,11 @@ int mq_getattr(
 
   _CORE_message_queue_Acquire_critical(
     &the_mq->Message_queue,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mq->open_count == 0 ) {
-    _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
@@ -72,6 +72,6 @@ int mq_getattr(
   mqstat->mq_maxmsg  = the_mq->Message_queue.maximum_pending_messages;
   mqstat->mq_curmsgs = the_mq->Message_queue.number_of_pending_messages;
 
-  _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+  _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
   return 0;
 }
diff --git a/cpukit/posix/src/mqueuenotify.c b/cpukit/posix/src/mqueuenotify.c
index adcfdcb..a1f9a08 100644
--- a/cpukit/posix/src/mqueuenotify.c
+++ b/cpukit/posix/src/mqueuenotify.c
@@ -24,7 +24,7 @@
 
 static void _POSIX_Message_queue_Notify_handler(
   CORE_message_queue_Control *the_message_queue,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
   POSIX_Message_queue_Control *the_mq;
@@ -38,7 +38,7 @@ static void _POSIX_Message_queue_Notify_handler(
 
   signo = the_mq->notification.sigev_signo;
   _CORE_message_queue_Set_notify( &the_mq->Message_queue, NULL );
-  _CORE_message_queue_Release( &the_mq->Message_queue, lock_context );
+  _CORE_message_queue_Release( &the_mq->Message_queue, queue_context );
 
   kill( getpid(), signo );
 }
@@ -49,9 +49,9 @@ int mq_notify(
 )
 {
   POSIX_Message_queue_Control *the_mq;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
 
-  the_mq = _POSIX_Message_queue_Get( mqdes, &lock_context );
+  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
 
   if ( the_mq == NULL ) {
     rtems_set_errno_and_return_minus_one( EBADF );
@@ -59,17 +59,17 @@ int mq_notify(
 
   _CORE_message_queue_Acquire_critical(
     &the_mq->Message_queue,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mq->open_count == 0 ) {
-    _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
   if ( notification != NULL ) {
     if ( _CORE_message_queue_Is_notify_enabled( &the_mq->Message_queue ) ) {
-      _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+      _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
       rtems_set_errno_and_return_minus_one( EBUSY );
     }
 
@@ -83,6 +83,6 @@ int mq_notify(
     _CORE_message_queue_Set_notify( &the_mq->Message_queue, NULL );
   }
 
-  _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+  _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
   return 0;
 }
diff --git a/cpukit/posix/src/mqueuerecvsupp.c b/cpukit/posix/src/mqueuerecvsupp.c
index b5a790a..541786a 100644
--- a/cpukit/posix/src/mqueuerecvsupp.c
+++ b/cpukit/posix/src/mqueuerecvsupp.c
@@ -44,24 +44,24 @@ ssize_t _POSIX_Message_queue_Receive_support(
 )
 {
   POSIX_Message_queue_Control *the_mq;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
   size_t                       length_out;
   bool                         do_wait;
   Thread_Control              *executing;
 
-  the_mq = _POSIX_Message_queue_Get( mqdes, &lock_context );
+  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
 
   if ( the_mq == NULL ) {
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
   if ( ( the_mq->oflag & O_ACCMODE ) == O_WRONLY ) {
-    _ISR_lock_ISR_enable( &lock_context );
+    _ISR_lock_ISR_enable( &queue_context.Lock_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
   if ( msg_len < the_mq->Message_queue.maximum_message_size ) {
-    _ISR_lock_ISR_enable( &lock_context );
+    _ISR_lock_ISR_enable( &queue_context.Lock_context );
     rtems_set_errno_and_return_minus_one( EMSGSIZE );
   }
 
@@ -83,11 +83,11 @@ ssize_t _POSIX_Message_queue_Receive_support(
 
   _CORE_message_queue_Acquire_critical(
     &the_mq->Message_queue,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mq->open_count == 0 ) {
-    _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
@@ -102,7 +102,7 @@ ssize_t _POSIX_Message_queue_Receive_support(
     &length_out,
     do_wait,
     timeout,
-    &lock_context
+    &queue_context
   );
 
   if ( msg_prio != NULL ) {
diff --git a/cpukit/posix/src/mqueuesendsupp.c b/cpukit/posix/src/mqueuesendsupp.c
index 8933fc6..2d88eec 100644
--- a/cpukit/posix/src/mqueuesendsupp.c
+++ b/cpukit/posix/src/mqueuesendsupp.c
@@ -44,7 +44,7 @@ int _POSIX_Message_queue_Send_support(
 )
 {
   POSIX_Message_queue_Control *the_mq;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
   CORE_message_queue_Status    msg_status;
   bool                         do_wait;
   Thread_Control              *executing;
@@ -58,14 +58,14 @@ int _POSIX_Message_queue_Send_support(
     rtems_set_errno_and_return_minus_one( EINVAL );
   }
 
-  the_mq = _POSIX_Message_queue_Get( mqdes, &lock_context );
+  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
 
   if ( the_mq == NULL ) {
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
   if ( ( the_mq->oflag & O_ACCMODE ) == O_RDONLY ) {
-    _ISR_lock_ISR_enable( &lock_context );
+    _ISR_lock_ISR_enable( &queue_context.Lock_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
@@ -80,11 +80,11 @@ int _POSIX_Message_queue_Send_support(
 
   _CORE_message_queue_Acquire_critical(
     &the_mq->Message_queue,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mq->open_count == 0 ) {
-    _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
@@ -97,11 +97,10 @@ int _POSIX_Message_queue_Send_support(
     executing,
     msg_ptr,
     msg_len,
-    NULL,
     _POSIX_Message_queue_Priority_to_core( msg_prio ),
     do_wait,
     timeout,
-    &lock_context
+    &queue_context
   );
 
   if ( msg_status != CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL ) {
diff --git a/cpukit/posix/src/mqueuesetattr.c b/cpukit/posix/src/mqueuesetattr.c
index dfd9bab..fc9fc75 100644
--- a/cpukit/posix/src/mqueuesetattr.c
+++ b/cpukit/posix/src/mqueuesetattr.c
@@ -27,13 +27,13 @@ int mq_setattr(
 )
 {
   POSIX_Message_queue_Control *the_mq;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
 
   if ( mqstat == NULL ) {
     rtems_set_errno_and_return_minus_one( EINVAL );
   }
 
-  the_mq = _POSIX_Message_queue_Get( mqdes, &lock_context );
+  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
 
   if ( the_mq == NULL ) {
     rtems_set_errno_and_return_minus_one( EBADF );
@@ -41,11 +41,11 @@ int mq_setattr(
 
   _CORE_message_queue_Acquire_critical(
     &the_mq->Message_queue,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mq->open_count == 0 ) {
-    _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+    _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
     rtems_set_errno_and_return_minus_one( EBADF );
   }
 
@@ -62,6 +62,6 @@ int mq_setattr(
 
   the_mq->oflag = mqstat->mq_flags;
 
-  _CORE_message_queue_Release( &the_mq->Message_queue, &lock_context );
+  _CORE_message_queue_Release( &the_mq->Message_queue, &queue_context );
   return 0;
 }
diff --git a/cpukit/posix/src/mqueueunlink.c b/cpukit/posix/src/mqueueunlink.c
index dc485ba..812f249 100644
--- a/cpukit/posix/src/mqueueunlink.c
+++ b/cpukit/posix/src/mqueueunlink.c
@@ -30,7 +30,7 @@ int mq_unlink(
 {
   POSIX_Message_queue_Control *the_mq;
   Objects_Get_by_name_error    error;
-  ISR_lock_Context             lock_context;
+  Thread_queue_Context         queue_context;
 
   _Objects_Allocator_lock();
 
@@ -42,10 +42,10 @@ int mq_unlink(
 
   _POSIX_Message_queue_Namespace_remove( the_mq );
 
-  _CORE_message_queue_Acquire( &the_mq->Message_queue, &lock_context );
+  _CORE_message_queue_Acquire( &the_mq->Message_queue, &queue_context );
 
   the_mq->linked = false;
-  _POSIX_Message_queue_Delete( the_mq, &lock_context );
+  _POSIX_Message_queue_Delete( the_mq, &queue_context );
 
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/posix/src/mutexdestroy.c b/cpukit/posix/src/mutexdestroy.c
index 640acc0..7fda7d3 100644
--- a/cpukit/posix/src/mutexdestroy.c
+++ b/cpukit/posix/src/mutexdestroy.c
@@ -28,16 +28,16 @@ int pthread_mutex_destroy(
   pthread_mutex_t           *mutex
 )
 {
-  POSIX_Mutex_Control *the_mutex;
-  ISR_lock_Context     lock_context;
-  int                  eno;
+  POSIX_Mutex_Control  *the_mutex;
+  Thread_queue_Context  queue_context;
+  int                   eno;
 
   _Objects_Allocator_lock();
 
-  the_mutex = _POSIX_Mutex_Get( mutex, &lock_context );
+  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
 
   if ( the_mutex != NULL ) {
-    _CORE_mutex_Acquire_critical( &the_mutex->Mutex, &lock_context );
+    _CORE_mutex_Acquire_critical( &the_mutex->Mutex, &queue_context );
 
     /*
      * XXX: There is an error for the mutex being locked
@@ -46,12 +46,12 @@ int pthread_mutex_destroy(
 
     if ( !_CORE_mutex_Is_locked( &the_mutex->Mutex ) ) {
       _Objects_Close( &_POSIX_Mutex_Information, &the_mutex->Object );
-      _CORE_mutex_Release( &the_mutex->Mutex, &lock_context );
+      _CORE_mutex_Release( &the_mutex->Mutex, &queue_context );
       _CORE_mutex_Destroy( &the_mutex->Mutex );
       _POSIX_Mutex_Free( the_mutex );
       eno = 0;
     } else {
-      _CORE_mutex_Release( &the_mutex->Mutex, &lock_context );
+      _CORE_mutex_Release( &the_mutex->Mutex, &queue_context );
       eno = EBUSY;
     }
   } else {
diff --git a/cpukit/posix/src/mutexget.c b/cpukit/posix/src/mutexget.c
index f8f0033..e90c41c 100644
--- a/cpukit/posix/src/mutexget.c
+++ b/cpukit/posix/src/mutexget.c
@@ -22,14 +22,14 @@
 #include <rtems/posix/posixapi.h>
 
 POSIX_Mutex_Control *_POSIX_Mutex_Get(
-  pthread_mutex_t  *mutex,
-  ISR_lock_Context *lock_context
+  pthread_mutex_t      *mutex,
+  Thread_queue_Context *queue_context
 )
 {
   _POSIX_Get_object_body(
     POSIX_Mutex_Control,
     mutex,
-    lock_context,
+    queue_context,
     &_POSIX_Mutex_Information,
     PTHREAD_MUTEX_INITIALIZER,
     pthread_mutex_init
diff --git a/cpukit/posix/src/mutexgetprioceiling.c b/cpukit/posix/src/mutexgetprioceiling.c
index 232b2e2..268457a 100644
--- a/cpukit/posix/src/mutexgetprioceiling.c
+++ b/cpukit/posix/src/mutexgetprioceiling.c
@@ -30,26 +30,26 @@ int pthread_mutex_getprioceiling(
   int               *prioceiling
 )
 {
-  POSIX_Mutex_Control *the_mutex;
-  ISR_lock_Context     lock_context;
+  POSIX_Mutex_Control  *the_mutex;
+  Thread_queue_Context  queue_context;
 
   if ( prioceiling == NULL ) {
     return EINVAL;
   }
 
-  the_mutex = _POSIX_Mutex_Get( mutex, &lock_context );
+  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
 
   if ( the_mutex == NULL ) {
     return EINVAL;
   }
 
-  _CORE_mutex_Acquire_critical( &the_mutex->Mutex, &lock_context );
+  _CORE_mutex_Acquire_critical( &the_mutex->Mutex, &queue_context );
 
   *prioceiling = _POSIX_Priority_From_core(
     the_mutex->Mutex.Attributes.priority_ceiling
   );
 
-  _CORE_mutex_Release( &the_mutex->Mutex, &lock_context );
+  _CORE_mutex_Release( &the_mutex->Mutex, &queue_context );
 
   return 0;
 }
diff --git a/cpukit/posix/src/mutexlocksupp.c b/cpukit/posix/src/mutexlocksupp.c
index 0c70cf0..f17269b 100644
--- a/cpukit/posix/src/mutexlocksupp.c
+++ b/cpukit/posix/src/mutexlocksupp.c
@@ -28,11 +28,11 @@ int _POSIX_Mutex_Lock_support(
   Watchdog_Interval  timeout
 )
 {
-  POSIX_Mutex_Control *the_mutex;
-  ISR_lock_Context     lock_context;
-  Thread_Control      *executing;
+  POSIX_Mutex_Control  *the_mutex;
+  Thread_queue_Context  queue_context;
+  Thread_Control       *executing;
 
-  the_mutex = _POSIX_Mutex_Get( mutex, &lock_context );
+  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
 
   if ( the_mutex == NULL ) {
     return EINVAL;
@@ -44,7 +44,7 @@ int _POSIX_Mutex_Lock_support(
     executing,
     blocking,
     timeout,
-    &lock_context
+    &queue_context
   );
   return _POSIX_Mutex_Translate_core_mutex_return_code(
     (CORE_mutex_Status) executing->Wait.return_code
diff --git a/cpukit/posix/src/mutexsetprioceiling.c b/cpukit/posix/src/mutexsetprioceiling.c
index f29c0ad..09554f8 100644
--- a/cpukit/posix/src/mutexsetprioceiling.c
+++ b/cpukit/posix/src/mutexsetprioceiling.c
@@ -33,7 +33,7 @@ int pthread_mutex_setprioceiling(
 {
   register POSIX_Mutex_Control *the_mutex;
   Priority_Control              the_priority;
-  ISR_lock_Context              lock_context;
+  Thread_queue_Context          queue_context;
 
   if ( !old_ceiling )
     return EINVAL;
@@ -57,7 +57,7 @@ int pthread_mutex_setprioceiling(
    *  NOTE: This makes it easier to get 100% binary coverage since the
    *        bad Id case is handled by the switch.
    */
-  the_mutex = _POSIX_Mutex_Get( mutex, &lock_context );
+  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
 
   if ( the_mutex == NULL ) {
     return EINVAL;
@@ -73,8 +73,7 @@ int pthread_mutex_setprioceiling(
    */
   _CORE_mutex_Surrender(
     &the_mutex->Mutex,
-    NULL,
-    &lock_context
+    &queue_context
   );
   return 0;
 }
diff --git a/cpukit/posix/src/mutexunlock.c b/cpukit/posix/src/mutexunlock.c
index 94fcc63..ff7fc1c 100644
--- a/cpukit/posix/src/mutexunlock.c
+++ b/cpukit/posix/src/mutexunlock.c
@@ -30,20 +30,16 @@ int pthread_mutex_unlock(
   pthread_mutex_t           *mutex
 )
 {
-  POSIX_Mutex_Control *the_mutex;
-  CORE_mutex_Status    status;
-  ISR_lock_Context     lock_context;
+  POSIX_Mutex_Control  *the_mutex;
+  CORE_mutex_Status     status;
+  Thread_queue_Context  queue_context;
 
-  the_mutex = _POSIX_Mutex_Get( mutex, &lock_context );
+  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
 
   if ( the_mutex == NULL ) {
     return EINVAL;
   }
 
-  status = _CORE_mutex_Surrender(
-    &the_mutex->Mutex,
-    NULL,
-    &lock_context
-  );
+  status = _CORE_mutex_Surrender( &the_mutex->Mutex, &queue_context );
   return _POSIX_Mutex_Translate_core_mutex_return_code( status );
 }
diff --git a/cpukit/posix/src/pbarrierdestroy.c b/cpukit/posix/src/pbarrierdestroy.c
index 709644b..8f85762 100644
--- a/cpukit/posix/src/pbarrierdestroy.c
+++ b/cpukit/posix/src/pbarrierdestroy.c
@@ -36,30 +36,30 @@ int pthread_barrier_destroy(
 )
 {
   POSIX_Barrier_Control *the_barrier;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
 
   if ( barrier == NULL ) {
     return EINVAL;
   }
 
   _Objects_Allocator_lock();
-  the_barrier = _POSIX_Barrier_Get( barrier, &lock_context );
+  the_barrier = _POSIX_Barrier_Get( barrier, &queue_context );
 
   if ( the_barrier == NULL ) {
     _Objects_Allocator_unlock();
     return EINVAL;
   }
 
-  _CORE_barrier_Acquire_critical( &the_barrier->Barrier, &lock_context );
+  _CORE_barrier_Acquire_critical( &the_barrier->Barrier, &queue_context );
 
   if ( the_barrier->Barrier.number_of_waiting_threads != 0 ) {
-    _CORE_barrier_Release( &the_barrier->Barrier, &lock_context );
+    _CORE_barrier_Release( &the_barrier->Barrier, &queue_context );
     _Objects_Allocator_unlock();
     return EBUSY;
   }
 
   _Objects_Close( &_POSIX_Barrier_Information, &the_barrier->Object );
-  _CORE_barrier_Release( &the_barrier->Barrier, &lock_context );
+  _CORE_barrier_Release( &the_barrier->Barrier, &queue_context );
   _POSIX_Barrier_Free( the_barrier );
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/posix/src/pbarrierwait.c b/cpukit/posix/src/pbarrierwait.c
index adfd40d..522c18e 100644
--- a/cpukit/posix/src/pbarrierwait.c
+++ b/cpukit/posix/src/pbarrierwait.c
@@ -37,14 +37,14 @@ int pthread_barrier_wait(
 )
 {
   POSIX_Barrier_Control *the_barrier;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
   Thread_Control        *executing;
 
   if ( barrier == NULL ) {
     return EINVAL;
   }
 
-  the_barrier = _POSIX_Barrier_Get( barrier, &lock_context );
+  the_barrier = _POSIX_Barrier_Get( barrier, &queue_context );
 
   if ( the_barrier == NULL ) {
     return EINVAL;
@@ -56,8 +56,7 @@ int pthread_barrier_wait(
     executing,
     true,
     0,
-    NULL,
-    &lock_context
+    &queue_context
   );
   return _POSIX_Barrier_Translate_core_barrier_return_code(
     executing->Wait.return_code
diff --git a/cpukit/posix/src/prwlockdestroy.c b/cpukit/posix/src/prwlockdestroy.c
index 6f9eec8..0ced556 100644
--- a/cpukit/posix/src/prwlockdestroy.c
+++ b/cpukit/posix/src/prwlockdestroy.c
@@ -24,24 +24,24 @@ int pthread_rwlock_destroy(
 )
 {
   POSIX_RWLock_Control *the_rwlock;
-  ISR_lock_Context      lock_context;
+  Thread_queue_Context  queue_context;
 
   _Objects_Allocator_lock();
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     _Objects_Allocator_unlock();
     return EINVAL;
   }
 
-  _CORE_RWLock_Acquire_critical( &the_rwlock->RWLock, &lock_context );
+  _CORE_RWLock_Acquire_critical( &the_rwlock->RWLock, &queue_context );
 
   /*
    *  If there is at least one thread waiting, then do not delete it.
    */
 
   if ( !_Thread_queue_Is_empty( &the_rwlock->RWLock.Wait_queue.Queue ) ) {
-    _CORE_RWLock_Release( &the_rwlock->RWLock, &lock_context );
+    _CORE_RWLock_Release( &the_rwlock->RWLock, &queue_context );
     _Objects_Allocator_unlock();
     return EBUSY;
   }
@@ -51,7 +51,7 @@ int pthread_rwlock_destroy(
    */
 
   _Objects_Close( &_POSIX_RWLock_Information, &the_rwlock->Object );
-  _CORE_RWLock_Release( &the_rwlock->RWLock, &lock_context );
+  _CORE_RWLock_Release( &the_rwlock->RWLock, &queue_context );
   _POSIX_RWLock_Free( the_rwlock );
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/posix/src/prwlockinit.c b/cpukit/posix/src/prwlockinit.c
index fcf4e29..34ab1ae 100644
--- a/cpukit/posix/src/prwlockinit.c
+++ b/cpukit/posix/src/prwlockinit.c
@@ -24,14 +24,14 @@
 #include <rtems/posix/posixapi.h>
 
 POSIX_RWLock_Control *_POSIX_RWLock_Get(
-  pthread_rwlock_t *rwlock,
-  ISR_lock_Context *lock_context
+  pthread_rwlock_t     *rwlock,
+  Thread_queue_Context *queue_context
 )
 {
   _POSIX_Get_object_body(
     POSIX_RWLock_Control,
     rwlock,
-    lock_context,
+    queue_context,
     &_POSIX_RWLock_Information,
     PTHREAD_RWLOCK_INITIALIZER,
     pthread_rwlock_init
diff --git a/cpukit/posix/src/prwlockrdlock.c b/cpukit/posix/src/prwlockrdlock.c
index 3459399..e59aaf9 100644
--- a/cpukit/posix/src/prwlockrdlock.c
+++ b/cpukit/posix/src/prwlockrdlock.c
@@ -25,10 +25,10 @@ int pthread_rwlock_rdlock(
 )
 {
   POSIX_RWLock_Control *the_rwlock;
-  ISR_lock_Context      lock_context;
+  Thread_queue_Context  queue_context;
   Thread_Control       *executing;
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
@@ -40,7 +40,7 @@ int pthread_rwlock_rdlock(
     executing,
     true,                 /* we are willing to wait forever */
     0,
-    &lock_context
+    &queue_context
   );
   return _POSIX_RWLock_Translate_core_RWLock_return_code(
     (CORE_RWLock_Status) executing->Wait.return_code
diff --git a/cpukit/posix/src/prwlocktimedrdlock.c b/cpukit/posix/src/prwlocktimedrdlock.c
index 829e169..9e84943 100644
--- a/cpukit/posix/src/prwlocktimedrdlock.c
+++ b/cpukit/posix/src/prwlocktimedrdlock.c
@@ -27,7 +27,7 @@ int pthread_rwlock_timedrdlock(
 )
 {
   POSIX_RWLock_Control                    *the_rwlock;
-  ISR_lock_Context                         lock_context;
+  Thread_queue_Context                     queue_context;
   Watchdog_Interval                        ticks;
   bool                                     do_wait;
   TOD_Absolute_timeout_conversion_results  status;
@@ -49,7 +49,7 @@ int pthread_rwlock_timedrdlock(
   status = _TOD_Absolute_timeout_to_ticks( abstime, &ticks );
   do_wait = ( status == TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE );
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
@@ -61,7 +61,7 @@ int pthread_rwlock_timedrdlock(
     executing,
     do_wait,
     ticks,
-    &lock_context
+    &queue_context
   );
 
   if (
diff --git a/cpukit/posix/src/prwlocktimedwrlock.c b/cpukit/posix/src/prwlocktimedwrlock.c
index 1258c2b..6be8397 100644
--- a/cpukit/posix/src/prwlocktimedwrlock.c
+++ b/cpukit/posix/src/prwlocktimedwrlock.c
@@ -29,7 +29,7 @@ int pthread_rwlock_timedwrlock(
 )
 {
   POSIX_RWLock_Control                    *the_rwlock;
-  ISR_lock_Context                         lock_context;
+  Thread_queue_Context                     queue_context;
   Watchdog_Interval                        ticks;
   bool                                     do_wait;
   TOD_Absolute_timeout_conversion_results  status;
@@ -51,7 +51,7 @@ int pthread_rwlock_timedwrlock(
   status = _TOD_Absolute_timeout_to_ticks( abstime, &ticks );
   do_wait = ( status == TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE );
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
@@ -63,7 +63,7 @@ int pthread_rwlock_timedwrlock(
     executing,
     do_wait,
     ticks,
-    &lock_context
+    &queue_context
   );
 
   if (
diff --git a/cpukit/posix/src/prwlocktryrdlock.c b/cpukit/posix/src/prwlocktryrdlock.c
index a73d122..1a7cf9e 100644
--- a/cpukit/posix/src/prwlocktryrdlock.c
+++ b/cpukit/posix/src/prwlocktryrdlock.c
@@ -25,10 +25,10 @@ int pthread_rwlock_tryrdlock(
 )
 {
   POSIX_RWLock_Control *the_rwlock;
-  ISR_lock_Context      lock_context;
+  Thread_queue_Context  queue_context;
   Thread_Control       *executing;
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
@@ -40,7 +40,7 @@ int pthread_rwlock_tryrdlock(
     executing,
     false,                  /* do not wait for the rwlock */
     0,
-    &lock_context
+    &queue_context
   );
   return _POSIX_RWLock_Translate_core_RWLock_return_code(
     (CORE_RWLock_Status) executing->Wait.return_code
diff --git a/cpukit/posix/src/prwlocktrywrlock.c b/cpukit/posix/src/prwlocktrywrlock.c
index a44a54f..b6031f9 100644
--- a/cpukit/posix/src/prwlocktrywrlock.c
+++ b/cpukit/posix/src/prwlocktrywrlock.c
@@ -25,10 +25,10 @@ int pthread_rwlock_trywrlock(
 )
 {
   POSIX_RWLock_Control *the_rwlock;
-  ISR_lock_Context      lock_context;
+  Thread_queue_Context  queue_context;
   Thread_Control       *executing;
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
@@ -40,7 +40,7 @@ int pthread_rwlock_trywrlock(
     executing,
     false,                 /* we are not willing to wait */
     0,
-    &lock_context
+    &queue_context
   );
   return _POSIX_RWLock_Translate_core_RWLock_return_code(
     (CORE_RWLock_Status) executing->Wait.return_code
diff --git a/cpukit/posix/src/prwlockunlock.c b/cpukit/posix/src/prwlockunlock.c
index bf6b4de..dd4337d 100644
--- a/cpukit/posix/src/prwlockunlock.c
+++ b/cpukit/posix/src/prwlockunlock.c
@@ -27,15 +27,15 @@ int pthread_rwlock_unlock(
 )
 {
   POSIX_RWLock_Control *the_rwlock;
-  ISR_lock_Context      lock_context;
+  Thread_queue_Context  queue_context;
   CORE_RWLock_Status    status;
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
   }
 
-  status = _CORE_RWLock_Surrender( &the_rwlock->RWLock, &lock_context );
+  status = _CORE_RWLock_Surrender( &the_rwlock->RWLock, &queue_context );
   return _POSIX_RWLock_Translate_core_RWLock_return_code( status );
 }
diff --git a/cpukit/posix/src/prwlockwrlock.c b/cpukit/posix/src/prwlockwrlock.c
index 5683163..84a4e0f 100644
--- a/cpukit/posix/src/prwlockwrlock.c
+++ b/cpukit/posix/src/prwlockwrlock.c
@@ -32,10 +32,10 @@ int pthread_rwlock_wrlock(
 )
 {
   POSIX_RWLock_Control *the_rwlock;
-  ISR_lock_Context      lock_context;
+  Thread_queue_Context  queue_context;
   Thread_Control       *executing;
 
-  the_rwlock = _POSIX_RWLock_Get( rwlock, &lock_context );
+  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
 
   if ( the_rwlock == NULL ) {
     return EINVAL;
@@ -47,7 +47,7 @@ int pthread_rwlock_wrlock(
     executing,
     true,          /* do not timeout -- wait forever */
     0,
-    &lock_context
+    &queue_context
   );
   return _POSIX_RWLock_Translate_core_RWLock_return_code(
     (CORE_RWLock_Status) executing->Wait.return_code
diff --git a/cpukit/posix/src/semaphoredeletesupp.c b/cpukit/posix/src/semaphoredeletesupp.c
index c205b12..2d39b2a 100644
--- a/cpukit/posix/src/semaphoredeletesupp.c
+++ b/cpukit/posix/src/semaphoredeletesupp.c
@@ -22,18 +22,14 @@
 
 void _POSIX_Semaphore_Delete(
   POSIX_Semaphore_Control *the_semaphore,
-  ISR_lock_Context        *lock_context
+  Thread_queue_Context    *queue_context
 )
 {
   if ( !the_semaphore->linked && !the_semaphore->open_count ) {
     _Objects_Close( &_POSIX_Semaphore_Information, &the_semaphore->Object );
-    _CORE_semaphore_Destroy(
-      &the_semaphore->Semaphore,
-      NULL,
-      lock_context
-    );
+    _CORE_semaphore_Destroy( &the_semaphore->Semaphore, queue_context );
     _POSIX_Semaphore_Free( the_semaphore );
   } else {
-    _CORE_semaphore_Release( &the_semaphore->Semaphore, lock_context );
+    _CORE_semaphore_Release( &the_semaphore->Semaphore, queue_context );
   }
 }
diff --git a/cpukit/posix/src/semaphorewaitsupp.c b/cpukit/posix/src/semaphorewaitsupp.c
index f08efa8..9382107 100644
--- a/cpukit/posix/src/semaphorewaitsupp.c
+++ b/cpukit/posix/src/semaphorewaitsupp.c
@@ -32,9 +32,9 @@ int _POSIX_Semaphore_Wait_support(
 {
   POSIX_Semaphore_Control *the_semaphore;
   Thread_Control          *executing;
-  ISR_lock_Context         lock_context;
+  Thread_queue_Context     queue_context;
 
-  the_semaphore = _POSIX_Semaphore_Get( sem, &lock_context );
+  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
 
   if ( the_semaphore == NULL ) {
     rtems_set_errno_and_return_minus_one( EINVAL );
@@ -47,7 +47,7 @@ int _POSIX_Semaphore_Wait_support(
     executing,
     blocking,
     timeout,
-    &lock_context
+    &queue_context
   );
 
   if ( executing->Wait.return_code == CORE_SEMAPHORE_STATUS_SUCCESSFUL ) {
diff --git a/cpukit/posix/src/semclose.c b/cpukit/posix/src/semclose.c
index 3f18ff1..ebcf7a2 100644
--- a/cpukit/posix/src/semclose.c
+++ b/cpukit/posix/src/semclose.c
@@ -27,10 +27,10 @@ int sem_close(
 )
 {
   POSIX_Semaphore_Control *the_semaphore;
-  ISR_lock_Context         lock_context;
+  Thread_queue_Context     queue_context;
 
   _Objects_Allocator_lock();
-  the_semaphore = _POSIX_Semaphore_Get( sem, &lock_context );
+  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
 
   if ( the_semaphore == NULL ) {
     _Objects_Allocator_unlock();
@@ -39,10 +39,10 @@ int sem_close(
 
   _CORE_semaphore_Acquire_critical(
     &the_semaphore->Semaphore,
-    &lock_context
+    &queue_context
   );
   the_semaphore->open_count -= 1;
-  _POSIX_Semaphore_Delete( the_semaphore, &lock_context );
+  _POSIX_Semaphore_Delete( the_semaphore, &queue_context );
 
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/posix/src/semdestroy.c b/cpukit/posix/src/semdestroy.c
index 5264472..8b81470 100644
--- a/cpukit/posix/src/semdestroy.c
+++ b/cpukit/posix/src/semdestroy.c
@@ -27,10 +27,10 @@ int sem_destroy(
 )
 {
   POSIX_Semaphore_Control *the_semaphore;
-  ISR_lock_Context         lock_context;
+  Thread_queue_Context     queue_context;
 
   _Objects_Allocator_lock();
-  the_semaphore = _POSIX_Semaphore_Get( sem, &lock_context );
+  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
 
   if ( the_semaphore == NULL ) {
     _Objects_Allocator_unlock();
@@ -39,17 +39,17 @@ int sem_destroy(
 
   _CORE_semaphore_Acquire_critical(
     &the_semaphore->Semaphore,
-    &lock_context
+    &queue_context
   );
 
   if ( the_semaphore->named ) {
     /* Undefined operation on a named semaphore */
-    _CORE_semaphore_Release( &the_semaphore->Semaphore, &lock_context );
+    _CORE_semaphore_Release( &the_semaphore->Semaphore, &queue_context );
     _Objects_Allocator_unlock();
     rtems_set_errno_and_return_minus_one( EINVAL );
   }
 
-  _POSIX_Semaphore_Delete( the_semaphore, &lock_context );
+  _POSIX_Semaphore_Delete( the_semaphore, &queue_context );
 
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/posix/src/semgetvalue.c b/cpukit/posix/src/semgetvalue.c
index 63e3823..1b75294 100644
--- a/cpukit/posix/src/semgetvalue.c
+++ b/cpukit/posix/src/semgetvalue.c
@@ -28,20 +28,21 @@ int sem_getvalue(
 )
 {
   POSIX_Semaphore_Control *the_semaphore;
-  ISR_lock_Context         lock_context;
+  Thread_queue_Context     queue_context;
 
-  the_semaphore = _POSIX_Semaphore_Get( sem, &lock_context );
+  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
 
   if ( the_semaphore == NULL ) {
     rtems_set_errno_and_return_minus_one( EINVAL );
   }
 
-  /*
-   * Assume a relaxed atomic load of the value on SMP configurations.
-   * Thus, there is no need to acquire a lock.
-   */
+  _CORE_semaphore_Acquire_critical(
+    &the_semaphore->Semaphore,
+    &queue_context
+  );
+
   *sval = _CORE_semaphore_Get_count( &the_semaphore->Semaphore );
 
-  _ISR_lock_ISR_enable( &lock_context );
+  _CORE_semaphore_Release( &the_semaphore->Semaphore, &queue_context );
   return 0;
 }
diff --git a/cpukit/posix/src/sempost.c b/cpukit/posix/src/sempost.c
index 313ff3c..86d2f5a 100644
--- a/cpukit/posix/src/sempost.c
+++ b/cpukit/posix/src/sempost.c
@@ -27,9 +27,9 @@ int sem_post(
 )
 {
   POSIX_Semaphore_Control *the_semaphore;
-  ISR_lock_Context         lock_context;
+  Thread_queue_Context     queue_context;
 
-  the_semaphore = _POSIX_Semaphore_Get( sem, &lock_context );
+  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
 
   if ( the_semaphore == NULL ) {
     rtems_set_errno_and_return_minus_one( EINVAL );
@@ -37,8 +37,7 @@ int sem_post(
 
   _CORE_semaphore_Surrender(
     &the_semaphore->Semaphore,
-    NULL,
-    &lock_context
+    &queue_context
   );
   return 0;
 }
diff --git a/cpukit/posix/src/semunlink.c b/cpukit/posix/src/semunlink.c
index 665aa73..6ba1df1 100644
--- a/cpukit/posix/src/semunlink.c
+++ b/cpukit/posix/src/semunlink.c
@@ -28,7 +28,7 @@ int sem_unlink(
 {
   POSIX_Semaphore_Control   *the_semaphore;
   Objects_Get_by_name_error  error;
-  ISR_lock_Context           lock_context;
+  Thread_queue_Context       queue_context;
 
   _Objects_Allocator_lock();
 
@@ -40,10 +40,10 @@ int sem_unlink(
 
   _POSIX_Semaphore_Namespace_remove( the_semaphore );
 
-  _ISR_lock_ISR_disable( &lock_context );
-  _CORE_semaphore_Acquire_critical( &the_semaphore->Semaphore, &lock_context );
+  _ISR_lock_ISR_disable( &queue_context.Lock_context );
+  _CORE_semaphore_Acquire_critical( &the_semaphore->Semaphore, &queue_context );
   the_semaphore->linked = false;
-  _POSIX_Semaphore_Delete( the_semaphore, &lock_context );
+  _POSIX_Semaphore_Delete( the_semaphore, &queue_context );
 
   _Objects_Allocator_unlock();
   return 0;
diff --git a/cpukit/rtems/include/rtems/rtems/barrierimpl.h b/cpukit/rtems/include/rtems/rtems/barrierimpl.h
index 39632fc..0eaadfc 100644
--- a/cpukit/rtems/include/rtems/rtems/barrierimpl.h
+++ b/cpukit/rtems/include/rtems/rtems/barrierimpl.h
@@ -70,12 +70,13 @@ RTEMS_INLINE_ROUTINE void _Barrier_Free (
 }
 
 RTEMS_INLINE_ROUTINE Barrier_Control *_Barrier_Get(
-  Objects_Id        id,
-  ISR_lock_Context *lock_context
+  Objects_Id            id,
+  Thread_queue_Context *queue_context
 )
 {
+  _Thread_queue_Context_initialize( queue_context, NULL );
   return (Barrier_Control *)
-    _Objects_Get( id, lock_context, &_Barrier_Information );
+    _Objects_Get( id, &queue_context->Lock_context, &_Barrier_Information );
 }
 
 /**
diff --git a/cpukit/rtems/include/rtems/rtems/messageimpl.h b/cpukit/rtems/include/rtems/rtems/messageimpl.h
index 199af1c..d6217f1 100644
--- a/cpukit/rtems/include/rtems/rtems/messageimpl.h
+++ b/cpukit/rtems/include/rtems/rtems/messageimpl.h
@@ -101,18 +101,31 @@ RTEMS_INLINE_ROUTINE void _Message_queue_Free (
   _Objects_Free( &_Message_queue_Information, &the_message_queue->Object );
 }
 
-RTEMS_INLINE_ROUTINE Message_queue_Control * _Message_queue_Get(
-  Objects_Id         id,
-  ISR_lock_Context  *lock_context
+RTEMS_INLINE_ROUTINE Message_queue_Control *_Message_queue_Do_get(
+  Objects_Id               id,
+  Thread_queue_Context    *queue_context
+#if defined(RTEMS_MULTIPROCESSING)
+  ,
+  Thread_queue_MP_callout  mp_callout
+#endif
 )
 {
+  _Thread_queue_Context_initialize( queue_context, mp_callout );
   return (Message_queue_Control *) _Objects_Get(
     id,
-    lock_context,
+    &queue_context->Lock_context,
     &_Message_queue_Information
   );
 }
 
+#if defined(RTEMS_MULTIPROCESSING)
+  #define _Message_queue_Get( id, queue_context, mp_callout ) \
+    _Message_queue_Do_get( id, queue_context, mp_callout )
+#else
+  #define _Message_queue_Get( id, queue_context, mp_callout ) \
+    _Message_queue_Do_get( id, queue_context )
+#endif
+
 RTEMS_INLINE_ROUTINE Message_queue_Control *_Message_queue_Allocate( void )
 {
   return (Message_queue_Control *)
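
The _Message_queue_Get() wrapper keeps a three-argument call site on every configuration: on RTEMS_MULTIPROCESSING builds the callout reaches _Message_queue_Do_get() and is stored in the context by _Thread_queue_Context_initialize(); on uniprocessor builds the macro simply drops the third argument. A caller sketch in the style of the message queue directives changed below; the function name is made up for illustration and the include path is inferred from the header location above:

    #include <rtems/rtems/messageimpl.h>

    static void example_peek_pending( rtems_id id, uint32_t *count )
    {
      Message_queue_Control *the_message_queue;
      Thread_queue_Context   queue_context;

      /* NULL callout: this path never unblocks a remote thread */
      the_message_queue = _Message_queue_Get( id, &queue_context, NULL );

      if ( the_message_queue == NULL ) {
        return;
      }

      _CORE_message_queue_Acquire_critical(
        &the_message_queue->message_queue,
        &queue_context
      );
      *count = the_message_queue->message_queue.number_of_pending_messages;
      _CORE_message_queue_Release(
        &the_message_queue->message_queue,
        &queue_context
      );
    }
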
diff --git a/cpukit/rtems/include/rtems/rtems/semimpl.h b/cpukit/rtems/include/rtems/rtems/semimpl.h
index ac55bc7..c23bef9 100644
--- a/cpukit/rtems/include/rtems/rtems/semimpl.h
+++ b/cpukit/rtems/include/rtems/rtems/semimpl.h
@@ -134,18 +134,31 @@ RTEMS_INLINE_ROUTINE void _Semaphore_Free (
   _Objects_Free( &_Semaphore_Information, &the_semaphore->Object );
 }
 
-RTEMS_INLINE_ROUTINE Semaphore_Control *_Semaphore_Get(
-  Objects_Id         id,
-  ISR_lock_Context  *lock_context
+RTEMS_INLINE_ROUTINE Semaphore_Control *_Semaphore_Do_get(
+  Objects_Id               id,
+  Thread_queue_Context    *queue_context
+#if defined(RTEMS_MULTIPROCESSING)
+  ,
+  Thread_queue_MP_callout  mp_callout
+#endif
 )
 {
+  _Thread_queue_Context_initialize( queue_context, mp_callout );
   return (Semaphore_Control *) _Objects_Get(
     id,
-    lock_context,
+    &queue_context->Lock_context,
     &_Semaphore_Information
   );
 }
 
+#if defined(RTEMS_MULTIPROCESSING)
+  #define _Semaphore_Get( id, queue_context, mp_callout ) \
+    _Semaphore_Do_get( id, queue_context, mp_callout )
+#else
+  #define _Semaphore_Get( id, queue_context, mp_callout ) \
+    _Semaphore_Do_get( id, queue_context )
+#endif
+
 #ifdef __cplusplus
 }
 #endif
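
_Semaphore_Get() follows the same scheme, and the call sites later in this patch choose the callout to match the operation: obtain paths, which can only block the executing thread, pass NULL, while release and delete paths pass the MP support routine that notifies a remote proxy. Both forms are taken verbatim from the callers below:

    /* Obtain: no remote thread can be unblocked */
    the_semaphore = _Semaphore_Get( id, &queue_context, NULL );

    /* Release: a remote proxy may have to be unblocked */
    the_semaphore = _Semaphore_Get(
      id,
      &queue_context,
      _Semaphore_Core_mutex_mp_support
    );
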
diff --git a/cpukit/rtems/src/barrierdelete.c b/cpukit/rtems/src/barrierdelete.c
index 05d6a3f..3d57908 100644
--- a/cpukit/rtems/src/barrierdelete.c
+++ b/cpukit/rtems/src/barrierdelete.c
@@ -24,20 +24,20 @@ rtems_status_code rtems_barrier_delete(
   rtems_id   id
 )
 {
-  Barrier_Control  *the_barrier;
-  ISR_lock_Context  lock_context;
+  Barrier_Control      *the_barrier;
+  Thread_queue_Context  queue_context;
 
   _Objects_Allocator_lock();
-  the_barrier = _Barrier_Get( id, &lock_context );
+  the_barrier = _Barrier_Get( id, &queue_context );
 
   if ( the_barrier == NULL ) {
     _Objects_Allocator_unlock();
     return RTEMS_INVALID_ID;
   }
 
-  _CORE_barrier_Acquire_critical( &the_barrier->Barrier, &lock_context );
+  _CORE_barrier_Acquire_critical( &the_barrier->Barrier, &queue_context );
   _Objects_Close( &_Barrier_Information, &the_barrier->Object );
-  _CORE_barrier_Flush( &the_barrier->Barrier, NULL, &lock_context );
+  _CORE_barrier_Flush( &the_barrier->Barrier, &queue_context );
   _Barrier_Free( the_barrier );
   _Objects_Allocator_unlock();
   return RTEMS_SUCCESSFUL;
diff --git a/cpukit/rtems/src/barrierrelease.c b/cpukit/rtems/src/barrierrelease.c
index 2c7d222..2c7f72d 100644
--- a/cpukit/rtems/src/barrierrelease.c
+++ b/cpukit/rtems/src/barrierrelease.c
@@ -25,24 +25,23 @@ rtems_status_code rtems_barrier_release(
   uint32_t         *released
 )
 {
-  Barrier_Control  *the_barrier;
-  ISR_lock_Context  lock_context;
+  Barrier_Control      *the_barrier;
+  Thread_queue_Context  queue_context;
 
   if ( released == NULL ) {
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_barrier = _Barrier_Get( id, &lock_context );
+  the_barrier = _Barrier_Get( id, &queue_context );
 
   if ( the_barrier == NULL ) {
     return RTEMS_INVALID_ID;
   }
 
-  _CORE_barrier_Acquire_critical( &the_barrier->Barrier, &lock_context );
+  _CORE_barrier_Acquire_critical( &the_barrier->Barrier, &queue_context );
   *released = _CORE_barrier_Surrender(
     &the_barrier->Barrier,
-    NULL,
-    &lock_context
+    &queue_context
   );
   return RTEMS_SUCCESSFUL;
 }
diff --git a/cpukit/rtems/src/barrierwait.c b/cpukit/rtems/src/barrierwait.c
index 7f463aa..6e13375 100644
--- a/cpukit/rtems/src/barrierwait.c
+++ b/cpukit/rtems/src/barrierwait.c
@@ -27,11 +27,11 @@ rtems_status_code rtems_barrier_wait(
   rtems_interval  timeout
 )
 {
-  Barrier_Control  *the_barrier;
-  ISR_lock_Context  lock_context;
-  Thread_Control   *executing;
+  Barrier_Control      *the_barrier;
+  Thread_queue_Context  queue_context;
+  Thread_Control       *executing;
 
-  the_barrier = _Barrier_Get( id, &lock_context );
+  the_barrier = _Barrier_Get( id, &queue_context );
 
   if ( the_barrier == NULL ) {
     return RTEMS_INVALID_ID;
@@ -43,8 +43,7 @@ rtems_status_code rtems_barrier_wait(
     executing,
     true,
     timeout,
-    NULL,
-    &lock_context
+    &queue_context
   );
   return _Barrier_Translate_core_barrier_return_code(
     executing->Wait.return_code
diff --git a/cpukit/rtems/src/msgmp.c b/cpukit/rtems/src/msgmp.c
index c336ba4..d8077a7 100644
--- a/cpukit/rtems/src/msgmp.c
+++ b/cpukit/rtems/src/msgmp.c
@@ -576,17 +576,7 @@ void _Message_queue_MP_Send_extract_proxy (
   );
 }
 
-/*
- *  _Message_queue_Core_message_queue_mp_support
- *
- *  Input parameters:
- *    the_thread - the remote thread the message was submitted to
- *    id         - id of the message queue
- *
- *  Output parameters: NONE
- */
-
-void  _Message_queue_Core_message_queue_mp_support (
+void  _Message_queue_Core_message_queue_mp_support(
   Thread_Control *the_thread,
   Objects_Id      id
 )
diff --git a/cpukit/rtems/src/msgqbroadcast.c b/cpukit/rtems/src/msgqbroadcast.c
index 45d9097..75f9d60 100644
--- a/cpukit/rtems/src/msgqbroadcast.c
+++ b/cpukit/rtems/src/msgqbroadcast.c
@@ -28,7 +28,7 @@ rtems_status_code rtems_message_queue_broadcast(
 )
 {
   Message_queue_Control     *the_message_queue;
-  ISR_lock_Context           lock_context;
+  Thread_queue_Context       queue_context;
   CORE_message_queue_Status  status;
 
   if ( buffer == NULL ) {
@@ -39,7 +39,11 @@ rtems_status_code rtems_message_queue_broadcast(
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get(
+    id,
+    &queue_context,
+    _Message_queue_Core_message_queue_mp_support
+  );
 
   if ( the_message_queue == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -53,9 +57,8 @@ rtems_status_code rtems_message_queue_broadcast(
     &the_message_queue->message_queue,
     buffer,
     size,
-    _Message_queue_Core_message_queue_mp_support,
     count,
-    &lock_context
+    &queue_context
   );
   return _Message_queue_Translate_core_message_queue_return_code( status );
 }
diff --git a/cpukit/rtems/src/msgqdelete.c b/cpukit/rtems/src/msgqdelete.c
index c39cfea..94175e6 100644
--- a/cpukit/rtems/src/msgqdelete.c
+++ b/cpukit/rtems/src/msgqdelete.c
@@ -26,10 +26,14 @@ rtems_status_code rtems_message_queue_delete(
 )
 {
   Message_queue_Control *the_message_queue;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
 
   _Objects_Allocator_lock();
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get(
+    id,
+    &queue_context,
+    _Message_queue_MP_Send_object_was_deleted
+  );
 
   if ( the_message_queue == NULL ) {
     _Objects_Allocator_unlock();
@@ -45,15 +49,14 @@ rtems_status_code rtems_message_queue_delete(
 
   _CORE_message_queue_Acquire_critical(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
 
   _Objects_Close( &_Message_queue_Information, &the_message_queue->Object );
 
   _CORE_message_queue_Close(
     &the_message_queue->message_queue,
-    _Message_queue_MP_Send_object_was_deleted,
-    &lock_context
+    &queue_context
   );
 
 #if defined(RTEMS_MULTIPROCESSING)
diff --git a/cpukit/rtems/src/msgqflush.c b/cpukit/rtems/src/msgqflush.c
index baca176..b4419d1 100644
--- a/cpukit/rtems/src/msgqflush.c
+++ b/cpukit/rtems/src/msgqflush.c
@@ -26,13 +26,13 @@ rtems_status_code rtems_message_queue_flush(
 )
 {
   Message_queue_Control *the_message_queue;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
 
   if ( count == NULL ) {
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get( id, &queue_context, NULL );
 
   if ( the_message_queue == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -44,7 +44,7 @@ rtems_status_code rtems_message_queue_flush(
 
   *count = _CORE_message_queue_Flush(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
   return RTEMS_SUCCESSFUL;
 }
diff --git a/cpukit/rtems/src/msgqgetnumberpending.c b/cpukit/rtems/src/msgqgetnumberpending.c
index f655c0b..d2f7820 100644
--- a/cpukit/rtems/src/msgqgetnumberpending.c
+++ b/cpukit/rtems/src/msgqgetnumberpending.c
@@ -26,13 +26,13 @@ rtems_status_code rtems_message_queue_get_number_pending(
 )
 {
   Message_queue_Control *the_message_queue;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
 
   if ( count == NULL ) {
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get( id, &queue_context, NULL );
 
   if ( the_message_queue == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -44,12 +44,12 @@ rtems_status_code rtems_message_queue_get_number_pending(
 
   _CORE_message_queue_Acquire_critical(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
   *count = the_message_queue->message_queue.number_of_pending_messages;
   _CORE_message_queue_Release(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
   return RTEMS_SUCCESSFUL;
 }
diff --git a/cpukit/rtems/src/msgqreceive.c b/cpukit/rtems/src/msgqreceive.c
index dada8df..6ed3d25 100644
--- a/cpukit/rtems/src/msgqreceive.c
+++ b/cpukit/rtems/src/msgqreceive.c
@@ -32,7 +32,7 @@ rtems_status_code rtems_message_queue_receive(
 )
 {
   Message_queue_Control *the_message_queue;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
   Thread_Control        *executing;
 
   if ( buffer == NULL ) {
@@ -43,7 +43,7 @@ rtems_status_code rtems_message_queue_receive(
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get( id, &queue_context, NULL );
 
   if ( the_message_queue == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -55,7 +55,7 @@ rtems_status_code rtems_message_queue_receive(
 
   _CORE_message_queue_Acquire_critical(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
 
   executing = _Thread_Executing;
@@ -66,7 +66,7 @@ rtems_status_code rtems_message_queue_receive(
     size,
     !_Options_Is_no_wait( option_set ),
     timeout,
-    &lock_context
+    &queue_context
   );
   return _Message_queue_Translate_core_message_queue_return_code(
     executing->Wait.return_code
diff --git a/cpukit/rtems/src/msgqsend.c b/cpukit/rtems/src/msgqsend.c
index c306deb..c5bde9d 100644
--- a/cpukit/rtems/src/msgqsend.c
+++ b/cpukit/rtems/src/msgqsend.c
@@ -27,14 +27,18 @@ rtems_status_code rtems_message_queue_send(
 )
 {
   Message_queue_Control     *the_message_queue;
-  ISR_lock_Context           lock_context;
+  Thread_queue_Context       queue_context;
   CORE_message_queue_Status  status;
 
   if ( buffer == NULL ) {
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get(
+    id,
+    &queue_context,
+    _Message_queue_Core_message_queue_mp_support
+  );
 
   if ( the_message_queue == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -46,16 +50,15 @@ rtems_status_code rtems_message_queue_send(
 
   _CORE_message_queue_Acquire_critical(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
   status = _CORE_message_queue_Send(
     &the_message_queue->message_queue,
     buffer,
     size,
-    _Message_queue_Core_message_queue_mp_support,
     false,   /* sender does not block */
     0,       /* no timeout */
-    &lock_context
+    &queue_context
   );
 
   /*
diff --git a/cpukit/rtems/src/msgqurgent.c b/cpukit/rtems/src/msgqurgent.c
index 56522a1..add5f09 100644
--- a/cpukit/rtems/src/msgqurgent.c
+++ b/cpukit/rtems/src/msgqurgent.c
@@ -27,14 +27,18 @@ rtems_status_code rtems_message_queue_urgent(
 )
 {
   Message_queue_Control     *the_message_queue;
-  ISR_lock_Context           lock_context;
+  Thread_queue_Context       queue_context;
   CORE_message_queue_Status  status;
 
   if ( buffer == NULL ) {
     return RTEMS_INVALID_ADDRESS;
   }
 
-  the_message_queue = _Message_queue_Get( id, &lock_context );
+  the_message_queue = _Message_queue_Get(
+    id,
+    &queue_context,
+    _Message_queue_Core_message_queue_mp_support
+  );
 
   if ( the_message_queue == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -46,16 +50,15 @@ rtems_status_code rtems_message_queue_urgent(
 
   _CORE_message_queue_Acquire_critical(
     &the_message_queue->message_queue,
-    &lock_context
+    &queue_context
   );
   status = _CORE_message_queue_Urgent(
     &the_message_queue->message_queue,
     buffer,
     size,
-    _Message_queue_Core_message_queue_mp_support,
     false,   /* sender does not block */
     0,       /* no timeout */
-    &lock_context
+    &queue_context
   );
 
   /*
diff --git a/cpukit/rtems/src/semdelete.c b/cpukit/rtems/src/semdelete.c
index cf22838..023a57c 100644
--- a/cpukit/rtems/src/semdelete.c
+++ b/cpukit/rtems/src/semdelete.c
@@ -25,12 +25,16 @@ rtems_status_code rtems_semaphore_delete(
   rtems_id   id
 )
 {
-  Semaphore_Control *the_semaphore;
-  ISR_lock_Context   lock_context;
-  rtems_attribute    attribute_set;
+  Semaphore_Control    *the_semaphore;
+  Thread_queue_Context  queue_context;
+  rtems_attribute       attribute_set;
 
   _Objects_Allocator_lock();
-  the_semaphore = _Semaphore_Get( id, &lock_context );
+  the_semaphore = _Semaphore_Get(
+    id,
+    &queue_context,
+    _Semaphore_MP_Send_object_was_deleted
+  );
 
   if ( the_semaphore == NULL ) {
     _Objects_Allocator_unlock();
@@ -52,13 +56,13 @@ rtems_status_code rtems_semaphore_delete(
 
     _MRSP_Acquire_critical(
       &the_semaphore->Core_control.mrsp,
-      &lock_context
+      &queue_context
     );
     mrsp_status = _MRSP_Can_destroy( &the_semaphore->Core_control.mrsp );
     if ( mrsp_status != MRSP_SUCCESSFUL ) {
       _MRSP_Release(
         &the_semaphore->Core_control.mrsp,
-        &lock_context
+        &queue_context
       );
       _Objects_Allocator_unlock();
       return _Semaphore_Translate_MRSP_status_code( mrsp_status );
@@ -68,7 +72,7 @@ rtems_status_code rtems_semaphore_delete(
   if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
     _CORE_mutex_Acquire_critical(
       &the_semaphore->Core_control.mutex,
-      &lock_context
+      &queue_context
     );
 
     if (
@@ -77,7 +81,7 @@ rtems_status_code rtems_semaphore_delete(
     ) {
       _CORE_mutex_Release(
         &the_semaphore->Core_control.mutex,
-        &lock_context
+        &queue_context
       );
       _Objects_Allocator_unlock();
       return RTEMS_RESOURCE_IN_USE;
@@ -85,7 +89,7 @@ rtems_status_code rtems_semaphore_delete(
   } else {
     _CORE_semaphore_Acquire_critical(
       &the_semaphore->Core_control.semaphore,
-      &lock_context
+      &queue_context
     );
   }
 
@@ -93,22 +97,20 @@ rtems_status_code rtems_semaphore_delete(
 
 #if defined(RTEMS_SMP)
   if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
-    _MRSP_Destroy( &the_semaphore->Core_control.mrsp, &lock_context );
+    _MRSP_Destroy( &the_semaphore->Core_control.mrsp, &queue_context );
   } else
 #endif
   if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
     _CORE_mutex_Flush(
       &the_semaphore->Core_control.mutex,
       _CORE_mutex_Was_deleted,
-      _Semaphore_MP_Send_object_was_deleted,
-      &lock_context
+      &queue_context
     );
     _CORE_mutex_Destroy( &the_semaphore->Core_control.mutex );
   } else {
     _CORE_semaphore_Destroy(
       &the_semaphore->Core_control.semaphore,
-      _Semaphore_MP_Send_object_was_deleted,
-      &lock_context
+      &queue_context
     );
   }
 
diff --git a/cpukit/rtems/src/semflush.c b/cpukit/rtems/src/semflush.c
index 73e9ad0..b18de6d 100644
--- a/cpukit/rtems/src/semflush.c
+++ b/cpukit/rtems/src/semflush.c
@@ -23,11 +23,15 @@
 
 rtems_status_code rtems_semaphore_flush( rtems_id id )
 {
-  Semaphore_Control *the_semaphore;
-  ISR_lock_Context   lock_context;
-  rtems_attribute    attribute_set;
+  Semaphore_Control    *the_semaphore;
+  Thread_queue_Context  queue_context;
+  rtems_attribute       attribute_set;
 
-  the_semaphore = _Semaphore_Get( id, &lock_context );
+  the_semaphore = _Semaphore_Get(
+    id,
+    &queue_context,
+    _Semaphore_MP_Send_object_was_deleted
+  );
 
   if ( the_semaphore == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -43,30 +47,28 @@ rtems_status_code rtems_semaphore_flush( rtems_id id )
 
 #if defined(RTEMS_SMP)
   if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
-    _ISR_lock_ISR_enable( &lock_context );
+    _ISR_lock_ISR_enable( &queue_context.Lock_context );
     return RTEMS_NOT_DEFINED;
   } else
 #endif
   if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
     _CORE_mutex_Acquire_critical(
       &the_semaphore->Core_control.mutex,
-      &lock_context
+      &queue_context
     );
     _CORE_mutex_Flush(
       &the_semaphore->Core_control.mutex,
       _CORE_mutex_Unsatisfied_nowait,
-      _Semaphore_MP_Send_object_was_deleted,
-      &lock_context
+      &queue_context
     );
   } else {
     _CORE_semaphore_Acquire_critical(
       &the_semaphore->Core_control.semaphore,
-      &lock_context
+      &queue_context
     );
     _CORE_semaphore_Flush(
       &the_semaphore->Core_control.semaphore,
-      _Semaphore_MP_Send_object_was_deleted,
-      &lock_context
+      &queue_context
     );
   }
   return RTEMS_SUCCESSFUL;
diff --git a/cpukit/rtems/src/semobtain.c b/cpukit/rtems/src/semobtain.c
index 3cf780a..6d994f4 100644
--- a/cpukit/rtems/src/semobtain.c
+++ b/cpukit/rtems/src/semobtain.c
@@ -38,13 +38,13 @@ rtems_status_code rtems_semaphore_obtain(
   rtems_interval  timeout
 )
 {
-  Semaphore_Control *the_semaphore;
-  ISR_lock_Context   lock_context;
-  Thread_Control    *executing;
-  rtems_attribute    attribute_set;
-  bool               wait;
+  Semaphore_Control    *the_semaphore;
+  Thread_queue_Context  queue_context;
+  Thread_Control       *executing;
+  rtems_attribute       attribute_set;
+  bool                  wait;
 
-  the_semaphore = _Semaphore_Get( id, &lock_context );
+  the_semaphore = _Semaphore_Get( id, &queue_context, NULL );
 
   if ( the_semaphore == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -66,7 +66,7 @@ rtems_status_code rtems_semaphore_obtain(
       executing,
       wait,
       timeout,
-      &lock_context
+      &queue_context
     );
     return _Semaphore_Translate_MRSP_status_code( mrsp_status );
   } else
@@ -77,7 +77,7 @@ rtems_status_code rtems_semaphore_obtain(
       executing,
       wait,
       timeout,
-      &lock_context
+      &queue_context
     );
     return _Semaphore_Translate_core_mutex_return_code(
       executing->Wait.return_code
@@ -90,7 +90,7 @@ rtems_status_code rtems_semaphore_obtain(
     executing,
     wait,
     timeout,
-    &lock_context
+    &queue_context
   );
   return _Semaphore_Translate_core_semaphore_return_code(
     executing->Wait.return_code
diff --git a/cpukit/rtems/src/semrelease.c b/cpukit/rtems/src/semrelease.c
index c39455c..197e4d8 100644
--- a/cpukit/rtems/src/semrelease.c
+++ b/cpukit/rtems/src/semrelease.c
@@ -30,9 +30,13 @@ rtems_status_code rtems_semaphore_release( rtems_id id )
   CORE_mutex_Status      mutex_status;
   CORE_semaphore_Status  semaphore_status;
   rtems_attribute        attribute_set;
-  ISR_lock_Context       lock_context;
+  Thread_queue_Context   queue_context;
 
-  the_semaphore = _Semaphore_Get( id, &lock_context );
+  the_semaphore = _Semaphore_Get(
+    id,
+    &queue_context,
+    _Semaphore_Core_mutex_mp_support
+  );
 
   if ( the_semaphore == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -50,7 +54,7 @@ rtems_status_code rtems_semaphore_release( rtems_id id )
     mrsp_status = _MRSP_Surrender(
       &the_semaphore->Core_control.mrsp,
       _Thread_Executing,
-      &lock_context
+      &queue_context
     );
     return _Semaphore_Translate_MRSP_status_code( mrsp_status );
   } else
@@ -58,15 +62,13 @@ rtems_status_code rtems_semaphore_release( rtems_id id )
   if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
     mutex_status = _CORE_mutex_Surrender(
       &the_semaphore->Core_control.mutex,
-      _Semaphore_Core_mutex_mp_support,
-      &lock_context
+      &queue_context
     );
     return _Semaphore_Translate_core_mutex_return_code( mutex_status );
   } else {
     semaphore_status = _CORE_semaphore_Surrender(
       &the_semaphore->Core_control.semaphore,
-      _Semaphore_Core_mutex_mp_support,
-      &lock_context
+      &queue_context
     );
     return _Semaphore_Translate_core_semaphore_return_code( semaphore_status );
   }
diff --git a/cpukit/rtems/src/semsetpriority.c b/cpukit/rtems/src/semsetpriority.c
index 18fb66a..4deee3d 100644
--- a/cpukit/rtems/src/semsetpriority.c
+++ b/cpukit/rtems/src/semsetpriority.c
@@ -22,11 +22,11 @@
 #include <rtems/score/schedulerimpl.h>
 
 static rtems_status_code _Semaphore_Set_priority(
-  Semaphore_Control   *the_semaphore,
-  rtems_id             scheduler_id,
-  rtems_task_priority  new_priority,
-  rtems_task_priority *old_priority_p,
-  ISR_lock_Context    *lock_context
+  Semaphore_Control    *the_semaphore,
+  rtems_id              scheduler_id,
+  rtems_task_priority   new_priority,
+  rtems_task_priority  *old_priority_p,
+  Thread_queue_Context *queue_context
 )
 {
   rtems_status_code   sc;
@@ -40,7 +40,7 @@ static rtems_status_code _Semaphore_Set_priority(
     MRSP_Control *mrsp = &the_semaphore->Core_control.mrsp;
     uint32_t scheduler_index = _Scheduler_Get_index_by_id( scheduler_id );
 
-    _MRSP_Acquire_critical( mrsp, lock_context );
+    _MRSP_Acquire_critical( mrsp, queue_context );
 
     old_priority = _MRSP_Get_ceiling_priority( mrsp, scheduler_index );
 
@@ -48,7 +48,7 @@ static rtems_status_code _Semaphore_Set_priority(
       _MRSP_Set_ceiling_priority( mrsp, scheduler_index, new_priority );
     }
 
-    _MRSP_Release( mrsp, lock_context );
+    _MRSP_Release( mrsp, queue_context );
 
     sc = RTEMS_SUCCESSFUL;
   } else
@@ -56,7 +56,7 @@ static rtems_status_code _Semaphore_Set_priority(
   if ( _Attributes_Is_priority_ceiling( attribute_set ) ) {
     CORE_mutex_Control *mutex = &the_semaphore->Core_control.mutex;
 
-    _CORE_mutex_Acquire_critical( mutex, lock_context );
+    _CORE_mutex_Acquire_critical( mutex, queue_context );
 
     old_priority = mutex->Attributes.priority_ceiling;
 
@@ -64,11 +64,11 @@ static rtems_status_code _Semaphore_Set_priority(
       mutex->Attributes.priority_ceiling = new_priority;
     }
 
-    _CORE_mutex_Release( mutex, lock_context );
+    _CORE_mutex_Release( mutex, queue_context );
 
     sc = RTEMS_SUCCESSFUL;
   } else {
-    _ISR_lock_ISR_enable( lock_context );
+    _ISR_lock_ISR_enable( &queue_context->Lock_context );
 
     old_priority = 0;
 
@@ -87,8 +87,8 @@ rtems_status_code rtems_semaphore_set_priority(
   rtems_task_priority *old_priority
 )
 {
-  Semaphore_Control *the_semaphore;
-  ISR_lock_Context   lock_context;
+  Semaphore_Control    *the_semaphore;
+  Thread_queue_Context  queue_context;
 
   if ( new_priority != RTEMS_CURRENT_PRIORITY &&
        !_RTEMS_tasks_Priority_is_valid( new_priority ) ) {
@@ -103,7 +103,7 @@ rtems_status_code rtems_semaphore_set_priority(
     return RTEMS_INVALID_ID;
   }
 
-  the_semaphore = _Semaphore_Get( semaphore_id, &lock_context );
+  the_semaphore = _Semaphore_Get( semaphore_id, &queue_context, NULL );
 
   if ( the_semaphore == NULL ) {
 #if defined(RTEMS_MULTIPROCESSING)
@@ -120,6 +120,6 @@ rtems_status_code rtems_semaphore_set_priority(
     scheduler_id,
     new_priority,
     old_priority,
-    &lock_context
+    &queue_context
   );
 }
diff --git a/cpukit/score/include/rtems/score/corebarrierimpl.h b/cpukit/score/include/rtems/score/corebarrierimpl.h
index d5133cd..051990e 100644
--- a/cpukit/score/include/rtems/score/corebarrierimpl.h
+++ b/cpukit/score/include/rtems/score/corebarrierimpl.h
@@ -84,31 +84,26 @@ RTEMS_INLINE_ROUTINE void _CORE_barrier_Destroy(
 
 RTEMS_INLINE_ROUTINE void _CORE_barrier_Acquire_critical(
   CORE_barrier_Control *the_barrier,
-  ISR_lock_Context     *lock_context
+  Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Acquire_critical( &the_barrier->Wait_queue, lock_context );
+  _Thread_queue_Acquire_critical(
+    &the_barrier->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_barrier_Release(
   CORE_barrier_Control *the_barrier,
-  ISR_lock_Context     *lock_context
+  Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Release( &the_barrier->Wait_queue, lock_context );
+  _Thread_queue_Release(
+    &the_barrier->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
-void _CORE_barrier_Do_seize(
-  CORE_barrier_Control    *the_barrier,
-  Thread_Control          *executing,
-  bool                     wait,
-  Watchdog_Interval        timeout,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout  mp_callout,
-#endif
-  ISR_lock_Context        *lock_context
-);
-
 /**
  *  @brief Wait for the barrier.
  *
@@ -127,48 +122,18 @@ void _CORE_barrier_Do_seize(
  *
  * @note Status is returned via the thread control block.
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_barrier_Seize( \
-    the_barrier, \
-    executing, \
-    wait, \
-    timeout, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_barrier_Do_seize( \
-      the_barrier, \
-      executing, \
-      wait, \
-      timeout, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _CORE_barrier_Seize( \
-    the_barrier, \
-    executing, \
-    wait, \
-    timeout, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_barrier_Do_seize( \
-      the_barrier, \
-      executing, \
-      wait, \
-      timeout, \
-      lock_context \
-    )
-#endif
+void _CORE_barrier_Seize(
+  CORE_barrier_Control *the_barrier,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
+);
 
-uint32_t _CORE_barrier_Do_surrender(
+uint32_t _CORE_barrier_Do_flush(
   CORE_barrier_Control      *the_barrier,
   Thread_queue_Flush_filter  filter,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout    mp_callout,
-#endif
-  ISR_lock_Context          *lock_context
+  Thread_queue_Context      *queue_context
 );
 
 /**
@@ -183,62 +148,35 @@ uint32_t _CORE_barrier_Do_surrender(
  *
  *  @retval the number of unblocked threads
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_barrier_Surrender( \
-    the_barrier, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_barrier_Do_surrender( \
-      the_barrier, \
-      _Thread_queue_Flush_default_filter, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _CORE_barrier_Surrender( \
-    the_barrier, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_barrier_Do_surrender( \
-      the_barrier, \
-      _Thread_queue_Flush_default_filter, \
-      lock_context \
-    )
-#endif
+RTEMS_INLINE_ROUTINE uint32_t _CORE_barrier_Surrender(
+  CORE_barrier_Control *the_barrier,
+  Thread_queue_Context *queue_context
+)
+{
+  return _CORE_barrier_Do_flush(
+    the_barrier,
+    _Thread_queue_Flush_default_filter,
+    queue_context
+  );
+}
 
 Thread_Control *_CORE_barrier_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
-/* Must be a macro due to the multiprocessing dependent parameters */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_barrier_Flush( \
-    the_barrier, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_barrier_Do_surrender( \
-      the_barrier, \
-      _CORE_barrier_Was_deleted, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _CORE_barrier_Flush( \
-    the_barrier, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_barrier_Do_surrender( \
-      the_barrier, \
-      _CORE_barrier_Was_deleted, \
-      lock_context \
-    )
-#endif
+RTEMS_INLINE_ROUTINE void _CORE_barrier_Flush(
+  CORE_barrier_Control *the_barrier,
+  Thread_queue_Context *queue_context
+)
+{
+  _CORE_barrier_Do_flush(
+    the_barrier,
+    _CORE_barrier_Was_deleted,
+    queue_context
+  );
+}
 
 /**
  * This function returns true if the automatic release attribute is
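
With the MP callout carried by the context, the barrier surrender and flush operations collapse into a single helper: _CORE_barrier_Surrender() and _CORE_barrier_Flush() are now inline wrappers around _CORE_barrier_Do_flush() and differ only in the flush filter they pass. A sketch of the two call forms, assuming the barrier lock was taken with _CORE_barrier_Acquire_critical() and the locals are as in the callers above:

    /* Normal release: uses _Thread_queue_Flush_default_filter internally */
    released = _CORE_barrier_Surrender( &the_barrier->Barrier, &queue_context );

    /* Deletion: uses the _CORE_barrier_Was_deleted filter internally */
    _CORE_barrier_Flush( &the_barrier->Barrier, &queue_context );
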
diff --git a/cpukit/score/include/rtems/score/coremsg.h b/cpukit/score/include/rtems/score/coremsg.h
index af42e7d..8d25529 100644
--- a/cpukit/score/include/rtems/score/coremsg.h
+++ b/cpukit/score/include/rtems/score/coremsg.h
@@ -122,7 +122,7 @@ typedef enum {
    */
   typedef void (*CORE_message_queue_Notify_Handler)(
     CORE_message_queue_Control *,
-    ISR_lock_Context *
+    Thread_queue_Context *
   );
 #endif
 
diff --git a/cpukit/score/include/rtems/score/coremsgimpl.h b/cpukit/score/include/rtems/score/coremsgimpl.h
index 4113aa6..30abe04 100644
--- a/cpukit/score/include/rtems/score/coremsgimpl.h
+++ b/cpukit/score/include/rtems/score/coremsgimpl.h
@@ -126,14 +126,6 @@ bool _CORE_message_queue_Initialize(
   size_t                          maximum_message_size
 );
 
-void _CORE_message_queue_Do_close(
-  CORE_message_queue_Control *the_message_queue,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout     mp_callout,
-#endif
-  ISR_lock_Context           *lock_context
-);
-
 /**
  *  @brief Close a message queue.
  *
@@ -145,33 +137,13 @@ void _CORE_message_queue_Do_close(
  *  flushing @a the_message_queue's task wait queue.
  *
  *  @param[in] the_message_queue points to the message queue to close
- *  @param[in] mp_callout is the routine to call for each thread
- *         that is extracted from the set of waiting threads
- *  @param[in] lock_context The lock context of the
+ *  @param[in] queue_context The thread queue context used for
  *    _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_message_queue_Close( \
-    the_message_queue, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_close( \
-      the_message_queue, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _CORE_message_queue_Close( \
-    the_message_queue, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_close( \
-      the_message_queue, \
-      lock_context \
-    )
-#endif
+void _CORE_message_queue_Close(
+  CORE_message_queue_Control *the_message_queue,
+  Thread_queue_Context       *queue_context
+);
 
 /**
  *  @brief Flush pending messages.
@@ -184,13 +156,13 @@ void _CORE_message_queue_Do_close(
  *  number of messages flushed from the queue is returned.
  *
  *  @param[in] the_message_queue points to the message queue to flush
- *  @param[in] lock_context The lock context of the interrupt disable.
+ *  @param[in] queue_context The thread queue context with interrupts disabled.
  *
  *  @retval This method returns the number of message pending messages flushed.
  */
 uint32_t   _CORE_message_queue_Flush(
   CORE_message_queue_Control *the_message_queue,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 );
 
 #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
@@ -210,17 +182,6 @@ uint32_t   _CORE_message_queue_Flush(
   );
 #endif
 
-CORE_message_queue_Status _CORE_message_queue_Do_broadcast(
-  CORE_message_queue_Control *the_message_queue,
-  const void                 *buffer,
-  size_t                      size,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout     mp_callout,
-#endif
-  uint32_t                   *count,
-  ISR_lock_Context           *lock_context
-);
-
 /**
  *  @brief Broadcast a message to the message queue.
  *
@@ -234,61 +195,19 @@ CORE_message_queue_Status _CORE_message_queue_Do_broadcast(
  *  @param[in] the_message_queue points to the message queue
  *  @param[in] buffer is the starting address of the message to broadcast
  *  @param[in] size is the size of the message being broadcast
- *  @param[in] mp_callout is the routine to invoke if
- *         a thread that is unblocked is actually a remote thread.
  *  @param[out] count points to the variable that will contain the
  *         number of tasks that are sent this message
- *  @param[in] lock_context The lock context of the interrupt disable.
+ *  @param[in] queue_context The thread queue context used for
+ *    _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
  *  @retval @a *count will contain the number of messages sent
  *  @retval indication of the successful completion or reason for failure
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_message_queue_Broadcast( \
-    the_message_queue, \
-    buffer, \
-    size, \
-    mp_callout, \
-    count, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_broadcast( \
-      the_message_queue, \
-      buffer, \
-      size, \
-      mp_callout, \
-      count, \
-      lock_context \
-    )
-#else
-  #define _CORE_message_queue_Broadcast( \
-    the_message_queue, \
-    buffer, \
-    size, \
-    mp_callout, \
-    count, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_broadcast( \
-      the_message_queue, \
-      buffer, \
-      size, \
-      count, \
-      lock_context \
-    )
-#endif
-
-CORE_message_queue_Status _CORE_message_queue_Do_submit(
-  CORE_message_queue_Control       *the_message_queue,
-  Thread_Control                   *executing,
-  const void                       *buffer,
-  size_t                            size,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout           mp_callout,
-#endif
-  CORE_message_queue_Submit_types   submit_type,
-  bool                              wait,
-  Watchdog_Interval                 timeout,
-  ISR_lock_Context                 *lock_context
+CORE_message_queue_Status _CORE_message_queue_Broadcast(
+  CORE_message_queue_Control *the_message_queue,
+  const void                 *buffer,
+  size_t                      size,
+  uint32_t                   *count,
+  Thread_queue_Context       *queue_context
 );
 
 /**
@@ -304,63 +223,26 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
  *  @param[in] the_message_queue points to the message queue
  *  @param[in] buffer is the starting address of the message to send
  *  @param[in] size is the size of the message being send
- *  @param[in] mp_callout is the routine to invoke if
- *         a thread that is unblocked is actually a remote thread.
  *  @param[in] submit_type determines whether the message is prepended,
  *         appended, or enqueued in priority order.
  *  @param[in] wait indicates whether the calling thread is willing to block
  *         if the message queue is full.
  *  @param[in] timeout is the maximum number of clock ticks that the calling
  *         thread is willing to block if the message queue is full.
- *  @param[in] lock_context The lock context of the interrupt disable.
+ *  @param[in] queue_context The thread queue context used for
+ *    _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
  *  @retval indication of the successful completion or reason for failure
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_message_queue_Submit( \
-    the_message_queue, \
-    executing, \
-    buffer, \
-    size, \
-    mp_callout, \
-    submit_type, \
-    wait, \
-    timeout, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_submit( \
-      the_message_queue, \
-      executing, \
-      buffer, \
-      size, \
-      mp_callout, \
-      submit_type, \
-      wait, \
-      timeout, \
-      lock_context \
-    )
-#else
-  #define _CORE_message_queue_Submit( \
-    the_message_queue, \
-    executing, \
-    buffer, \
-    size, \
-    mp_callout, \
-    submit_type, \
-    wait, \
-    timeout, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_submit( \
-      the_message_queue, \
-      executing, \
-      buffer, \
-      size, \
-      submit_type, \
-      wait, \
-      timeout, \
-      lock_context \
-    )
-#endif
+CORE_message_queue_Status _CORE_message_queue_Submit(
+  CORE_message_queue_Control       *the_message_queue,
+  Thread_Control                   *executing,
+  const void                       *buffer,
+  size_t                            size,
+  CORE_message_queue_Submit_types   submit_type,
+  bool                              wait,
+  Watchdog_Interval                 timeout,
+  Thread_queue_Context             *queue_context
+);
 
 /**
  *  @brief Size a message from the message queue.
@@ -383,7 +265,8 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
  *         if the message queue is empty.
  *  @param[in] timeout is the maximum number of clock ticks that the calling
  *         thread is willing to block if the message queue is empty.
- *  @param[in] lock_context The lock context of the interrupt disable.
+ *  @param[in] queue_context The thread queue context used for
+ *    _CORE_message_queue_Acquire() or _CORE_message_queue_Acquire_critical().
  *
  *  @retval indication of the successful completion or reason for failure.
  *          On success, the location pointed to @a size_p will contain the
@@ -396,13 +279,13 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
  *    + wait
  */
 void _CORE_message_queue_Seize(
-  CORE_message_queue_Control      *the_message_queue,
-  Thread_Control                  *executing,
-  void                            *buffer,
-  size_t                          *size_p,
-  bool                             wait,
-  Watchdog_Interval                timeout,
-  ISR_lock_Context                *lock_context
+  CORE_message_queue_Control *the_message_queue,
+  Thread_Control             *executing,
+  void                       *buffer,
+  size_t                     *size_p,
+  bool                        wait,
+  Watchdog_Interval           timeout,
+  Thread_queue_Context       *queue_context
 );
 
 /**
@@ -426,76 +309,79 @@ void _CORE_message_queue_Insert_message(
   CORE_message_queue_Submit_types    submit_type
 );
 
-/**
- * This routine sends a message to the end of the specified message queue.
- */
-#define _CORE_message_queue_Send( \
-  the_message_queue, \
-  buffer, \
-  size, \
-  mp_callout, \
-  wait, \
-  timeout, \
-  lock_context \
-) \
-  _CORE_message_queue_Submit( \
-    the_message_queue, \
-    _Thread_Executing, \
-    buffer, \
-    size, \
-    mp_callout, \
-    CORE_MESSAGE_QUEUE_SEND_REQUEST, \
-    wait, \
-    timeout, \
-    lock_context \
-  )
+RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Send(
+  CORE_message_queue_Control       *the_message_queue,
+  const void                       *buffer,
+  size_t                            size,
+  bool                              wait,
+  Watchdog_Interval                 timeout,
+  Thread_queue_Context             *queue_context
+)
+{
+  return _CORE_message_queue_Submit(
+    the_message_queue,
+    _Thread_Executing,
+    buffer,
+    size,
+    CORE_MESSAGE_QUEUE_SEND_REQUEST,
+    wait,
+    timeout,
+    queue_context
+  );
+}
 
-/**
- * This routine sends a message to the front of the specified message queue.
- */
-#define _CORE_message_queue_Urgent( \
-  the_message_queue, \
-  buffer, \
-  size, \
-  mp_callout, \
-  wait, \
-  timeout, \
-  lock_context \
-) \
-  _CORE_message_queue_Submit( \
-    the_message_queue, \
-    _Thread_Executing, \
-    buffer, \
-    size, \
-    mp_callout, \
-    CORE_MESSAGE_QUEUE_URGENT_REQUEST, \
-    wait,\
-    timeout, \
-    lock_context \
- )
+RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Urgent(
+  CORE_message_queue_Control       *the_message_queue,
+  const void                       *buffer,
+  size_t                            size,
+  bool                              wait,
+  Watchdog_Interval                 timeout,
+  Thread_queue_Context             *queue_context
+)
+{
+  return _CORE_message_queue_Submit(
+    the_message_queue,
+    _Thread_Executing,
+    buffer,
+    size,
+    CORE_MESSAGE_QUEUE_URGENT_REQUEST,
+    wait,
+    timeout,
+    queue_context
+  );
+}
 
 RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire(
   CORE_message_queue_Control *the_message_queue,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
-  _Thread_queue_Acquire( &the_message_queue->Wait_queue, lock_context );
+  _Thread_queue_Acquire(
+    &the_message_queue->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_message_queue_Acquire_critical(
   CORE_message_queue_Control *the_message_queue,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
-  _Thread_queue_Acquire_critical( &the_message_queue->Wait_queue, lock_context );
+  _Thread_queue_Acquire_critical(
+    &the_message_queue->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_message_queue_Release(
   CORE_message_queue_Control *the_message_queue,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
-  _Thread_queue_Release( &the_message_queue->Wait_queue, lock_context );
+  _Thread_queue_Release(
+    &the_message_queue->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 /**
@@ -597,15 +483,12 @@ RTEMS_INLINE_ROUTINE
     do { } while ( 0 )
 #endif
 
-RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Do_dequeue_receiver(
+RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Dequeue_receiver(
   CORE_message_queue_Control      *the_message_queue,
   const void                      *buffer,
   size_t                           size,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout          mp_callout,
-#endif
   CORE_message_queue_Submit_types  submit_type,
-  ISR_lock_Context                *lock_context
+  Thread_queue_Context            *queue_context
 )
 {
   Thread_Control *the_thread;
@@ -647,48 +530,12 @@ RTEMS_INLINE_ROUTINE Thread_Control *_CORE_message_queue_Do_dequeue_receiver(
     &the_message_queue->Wait_queue.Queue,
     the_message_queue->operations,
     the_thread,
-    mp_callout,
-    lock_context
+    queue_context
   );
 
   return the_thread;
 }
 
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_message_queue_Dequeue_receiver( \
-    the_message_queue, \
-    buffer, \
-    size, \
-    mp_callout, \
-    submit_type, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_dequeue_receiver( \
-      the_message_queue, \
-      buffer, \
-      size, \
-      mp_callout, \
-      submit_type, \
-      lock_context \
-    )
-#else
-  #define _CORE_message_queue_Dequeue_receiver( \
-    the_message_queue, \
-    buffer, \
-    size, \
-    mp_callout, \
-    submit_type, \
-    lock_context \
-  ) \
-    _CORE_message_queue_Do_dequeue_receiver( \
-      the_message_queue, \
-      buffer, \
-      size, \
-      submit_type, \
-      lock_context \
-    )
-#endif
-
 /** @} */
 
 #ifdef __cplusplus
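
_CORE_message_queue_Send() and _CORE_message_queue_Urgent() likewise become inline wrappers that forward to _CORE_message_queue_Submit() with CORE_MESSAGE_QUEUE_SEND_REQUEST or CORE_MESSAGE_QUEUE_URGENT_REQUEST respectively; the MP callout travels in the context set up by the object get. A non-blocking send sketch in the style of rtems_message_queue_send() above, with the locals taken from that caller and status handling elided:

    CORE_message_queue_Status status;

    _CORE_message_queue_Acquire_critical(
      &the_message_queue->message_queue,
      &queue_context
    );
    status = _CORE_message_queue_Send(
      &the_message_queue->message_queue,
      buffer,
      size,
      false,   /* sender does not block */
      0,       /* no timeout */
      &queue_context
    );
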
diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h
index 4531e46..a32022a 100644
--- a/cpukit/score/include/rtems/score/coremuteximpl.h
+++ b/cpukit/score/include/rtems/score/coremuteximpl.h
@@ -107,19 +107,25 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Destroy( CORE_mutex_Control *the_mutex )
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_mutex_Acquire_critical(
-  CORE_mutex_Control *the_mutex,
-  ISR_lock_Context   *lock_context
+  CORE_mutex_Control   *the_mutex,
+  Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );
+  _Thread_queue_Acquire_critical(
+    &the_mutex->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
-  CORE_mutex_Control *the_mutex,
-  ISR_lock_Context   *lock_context
+  CORE_mutex_Control   *the_mutex,
+  Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+  _Thread_queue_Release(
+    &the_mutex->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 /**
@@ -219,7 +225,7 @@ RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_priority_ceiling(
  *
  *  @param[in,out] executing The currently executing thread.
  *  @param[in,out] the_mutex is the mutex to attempt to lock
- *  @param[in] lock_context is the interrupt level
+ *  @param[in] queue_context is the interrupt level
  *
  *  @retval This routine returns 0 if "trylock" can resolve whether or not
  *  the mutex is immediately obtained or there was an error attempting to
@@ -227,9 +233,9 @@ RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_priority_ceiling(
  *  the mutex and will have to block to do so.
  */
 RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
-  CORE_mutex_Control  *the_mutex,
-  Thread_Control      *executing,
-  ISR_lock_Context    *lock_context
+  CORE_mutex_Control   *the_mutex,
+  Thread_Control       *executing,
+  Thread_queue_Context *queue_context
 )
 {
   /* disabled when you get here */
@@ -244,7 +250,7 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
     }
 
     if ( !_CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
-      _CORE_mutex_Release( the_mutex, lock_context );
+      _CORE_mutex_Release( the_mutex, queue_context );
       return 0;
     } /* else must be CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING
        *
@@ -258,15 +264,17 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
       ceiling = the_mutex->Attributes.priority_ceiling;
       current = executing->current_priority;
       if ( current == ceiling ) {
-        _CORE_mutex_Release( the_mutex, lock_context );
+        _CORE_mutex_Release( the_mutex, queue_context );
         return 0;
       }
 
       if ( current > ceiling ) {
         Per_CPU_Control *cpu_self;
 
-        cpu_self = _Thread_Dispatch_disable_critical( lock_context );
-        _CORE_mutex_Release( the_mutex, lock_context );
+        cpu_self = _Thread_Dispatch_disable_critical(
+          &queue_context->Lock_context
+        );
+        _CORE_mutex_Release( the_mutex, queue_context );
         _Thread_Raise_priority( executing, ceiling );
         _Thread_Dispatch_enable( cpu_self );
         return 0;
@@ -276,7 +284,7 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
         the_mutex->holder = NULL;
         the_mutex->nest_count = 0;     /* undo locking above */
         executing->resource_count--;   /* undo locking above */
-        _CORE_mutex_Release( the_mutex, lock_context );
+        _CORE_mutex_Release( the_mutex, queue_context );
         return 0;
       }
     }
@@ -292,12 +300,12 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
     switch ( the_mutex->Attributes.lock_nesting_behavior ) {
       case CORE_MUTEX_NESTING_ACQUIRES:
         the_mutex->nest_count++;
-        _CORE_mutex_Release( the_mutex, lock_context );
+        _CORE_mutex_Release( the_mutex, queue_context );
         return 0;
       #if defined(RTEMS_POSIX_API)
         case CORE_MUTEX_NESTING_IS_ERROR:
           executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
-          _CORE_mutex_Release( the_mutex, lock_context );
+          _CORE_mutex_Release( the_mutex, queue_context );
           return 0;
       #endif
       case CORE_MUTEX_NESTING_BLOCKS:
@@ -322,7 +330,7 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
  *  @param[in] the_mutex is the mutex to attempt to lock
  *  @param[in] wait is true if the thread is willing to wait
  *  @param[in] timeout is the maximum number of ticks to block
- *  @param[in] lock_context is a temporary variable used to contain the ISR
+ *  @param[in] queue_context is a temporary variable used to contain the ISR
  *         disable level cookie
  *
  *  @note If the mutex is called from an interrupt service routine,
@@ -339,11 +347,11 @@ RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
  *      then they are blocked.
  */
 RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize(
-  CORE_mutex_Control  *the_mutex,
-  Thread_Control      *executing,
-  bool                 wait,
-  Watchdog_Interval    timeout,
-  ISR_lock_Context    *lock_context
+  CORE_mutex_Control   *the_mutex,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 )
 {
   if ( _CORE_mutex_Check_dispatch_for_seize( wait ) ) {
@@ -353,10 +361,12 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize(
       INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
     );
   }
-  _CORE_mutex_Acquire_critical( the_mutex, lock_context );
-  if ( _CORE_mutex_Seize_interrupt_trylock( the_mutex, executing, lock_context ) ) {
+  _CORE_mutex_Acquire_critical( the_mutex, queue_context );
+  if (
+    _CORE_mutex_Seize_interrupt_trylock( the_mutex, executing, queue_context )
+  ) {
     if ( !wait ) {
-      _CORE_mutex_Release( the_mutex, lock_context );
+      _CORE_mutex_Release( the_mutex, queue_context );
       executing->Wait.return_code =
         CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
     } else {
@@ -364,69 +374,42 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize(
         the_mutex,
         executing,
         timeout,
-        lock_context
+        &queue_context->Lock_context
       );
     }
   }
 }
 
-CORE_mutex_Status _CORE_mutex_Do_surrender(
-  CORE_mutex_Control      *the_mutex,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout  mp_callout,
-#endif
-  ISR_lock_Context        *lock_context
+CORE_mutex_Status _CORE_mutex_Surrender(
+  CORE_mutex_Control   *the_mutex,
+  Thread_queue_Context *queue_context
 );
 
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_mutex_Surrender( \
-    the_mutex, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_mutex_Do_surrender( \
-      the_mutex, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _CORE_mutex_Surrender( \
-    the_mutex, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_mutex_Do_surrender( \
-      the_mutex, \
-      lock_context \
-    )
-#endif
-
 Thread_Control *_CORE_mutex_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
 Thread_Control *_CORE_mutex_Unsatisfied_nowait(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
-/* Must be a macro due to the multiprocessing dependent parameters */
-#define _CORE_mutex_Flush( \
-  the_mutex, \
-  filter, \
-  mp_callout, \
-  lock_context \
-) \
-  _Thread_queue_Flush_critical( \
-    &( the_mutex )->Wait_queue.Queue, \
-    ( the_mutex )->operations, \
-    filter, \
-    mp_callout, \
-    lock_context \
-  )
+RTEMS_INLINE_ROUTINE void _CORE_mutex_Flush(
+  CORE_mutex_Control        *the_mutex,
+  Thread_queue_Flush_filter  filter,
+  Thread_queue_Context      *queue_context
+)
+{
+  _Thread_queue_Flush_critical(
+    &the_mutex->Wait_queue.Queue,
+    the_mutex->operations,
+    filter,
+    queue_context
+  );
+}
 
 RTEMS_INLINE_ROUTINE bool _CORE_mutex_Is_owner(
   const CORE_mutex_Control *the_mutex,
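
The mutex flush follows the same pattern: _CORE_mutex_Flush() is now an inline function over _Thread_queue_Flush_critical(), and the filter alone selects the behavior, since the MP callout is already stored in the context. The two filters used by this patch, as seen in semdelete.c and semflush.c above:

    /* Object deletion */
    _CORE_mutex_Flush(
      &the_semaphore->Core_control.mutex,
      _CORE_mutex_Was_deleted,
      &queue_context
    );

    /* Wake waiters without satisfying them */
    _CORE_mutex_Flush(
      &the_semaphore->Core_control.mutex,
      _CORE_mutex_Unsatisfied_nowait,
      &queue_context
    );
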
diff --git a/cpukit/score/include/rtems/score/corerwlockimpl.h b/cpukit/score/include/rtems/score/corerwlockimpl.h
index ed59d69..67084c1 100644
--- a/cpukit/score/include/rtems/score/corerwlockimpl.h
+++ b/cpukit/score/include/rtems/score/corerwlockimpl.h
@@ -87,19 +87,25 @@ RTEMS_INLINE_ROUTINE void _CORE_RWLock_Destroy(
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_RWLock_Acquire_critical(
-  CORE_RWLock_Control *the_rwlock,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Acquire_critical( &the_rwlock->Wait_queue, lock_context );
+  _Thread_queue_Acquire_critical(
+    &the_rwlock->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_RWLock_Release(
-  CORE_RWLock_Control *the_rwlock,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Release( &the_rwlock->Wait_queue, lock_context );
+  _Thread_queue_Release(
+    &the_rwlock->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 /**
@@ -116,11 +122,11 @@ RTEMS_INLINE_ROUTINE void _CORE_RWLock_Release(
  */
 
 void _CORE_RWLock_Seize_for_reading(
-  CORE_RWLock_Control *the_rwlock,
-  Thread_Control      *executing,
-  bool                 wait,
-  Watchdog_Interval    timeout,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 );
 
 /**
@@ -136,11 +142,11 @@ void _CORE_RWLock_Seize_for_reading(
  * @note Status is returned via the thread control block.
  */
 void _CORE_RWLock_Seize_for_writing(
-  CORE_RWLock_Control *the_rwlock,
-  Thread_Control      *executing,
-  bool                 wait,
-  Watchdog_Interval    timeout,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 );
 
 /**
@@ -154,8 +160,8 @@ void _CORE_RWLock_Seize_for_writing(
  *  @retval Status is returned to indicate successful or failure.
  */
 CORE_RWLock_Status _CORE_RWLock_Surrender(
-  CORE_RWLock_Control *the_rwlock,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_queue_Context *queue_context
 );
 
 /** @} */
diff --git a/cpukit/score/include/rtems/score/coresemimpl.h b/cpukit/score/include/rtems/score/coresemimpl.h
index 0e04cc9..1660c1d 100644
--- a/cpukit/score/include/rtems/score/coresemimpl.h
+++ b/cpukit/score/include/rtems/score/coresemimpl.h
@@ -88,54 +88,68 @@ void _CORE_semaphore_Initialize(
 
 RTEMS_INLINE_ROUTINE void _CORE_semaphore_Acquire_critical(
   CORE_semaphore_Control *the_semaphore,
-  ISR_lock_Context       *lock_context
+  Thread_queue_Context   *queue_context
 )
 {
-  _Thread_queue_Acquire_critical( &the_semaphore->Wait_queue, lock_context );
+  _Thread_queue_Acquire_critical(
+    &the_semaphore->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 RTEMS_INLINE_ROUTINE void _CORE_semaphore_Release(
   CORE_semaphore_Control *the_semaphore,
-  ISR_lock_Context       *lock_context
+  Thread_queue_Context   *queue_context
 )
 {
-  _Thread_queue_Release( &the_semaphore->Wait_queue, lock_context );
+  _Thread_queue_Release(
+    &the_semaphore->Wait_queue,
+    &queue_context->Lock_context
+  );
 }
 
 Thread_Control *_CORE_semaphore_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
 Thread_Control *_CORE_semaphore_Unsatisfied_nowait(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
-#define _CORE_semaphore_Destroy( \
-  the_semaphore, \
-  mp_callout, \
-  lock_context \
-) \
-  do { \
-    _Thread_queue_Flush_critical( \
-      &( the_semaphore )->Wait_queue.Queue, \
-      ( the_semaphore )->operations, \
-      _CORE_semaphore_Was_deleted, \
-      mp_callout, \
-      lock_context \
-    ); \
-    _Thread_queue_Destroy( &( the_semaphore )->Wait_queue ); \
-  } while ( 0 )
+RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy(
+  CORE_semaphore_Control *the_semaphore,
+  Thread_queue_Context   *queue_context
+)
+{
+  _Thread_queue_Flush_critical(
+    &the_semaphore->Wait_queue.Queue,
+    the_semaphore->operations,
+    _CORE_semaphore_Was_deleted,
+    queue_context
+  );
+  _Thread_queue_Destroy( &the_semaphore->Wait_queue );
+}
 
-RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Do_surrender(
+/**
+ *  @brief Surrender a unit to a semaphore.
+ *
+ *  This routine frees a unit to the semaphore.  If a task was blocked waiting
+ *  for a unit from this semaphore, then that task will be readied and the unit
+ *  given to that task.  Otherwise, the unit will be returned to the semaphore.
+ *
+ *  @param[in] the_semaphore is the semaphore to surrender
+ *  @param[in] queue_context is the thread queue context used to contain the
+ *        ISR disable level cookie and the MP callout
+ *
+ *  @retval an indication of whether the routine succeeded or failed
+ */
+RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Surrender(
   CORE_semaphore_Control  *the_semaphore,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout  mp_callout,
-#endif
-  ISR_lock_Context        *lock_context
+  Thread_queue_Context    *queue_context
 )
 {
   Thread_Control *the_thread;
@@ -143,7 +157,7 @@ RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Do_surrender(
 
   status = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
 
-  _CORE_semaphore_Acquire_critical( the_semaphore, lock_context );
+  _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
 
   the_thread = _Thread_queue_First_locked(
     &the_semaphore->Wait_queue,
@@ -154,8 +168,7 @@ RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Do_surrender(
       &the_semaphore->Wait_queue.Queue,
       the_semaphore->operations,
       the_thread,
-      mp_callout,
-      lock_context
+      queue_context
     );
   } else {
     if ( the_semaphore->count < UINT32_MAX )
@@ -163,65 +176,24 @@ RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Do_surrender(
     else
       status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED;
 
-    _CORE_semaphore_Release( the_semaphore, lock_context );
+    _CORE_semaphore_Release( the_semaphore, queue_context );
   }
 
   return status;
 }
 
-/**
- *  @brief Surrender a unit to a semaphore.
- *
- *  This routine frees a unit to the semaphore.  If a task was blocked waiting
- *  for a unit from this semaphore, then that task will be readied and the unit
- *  given to that task.  Otherwise, the unit will be returned to the semaphore.
- *
- *  @param[in] the_semaphore is the semaphore to surrender
- *  @param[in] mp_callout is the routine to invoke if the
- *         thread unblocked is remote
- *  @param[in] lock_context is a temporary variable used to contain the ISR
- *        disable level cookie
- *
- *  @retval an indication of whether the routine succeeded or failed
- */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _CORE_semaphore_Surrender( \
-    the_semaphore, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_semaphore_Do_surrender( \
-      the_semaphore, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _CORE_semaphore_Surrender( \
-    the_semaphore, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _CORE_semaphore_Do_surrender( \
-      the_semaphore, \
-      lock_context \
-    )
-#endif
-
-/* Must be a macro due to the multiprocessing dependent parameters */
-#define _CORE_semaphore_Flush( \
-  the_semaphore, \
-  mp_callout, \
-  lock_context \
-) \
-  do { \
-    _Thread_queue_Flush_critical( \
-      &( the_semaphore )->Wait_queue.Queue, \
-      ( the_semaphore )->operations, \
-      _CORE_semaphore_Unsatisfied_nowait, \
-      mp_callout, \
-      lock_context \
-    ); \
-  } while ( 0 )
+RTEMS_INLINE_ROUTINE void _CORE_semaphore_Flush(
+  CORE_semaphore_Control *the_semaphore,
+  Thread_queue_Context   *queue_context
+)
+{
+  _Thread_queue_Flush_critical(
+    &the_semaphore->Wait_queue.Queue,
+    the_semaphore->operations,
+    _CORE_semaphore_Unsatisfied_nowait,
+    queue_context
+  );
+}
 
 /**
  * This routine returns the current count associated with the semaphore.
@@ -247,31 +219,31 @@ RTEMS_INLINE_ROUTINE uint32_t  _CORE_semaphore_Get_count(
  * @param[in,out] executing The currently executing thread.
  * @param[in] wait is true if the thread is willing to wait
  * @param[in] timeout is the maximum number of ticks to block
- * @param[in] lock_context is a temporary variable used to contain the ISR
+ * @param[in] queue_context is a temporary variable used to contain the ISR
  *        disable level cookie
  *
  * @note There is currently no MACRO version of this routine.
  */
 RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize(
-  CORE_semaphore_Control  *the_semaphore,
-  Thread_Control          *executing,
-  bool                     wait,
-  Watchdog_Interval        timeout,
-  ISR_lock_Context        *lock_context
+  CORE_semaphore_Control *the_semaphore,
+  Thread_Control         *executing,
+  bool                    wait,
+  Watchdog_Interval       timeout,
+  Thread_queue_Context   *queue_context
 )
 {
   /* disabled when you get here */
 
   executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
-  _CORE_semaphore_Acquire_critical( the_semaphore, lock_context );
+  _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
   if ( the_semaphore->count != 0 ) {
     the_semaphore->count -= 1;
-    _CORE_semaphore_Release( the_semaphore, lock_context );
+    _CORE_semaphore_Release( the_semaphore, queue_context );
     return;
   }
 
   if ( !wait ) {
-    _CORE_semaphore_Release( the_semaphore, lock_context );
+    _CORE_semaphore_Release( the_semaphore, queue_context );
     executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
     return;
   }
@@ -283,7 +255,7 @@ RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize(
     STATES_WAITING_FOR_SEMAPHORE,
     timeout,
     CORE_SEMAPHORE_TIMEOUT,
-    lock_context
+    &queue_context->Lock_context
   );
 }
 
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index d5cf55d..5173343 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -54,19 +54,19 @@ RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context )
 }
 
 RTEMS_INLINE_ROUTINE void _MRSP_Acquire_critical(
-  MRSP_Control     *mrsp,
-  ISR_lock_Context *lock_context
+  MRSP_Control         *mrsp,
+  Thread_queue_Context *queue_context
 )
 {
-  _ISR_lock_Acquire( &mrsp->Lock, lock_context );
+  _ISR_lock_Acquire( &mrsp->Lock, &queue_context->Lock_context );
 }
 
 RTEMS_INLINE_ROUTINE void _MRSP_Release(
-  MRSP_Control     *mrsp,
-  ISR_lock_Context *lock_context
+  MRSP_Control         *mrsp,
+  Thread_queue_Context *queue_context
 )
 {
-  _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );
+  _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &queue_context->Lock_context );
 }
 
 RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter(
@@ -104,11 +104,11 @@ RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority(
 }
 
 RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
-  MRSP_Control     *mrsp,
-  Thread_Control   *new_owner,
-  Priority_Control  initial_priority,
-  Priority_Control  ceiling_priority,
-  ISR_lock_Context *lock_context
+  MRSP_Control         *mrsp,
+  Thread_Control       *new_owner,
+  Priority_Control      initial_priority,
+  Priority_Control      ceiling_priority,
+  Thread_queue_Context *queue_context
 )
 {
   Per_CPU_Control *cpu_self;
@@ -118,8 +118,8 @@ RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
   mrsp->initial_priority_of_owner = initial_priority;
   _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
 
-  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
-  _MRSP_Release( mrsp, lock_context );
+  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+  _MRSP_Release( mrsp, queue_context );
 
   _Thread_Raise_priority( new_owner, ceiling_priority );
 
@@ -180,10 +180,11 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
   MRSP_Rival *rival = RTEMS_CONTAINER_OF( watchdog, MRSP_Rival, Watchdog );
   MRSP_Control *mrsp = rival->resource;
   Thread_Control *thread = rival->thread;
-  ISR_lock_Context lock_context;
+  Thread_queue_Context queue_context;
 
-  _ISR_lock_ISR_disable( &lock_context );
-  _MRSP_Acquire_critical( mrsp, &lock_context );
+  _Thread_queue_Context_initialize( &queue_context, NULL );
+  _ISR_lock_ISR_disable( &queue_context.Lock_context );
+  _MRSP_Acquire_critical( mrsp, &queue_context );
 
   if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
     ISR_lock_Context giant_lock_context;
@@ -200,20 +201,20 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout( Watchdog_Control *watchdog )
 
     rival->status = MRSP_TIMEOUT;
 
-    _MRSP_Release( mrsp, &lock_context );
+    _MRSP_Release( mrsp, &queue_context );
   } else {
-    _MRSP_Release( mrsp, &lock_context );
+    _MRSP_Release( mrsp, &queue_context );
   }
 }
 
 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
-  MRSP_Control      *mrsp,
-  Resource_Node     *owner,
-  Thread_Control    *executing,
-  Priority_Control   initial_priority,
-  Priority_Control   ceiling_priority,
-  Watchdog_Interval  timeout,
-  ISR_lock_Context  *lock_context
+  MRSP_Control         *mrsp,
+  Resource_Node        *owner,
+  Thread_Control       *executing,
+  Priority_Control      initial_priority,
+  Priority_Control      ceiling_priority,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 )
 {
   MRSP_Status status;
@@ -243,8 +244,8 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
 
   _MRSP_Giant_release( &giant_lock_context );
 
-  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
-  _MRSP_Release( mrsp, lock_context );
+  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+  _MRSP_Release( mrsp, queue_context );
 
   _Thread_Raise_priority( executing, ceiling_priority );
 
@@ -286,11 +287,11 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
 }
 
 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Seize(
-  MRSP_Control      *mrsp,
-  Thread_Control    *executing,
-  bool               wait,
-  Watchdog_Interval  timeout,
-  ISR_lock_Context  *lock_context
+  MRSP_Control         *mrsp,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 )
 {
   MRSP_Status status;
@@ -306,11 +307,11 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Seize(
   Resource_Node *owner;
 
   if ( !priority_ok) {
-    _ISR_lock_ISR_enable( lock_context );
+    _ISR_lock_ISR_enable( &queue_context->Lock_context );
     return MRSP_INVALID_PRIORITY;
   }
 
-  _MRSP_Acquire_critical( mrsp, lock_context );
+  _MRSP_Acquire_critical( mrsp, queue_context );
   owner = _Resource_Get_owner( &mrsp->Resource );
   if ( owner == NULL ) {
     _MRSP_Claim_ownership(
@@ -318,7 +319,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Seize(
       executing,
       initial_priority,
       ceiling_priority,
-      lock_context
+      queue_context
     );
     status = MRSP_SUCCESSFUL;
   } else if (
@@ -332,10 +333,10 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Seize(
       initial_priority,
       ceiling_priority,
       timeout,
-      lock_context
+      queue_context
     );
   } else {
-    _MRSP_Release( mrsp, lock_context );
+    _MRSP_Release( mrsp, queue_context );
     /* Not available, nested access or deadlock */
     status = MRSP_UNSATISFIED;
   }
@@ -344,9 +345,9 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Seize(
 }
 
 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Surrender(
-  MRSP_Control     *mrsp,
-  Thread_Control   *executing,
-  ISR_lock_Context *lock_context
+  MRSP_Control         *mrsp,
+  Thread_Control       *executing,
+  Thread_queue_Context *queue_context
 )
 {
   Priority_Control initial_priority;
@@ -354,7 +355,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Surrender(
   ISR_lock_Context giant_lock_context;
 
   if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
-    _ISR_lock_ISR_enable( lock_context );
+    _ISR_lock_ISR_enable( &queue_context->Lock_context );
     return MRSP_NOT_OWNER_OF_RESOURCE;
   }
 
@@ -364,13 +365,13 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Surrender(
       &executing->Resource_node
     )
   ) {
-    _ISR_lock_ISR_enable( lock_context );
+    _ISR_lock_ISR_enable( &queue_context->Lock_context );
     return MRSP_INCORRECT_STATE;
   }
 
   initial_priority = mrsp->initial_priority_of_owner;
 
-  _MRSP_Acquire_critical( mrsp, lock_context );
+  _MRSP_Acquire_critical( mrsp, queue_context );
 
   _MRSP_Giant_acquire( &giant_lock_context );
 
@@ -405,8 +406,8 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Surrender(
 
   _MRSP_Giant_release( &giant_lock_context );
 
-  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
-  _MRSP_Release( mrsp, lock_context );
+  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+  _MRSP_Release( mrsp, queue_context );
 
   _MRSP_Restore_priority( executing, initial_priority );
 
@@ -425,11 +426,11 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Can_destroy( MRSP_Control *mrsp )
 }
 
 RTEMS_INLINE_ROUTINE void _MRSP_Destroy(
-  MRSP_Control     *mrsp,
-  ISR_lock_Context *lock_context
+  MRSP_Control         *mrsp,
+  Thread_queue_Context *queue_context
 )
 {
-  _MRSP_Release( mrsp, lock_context );
+  _MRSP_Release( mrsp, queue_context );
   _ISR_lock_Destroy( &mrsp->Lock );
   _Workspace_Free( mrsp->ceiling_priorities );
 }
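In the MRSP code above only the Lock_context member of the context is used
(the MP callout does not matter for this object), but the full context is
passed so the signatures line up with the rest of the thread queue API.  A
hedged sketch of a seize caller; mrsp, executing and timeout stand in for the
caller's own data:

  MRSP_Status          status;
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context, NULL );
  _ISR_lock_ISR_disable( &queue_context.Lock_context );

  /* Interrupts are re-enabled via queue_context.Lock_context on every
   * return path of _MRSP_Seize */
  status = _MRSP_Seize( mrsp, executing, true, timeout, &queue_context );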
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index 7ada3fc..27af89f 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -58,6 +58,18 @@ typedef void ( *Thread_queue_MP_callout )(
 #endif
 
 /**
+ * @brief Thread queue context for the thread queue methods.
+ *
+ * @see _Thread_queue_Context_initialize().
+ */
+typedef struct {
+  ISR_lock_Context Lock_context;
+#if defined(RTEMS_MULTIPROCESSING)
+  Thread_queue_MP_callout mp_callout;
+#endif
+} Thread_queue_Context;
+
+/**
  * @brief Thread priority queue.
  */
 typedef struct {
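The new Thread_queue_Context couples the ISR lock context with the optional MP
callout, so a caller sets up one object before entering the thread queue
critical section.  A minimal sketch of that setup, assuming a caller that
needs no proxy unblock callout:

  Thread_queue_Context queue_context;

  /* Bind the MP callout once; NULL means no proxy unblock is required.
   * On non-multiprocessing configurations the argument is discarded. */
  _Thread_queue_Context_initialize( &queue_context, NULL );

  /* The embedded lock context carries the ISR disable level cookie */
  _ISR_lock_ISR_disable( &queue_context.Lock_context );

  /* ... pass &queue_context to an extract, surrender or flush method ... */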
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 011c410..752e13e 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -53,6 +53,49 @@ typedef struct {
 #endif
 } Thread_queue_Syslock_queue;
 
+RTEMS_INLINE_ROUTINE void _Thread_queue_Do_context_initialize(
+  Thread_queue_Context    *queue_context
+#if defined(RTEMS_MULTIPROCESSING)
+  ,
+  Thread_queue_MP_callout  mp_callout
+#endif
+)
+{
+#if defined(RTEMS_MULTIPROCESSING)
+  queue_context->mp_callout = mp_callout;
+#else
+  (void) queue_context;
+#endif
+}
+
+/**
+ * @brief Initializes a thread queue context.
+ *
+ * @param queue_context The thread queue context to initialize.
+ * @param mp_callout Callout to unblock the thread in case it is actually a
+ *   thread proxy.  This parameter is only used on multiprocessing
+ *   configurations.  Used by thread queue extract and unblock methods for
+ *   objects with multiprocessing (MP) support.
+ */
+#if defined(RTEMS_MULTIPROCESSING)
+  #define _Thread_queue_Context_initialize( \
+    queue_context, \
+    mp_callout \
+  ) \
+    _Thread_queue_Do_context_initialize( \
+      queue_context, \
+      mp_callout \
+    )
+#else
+  #define _Thread_queue_Context_initialize( \
+    queue_context, \
+    mp_callout \
+  ) \
+    _Thread_queue_Do_context_initialize( \
+      queue_context \
+    )
+#endif
+
 RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
   Thread_queue_Heads *heads
 )
@@ -344,7 +387,7 @@ bool _Thread_queue_Do_extract_locked(
   Thread_Control                *the_thread
 #if defined(RTEMS_MULTIPROCESSING)
   ,
-  Thread_queue_MP_callout        mp_callout
+  const Thread_queue_Context    *queue_context
 #endif
 );
 
@@ -358,9 +401,8 @@ bool _Thread_queue_Do_extract_locked(
  * @param[in] queue The actual thread queue.
  * @param[in] operations The thread queue operations.
  * @param[in] the_thread The thread to extract.
- * @param[in] mp_callout Callout to unblock the thread in case it is actually a
- *   thread proxy.  This parameter is only used on multiprocessing
- *   configurations.
+ * @param[in] queue_context The thread queue context.  This parameter is only
+ *   used on multiprocessing configurations.
  *
  * @return Returns the unblock indicator for _Thread_queue_Unblock_critical().
  * True indicates, that this thread must be unblocked by the scheduler later in
@@ -375,20 +417,20 @@ bool _Thread_queue_Do_extract_locked(
     unblock, \
     queue, \
     the_thread, \
-    mp_callout \
+    queue_context \
   ) \
     _Thread_queue_Do_extract_locked( \
       unblock, \
       queue, \
       the_thread, \
-      mp_callout \
+      queue_context \
     )
 #else
   #define _Thread_queue_Extract_locked( \
     unblock, \
     queue, \
     the_thread, \
-    mp_callout \
+    queue_context \
   ) \
     _Thread_queue_Do_extract_locked( \
       unblock, \
@@ -418,16 +460,6 @@ void _Thread_queue_Unblock_critical(
   ISR_lock_Context   *lock_context
 );
 
-void _Thread_queue_Do_extract_critical(
-  Thread_queue_Queue            *queue,
-  const Thread_queue_Operations *operations,
-  Thread_Control                *the_thread,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout        mp_callout,
-#endif
-  ISR_lock_Context              *lock_context
-);
-
 /**
  * @brief Extracts the thread from the thread queue and unblocks it.
  *
@@ -450,10 +482,11 @@ void _Thread_queue_Do_extract_critical(
  *
  * void _Mutex_Release( Mutex *mutex )
  * {
- *   ISR_lock_Context  lock_context;
- *   Thread_Control   *first;
+ *   Thread_queue_Context  queue_context;
+ *   Thread_Control       *first;
  *
- *   _Thread_queue_Acquire( &mutex->Queue, &lock_context );
+ *   _Thread_queue_Context_initialize( &queue_context, NULL );
+ *   _Thread_queue_Acquire( &mutex->Queue, &queue_context.Lock_context );
  *
  *   first = _Thread_queue_First_locked( &mutex->Queue );
  *   mutex->owner = first;
@@ -463,9 +496,7 @@ void _Thread_queue_Do_extract_critical(
  *       &mutex->Queue.Queue,
  *       mutex->Queue.operations,
  *       first,
- *       NULL,
- *       0,
- *       &lock_context
+ *       &queue_context
  *   );
  * }
  * @endcode
@@ -473,41 +504,14 @@ void _Thread_queue_Do_extract_critical(
  * @param[in] queue The actual thread queue.
  * @param[in] operations The thread queue operations.
  * @param[in] the_thread The thread to extract.
- * @param[in] mp_callout Callout to unblock the thread in case it is actually a
- *   thread proxy.  This parameter is only used on multiprocessing
- *   configurations.
- * @param[in] lock_context The lock context of the lock acquire.
+ * @param[in] queue_context The thread queue context of the lock acquire.
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _Thread_queue_Extract_critical( \
-    queue, \
-    operations, \
-    the_thread, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _Thread_queue_Do_extract_critical( \
-      queue, \
-      operations, \
-      the_thread, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _Thread_queue_Extract_critical( \
-    queue, \
-    operations, \
-    the_thread, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _Thread_queue_Do_extract_critical( \
-      queue, \
-      operations, \
-      the_thread, \
-      lock_context \
-    )
-#endif
+void _Thread_queue_Extract_critical(
+  Thread_queue_Queue            *queue,
+  const Thread_queue_Operations *operations,
+  Thread_Control                *the_thread,
+  Thread_queue_Context          *queue_context
+);
 
 /**
  *  @brief Extracts thread from thread queue.
@@ -592,9 +596,10 @@ Thread_Control *_Thread_queue_First(
  *   optimize for architectures that use the same register for the first
  *   parameter and the return value.
  * @param queue The actual thread queue.
- * @param lock_context The lock context of the lock acquire.  May be used to
- *   pass additional data to the filter function via an overlay structure.  The
- *   filter function should not release or acquire the thread queue lock.
+ * @param queue_context The thread queue context of the lock acquire.  May be
+ *   used to pass additional data to the filter function via an overlay
+ *   structure.  The filter function should not release or acquire the thread
+ *   queue lock.
  *
  * @retval the_thread Extract this thread.
  * @retval NULL Do not extract this thread and stop the thread queue flush
@@ -602,9 +607,9 @@ Thread_Control *_Thread_queue_First(
  *   operation.
  */
 typedef Thread_Control *( *Thread_queue_Flush_filter )(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
 /**
@@ -612,24 +617,14 @@ typedef Thread_Control *( *Thread_queue_Flush_filter )(
  *
  * @param the_thread The thread to extract.
  * @param queue Unused.
- * @param lock_context Unused.
+ * @param queue_context Unused.
  *
  * @retval the_thread Extract this thread.
  */
 Thread_Control *_Thread_queue_Flush_default_filter(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
-);
-
-size_t _Thread_queue_Do_flush_critical(
-  Thread_queue_Queue            *queue,
-  const Thread_queue_Operations *operations,
-  Thread_queue_Flush_filter      filter,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout        mp_callout,
-#endif
-  ISR_lock_Context              *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 );
 
 /**
@@ -647,41 +642,19 @@ size_t _Thread_queue_Do_flush_critical(
  *   the thread queue lock, for example to set the thread wait return code.
  *   The return value of the filter function controls if the thread queue flush
  *   operation should stop or continue.
- * @param mp_callout Callout to extract the proxy of a remote thread.  This
- *   parameter is only used on multiprocessing configurations.
+ * @param queue_context The thread queue context of the lock acquire.  May be
+ *   used to pass additional data to the filter function via an overlay
+ *   structure.  The filter function should not release or acquire the thread
+ *   queue lock.
  *
  * @return The count of extracted threads.
  */
-#if defined(RTEMS_MULTIPROCESSING)
-  #define _Thread_queue_Flush_critical( \
-    queue, \
-    operations, \
-    filter, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _Thread_queue_Do_flush_critical( \
-      queue, \
-      operations, \
-      filter, \
-      mp_callout, \
-      lock_context \
-    )
-#else
-  #define _Thread_queue_Flush_critical( \
-    queue, \
-    operations, \
-    filter, \
-    mp_callout, \
-    lock_context \
-  ) \
-    _Thread_queue_Do_flush_critical( \
-      queue, \
-      operations, \
-      filter, \
-      lock_context \
-    )
-#endif
+size_t _Thread_queue_Flush_critical(
+  Thread_queue_Queue            *queue,
+  const Thread_queue_Operations *operations,
+  Thread_queue_Flush_filter      filter,
+  Thread_queue_Context          *queue_context
+);
 
 void _Thread_queue_Initialize( Thread_queue_Control *the_thread_queue );
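Because the flush filter now receives the Thread_queue_Context of the lock
acquire, extra per-operation data can be handed to it by embedding the context
as the first member of a larger structure and casting back inside the filter,
which is exactly what the condition variable and futex code below do.  A
minimal sketch of that overlay pattern with made-up names:

  typedef struct {
    Thread_queue_Context Base;   /* must stay the first member */
    int                  budget; /* extra data consumed by the filter */
  } My_flush_context;

  static Thread_Control *_My_flush_filter(
    Thread_Control       *the_thread,
    Thread_queue_Queue   *queue,
    Thread_queue_Context *queue_context
  )
  {
    My_flush_context *context;

    context = (My_flush_context *) queue_context;

    if ( context->budget <= 0 ) {
      return NULL;         /* stop the flush operation */
    }

    --context->budget;
    return the_thread;     /* extract and unblock this thread */
  }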
 
diff --git a/cpukit/score/src/apimutexlock.c b/cpukit/score/src/apimutexlock.c
index 79729d4..11d12c5 100644
--- a/cpukit/score/src/apimutexlock.c
+++ b/cpukit/score/src/apimutexlock.c
@@ -25,20 +25,21 @@
 
 void _API_Mutex_Lock( API_Mutex_Control *the_mutex )
 {
-  Thread_Life_state previous_thread_life_state;
-  ISR_lock_Context  lock_context;
+  Thread_Life_state    previous_thread_life_state;
+  Thread_queue_Context queue_context;
 
   previous_thread_life_state =
     _Thread_Set_life_protection( THREAD_LIFE_PROTECTED );
 
-  _ISR_lock_ISR_disable( &lock_context );
+  _Thread_queue_Context_initialize( &queue_context, NULL );
+  _ISR_lock_ISR_disable( &queue_context.Lock_context );
 
   _CORE_mutex_Seize(
     &the_mutex->Mutex,
     _Thread_Executing,
     true,
     0,
-    &lock_context
+    &queue_context
   );
 
   if ( the_mutex->Mutex.nest_count == 1 ) {
diff --git a/cpukit/score/src/apimutexunlock.c b/cpukit/score/src/apimutexunlock.c
index 082961f..f0f114e 100644
--- a/cpukit/score/src/apimutexunlock.c
+++ b/cpukit/score/src/apimutexunlock.c
@@ -24,19 +24,16 @@
 
 void _API_Mutex_Unlock( API_Mutex_Control *the_mutex )
 {
-  ISR_lock_Context  lock_context;
-  Thread_Life_state previous_thread_life_state;
-  bool              restore_thread_life_protection;
+  Thread_queue_Context queue_context;
+  Thread_Life_state    previous_thread_life_state;
+  bool                 restore_thread_life_protection;
 
   previous_thread_life_state = the_mutex->previous_thread_life_state;
   restore_thread_life_protection = the_mutex->Mutex.nest_count == 1;
 
-  _ISR_lock_ISR_disable( &lock_context );
-  _CORE_mutex_Surrender(
-    &the_mutex->Mutex,
-    NULL,
-    &lock_context
-  );
+  _Thread_queue_Context_initialize( &queue_context, NULL );
+  _ISR_lock_ISR_disable( &queue_context.Lock_context );
+  _CORE_mutex_Surrender( &the_mutex->Mutex, &queue_context );
 
   if ( restore_thread_life_protection ) {
     _Thread_Set_life_protection( previous_thread_life_state );
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
index fcd93b2..c0320b2 100644
--- a/cpukit/score/src/condition.c
+++ b/cpukit/score/src/condition.c
@@ -220,54 +220,54 @@ int _Condition_Wait_recursive_timed(
 }
 
 typedef struct {
-  ISR_lock_Context Base;
-  int              count;
-} Condition_Lock_context;
+  Thread_queue_Context Base;
+  int                  count;
+} Condition_Context;
 
 static Thread_Control *_Condition_Flush_filter(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
-  Condition_Lock_context *condition_lock_context;
+  Condition_Context *context;
 
-  condition_lock_context = (Condition_Lock_context *) lock_context;
+  context = (Condition_Context *) queue_context;
 
-  if ( condition_lock_context->count <= 0 ) {
+  if ( context->count <= 0 ) {
     return NULL;
   }
 
-  --condition_lock_context->count;
+  --context->count;
 
   return the_thread;
 }
 
 static void _Condition_Wake( struct _Condition_Control *_condition, int count )
 {
-  Condition_Control      *condition;
-  Condition_Lock_context  lock_context;
+  Condition_Control *condition;
+  Condition_Context  context;
 
   condition = _Condition_Get( _condition );
-  _ISR_lock_ISR_disable( &lock_context.Base );
-  _Condition_Queue_acquire_critical( condition, &lock_context.Base );
+  _Thread_queue_Context_initialize( &context.Base, NULL );
+  _ISR_lock_ISR_disable( &context.Base.Lock_context );
+  _Condition_Queue_acquire_critical( condition, &context.Base.Lock_context );
 
   /*
    * In common uses cases of condition variables there are normally no threads
    * on the queue, so check this condition early.
    */
   if ( __predict_true( _Thread_queue_Is_empty( &condition->Queue.Queue ) ) ) {
-    _Condition_Queue_release( condition, &lock_context.Base );
+    _Condition_Queue_release( condition, &context.Base.Lock_context );
     return;
   }
 
-  lock_context.count = count;
+  context.count = count;
   _Thread_queue_Flush_critical(
     &condition->Queue.Queue,
     CONDITION_TQ_OPERATIONS,
     _Condition_Flush_filter,
-    NULL,
-    &lock_context.Base
+    &context.Base
   );
 }
 
diff --git a/cpukit/score/src/corebarrier.c b/cpukit/score/src/corebarrier.c
index 3cb7906..a32f88c 100644
--- a/cpukit/score/src/corebarrier.c
+++ b/cpukit/score/src/corebarrier.c
@@ -33,9 +33,9 @@ void _CORE_barrier_Initialize(
 }
 
 Thread_Control *_CORE_barrier_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   the_thread->Wait.return_code = CORE_BARRIER_WAS_DELETED;
diff --git a/cpukit/score/src/corebarrierrelease.c b/cpukit/score/src/corebarrierrelease.c
index c9c80f4..8a23856 100644
--- a/cpukit/score/src/corebarrierrelease.c
+++ b/cpukit/score/src/corebarrierrelease.c
@@ -21,13 +21,10 @@
 
 #include <rtems/score/corebarrierimpl.h>
 
-uint32_t _CORE_barrier_Do_surrender(
+uint32_t _CORE_barrier_Do_flush(
   CORE_barrier_Control      *the_barrier,
   Thread_queue_Flush_filter  filter,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout    mp_callout,
-#endif
-  ISR_lock_Context          *lock_context
+  Thread_queue_Context      *queue_context
 )
 {
   the_barrier->number_of_waiting_threads = 0;
@@ -35,7 +32,6 @@ uint32_t _CORE_barrier_Do_surrender(
     &the_barrier->Wait_queue.Queue,
     CORE_BARRIER_TQ_OPERATIONS,
     filter,
-    mp_callout,
-    lock_context
+    queue_context
   );
 }
diff --git a/cpukit/score/src/corebarrierwait.c b/cpukit/score/src/corebarrierwait.c
index 54e9096..33f1718 100644
--- a/cpukit/score/src/corebarrierwait.c
+++ b/cpukit/score/src/corebarrierwait.c
@@ -21,22 +21,19 @@
 #include <rtems/score/corebarrierimpl.h>
 #include <rtems/score/statesimpl.h>
 
-void _CORE_barrier_Do_seize(
-  CORE_barrier_Control    *the_barrier,
-  Thread_Control          *executing,
-  bool                     wait,
-  Watchdog_Interval        timeout,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout  mp_callout,
-#endif
-  ISR_lock_Context        *lock_context
+void _CORE_barrier_Seize(
+  CORE_barrier_Control *the_barrier,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 )
 {
   uint32_t number_of_waiting_threads;
 
   executing->Wait.return_code = CORE_BARRIER_STATUS_SUCCESSFUL;
 
-  _CORE_barrier_Acquire_critical( the_barrier, lock_context );
+  _CORE_barrier_Acquire_critical( the_barrier, queue_context );
 
   number_of_waiting_threads = the_barrier->number_of_waiting_threads;
   ++number_of_waiting_threads;
@@ -46,7 +43,7 @@ void _CORE_barrier_Do_seize(
       && number_of_waiting_threads == the_barrier->Attributes.maximum_count
   ) {
     executing->Wait.return_code = CORE_BARRIER_STATUS_AUTOMATICALLY_RELEASED;
-    _CORE_barrier_Surrender( the_barrier, mp_callout, lock_context );
+    _CORE_barrier_Surrender( the_barrier, queue_context );
   } else {
     the_barrier->number_of_waiting_threads = number_of_waiting_threads;
     _Thread_queue_Enqueue_critical(
@@ -56,7 +53,7 @@ void _CORE_barrier_Do_seize(
       STATES_WAITING_FOR_BARRIER,
       timeout,
       CORE_BARRIER_TIMEOUT,
-      lock_context
+      &queue_context->Lock_context
     );
   }
 }
diff --git a/cpukit/score/src/coremsgbroadcast.c b/cpukit/score/src/coremsgbroadcast.c
index a7a962f..23dd343 100644
--- a/cpukit/score/src/coremsgbroadcast.c
+++ b/cpukit/score/src/coremsgbroadcast.c
@@ -21,28 +21,25 @@
 #include <rtems/score/coremsgimpl.h>
 #include <rtems/score/objectimpl.h>
 
-CORE_message_queue_Status _CORE_message_queue_Do_broadcast(
+CORE_message_queue_Status _CORE_message_queue_Broadcast(
   CORE_message_queue_Control *the_message_queue,
   const void                 *buffer,
   size_t                      size,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout     mp_callout,
-#endif
   uint32_t                   *count,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
   Thread_Control             *the_thread;
   uint32_t                    number_broadcasted;
 
   if ( size > the_message_queue->maximum_message_size ) {
-    _ISR_lock_ISR_enable( lock_context );
+    _ISR_lock_ISR_enable( &queue_context->Lock_context );
     return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
   }
 
   number_broadcasted = 0;
 
-  _CORE_message_queue_Acquire_critical( the_message_queue, lock_context );
+  _CORE_message_queue_Acquire_critical( the_message_queue, queue_context );
 
   while (
     ( the_thread =
@@ -50,18 +47,17 @@ CORE_message_queue_Status _CORE_message_queue_Do_broadcast(
         the_message_queue,
         buffer,
         size,
-        mp_callout,
         0,
-        lock_context
+        queue_context
       )
     )
   ) {
     number_broadcasted += 1;
 
-    _CORE_message_queue_Acquire( the_message_queue, lock_context );
+    _CORE_message_queue_Acquire( the_message_queue, queue_context );
   }
 
-  _CORE_message_queue_Release( the_message_queue, lock_context );
+  _CORE_message_queue_Release( the_message_queue, queue_context );
 
   *count = number_broadcasted;
   return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
diff --git a/cpukit/score/src/coremsgclose.c b/cpukit/score/src/coremsgclose.c
index 1951e9f..e24d756 100644
--- a/cpukit/score/src/coremsgclose.c
+++ b/cpukit/score/src/coremsgclose.c
@@ -22,9 +22,9 @@
 #include <rtems/score/wkspace.h>
 
 static Thread_Control *_CORE_message_queue_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   the_thread->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_WAS_DELETED;
@@ -32,12 +32,9 @@ static Thread_Control *_CORE_message_queue_Was_deleted(
   return the_thread;
 }
 
-void _CORE_message_queue_Do_close(
+void _CORE_message_queue_Close(
   CORE_message_queue_Control *the_message_queue,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout     mp_callout,
-#endif
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
 
@@ -50,8 +47,7 @@ void _CORE_message_queue_Do_close(
     &the_message_queue->Wait_queue.Queue,
     the_message_queue->operations,
     _CORE_message_queue_Was_deleted,
-    mp_callout,
-    lock_context
+    queue_context
   );
 
   (void) _Workspace_Free( the_message_queue->message_buffers );
diff --git a/cpukit/score/src/coremsgflush.c b/cpukit/score/src/coremsgflush.c
index 38f26b7..e5b51f9 100644
--- a/cpukit/score/src/coremsgflush.c
+++ b/cpukit/score/src/coremsgflush.c
@@ -23,7 +23,7 @@
 
 uint32_t   _CORE_message_queue_Flush(
   CORE_message_queue_Control *the_message_queue,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
   Chain_Node *inactive_head;
@@ -59,7 +59,7 @@ uint32_t   _CORE_message_queue_Flush(
    *  fixed execution time that only deals with pending messages.
    */
 
-  _CORE_message_queue_Acquire_critical( the_message_queue, lock_context );
+  _CORE_message_queue_Acquire_critical( the_message_queue, queue_context );
 
   count = the_message_queue->number_of_pending_messages;
   if ( count != 0 ) {
@@ -78,6 +78,6 @@ uint32_t   _CORE_message_queue_Flush(
     _Chain_Initialize_empty( &the_message_queue->Pending_messages );
   }
 
-  _CORE_message_queue_Release( the_message_queue, lock_context );
+  _CORE_message_queue_Release( the_message_queue, queue_context );
   return count;
 }
diff --git a/cpukit/score/src/coremsgseize.c b/cpukit/score/src/coremsgseize.c
index fcc95a7..b05ddd6 100644
--- a/cpukit/score/src/coremsgseize.c
+++ b/cpukit/score/src/coremsgseize.c
@@ -24,7 +24,6 @@
 #include <rtems/score/coremsgimpl.h>
 #include <rtems/score/thread.h>
 #include <rtems/score/statesimpl.h>
-#include <rtems/score/wkspace.h>
 
 void _CORE_message_queue_Seize(
   CORE_message_queue_Control *the_message_queue,
@@ -33,7 +32,7 @@ void _CORE_message_queue_Seize(
   size_t                     *size_p,
   bool                        wait,
   Watchdog_Interval           timeout,
-  ISR_lock_Context           *lock_context
+  Thread_queue_Context       *queue_context
 )
 {
   CORE_message_queue_Buffer_control *the_message;
@@ -58,7 +57,7 @@ void _CORE_message_queue_Seize(
        *  So return immediately.
        */
       _CORE_message_queue_Free_message_buffer(the_message_queue, the_message);
-      _CORE_message_queue_Release( the_message_queue, lock_context );
+      _CORE_message_queue_Release( the_message_queue, queue_context );
       return;
     #else
     {
@@ -80,7 +79,7 @@ void _CORE_message_queue_Seize(
           the_message_queue,
           the_message
         );
-        _CORE_message_queue_Release( the_message_queue, lock_context );
+        _CORE_message_queue_Release( the_message_queue, queue_context );
         return;
       }
 
@@ -100,8 +99,7 @@ void _CORE_message_queue_Seize(
         &the_message_queue->Wait_queue.Queue,
         the_message_queue->operations,
         the_thread,
-        NULL,
-        lock_context
+        queue_context
       );
       return;
     }
@@ -109,7 +107,7 @@ void _CORE_message_queue_Seize(
   }
 
   if ( !wait ) {
-    _CORE_message_queue_Release( the_message_queue, lock_context );
+    _CORE_message_queue_Release( the_message_queue, queue_context );
     executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT;
     return;
   }
@@ -125,6 +123,6 @@ void _CORE_message_queue_Seize(
     STATES_WAITING_FOR_MESSAGE,
     timeout,
     CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
-    lock_context
+    &queue_context->Lock_context
   );
 }
diff --git a/cpukit/score/src/coremsgsubmit.c b/cpukit/score/src/coremsgsubmit.c
index 68067cc..a867741 100644
--- a/cpukit/score/src/coremsgsubmit.c
+++ b/cpukit/score/src/coremsgsubmit.c
@@ -25,25 +25,22 @@
 #include <rtems/score/statesimpl.h>
 #include <rtems/score/wkspace.h>
 
-CORE_message_queue_Status _CORE_message_queue_Do_submit(
+CORE_message_queue_Status _CORE_message_queue_Submit(
   CORE_message_queue_Control       *the_message_queue,
   Thread_Control                   *executing,
   const void                       *buffer,
   size_t                            size,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout           mp_callout,
-#endif
   CORE_message_queue_Submit_types   submit_type,
   bool                              wait,
   Watchdog_Interval                 timeout,
-  ISR_lock_Context                 *lock_context
+  Thread_queue_Context             *queue_context
 )
 {
   CORE_message_queue_Buffer_control *the_message;
   Thread_Control                    *the_thread;
 
   if ( size > the_message_queue->maximum_message_size ) {
-    _CORE_message_queue_Release( the_message_queue, lock_context );
+    _CORE_message_queue_Release( the_message_queue, queue_context );
     return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
   }
 
@@ -55,9 +52,8 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
     the_message_queue,
     buffer,
     size,
-    mp_callout,
     submit_type,
-    lock_context
+    queue_context
   );
   if ( the_thread != NULL ) {
     return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
@@ -90,20 +86,20 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
     ) {
       ( *the_message_queue->notify_handler )(
         the_message_queue,
-        lock_context
+        queue_context
       );
     } else {
-      _CORE_message_queue_Release( the_message_queue, lock_context );
+      _CORE_message_queue_Release( the_message_queue, queue_context );
     }
 #else
-    _CORE_message_queue_Release( the_message_queue, lock_context );
+    _CORE_message_queue_Release( the_message_queue, queue_context );
 #endif
 
     return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
   }
 
   #if !defined(RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND)
-    _CORE_message_queue_Release( the_message_queue, lock_context );
+    _CORE_message_queue_Release( the_message_queue, queue_context );
     return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
   #else
     /*
@@ -112,7 +108,7 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
      *  on the queue.
      */
     if ( !wait ) {
-      _CORE_message_queue_Release( the_message_queue, lock_context );
+      _CORE_message_queue_Release( the_message_queue, queue_context );
       return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
     }
 
@@ -121,7 +117,7 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
      *  deadly to block in an ISR.
      */
     if ( _ISR_Is_in_progress() ) {
-      _CORE_message_queue_Release( the_message_queue, lock_context );
+      _CORE_message_queue_Release( the_message_queue, queue_context );
       return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
     }
 
@@ -143,7 +139,7 @@ CORE_message_queue_Status _CORE_message_queue_Do_submit(
       STATES_WAITING_FOR_MESSAGE,
       timeout,
       CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
-      lock_context
+      &queue_context->Lock_context
     );
     return executing->Wait.return_code;
   #endif
diff --git a/cpukit/score/src/coremutex.c b/cpukit/score/src/coremutex.c
index ea5e759..ecca244 100644
--- a/cpukit/score/src/coremutex.c
+++ b/cpukit/score/src/coremutex.c
@@ -92,9 +92,9 @@ CORE_mutex_Status _CORE_mutex_Initialize(
 }
 
 Thread_Control *_CORE_mutex_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   the_thread->Wait.return_code = CORE_MUTEX_WAS_DELETED;
@@ -103,9 +103,9 @@ Thread_Control *_CORE_mutex_Was_deleted(
 }
 
 Thread_Control *_CORE_mutex_Unsatisfied_nowait(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   the_thread->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
diff --git a/cpukit/score/src/coremutexsurrender.c b/cpukit/score/src/coremutexsurrender.c
index 746fee1..040a580 100644
--- a/cpukit/score/src/coremutexsurrender.c
+++ b/cpukit/score/src/coremutexsurrender.c
@@ -23,12 +23,9 @@
 #include <rtems/score/coremuteximpl.h>
 #include <rtems/score/thread.h>
 
-CORE_mutex_Status _CORE_mutex_Do_surrender(
-  CORE_mutex_Control      *the_mutex,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout  mp_callout,
-#endif
-  ISR_lock_Context        *lock_context
+CORE_mutex_Status _CORE_mutex_Surrender(
+  CORE_mutex_Control   *the_mutex,
+  Thread_queue_Context *queue_context
 )
 {
   Thread_Control *the_thread;
@@ -46,17 +43,17 @@ CORE_mutex_Status _CORE_mutex_Do_surrender(
 
   if ( the_mutex->Attributes.only_owner_release ) {
     if ( !_Thread_Is_executing( holder ) ) {
-      _ISR_lock_ISR_enable( lock_context );
+      _ISR_lock_ISR_enable( &queue_context->Lock_context );
       return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
     }
   }
 
-  _Thread_queue_Acquire_critical( &the_mutex->Wait_queue, lock_context );
+  _CORE_mutex_Acquire_critical( the_mutex, queue_context );
 
   /* XXX already unlocked -- not right status */
 
   if ( !the_mutex->nest_count ) {
-    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+    _CORE_mutex_Release( the_mutex, queue_context );
     return CORE_MUTEX_STATUS_SUCCESSFUL;
   }
 
@@ -71,12 +68,12 @@ CORE_mutex_Status _CORE_mutex_Do_surrender(
     #if defined(RTEMS_DEBUG)
       switch ( the_mutex->Attributes.lock_nesting_behavior ) {
         case CORE_MUTEX_NESTING_ACQUIRES:
-          _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+          _CORE_mutex_Release( the_mutex, queue_context );
           return CORE_MUTEX_STATUS_SUCCESSFUL;
         #if defined(RTEMS_POSIX_API)
           case CORE_MUTEX_NESTING_IS_ERROR:
             /* should never occur */
-            _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+            _CORE_mutex_Release( the_mutex, queue_context );
             return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
         #endif
         case CORE_MUTEX_NESTING_BLOCKS:
@@ -84,7 +81,7 @@ CORE_mutex_Status _CORE_mutex_Do_surrender(
           break;
       }
     #else
-      _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+      _CORE_mutex_Release( the_mutex, queue_context );
       /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
       return CORE_MUTEX_STATUS_SUCCESSFUL;
     #endif
@@ -126,7 +123,7 @@ CORE_mutex_Status _CORE_mutex_Do_surrender(
       &the_mutex->Wait_queue.Queue,
       the_mutex->operations,
       the_thread,
-      mp_callout
+      queue_context
     );
 
 #if defined(RTEMS_MULTIPROCESSING)
@@ -155,10 +152,10 @@ CORE_mutex_Status _CORE_mutex_Do_surrender(
       unblock,
       &the_mutex->Wait_queue.Queue,
       the_thread,
-      lock_context
+      &queue_context->Lock_context
     );
   } else {
-    _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+    _CORE_mutex_Release( the_mutex, queue_context );
   }
 
   /*
diff --git a/cpukit/score/src/corerwlockobtainread.c b/cpukit/score/src/corerwlockobtainread.c
index fcbaf4a..639ea70 100644
--- a/cpukit/score/src/corerwlockobtainread.c
+++ b/cpukit/score/src/corerwlockobtainread.c
@@ -24,11 +24,11 @@
 #include <rtems/score/watchdog.h>
 
 void _CORE_RWLock_Seize_for_reading(
-  CORE_RWLock_Control *the_rwlock,
-  Thread_Control      *executing,
-  bool                 wait,
-  Watchdog_Interval    timeout,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 )
 {
   /*
@@ -37,13 +37,13 @@ void _CORE_RWLock_Seize_for_reading(
    *  If any thread is waiting, then we wait.
    */
 
-  _CORE_RWLock_Acquire_critical( the_rwlock, lock_context );
+  _CORE_RWLock_Acquire_critical( the_rwlock, queue_context );
 
   switch ( the_rwlock->current_state ) {
     case CORE_RWLOCK_UNLOCKED:
       the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_READING;
       the_rwlock->number_of_readers += 1;
-      _CORE_RWLock_Release( the_rwlock, lock_context );
+      _CORE_RWLock_Release( the_rwlock, queue_context );
       executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
       return;
 
@@ -55,7 +55,7 @@ void _CORE_RWLock_Seize_for_reading(
       );
       if ( !waiter ) {
         the_rwlock->number_of_readers += 1;
-        _CORE_RWLock_Release( the_rwlock, lock_context );
+        _CORE_RWLock_Release( the_rwlock, queue_context );
         executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
         return;
       }
@@ -70,7 +70,7 @@ void _CORE_RWLock_Seize_for_reading(
    */
 
   if ( !wait ) {
-    _CORE_RWLock_Release( the_rwlock, lock_context );
+    _CORE_RWLock_Release( the_rwlock, queue_context );
     executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE;
     return;
   }
@@ -89,6 +89,6 @@ void _CORE_RWLock_Seize_for_reading(
      STATES_WAITING_FOR_RWLOCK,
      timeout,
      CORE_RWLOCK_TIMEOUT,
-     lock_context
+     &queue_context->Lock_context
   );
 }
diff --git a/cpukit/score/src/corerwlockobtainwrite.c b/cpukit/score/src/corerwlockobtainwrite.c
index e1bb1bd..a7d1bb1 100644
--- a/cpukit/score/src/corerwlockobtainwrite.c
+++ b/cpukit/score/src/corerwlockobtainwrite.c
@@ -24,11 +24,11 @@
 #include <rtems/score/watchdog.h>
 
 void _CORE_RWLock_Seize_for_writing(
-  CORE_RWLock_Control *the_rwlock,
-  Thread_Control      *executing,
-  bool                 wait,
-  Watchdog_Interval    timeout,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_Control       *executing,
+  bool                  wait,
+  Watchdog_Interval     timeout,
+  Thread_queue_Context *queue_context
 )
 {
   /*
@@ -38,12 +38,12 @@ void _CORE_RWLock_Seize_for_writing(
    *  If any thread is waiting, then we wait.
    */
 
-  _CORE_RWLock_Acquire_critical( the_rwlock, lock_context );
+  _CORE_RWLock_Acquire_critical( the_rwlock, queue_context );
 
   switch ( the_rwlock->current_state ) {
     case CORE_RWLOCK_UNLOCKED:
       the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING;
-      _CORE_RWLock_Release( the_rwlock, lock_context );
+      _CORE_RWLock_Release( the_rwlock, queue_context );
       executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
       return;
 
@@ -57,7 +57,7 @@ void _CORE_RWLock_Seize_for_writing(
    */
 
   if ( !wait ) {
-    _CORE_RWLock_Release( the_rwlock, lock_context );
+    _CORE_RWLock_Release( the_rwlock, queue_context );
     executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE;
     return;
   }
@@ -76,6 +76,6 @@ void _CORE_RWLock_Seize_for_writing(
      STATES_WAITING_FOR_RWLOCK,
      timeout,
      CORE_RWLOCK_TIMEOUT,
-     lock_context
+     &queue_context->Lock_context
   );
 }
diff --git a/cpukit/score/src/corerwlockrelease.c b/cpukit/score/src/corerwlockrelease.c
index 6f76aad..81e01d1 100644
--- a/cpukit/score/src/corerwlockrelease.c
+++ b/cpukit/score/src/corerwlockrelease.c
@@ -35,9 +35,9 @@ static bool _CORE_RWLock_Is_waiting_for_reading(
 }
 
 static Thread_Control *_CORE_RWLock_Flush_filter(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   CORE_RWLock_Control *the_rwlock;
@@ -74,8 +74,8 @@ static Thread_Control *_CORE_RWLock_Flush_filter(
 }
 
 CORE_RWLock_Status _CORE_RWLock_Surrender(
-  CORE_RWLock_Control *the_rwlock,
-  ISR_lock_Context    *lock_context
+  CORE_RWLock_Control  *the_rwlock,
+  Thread_queue_Context *queue_context
 )
 {
   /*
@@ -85,11 +85,11 @@ CORE_RWLock_Status _CORE_RWLock_Surrender(
    *  If any thread is waiting, then we wait.
    */
 
-  _CORE_RWLock_Acquire_critical( the_rwlock, lock_context );
+  _CORE_RWLock_Acquire_critical( the_rwlock, queue_context );
 
   if ( the_rwlock->current_state == CORE_RWLOCK_UNLOCKED){
     /* This is an error at the caller site */
-    _CORE_RWLock_Release( the_rwlock, lock_context );
+    _CORE_RWLock_Release( the_rwlock, queue_context );
     return CORE_RWLOCK_SUCCESSFUL;
   }
 
@@ -98,7 +98,7 @@ CORE_RWLock_Status _CORE_RWLock_Surrender(
 
     if ( the_rwlock->number_of_readers != 0 ) {
       /* must be unlocked again */
-      _CORE_RWLock_Release( the_rwlock, lock_context );
+      _CORE_RWLock_Release( the_rwlock, queue_context );
       return CORE_RWLOCK_SUCCESSFUL;
     }
   }
@@ -119,8 +119,7 @@ CORE_RWLock_Status _CORE_RWLock_Surrender(
     &the_rwlock->Wait_queue.Queue,
     CORE_RWLOCK_TQ_OPERATIONS,
     _CORE_RWLock_Flush_filter,
-    NULL,
-    lock_context
+    queue_context
   );
   return CORE_RWLOCK_SUCCESSFUL;
 }
diff --git a/cpukit/score/src/coresem.c b/cpukit/score/src/coresem.c
index 02a3837..c94f2b7 100644
--- a/cpukit/score/src/coresem.c
+++ b/cpukit/score/src/coresem.c
@@ -38,9 +38,9 @@ void _CORE_semaphore_Initialize(
 }
 
 Thread_Control *_CORE_semaphore_Was_deleted(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   the_thread->Wait.return_code = CORE_SEMAPHORE_WAS_DELETED;
@@ -49,9 +49,9 @@ Thread_Control *_CORE_semaphore_Was_deleted(
 }
 
 Thread_Control *_CORE_semaphore_Unsatisfied_nowait(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   the_thread->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
diff --git a/cpukit/score/src/futex.c b/cpukit/score/src/futex.c
index 66085a8..d7945d1 100644
--- a/cpukit/score/src/futex.c
+++ b/cpukit/score/src/futex.c
@@ -104,25 +104,25 @@ int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
 }
 
 typedef struct {
-  ISR_lock_Context Base;
-  int              count;
-} Futex_Lock_context;
+  Thread_queue_Context Base;
+  int                  count;
+} Futex_Context;
 
 static Thread_Control *_Futex_Flush_filter(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
-  Futex_Lock_context *futex_lock_context;
+  Futex_Context *context;
 
-  futex_lock_context = (Futex_Lock_context *) lock_context;
+  context = (Futex_Context *) queue_context;
 
-  if ( futex_lock_context->count <= 0 ) {
+  if ( context->count <= 0 ) {
     return NULL;
   }
 
-  --futex_lock_context->count;
+  --context->count;
 
   return the_thread;
 }
@@ -130,10 +130,10 @@ static Thread_Control *_Futex_Flush_filter(
 int _Futex_Wake( struct _Futex_Control *_futex, int count )
 {
   Futex_Control      *futex;
-  Futex_Lock_context  lock_context;
+  Futex_Context  context;
 
   futex = _Futex_Get( _futex );
-  _Futex_Queue_acquire( futex, &lock_context.Base );
+  _Futex_Queue_acquire( futex, &context.Base.Lock_context );
 
   /*
    * For some synchronization objects like barriers the _Futex_Wake() must be
@@ -141,17 +141,16 @@ int _Futex_Wake( struct _Futex_Control *_futex, int count )
    * check this condition early.
    */
   if ( __predict_true( _Thread_queue_Is_empty( &futex->Queue.Queue ) ) ) {
-    _Futex_Queue_release( futex, &lock_context.Base );
+    _Futex_Queue_release( futex, &context.Base.Lock_context );
     return 0;
   }
 
-  lock_context.count = count;
+  context.count = count;
   return (int) _Thread_queue_Flush_critical(
     &futex->Queue.Queue,
     FUTEX_TQ_OPERATIONS,
     _Futex_Flush_filter,
-    NULL,
-    &lock_context.Base
+    &context.Base
   );
 }
 
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index 0b51e38..4022a80 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -321,24 +321,25 @@ void _MPCI_Receive_server(
 )
 {
 
-  MP_packet_Prefix         *the_packet;
-  MPCI_Packet_processor     the_function;
-  Thread_Control           *executing;
-  ISR_lock_Context          lock_context;
+  MP_packet_Prefix      *the_packet;
+  MPCI_Packet_processor  the_function;
+  Thread_Control        *executing;
+  Thread_queue_Context   queue_context;
 
   executing = _Thread_Get_executing();
+  _Thread_queue_Context_initialize( &queue_context, NULL );
 
   for ( ; ; ) {
 
     executing->receive_packet = NULL;
 
-    _ISR_lock_ISR_disable( &lock_context );
+    _ISR_lock_ISR_disable( &queue_context.Lock_context );
     _CORE_semaphore_Seize(
       &_MPCI_Semaphore,
       executing,
       true,
       WATCHDOG_NO_TIMEOUT,
-      &lock_context
+      &queue_context
     );
 
     for ( ; ; ) {
@@ -370,10 +371,10 @@ void _MPCI_Receive_server(
 
 void _MPCI_Announce ( void )
 {
-  ISR_lock_Context lock_context;
+  Thread_queue_Context queue_context;
 
-  _ISR_lock_ISR_disable( &lock_context );
-  (void) _CORE_semaphore_Surrender( &_MPCI_Semaphore, 0, &lock_context );
+  _ISR_lock_ISR_disable( &queue_context.Lock_context );
+  (void) _CORE_semaphore_Surrender( &_MPCI_Semaphore, &queue_context );
 }
 
 void _MPCI_Internal_packets_Send_process_packet (
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index e27075e..0b12232 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -125,11 +125,11 @@ static void _Mutex_Acquire_slow(
 }
 
 static void _Mutex_Release_slow(
-  Mutex_Control      *mutex,
-  Thread_Control     *executing,
-  Thread_queue_Heads *heads,
-  bool                keep_priority,
-  ISR_lock_Context   *lock_context
+  Mutex_Control        *mutex,
+  Thread_Control       *executing,
+  Thread_queue_Heads   *heads,
+  bool                  keep_priority,
+  Thread_queue_Context *queue_context
 )
 {
   if (heads != NULL) {
@@ -146,17 +146,17 @@ static void _Mutex_Release_slow(
       &mutex->Queue.Queue,
       operations,
       first,
-      NULL
+      queue_context
     );
     _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
     _Thread_queue_Unblock_critical(
       unblock,
       &mutex->Queue.Queue,
       first,
-      lock_context
+      &queue_context->Lock_context
     );
   } else {
-    _Mutex_Queue_release( mutex, lock_context);
+    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
   }
 
   if ( !keep_priority ) {
@@ -169,9 +169,9 @@ static void _Mutex_Release_slow(
 }
 
 static void _Mutex_Release_critical(
-  Mutex_Control *mutex,
-  Thread_Control *executing,
-  ISR_lock_Context *lock_context
+  Mutex_Control        *mutex,
+  Thread_Control       *executing,
+  Thread_queue_Context *queue_context
 )
 {
   Thread_queue_Heads *heads;
@@ -193,14 +193,14 @@ static void _Mutex_Release_critical(
     || !executing->priority_restore_hint;
 
   if ( __predict_true( heads == NULL && keep_priority ) ) {
-    _Mutex_Queue_release( mutex, lock_context );
+    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
   } else {
     _Mutex_Release_slow(
       mutex,
       executing,
       heads,
       keep_priority,
-      lock_context
+      queue_context
     );
   }
 }
@@ -297,16 +297,17 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
 
 void _Mutex_Release( struct _Mutex_Control *_mutex )
 {
-  Mutex_Control    *mutex;
-  ISR_lock_Context  lock_context;
-  Thread_Control   *executing;
+  Mutex_Control        *mutex;
+  Thread_queue_Context  queue_context;
+  Thread_Control       *executing;
 
   mutex = _Mutex_Get( _mutex );
-  executing = _Mutex_Queue_acquire( mutex, &lock_context );
+  _Thread_queue_Context_initialize( &queue_context, NULL );
+  executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );
 
   _Assert( mutex->owner == executing );
 
-  _Mutex_Release_critical( mutex, executing, &lock_context );
+  _Mutex_Release_critical( mutex, executing, &queue_context );
 }
 
 static Mutex_recursive_Control *_Mutex_recursive_Get(
@@ -426,23 +427,27 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
 void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
 {
   Mutex_recursive_Control *mutex;
-  ISR_lock_Context         lock_context;
+  Thread_queue_Context     queue_context;
   Thread_Control          *executing;
   unsigned int             nest_level;
 
   mutex = _Mutex_recursive_Get( _mutex );
-  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+  _Thread_queue_Context_initialize( &queue_context, NULL );
+  executing = _Mutex_Queue_acquire(
+    &mutex->Mutex,
+    &queue_context.Lock_context
+  );
 
   _Assert( mutex->Mutex.owner == executing );
 
   nest_level = mutex->nest_level;
 
   if ( __predict_true( nest_level == 0 ) ) {
-    _Mutex_Release_critical( &mutex->Mutex, executing, &lock_context );
+    _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
   } else {
     mutex->nest_level = nest_level - 1;
 
-    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+    _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
   }
 }
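Note the split visible in the mutex code above: the low-level queue lock helpers still operate on a plain ISR_lock_Context, so callers pass &queue_context.Lock_context to them, while the release and extract paths that may need the MP callout receive the whole Thread_queue_Context. The declarations below are inferred from the call sites and are not part of this hunk:

/* Inferred from the calls above; the definitions are earlier in mutex.c. */
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
);

static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
);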
 
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index 4e70b79..3d0d5f5 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -110,18 +110,19 @@ void _Semaphore_Wait( struct _Semaphore_Control *_sem )
 
 void _Semaphore_Post( struct _Semaphore_Control *_sem )
 {
-  Semaphore_Control  *sem;
-  ISR_lock_Context    lock_context;
-  Thread_queue_Heads *heads;
+  Semaphore_Control    *sem;
+  Thread_queue_Context  queue_context;
+  Thread_queue_Heads   *heads;
 
   sem = _Semaphore_Get( _sem );
-  _Semaphore_Queue_acquire( sem, &lock_context );
+  _Thread_queue_Context_initialize( &queue_context, NULL );
+  _Semaphore_Queue_acquire( sem, &queue_context.Lock_context );
 
   heads = sem->Queue.Queue.heads;
   if ( heads == NULL ) {
     _Assert( sem->count < UINT_MAX );
     ++sem->count;
-    _Semaphore_Queue_release( sem, &lock_context );
+    _Semaphore_Queue_release( sem, &queue_context.Lock_context );
   } else {
     const Thread_queue_Operations *operations;
     Thread_Control *first;
@@ -133,8 +134,7 @@ void _Semaphore_Post( struct _Semaphore_Control *_sem )
       &sem->Queue.Queue,
       operations,
       first,
-      NULL,
-      &lock_context
+      &queue_context
     );
   }
 }
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index cda7c86..a1a37e1 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -99,7 +99,7 @@ bool _Thread_queue_Do_extract_locked(
   Thread_Control                *the_thread
 #if defined(RTEMS_MULTIPROCESSING)
   ,
-  Thread_queue_MP_callout        mp_callout
+  const Thread_queue_Context    *queue_context
 #endif
 )
 {
@@ -108,12 +108,13 @@ bool _Thread_queue_Do_extract_locked(
 
 #if defined(RTEMS_MULTIPROCESSING)
   if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
-    Thread_Proxy_control *the_proxy;
-
-    _Assert( mp_callout != NULL );
+    Thread_Proxy_control    *the_proxy;
+    Thread_queue_MP_callout  mp_callout;
 
     the_proxy = (Thread_Proxy_control *) the_thread;
-    the_proxy->thread_queue_callout = mp_callout;
+    mp_callout = queue_context->mp_callout;
+    _Assert( mp_callout != NULL );
+    the_proxy->thread_queue_callout = queue_context->mp_callout;
   }
 #endif
 
@@ -164,14 +165,11 @@ void _Thread_queue_Unblock_critical(
   }
 }
 
-void _Thread_queue_Do_extract_critical(
+void _Thread_queue_Extract_critical(
   Thread_queue_Queue            *queue,
   const Thread_queue_Operations *operations,
   Thread_Control                *the_thread,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout        mp_callout,
-#endif
-  ISR_lock_Context              *lock_context
+  Thread_queue_Context          *queue_context
 )
 {
   bool unblock;
@@ -180,24 +178,28 @@ void _Thread_queue_Do_extract_critical(
     queue,
     operations,
     the_thread,
-    mp_callout
+    queue_context
   );
 
   _Thread_queue_Unblock_critical(
     unblock,
     queue,
     the_thread,
-    lock_context
+    &queue_context->Lock_context
   );
 }
 
 void _Thread_queue_Extract( Thread_Control *the_thread )
 {
-  ISR_lock_Context    lock_context;
-  void               *lock;
-  Thread_queue_Queue *queue;
+  Thread_queue_Context  queue_context;
+  void                 *lock;
+  Thread_queue_Queue   *queue;
 
-  lock = _Thread_Lock_acquire( the_thread, &lock_context );
+  _Thread_queue_Context_initialize(
+    &queue_context,
+    _Thread_queue_MP_callout_do_nothing
+  );
+  lock = _Thread_Lock_acquire( the_thread, &queue_context.Lock_context );
 
   queue = the_thread->Wait.queue;
 
@@ -208,11 +210,10 @@ void _Thread_queue_Extract( Thread_Control *the_thread )
       queue,
       the_thread->Wait.operations,
       the_thread,
-      _Thread_queue_MP_callout_do_nothing,
-      &lock_context
+      &queue_context
     );
   } else {
-    _Thread_Lock_release( lock, &lock_context );
+    _Thread_Lock_release( lock, &queue_context.Lock_context );
   }
 }
 
@@ -225,10 +226,11 @@ Thread_Control *_Thread_queue_Do_dequeue(
 #endif
 )
 {
-  ISR_lock_Context  lock_context;
-  Thread_Control   *the_thread;
+  Thread_queue_Context  queue_context;
+  Thread_Control       *the_thread;
 
-  _Thread_queue_Acquire( the_thread_queue, &lock_context );
+  _Thread_queue_Context_initialize( &queue_context, mp_callout );
+  _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );
 
   the_thread = _Thread_queue_First_locked( the_thread_queue, operations );
 
@@ -239,11 +241,10 @@ Thread_Control *_Thread_queue_Do_dequeue(
       &the_thread_queue->Queue,
       operations,
       the_thread,
-      mp_callout,
-      &lock_context
+      &queue_context
     );
   } else {
-    _Thread_queue_Release( the_thread_queue, &lock_context );
+    _Thread_queue_Release( the_thread_queue, &queue_context.Lock_context );
   }
 
   return the_thread;
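_Thread_queue_Extract() has no remote node to notify, so it installs a do-nothing callout in the context before taking the thread lock. The sketch below assumes the callout type keeps its existing proxy/object-id signature; treat it as illustrative only.

/* Assumed callout signature and the no-op installed by
 * _Thread_queue_Extract() above. */
typedef void ( *Thread_queue_MP_callout )(
  Thread_Control *the_proxy,
  Objects_Id      mp_id
);

#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_MP_callout_do_nothing(
  Thread_Control *the_proxy,
  Objects_Id      mp_id
)
{
  /* Intentionally empty: there is nothing to tell a remote node. */
  (void) the_proxy;
  (void) mp_id;
}
#endif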
diff --git a/cpukit/score/src/threadqflush.c b/cpukit/score/src/threadqflush.c
index 0413388..8b23194 100644
--- a/cpukit/score/src/threadqflush.c
+++ b/cpukit/score/src/threadqflush.c
@@ -21,24 +21,21 @@
 #include <rtems/score/threadimpl.h>
 
 Thread_Control *_Thread_queue_Flush_default_filter(
-  Thread_Control     *the_thread,
-  Thread_queue_Queue *queue,
-  ISR_lock_Context   *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
   (void) queue;
-  (void) lock_context;
+  (void) queue_context;
   return the_thread;
 }
 
-size_t _Thread_queue_Do_flush_critical(
+size_t _Thread_queue_Flush_critical(
   Thread_queue_Queue            *queue,
   const Thread_queue_Operations *operations,
   Thread_queue_Flush_filter      filter,
-#if defined(RTEMS_MULTIPROCESSING)
-  Thread_queue_MP_callout        mp_callout,
-#endif
-  ISR_lock_Context              *lock_context
+  Thread_queue_Context          *queue_context
 )
 {
   size_t         flushed;
@@ -60,7 +57,7 @@ size_t _Thread_queue_Do_flush_critical(
     }
 
     first = ( *operations->first )( heads );
-    first = ( *filter )( first, queue, lock_context );
+    first = ( *filter )( first, queue, queue_context );
     if ( first == NULL ) {
       break;
     }
@@ -69,7 +66,7 @@ size_t _Thread_queue_Do_flush_critical(
       queue,
       operations,
       first,
-      mp_callout
+      queue_context
     );
     if ( do_unblock ) {
       _Chain_Append_unprotected( &unblock, &first->Wait.Node.Chain );
@@ -84,8 +81,10 @@ size_t _Thread_queue_Do_flush_critical(
   if ( node != tail ) {
     Per_CPU_Control *cpu_self;
 
-    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
-    _Thread_queue_Queue_release( queue, lock_context );
+    cpu_self = _Thread_Dispatch_disable_critical(
+      &queue_context->Lock_context
+    );
+    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
 
     do {
       Thread_Control *the_thread;
@@ -100,7 +99,7 @@ size_t _Thread_queue_Do_flush_critical(
 
     _Thread_Dispatch_enable( cpu_self );
   } else {
-    _Thread_queue_Queue_release( queue, lock_context );
+    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
   }
 
   return flushed;
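With this change a flush filter receives the Thread_queue_Context instead of a bare lock context, so a caller can pass extra per-flush data to its filter by embedding the context in a larger structure and casting back, as the futex and thread-join filters elsewhere in this diff do. The assumed typedef, matching _Thread_queue_Flush_default_filter() above:

/* Assumed filter typedef, matching the default filter shown above. */
typedef Thread_Control *( *Thread_queue_Flush_filter )(
  Thread_Control       *the_thread,
  Thread_queue_Queue   *queue,
  Thread_queue_Context *queue_context
);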
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 30536f7..52d68de 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -87,24 +87,24 @@ static void _Thread_Raise_real_priority(
 }
 
 typedef struct {
-  ISR_lock_Context  Base;
+  Thread_queue_Context  Base;
 #if defined(RTEMS_POSIX_API)
-  void             *exit_value;
+  void                 *exit_value;
 #endif
-} Thread_Join_lock_context;
+} Thread_Join_context;
 
 #if defined(RTEMS_POSIX_API)
 static Thread_Control *_Thread_Join_flush_filter(
-  Thread_Control      *the_thread,
-  Thread_queue_Queue  *queue,
-  ISR_lock_Context    *lock_context
+  Thread_Control       *the_thread,
+  Thread_queue_Queue   *queue,
+  Thread_queue_Context *queue_context
 )
 {
-  Thread_Join_lock_context *join_lock_context;
+  Thread_Join_context *join_context;
 
-  join_lock_context = (Thread_Join_lock_context *) lock_context;
+  join_context = (Thread_Join_context *) queue_context;
 
-  the_thread->Wait.return_argument = join_lock_context->exit_value;
+  the_thread->Wait.return_argument = join_context->exit_value;
 
   return the_thread;
 }
@@ -112,13 +112,17 @@ static Thread_Control *_Thread_Join_flush_filter(
 
 static void _Thread_Wake_up_joining_threads( Thread_Control *the_thread )
 {
-  Thread_Join_lock_context join_lock_context;
+  Thread_Join_context join_context;
 
 #if defined(RTEMS_POSIX_API)
-  join_lock_context.exit_value = the_thread->Life.exit_value;
+  join_context.exit_value = the_thread->Life.exit_value;
 #endif
 
-  _Thread_State_acquire( the_thread, &join_lock_context.Base );
+  _Thread_queue_Context_initialize( &join_context.Base, NULL );
+  _Thread_queue_Acquire(
+    &the_thread->Join_queue,
+    &join_context.Base.Lock_context
+  );
   _Thread_queue_Flush_critical(
     &the_thread->Join_queue.Queue,
     THREAD_JOIN_TQ_OPERATIONS,
@@ -127,8 +131,7 @@ static void _Thread_Wake_up_joining_threads( Thread_Control *the_thread )
 #else
     _Thread_queue_Flush_default_filter,
 #endif
-    NULL,
-    &join_lock_context.Base
+    &join_context.Base
   );
 }
 
diff --git a/testsuites/sptests/spintrcritical22/init.c b/testsuites/sptests/spintrcritical22/init.c
index b5044d9..3670d1a 100644
--- a/testsuites/sptests/spintrcritical22/init.c
+++ b/testsuites/sptests/spintrcritical22/init.c
@@ -33,12 +33,12 @@ static test_context ctx_instance;
 
 static Semaphore_Control *get_semaphore_control(rtems_id id)
 {
-  ISR_lock_Context lock_context;
+  Thread_queue_Context queue_context;
   Semaphore_Control *sem;
 
-  sem = _Semaphore_Get(id, &lock_context);
+  sem = _Semaphore_Get(id, &queue_context, NULL);
   rtems_test_assert(sem != NULL);
-  _ISR_lock_ISR_enable(&lock_context);
+  _ISR_lock_ISR_enable(&queue_context.Lock_context);
 
   return sem;
 }
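The test adapts to the wider _Semaphore_Get() interface: the lookup now also prepares a queue context, and the test passes NULL for the MP callout because it never extracts a thread. The prototype below is an assumption inferred from the two call sites in this diff; see the semaphore implementation header for the real declaration.

/* Assumed prototype, inferred from the call sites in this diff. */
Semaphore_Control *_Semaphore_Get(
  rtems_id                 id,
  Thread_queue_Context    *queue_context,
  Thread_queue_MP_callout  mp_callout
);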
diff --git a/testsuites/tmtests/tm26/task1.c b/testsuites/tmtests/tm26/task1.c
index da37eee..68cb246 100644
--- a/testsuites/tmtests/tm26/task1.c
+++ b/testsuites/tmtests/tm26/task1.c
@@ -477,9 +477,10 @@ rtems_task Floating_point_task_2(
 
 void complete_test( void )
 {
-  uint32_t         index;
-  rtems_id         task_id;
-  ISR_lock_Context lock_context;
+  uint32_t             index;
+  rtems_id             task_id;
+  ISR_lock_Context     lock_context;
+  Thread_queue_Context queue_context;
 
   benchmark_timer_initialize();
     thread_resume( Middle_tcb );
@@ -513,8 +514,8 @@ void complete_test( void )
 
   benchmark_timer_initialize();
     for ( index=1 ; index <= OPERATION_COUNT ; index++ ) {
-      (void) _Semaphore_Get( Semaphore_id, &lock_context );
-      _ISR_lock_ISR_enable( &lock_context );
+      (void) _Semaphore_Get( Semaphore_id, &queue_context, NULL );
+      _ISR_lock_ISR_enable( &queue_context.Lock_context );
     }
   semaphore_get_time = benchmark_timer_read();
 


