[rtems-central commit] spec: Specify semaphore obtain

Sebastian Huber sebh at rtems.org
Mon Apr 19 14:58:39 UTC 2021


Module:    rtems-central
Branch:    master
Commit:    93d6393c4b06e5de47be3a962771b140d72aa227
Changeset: http://git.rtems.org/rtems-central/commit/?id=93d6393c4b06e5de47be3a962771b140d72aa227

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed Apr 14 20:36:06 2021 +0200

spec: Specify semaphore obtain

---

 spec/rtems/sem/req/obtain.yml          | 260 ++++++++++++++++++-
 spec/rtems/sem/req/release.yml         |   8 -
 spec/score/mtx/req/seize-try.yml       | 268 ++++++++++++++++++++
 spec/score/mtx/req/seize-wait.yml      | 353 ++++++++++++++++++++++++++
 spec/score/sem/req/seize-try.yml       |   4 +-
 spec/score/sem/req/seize-wait.yml      |  47 ++--
 spec/score/status/if/deadlock.yml      |  12 +
 spec/score/tq/req/enqueue-ceiling.yml  | 429 ++++++++++++++++++++++++++++++++
 spec/score/tq/req/enqueue-deadlock.yml | 142 +++++++++++
 spec/score/tq/req/enqueue-fifo.yml     |  57 +++--
 spec/score/tq/req/enqueue-mrsp.yml     | 439 +++++++++++++++++++++++++++++++++
 spec/score/tq/req/enqueue-priority.yml | 178 ++++++-------
 12 files changed, 2019 insertions(+), 178 deletions(-)

diff --git a/spec/rtems/sem/req/obtain.yml b/spec/rtems/sem/req/obtain.yml
index 62cee1b..b9cf485 100644
--- a/spec/rtems/sem/req/obtain.yml
+++ b/spec/rtems/sem/req/obtain.yml
@@ -32,12 +32,84 @@ post-conditions:
     text: |
       The calling task shall wait to seize the semaphore as specified by
       ${/score/sem/req/seize-wait}.
+  - name: MtxSeizeTry
+    test-code: |
+      ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+      ctx->tq_mtx_ctx.get_owner = TQMtxGetOwnerClassic;
+      ${/score/mtx/req/seize-try:/test-run}( &ctx->tq_mtx_ctx );
+    text: |
+      The calling task shall try to seize the mutex as specified by
+      ${/score/mtx/req/seize-try} where an enqueue blocks and a recursive seize
+      is allowed.
+  - name: MtxSeizeWait
+    test-code: |
+      ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
+      ctx->tq_mtx_ctx.get_owner = TQMtxGetOwnerClassic;
+      ${/score/mtx/req/seize-wait:/test-run}( &ctx->tq_mtx_ctx );
+    text: |
+      The calling task shall wait to seize the mutex as specified by
+      ${/score/mtx/req/seize-wait} where an enqueue blocks and a recursive
+      seize is allowed.
+  - name: CeilingMtxSeizeTry
+    test-code: |
+      ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+      ctx->tq_mtx_ctx.get_owner = TQMtxGetOwnerClassic;
+      ${/score/mtx/req/seize-try:/test-run}( &ctx->tq_mtx_ctx );
+    text: |
+      The calling task shall try to seize the mutex as specified by
+      ${/score/mtx/req/seize-try} where an enqueue blocks, a recursive seize is
+      allowed, and a priority ceiling is used.
+  - name: CeilingMtxSeizeWait
+    test-code: |
+      ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+      ctx->tq_mtx_ctx.get_owner = TQMtxGetOwnerClassic;
+      ${/score/mtx/req/seize-wait:/test-run}( &ctx->tq_mtx_ctx );
+    text: |
+      The calling task shall wait to seize the mutex as specified by
+      ${/score/mtx/req/seize-wait} where an enqueue blocks, a recursive seize
+      is allowed, and a priority ceiling is used.
+  - name: MrsPMtxSeizeTry
+    test-code: |
+      ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_NO_STATUS;
+      ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+      ctx->tq_mtx_ctx.get_owner = TQMtxGetOwnerClassic;
+      ${/score/mtx/req/seize-try:/test-run}( &ctx->tq_mtx_ctx );
+    text: |
+      The calling task shall try to seize the mutex as specified by
+      ${/score/mtx/req/seize-try} where an enqueue is sticky, a recursive seize
+      returns an error status, and a priority ceiling is used.
+  - name: MrsPMtxSeizeWait
+    test-code: |
+      ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_NO_STATUS;
+      ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
+      ctx->tq_mtx_ctx.get_owner = TQMtxGetOwnerClassic;
+      ${/score/mtx/req/seize-wait:/test-run}( &ctx->tq_mtx_ctx );
+    text: |
+      The calling task shall wait to seize the mutex as specified by
+      ${/score/mtx/req/seize-wait} where an enqueue is sticky, a recursive
+      seize returns an error status, and a priority ceiling is used.
   test-epilogue: null
   test-prologue: |
     rtems_status_code sc;
 pre-conditions:
 - name: Class
   states:
+  - name: MrsP
+    test-code: |
+      ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE |
+        RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+    text: |
+      While the semaphore object is a MrsP semaphore.
   - name: Counting
     test-code: |
       ctx->attribute_set |= RTEMS_COUNTING_SEMAPHORE;
@@ -48,6 +120,21 @@ pre-conditions:
       ctx->attribute_set |= RTEMS_SIMPLE_BINARY_SEMAPHORE;
     text: |
       While the semaphore object is a simple binary semaphore.
+  - name: Binary
+    test-code: |
+      ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE;
+    text: |
+      While the semaphore object is a binary semaphore.
+  - name: PrioCeiling
+    test-code: |
+      ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING;
+    text: |
+      While the semaphore object is a priority ceiling semaphore.
+  - name: PrioInherit
+    test-code: |
+      ctx->attribute_set |= RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY;
+    text: |
+      While the semaphore object is a priority inheritance semaphore.
   test-epilogue: null
   test-prologue: null
 - name: Discipline
@@ -111,7 +198,13 @@ pre-conditions:
 rationale: null
 references: []
 requirement-type: functional
-skip-reasons: {}
+skip-reasons:
+  NeedsPriorityDiscipline: |
+    Binary semaphores with a locking protocol are required to use the priority
+    task wait queue discipline.
+  NoMrsP: |
+    Where the system is built with SMP support disabled, the MrsP locking
+    protocol is not available.
 test-action: |
   rtems_status_code sc;
 
@@ -119,7 +212,7 @@ test-action: |
     NAME,
     1,
     ctx->attribute_set,
-    PRIO_ULTRA_HIGH,
+    PRIO_VERY_HIGH,
     &ctx->tq_ctx.thread_queue_id
   );
   T_rsc_success( sc );
@@ -136,6 +229,7 @@ test-context:
   member: |
     union {
       TQContext tq_ctx;
+      TQMtxContext tq_mtx_ctx;
       TQSemContext tq_sem_ctx;
     }
 - brief: |
@@ -151,17 +245,19 @@ test-includes:
 - string.h
 test-local-includes:
 - tx-support.h
+- tr-mtx-seize-try.h
+- tr-mtx-seize-wait.h
 - tr-sem-seize-try.h
 - tr-sem-seize-wait.h
 - tx-thread-queue.h
-test-prepare: null
+test-prepare: |
+  ctx->attribute_set = RTEMS_DEFAULT_ATTRIBUTES;
 test-setup:
   brief: null
   code: |
     memset( ctx, 0, sizeof( *ctx ) );
     ctx->tq_ctx.enqueue = TQEnqueueClassicSem;
-    ctx->tq_ctx.dequeue_one = TQDequeueClassicSem;
-    ctx->tq_ctx.dequeue_all = TQDequeueClassicSem;
+    ctx->tq_ctx.surrender = TQSurrenderClassicSem;
     ctx->tq_ctx.convert_status = TQConvertStatusClassic;
     TQInitialize( &ctx->tq_ctx );
   description: null
@@ -182,16 +278,33 @@ transition-map:
   post-conditions:
     Action: InvId
   pre-conditions:
-    Class: all
+    Class:
+    - Counting
+    - Simple
+    - Binary
     Discipline: all
     Id:
     - Invalid
     Wait: all
 - enabled-by: true
   post-conditions:
+    Action: InvId
+  pre-conditions:
+    Class:
+    - PrioCeiling
+    - PrioInherit
+    Discipline:
+    - Priority
+    Id:
+    - Invalid
+    Wait: all
+- enabled-by: true
+  post-conditions:
     Action: SemSeizeTry
   pre-conditions:
-    Class: all
+    Class:
+    - Counting
+    - Simple
     Discipline: all
     Id:
     - Valid
@@ -201,11 +314,142 @@ transition-map:
   post-conditions:
     Action: SemSeizeWait
   pre-conditions:
-    Class: all
+    Class:
+    - Counting
+    - Simple
     Discipline: all
     Id:
     - Valid
     Wait:
     - Timeout
     - Forever
+- enabled-by: true
+  post-conditions:
+    Action: MtxSeizeTry
+  pre-conditions:
+    Class:
+    - Binary
+    Discipline: all
+    Id:
+    - Valid
+    Wait:
+    - 'No'
+- enabled-by: true
+  post-conditions:
+    Action: MtxSeizeWait
+  pre-conditions:
+    Class:
+    - Binary
+    Discipline: all
+    Id:
+    - Valid
+    Wait:
+    - Timeout
+    - Forever
+- enabled-by: true
+  post-conditions:
+    Action: CeilingMtxSeizeTry
+  pre-conditions:
+    Class:
+    - PrioCeiling
+    Discipline:
+    - Priority
+    Id:
+    - Valid
+    Wait:
+    - 'No'
+- enabled-by: true
+  post-conditions:
+    Action: CeilingMtxSeizeWait
+  pre-conditions:
+    Class:
+    - PrioCeiling
+    Discipline:
+    - Priority
+    Id:
+    - Valid
+    Wait:
+    - Timeout
+    - Forever
+- enabled-by: true
+  post-conditions:
+    Action: MtxSeizeTry
+  pre-conditions:
+    Class:
+    - PrioInherit
+    Discipline:
+    - Priority
+    Id:
+    - Valid
+    Wait:
+    - 'No'
+- enabled-by: true
+  post-conditions:
+    Action: MtxSeizeWait
+  pre-conditions:
+    Class:
+    - PrioInherit
+    Discipline:
+    - Priority
+    Id:
+    - Valid
+    Wait:
+    - Timeout
+    - Forever
+- enabled-by: true
+  post-conditions: NoMrsP
+  pre-conditions:
+    Class:
+    - MrsP
+    Discipline:
+    - Priority
+    Id: all
+    Wait: all
+- enabled-by: true
+  post-conditions: NeedsPriorityDiscipline
+  pre-conditions:
+    Class:
+    - PrioCeiling
+    - PrioInherit
+    - MrsP
+    Discipline:
+    - FIFO
+    Id: all
+    Wait: all
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Action: InvId
+  pre-conditions:
+    Class:
+    - MrsP
+    Discipline:
+    - Priority
+    Id:
+    - Invalid
+    Wait: all
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Action: MrsPMtxSeizeTry
+  pre-conditions:
+    Class:
+    - MrsP
+    Discipline:
+    - Priority
+    Id:
+    - Valid
+    Wait:
+    - 'No'
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Action: MrsPMtxSeizeWait
+  pre-conditions:
+    Class:
+    - MrsP
+    Discipline:
+    - Priority
+    Id:
+    - Valid
+    Wait:
+    - Timeout
+    - Forever
 type: requirement
diff --git a/spec/rtems/sem/req/release.yml b/spec/rtems/sem/req/release.yml
index 550fffe..a55ad56 100644
--- a/spec/rtems/sem/req/release.yml
+++ b/spec/rtems/sem/req/release.yml
@@ -583,14 +583,6 @@ test-support: |
     EVENT_BUSY_WAIT = RTEMS_EVENT_6
   } Event;
 
-  static void Yield( void )
-  {
-    rtems_status_code sc;
-
-    sc = rtems_task_wake_after( RTEMS_YIELD_PROCESSOR );
-    T_rsc_success( sc );
-  }
-
   static void SynchronizeRunner( void )
   {
     rtems_event_set events;
diff --git a/spec/score/mtx/req/seize-try.yml b/spec/score/mtx/req/seize-try.yml
new file mode 100644
index 0000000..75f6b69
--- /dev/null
+++ b/spec/score/mtx/req/seize-try.yml
@@ -0,0 +1,268 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links: []
+post-conditions:
+- name: Status
+  states:
+  - name: Ok
+    test-code: |
+      T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+      TQSurrender( &ctx->tq_ctx->base );
+    text: |
+      The return status of the directive call shall be derived from
+      ${../../status/if/successful:/name}.
+  - name: Recursive
+    test-code: |
+      switch ( ctx->tq_ctx->recursive ) {
+        case TQ_MTX_RECURSIVE_YES:
+          T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+          TQSurrender( &ctx->tq_ctx->base );
+          break;
+        case TQ_MTX_RECURSIVE_NO_STATUS:
+          T_eq_int( ctx->status, Status( ctx, STATUS_DEADLOCK ) );
+          break;
+        case TQ_MTX_RECURSIVE_NO_FATAL:
+          /* TODO */
+          T_unreachable();
+          break;
+        default:
+          T_unreachable();
+          break;
+      }
+    text: |
+      Where the mutex supports a recursive seize, the return status of the
+      directive call shall be derived from ${../../status/if/successful:/name}.
+
+      Where the mutex does not support a recursive seize, where a deadlock is
+      indicated by a status code, the return status of the directive call shall
+      be derived from ${../../status/if/deadlock:/name}.
+
+      Where the mutex does not support a recursive seize, where a deadlock is
+      indicated by a fatal error, the thread queue deadlock internal error
+      shall occur.
+  - name: Unsat
+    test-code: |
+      T_eq_int( ctx->status, Status( ctx, STATUS_UNSATISFIED ) );
+    text: |
+      The return status of the directive call shall be derived from
+      ${../../status/if/unsatisfied:/name}.
+  test-epilogue: null
+  test-prologue: null
+- name: Owner
+  states:
+  - name: Nop
+    test-code: |
+      if ( ctx->owner_self ) {
+        T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
+      } else if ( ctx->owner_other ) {
+        T_eq_ptr(
+          ctx->owner_after,
+          ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+        );
+      } else {
+        T_null( ctx->owner_after );
+      }
+    text: |
+      The owner of the mutex shall not be modified.
+  - name: New
+    test-code: |
+      T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
+    text: |
+      The owner of the mutex shall be the calling thread.
+  test-epilogue: null
+  test-prologue: null
+- name: Priority
+  states:
+  - name: Nop
+    test-code: |
+      T_eq_u32( ctx->priority_after, ctx->priority_before );
+    text: |
+      The current priority of the calling thread shall not be modified.
+  - name: Ceiling
+    test-code: |
+      if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
+        T_eq_u32( ctx->priority_after, ctx->tq_ctx->priority_ceiling );
+      } else {
+        T_eq_u32( ctx->priority_after, ctx->priority_before );
+      }
+    text: |
+      Where the mutex provides a priority ceiling, the calling thread shall use
+      the priority ceiling of the mutex.
+
+      Where the mutex does not provide a priority ceiling, the current priority
+      of the calling thread shall not be modified.
+  test-epilogue: null
+  test-prologue: null
+pre-conditions:
+- name: Owner
+  states:
+  - name: 'No'
+    test-code: |
+      /* This is the default */
+    text: |
+      While the mutex has no owner.
+  - name: Self
+    test-code: |
+      ctx->owner_self = true;
+    text: |
+      While the owner of the mutex is the calling thread.
+  - name: Other
+    test-code: |
+      ctx->owner_other = true;
+    text: |
+      While the owner of the mutex is a thread other than the calling thread.
+  test-epilogue: null
+  test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons: {}
+test-action: |
+  if ( ctx->owner_self ) {
+    Status_Control status;
+
+    status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+    T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+  } else if ( ctx->owner_other ) {
+    if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+      TQSetScheduler(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        ctx->tq_ctx->base.other_scheduler_id,
+        PRIO_HIGH
+      );
+      TQSend(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_RUNNER_SYNC
+      );
+      TQSynchronizeRunner();
+    } else {
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+    }
+  }
+
+  ctx->priority_before = GetSelfPriority();
+  ctx->status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+  ctx->owner_after = TQMtxGetOwner( ctx->tq_ctx );
+  ctx->priority_after = GetSelfPriority();
+
+  if ( ctx->owner_self ) {
+    TQSurrender( &ctx->tq_ctx->base );
+  } else if ( ctx->owner_other ) {
+    if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+      TQSend(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+      );
+      TQSynchronizeRunner();
+      TQSetScheduler(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        ctx->tq_ctx->base.runner_scheduler_id,
+        PRIO_HIGH
+      );
+    } else {
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+    }
+  }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+    If this member is true, then the calling thread shall be the owner of the
+    mutex.
+  description: null
+  member: |
+    bool owner_self;
+- brief: |
+    If this member is true, then a thread other than the calling thread shall
+    be the owner of the mutex.
+  description: null
+  member: |
+    bool owner_other;
+- brief: |
+    This member contains the current priority of the calling thread before the
+    directive call.
+  description: null
+  member: |
+    rtems_task_priority priority_before;
+- brief: |
+    This member contains the return status of the directive call.
+  description: null
+  member: |
+    Status_Control status;
+- brief: |
+    This member contains the owner of the mutex after the directive call.
+  description: null
+  member: |
+    const rtems_tcb *owner_after;
+- brief: |
+    This member contains the current priority of the calling thread after the
+    directive call.
+  description: null
+  member: |
+    rtems_task_priority priority_after;
+test-context-support: null
+test-description: null
+test-header:
+  code: null
+  includes: []
+  local-includes:
+  - tx-thread-queue.h
+  run-params:
+  - description: |
+      is the thread queue context.
+    dir: inout
+    name: tq_ctx
+    specifier: TQMtxContext *${.:name}
+  target: testsuites/validation/tr-mtx-seize-try.h
+test-includes: []
+test-local-includes:
+- tr-mtx-seize-try.h
+test-prepare: |
+  ctx->owner_self = false;
+  ctx->owner_other = false;
+test-setup: null
+test-stop: null
+test-support: |
+  typedef ScoreMtxReqSeizeTry_Context Context;
+
+  static Status_Control Status( const Context *ctx, Status_Control status )
+  {
+    return TQConvertStatus( &ctx->tq_ctx->base, status );
+  }
+test-target: testsuites/validation/tr-mtx-seize-try.c
+test-teardown: null
+text: |
+  When the calling thread tries to seize the mutex.
+transition-map:
+- enabled-by: true
+  post-conditions:
+    Status: Ok
+    Owner: New
+    Priority: Ceiling
+  pre-conditions:
+    Owner:
+    - 'No'
+- enabled-by: true
+  post-conditions:
+    Status: Recursive
+    Owner: Nop
+    Priority: Nop
+  pre-conditions:
+    Owner:
+    - Self
+- enabled-by: true
+  post-conditions:
+    Status: Unsat
+    Owner: Nop
+    Priority: Nop
+  pre-conditions:
+    Owner:
+    - Other
+type: requirement
diff --git a/spec/score/mtx/req/seize-wait.yml b/spec/score/mtx/req/seize-wait.yml
new file mode 100644
index 0000000..724f5a6
--- /dev/null
+++ b/spec/score/mtx/req/seize-wait.yml
@@ -0,0 +1,353 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links: []
+post-conditions:
+- name: Status
+  states:
+  - name: Ok
+    test-code: |
+      T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+      TQSurrender( &ctx->tq_ctx->base );
+    text: |
+      The return status of the directive call shall be derived from
+      ${../../status/if/successful:/name}.
+  - name: Recursive
+    test-code: |
+      switch ( ctx->tq_ctx->recursive ) {
+        case TQ_MTX_RECURSIVE_YES:
+          T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
+          TQSurrender( &ctx->tq_ctx->base );
+          break;
+        case TQ_MTX_RECURSIVE_NO_STATUS:
+          T_eq_int( ctx->status, Status( ctx, STATUS_DEADLOCK ) );
+          break;
+        case TQ_MTX_RECURSIVE_NO_FATAL:
+          /* TODO */
+          T_unreachable();
+          break;
+        default:
+          T_unreachable();
+          break;
+      }
+    text: |
+      Where the mutex supports a recursive seize, the return status of the
+      directive call shall be derived from ${../../status/if/successful:/name}.
+
+      Where the mutex does not support a recursive seize, where a deadlock is
+      indicated by a status code, the return status of the directive call shall
+      be derived from ${../../status/if/deadlock:/name}.
+
+      Where the mutex does not support a recursive seize, where a deadlock is
+      indicated by a fatal error, the thread queue deadlock internal error
+      shall occur.
+  - name: Deadlock
+    test-code: |
+      ${../../tq/req/enqueue-deadlock:/test-run}( &ctx->tq_ctx->base );
+    text: |
+      The return status of the directive call shall be derived from
+      ${../../status/if/deadlock:/name} for deadlock scenarios specified by
+      ${../../tq/req/enqueue-deadlock}.
+  - name: Enqueued
+    test-code: |
+      switch ( ctx->tq_ctx->base.discipline ) {
+        case TQ_FIFO:
+          ${../../tq/req/enqueue-fifo:/test-run}( &ctx->tq_ctx->base );
+          break;
+        case TQ_PRIORITY:
+          if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
+            if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+              ${../../tq/req/enqueue-mrsp:/test-run}( &ctx->tq_ctx->base );
+            } else {
+              ${../../tq/req/enqueue-ceiling:/test-run}( &ctx->tq_ctx->base );
+            }
+          } else {
+            ${../../tq/req/enqueue-priority:/test-run}( &ctx->tq_ctx->base );
+          }
+          break;
+        default:
+          T_unreachable();
+          break;
+      }
+    text: |
+      Where the thread queue uses the FIFO discipline, the calling thread shall
+      be enqueued in FIFO order.
+
+      Where the thread queue uses the priority discipline, the calling thread
+      shall be enqueued in priority order.
+  test-epilogue: null
+  test-prologue: null
+- name: Owner
+  states:
+  - name: Nop
+    test-code: |
+      if ( ctx->owner_self ) {
+        T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
+      } else if ( ctx->owner_other ) {
+        T_eq_ptr(
+          ctx->owner_after,
+          ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+        );
+      } else {
+        T_null( ctx->owner_after );
+      }
+    text: |
+      The owner of the mutex shall not be modified.
+  - name: New
+    test-code: |
+      T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
+    text: |
+      The owner of the mutex shall be the calling thread.
+  test-epilogue: null
+  test-prologue: null
+- name: Priority
+  states:
+  - name: Nop
+    test-code: |
+      T_eq_u32( ctx->priority_after, ctx->priority_before );
+    text: |
+      The current priority of the calling thread shall not be modified.
+  - name: Ceiling
+    test-code: |
+      if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
+        T_eq_u32( ctx->priority_after, ctx->tq_ctx->priority_ceiling );
+      } else {
+        T_eq_u32( ctx->priority_after, ctx->priority_before );
+      }
+    text: |
+      Where the mutex provides a priority ceiling, the calling thread shall use
+      the priority ceiling of the mutex.
+
+      Where the mutex does not provide a priority ceiling, the current priority
+      of the calling thread shall not be modified.
+  test-epilogue: null
+  test-prologue: null
+pre-conditions:
+- name: Owner
+  states:
+  - name: 'No'
+    test-code: |
+      /* This is the default */
+    text: |
+      While the mutex has no owner.
+  - name: Self
+    test-code: |
+      ctx->owner_self = true;
+    text: |
+      While the owner of the mutex is the calling thread.
+  - name: Other
+    test-code: |
+      ctx->owner_other = true;
+    text: |
+      While the owner of the mutex is a thread other than the calling thread.
+  - name: Deadlock
+    test-code: |
+      ctx->deadlock = true;
+    text: |
+      While the attempt to seize the mutex results in a deadlock.
+  test-epilogue: null
+  test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons: {}
+test-action: |
+  if ( !ctx->deadlock ) {
+    NonDeadlockAction( ctx );
+  }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+    If this member is true, then the calling thread shall be the owner of the
+    mutex.
+  description: null
+  member: |
+    bool owner_self;
+- brief: |
+    If this member is true, then a thread other than the calling thread shall
+    be the owner of the mutex.
+  description: null
+  member: |
+    bool owner_other;
+- brief: |
+    If this member is true, then a deadlock shall occur.
+  description: null
+  member: |
+    bool deadlock;
+- brief: |
+    This member contains the current priority of the calling thread before the
+    directive call.
+  description: null
+  member: |
+    rtems_task_priority priority_before;
+- brief: |
+    This member contains the return status of the directive call.
+  description: null
+  member: |
+    Status_Control status;
+- brief: |
+    This member contains the owner of the mutex after the directive call.
+  description: null
+  member: |
+    const rtems_tcb *owner_after;
+- brief: |
+    This member contains the current priority of the calling thread after the
+    directive call.
+  description: null
+  member: |
+    rtems_task_priority priority_after;
+test-context-support: null
+test-description: null
+test-header:
+  code: null
+  includes: []
+  local-includes:
+  - tx-thread-queue.h
+  run-params:
+  - description: |
+      is the thread queue context.
+    dir: inout
+    name: tq_ctx
+    specifier: TQMtxContext *${.:name}
+  target: testsuites/validation/tr-mtx-seize-wait.h
+test-includes:
+- rtems/score/statesimpl.h
+test-local-includes:
+- tr-mtx-seize-wait.h
+- tr-tq-enqueue-ceiling.h
+- tr-tq-enqueue-deadlock.h
+- tr-tq-enqueue-fifo.h
+- tr-tq-enqueue-mrsp.h
+- tr-tq-enqueue-priority.h
+test-prepare: |
+  ctx->tq_ctx->base.enqueue_prepare = TQEnqueuePrepareClassicSem;
+  ctx->tq_ctx->base.enqueue_done = TQSurrenderClassicSem;
+  ctx->tq_ctx->base.get_properties = GetProperties;
+  ctx->owner_self = false;
+  ctx->owner_other = false;
+  ctx->deadlock = false;
+test-setup: null
+test-stop: null
+test-support: |
+  typedef ScoreMtxReqSeizeWait_Context Context;
+
+  static Status_Control Status( const Context *ctx, Status_Control status )
+  {
+    return TQConvertStatus( &ctx->tq_ctx->base, status );
+  }
+
+  static void GetProperties( TQContext *base, TQWorkerKind enqueued_worker )
+  {
+    TQMtxContext        *ctx;
+    T_thread_timer_state timer_state;
+
+    ctx = (TQMtxContext *) base;
+    T_eq_u32(
+      ctx->base.worker_tcb[ enqueued_worker ]->current_state,
+      STATES_WAITING_FOR_MUTEX
+    );
+
+    timer_state = T_get_thread_timer_state(
+      ctx->base.worker_id[ enqueued_worker ]
+    );
+
+    if ( base->wait == TQ_WAIT_TICKS ) {
+      T_eq_int( timer_state, T_THREAD_TIMER_SCHEDULED );
+    } else {
+      T_eq_int( timer_state, T_THREAD_TIMER_INACTIVE );
+    }
+  }
+
+  static void NonDeadlockAction( Context *ctx )
+  {
+    if ( ctx->owner_self ) {
+      Status_Control status;
+
+      status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+      T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+    } else if ( ctx->owner_other ) {
+      if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+        TQSetScheduler(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_A,
+          ctx->tq_ctx->base.other_scheduler_id,
+          PRIO_HIGH
+        );
+        TQSend(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_A,
+          TQ_EVENT_ENQUEUE | TQ_EVENT_RUNNER_SYNC
+        );
+        TQSynchronizeRunner();
+      } else {
+        TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      }
+    }
+
+    ctx->priority_before = GetSelfPriority();
+    ctx->status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
+    ctx->owner_after = TQMtxGetOwner( ctx->tq_ctx );
+    ctx->priority_after = GetSelfPriority();
+
+    if ( ctx->owner_self ) {
+      TQSurrender( &ctx->tq_ctx->base );
+    } else if ( ctx->owner_other ) {
+      if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+        TQSend(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_A,
+          TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+        );
+        TQSynchronizeRunner();
+        TQSetScheduler(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_A,
+          ctx->tq_ctx->base.runner_scheduler_id,
+          PRIO_HIGH
+        );
+      } else {
+        TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+      }
+    }
+  }
+test-target: testsuites/validation/tr-mtx-seize-wait.c
+test-teardown: null
+text: |
+  When the calling thread tries to seize the mutex.
+transition-map:
+- enabled-by: true
+  post-conditions:
+    Status: Ok
+    Owner: New
+    Priority: Ceiling
+  pre-conditions:
+    Owner:
+    - 'No'
+- enabled-by: true
+  post-conditions:
+    Status: Recursive
+    Owner: Nop
+    Priority: Nop
+  pre-conditions:
+    Owner:
+    - Self
+- enabled-by: true
+  post-conditions:
+    Status: Enqueued
+    Owner: Nop
+    Priority: Nop
+  pre-conditions:
+    Owner:
+    - Other
+- enabled-by: true
+  post-conditions:
+    Status: Deadlock
+    Owner: N/A
+    Priority: N/A
+  pre-conditions:
+    Owner:
+    - Deadlock
+type: requirement
diff --git a/spec/score/sem/req/seize-try.yml b/spec/score/sem/req/seize-try.yml
index f830d87..8c0377e 100644
--- a/spec/score/sem/req/seize-try.yml
+++ b/spec/score/sem/req/seize-try.yml
@@ -42,12 +42,12 @@ pre-conditions:
     test-code: |
       ctx->count_before = 0;
     text: |
-      The count of the semaphore shall be zero.
+      While the count of the semaphore is zero.
   - name: Positive
     test-code: |
       ctx->count_before = 1;
     text: |
-      The count of the semaphore shall be greater than zero.
+      While the count of the semaphore is greater than zero.
   test-epilogue: null
   test-prologue: null
 rationale: null
diff --git a/spec/score/sem/req/seize-wait.yml b/spec/score/sem/req/seize-wait.yml
index 2cc2fdb..bbc3ae9 100644
--- a/spec/score/sem/req/seize-wait.yml
+++ b/spec/score/sem/req/seize-wait.yml
@@ -16,14 +16,23 @@ post-conditions:
       ${../../status/if/successful:/name}.
   - name: Enqueued
     test-code: |
-      if ( ctx->tq_ctx->base.discipline == TQ_FIFO ) {
-        ${../../tq/req/enqueue-fifo:/test-run}( &ctx->tq_ctx->base );
-      } else {
-        ${../../tq/req/enqueue-priority:/test-run}( &ctx->tq_ctx->base );
+      switch ( ctx->tq_ctx->base.discipline ) {
+        case TQ_FIFO:
+          ${../../tq/req/enqueue-fifo:/test-run}( &ctx->tq_ctx->base );
+          break;
+        case TQ_PRIORITY:
+          ${../../tq/req/enqueue-priority:/test-run}( &ctx->tq_ctx->base );
+          break;
+        default:
+          T_unreachable();
+          break;
       }
     text: |
-      The calling task shall be enqueued in the specified order on the thread
-      queue used by the semaphore.
+      Where the thread queue uses the FIFO discipline, the calling thread shall
+      be enqueued in FIFO order.
+
+      Where the thread queue uses the priority discipline, the calling thread
+      shall be enqueued in priority order.
   test-epilogue: null
   test-prologue: |
     Status_Control status;
@@ -67,14 +76,14 @@ pre-conditions:
   states:
   - name: Zero
     test-code: |
-      /* Done by Prepare() */
+      /* Done by TQEnqueuePrepareClassicSem() */
     text: |
-      The count of the semaphore shall be zero.
+      While the count of the semaphore is zero.
   - name: Positive
     test-code: |
       TQSemSetCount( ctx->tq_ctx, 1 );
     text: |
-      The count of the semaphore shall be greater than zero.
+      While the count of the semaphore is greater than zero.
   test-epilogue: null
   test-prologue: null
 rationale: null
@@ -107,8 +116,8 @@ test-local-includes:
 - tr-tq-enqueue-fifo.h
 - tr-tq-enqueue-priority.h
 test-prepare: |
-  ctx->tq_ctx->base.prepare = Prepare;
-  ctx->tq_ctx->base.cleanup = Cleanup;
+  ctx->tq_ctx->base.enqueue_prepare = TQEnqueuePrepareClassicSem;
+  ctx->tq_ctx->base.enqueue_done = TQSurrenderClassicSem;
   ctx->tq_ctx->base.get_properties = GetProperties;
 test-setup: null
 test-stop: null
@@ -120,22 +129,6 @@ test-support: |
     return TQConvertStatus( &ctx->tq_ctx->base, status );
   }
 
-  static void Prepare( TQContext *base )
-  {
-    TQSemContext *ctx;
-
-    ctx = (TQSemContext *) base;
-    TQSemSetCount( ctx, 0 );
-  }
-
-  static void Cleanup( TQContext *base )
-  {
-    TQSemContext *ctx;
-
-    ctx = (TQSemContext *) base;
-    TQSemSetCount( ctx, 1 );
-  }
-
   static void GetProperties( TQContext *base, TQWorkerKind enqueued_worker )
   {
     TQSemContext        *ctx;
diff --git a/spec/score/status/if/deadlock.yml b/spec/score/status/if/deadlock.yml
new file mode 100644
index 0000000..93f7eac
--- /dev/null
+++ b/spec/score/status/if/deadlock.yml
@@ -0,0 +1,12 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+index-entries: []
+interface-type: unspecified
+links:
+- role: interface-placement
+  uid: header
+name: STATUS_DEADLOCK
+reference: null
+type: interface
diff --git a/spec/score/tq/req/enqueue-ceiling.yml b/spec/score/tq/req/enqueue-ceiling.yml
new file mode 100644
index 0000000..6b13689
--- /dev/null
+++ b/spec/score/tq/req/enqueue-ceiling.yml
@@ -0,0 +1,429 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links: []
+post-conditions:
+- name: Position
+  states:
+  - name: InitialFirst
+    test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+    text: |
+      A priority queue associated with the scheduler which contains exactly the
+      enqueueing thread shall be created as the first priority queue of the
+      thread queue.
+  - name: First
+    test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+  - name: Second
+    test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+  - name: FirstFirst
+    test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+
+      The position of the priority queue in the thread queue shall not change.
+  - name: SecondFirst
+    test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+
+      The position of the priority queue in the thread queue shall not change.
+  - name: SecondQueue
+    test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), NULL );
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+
+      The position of the priority queue in the thread queue shall not change.
+  test-epilogue: null
+  test-prologue: |
+    size_t i;
+
+    i = 0;
+
+    /* Event receives */
+    T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+    T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+pre-conditions:
+- name: EligibleScheduler
+  states:
+  - name: Home
+    test-code: |
+      ctx->helping = false;
+    text: |
+      While the enqueueing thread has no helping scheduler.
+  - name: Helping
+    test-code: |
+      ctx->helping = true;
+    text: |
+      While the enqueueing thread has at least one helping scheduler.
+  test-epilogue: null
+  test-prologue: null
+- name: QueueEligible
+  states:
+  - name: None
+    test-code: |
+      /* This is the default */
+    text: |
+      While the priority queue of the thread queue associated with an eligible
+      scheduler of the enqueueing thread is empty.
+  - name: EQ
+    test-code: |
+      ctx->priority = PRIO_VERY_HIGH;
+    text: |
+      While the priority queue of the thread queue associated with an eligible
+      scheduler of the enqueueing thread is non-empty, while at least one
+      thread of this priority queue has a priority equal to the priority of the
+      enqueueing thread with respect to this scheduler.
+  - name: GT
+    test-code: |
+      ctx->priority = PRIO_HIGH;
+    text: |
+      While the priority queue of the thread queue associated with an eligible
+      scheduler of the enqueueing thread is non-empty, while at least one
+      thread of this priority queue has a priority greater than the priority of
+      the enqueueing thread with respect to this scheduler.
+  test-epilogue: null
+  test-prologue: null
+- name: QueueIneligible
+  states:
+  - name: None
+    test-code: |
+      /* This is the default */
+    text: |
+      While no priority queue of the thread queue exists which is not
+      associated with an eligible scheduler of the enqueueing thread.
+  - name: Before
+    test-code: |
+      ctx->other_before = true;
+    text: |
+      While a priority queue of the thread queue exists which is not
+      associated with an eligible scheduler of the enqueueing thread, while
+      this priority queue is positioned before all priority queues which are
+      associated with eligible schedulers of the enqueueing thread.
+  - name: After
+    test-code: |
+      ctx->other_after = true;
+    text: |
+      While a priority queue of the thread queue exists which is not associated
+      with an eligible scheduler of the enqueueing thread, while this priority
+      queue is positioned after all priority queues which are associated with
+      eligible schedulers of the enqueueing thread.
+  test-epilogue: null
+  test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons:
+  Invalid: |
+    These variants are invalid due to two independent reasons.  Firstly,
+    where the system was built with SMP support disabled, no other scheduler
+    can exist.  Secondly, a priority queue must be present to have another
+    priority queue positioned before or after the priority queue.
+test-action: |
+  Status_Control status;
+
+  if ( ctx->priority == PRIO_PSEUDO_ISR ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+  } else {
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, ctx->priority );
+
+    if ( ctx->other_before || ctx->other_after ) {
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_OBTAIN );
+      TQSendAndWaitForExecutionStop(
+        ctx->tq_ctx,
+        TQ_BLOCKER_D,
+        TQ_EVENT_MUTEX_B_OBTAIN | TQ_EVENT_MUTEX_B_RELEASE |
+          TQ_EVENT_RUNNER_SYNC
+      );
+
+      if ( ctx->other_before ) {
+        TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE );
+      }
+
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      TQSend(
+        ctx->tq_ctx,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+
+      if ( ctx->other_before ) {
+        TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_SURRENDER );
+      }
+    } else {
+      TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+      TQSend(
+        ctx->tq_ctx,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+  }
+
+  if ( ctx->helping ) {
+    if ( ctx->other_before || ctx->other_after ) {
+      if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+        AddHelper( ctx->tq_ctx, ctx->tq_ctx->third_scheduler_id );
+      }
+    } else {
+      AddHelper( ctx->tq_ctx, ctx->tq_ctx->other_scheduler_id );
+    }
+  }
+
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+  status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+  TQSchedulerRecordStop( ctx->tq_ctx );
+  TQSurrender( ctx->tq_ctx );
+
+  if ( ctx->other_before || ctx->other_after ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_RELEASE );
+    TQSynchronizeRunner();
+  }
+
+  if ( ctx->helping ) {
+    if ( ctx->other_before || ctx->other_after ) {
+      if ( rtems_scheduler_get_processor_maximum() > 2 ) {
+        RemoveHelper( ctx->tq_ctx );
+      }
+    } else {
+      RemoveHelper( ctx->tq_ctx );
+    }
+  }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+    If this member is true, then the enqueueing thread shall have at least
+    one helping scheduler.
+  description: null
+  member: |
+    bool helping;
+- brief: |
+    This member specifies the priority of a thread with an eligible scheduler
+    equal to an eligible scheduler of the enqueueing thread.
+  description: null
+  member: |
+    rtems_task_priority priority;
+- brief: |
+    If this member is true, then a thread whose eligible schedulers are
+    ineligible for the enqueueing task should be enqueued before a
+    thread with an eligible scheduler equal to an eligible scheduler of the
+    enqueueing thread.
+  description: null
+  member: |
+    size_t other_before;
+- brief: |
+    If this member is true, then a thread whose eligible schedulers are
+    ineligible for the enqueueing task should be enqueued after a
+    thread with an eligible scheduler equal to an eligible scheduler of the
+    enqueueing thread.
+  description: null
+  member: |
+    size_t other_after;
+test-context-support: null
+test-description: null
+test-header:
+  code: null
+  includes: []
+  local-includes:
+  - tx-thread-queue.h
+  run-params:
+  - description: |
+      is the thread queue context.
+    dir: inout
+    name: tq_ctx
+    specifier: TQContext *${.:name}
+  target: testsuites/validation/tr-tq-enqueue-ceiling.h
+test-includes: []
+test-local-includes:
+- tr-tq-enqueue-ceiling.h
+test-prepare: |
+  ctx->priority = PRIO_PSEUDO_ISR;
+  ctx->other_before = false;
+  ctx->other_after = false;
+test-setup:
+  brief: null
+  code: |
+    rtems_id scheduler_id;
+
+    scheduler_id = ctx->tq_ctx->runner_scheduler_id;
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_A, scheduler_id, PRIO_VERY_HIGH );
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_B, scheduler_id, PRIO_VERY_HIGH );
+    TQSetScheduler( ctx->tq_ctx, TQ_BLOCKER_C, scheduler_id, PRIO_VERY_HIGH );
+    #if defined( RTEMS_SMP )
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      ctx->tq_ctx->other_scheduler_id,
+      PRIO_LOW
+    );
+    #endif
+  description: null
+test-stop: null
+test-support: |
+  typedef ScoreTqReqEnqueueCeiling_Context Context;
+
+  static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
+  {
+    const rtems_tcb *thread;
+
+    do {
+      thread = TQGetNextUnblock( ctx->tq_ctx, index )->thread;
+    } while ( thread == ctx->tq_ctx->runner_tcb );
+
+    return thread;
+  }
+
+  static const rtems_tcb *GetTCB( Context *ctx, TQWorkerKind worker )
+  {
+    return ctx->tq_ctx->worker_tcb[ worker ];
+  }
+
+  static void AddHelper( TQContext *tq_ctx, rtems_id scheduler_id )
+  {
+    TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_OBTAIN );
+    TQSetScheduler( tq_ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
+    TQSendAndWaitForExecutionStop(
+      tq_ctx,
+      TQ_BLOCKER_E,
+      TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
+    );
+  }
+
+  static void RemoveHelper( TQContext *tq_ctx )
+  {
+    TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_RELEASE );
+    TQMutexObtain( tq_ctx, TQ_MUTEX_A );
+    TQMutexRelease( tq_ctx, TQ_MUTEX_A );
+  }
+test-target: testsuites/validation/tr-tq-enqueue-ceiling.c
+test-teardown:
+  brief: null
+  code: |
+    TQReset( ctx->tq_ctx );
+  description: null
+text: |
+  When the calling thread is enqueued on the thread queue.
+transition-map:
+- enabled-by: true
+  post-conditions:
+    Position: InitialFirst
+  pre-conditions:
+    EligibleScheduler:
+    - Home
+    QueueEligible:
+    - None
+    QueueIneligible:
+    - None
+- enabled-by: true
+  post-conditions:
+    Position: First
+  pre-conditions:
+    EligibleScheduler:
+    - Home
+    QueueEligible:
+    - GT
+    QueueIneligible:
+    - None
+- enabled-by: true
+  post-conditions:
+    Position: Second
+  pre-conditions:
+    EligibleScheduler:
+    - Home
+    QueueEligible:
+    - EQ
+    QueueIneligible:
+    - None
+- enabled-by: true
+  post-conditions: Invalid
+  pre-conditions: default
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Position: InitialFirst
+  pre-conditions:
+    EligibleScheduler:
+    - Helping
+    QueueEligible:
+    - None
+    QueueIneligible:
+    - None
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Position: First
+  pre-conditions:
+    EligibleScheduler:
+    - Helping
+    QueueEligible:
+    - GT
+    QueueIneligible:
+    - None
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Position: Second
+  pre-conditions:
+    EligibleScheduler:
+    - Helping
+    QueueEligible:
+    - EQ
+    QueueIneligible:
+    - None
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Position: SecondQueue
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - EQ
+    - GT
+    QueueIneligible:
+    - Before
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Position: FirstFirst
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - GT
+    QueueIneligible:
+    - After
+- enabled-by: RTEMS_SMP
+  post-conditions:
+    Position: SecondFirst
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - EQ
+    QueueIneligible:
+    - After
+type: requirement
diff --git a/spec/score/tq/req/enqueue-deadlock.yml b/spec/score/tq/req/enqueue-deadlock.yml
new file mode 100644
index 0000000..14008ca
--- /dev/null
+++ b/spec/score/tq/req/enqueue-deadlock.yml
@@ -0,0 +1,142 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links: []
+post-conditions:
+- name: Status
+  states:
+  - name: Deadlock
+    test-code: |
+      /* Checked by action */
+    text: |
+      The return status of the directive call shall be derived from
+      ${../../status/if/deadlock:/name}.
+  test-epilogue: null
+  test-prologue: null
+pre-conditions:
+- name: Deadlock
+  states:
+  - name: One
+    test-code: |
+      ctx->more = false;
+    text: |
+      While the owner of the thread queue is enqueued on another thread queue
+      owned by the calling thread.
+  - name: More
+    test-code: |
+      ctx->more = true;
+    text: |
+      While the owner of the thread queue is enqueued on another thread queue
+      owned by a thread other than the calling thread, and so on, while the
+      owner of the last thread queue of this dependency chain is enqueued on a
+      thread queue owned by the calling thread.
+  test-epilogue: null
+  test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons: {}
+test-action: |
+  Status_Control status;
+
+  TQMutexObtain( ctx->tq_ctx, TQ_MUTEX_A );
+
+  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      ctx->tq_ctx->other_scheduler_id,
+      PRIO_HIGH
+    );
+  }
+
+  TQSendAndWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+
+  if ( ctx->more ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_OBTAIN );
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_B_OBTAIN
+    );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_OBTAIN );
+  } else {
+    TQSendAndWaitForExecutionStop(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_A_OBTAIN
+    );
+  }
+
+  status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_DEADLOCK ) );
+
+  TQMutexRelease( ctx->tq_ctx, TQ_MUTEX_A );
+
+  if ( ctx->more ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_A_RELEASE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_B_RELEASE );
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_B_RELEASE );
+  } else {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_MUTEX_A_RELEASE );
+  }
+
+  if ( ctx->tq_ctx->enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+    );
+    TQSynchronizeRunner();
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      ctx->tq_ctx->runner_scheduler_id,
+      PRIO_HIGH
+    );
+  } else {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
+  }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+    If this member is true, then more than one mutex shall be used for the
+    deadlock scenario.
+  description: null
+  member: |
+    bool more;
+test-context-support: null
+test-description: null
+test-header:
+  code: null
+  includes: []
+  local-includes:
+  - tx-thread-queue.h
+  run-params:
+  - description: |
+      is the thread queue context.
+    dir: inout
+    name: tq_ctx
+    specifier: TQContext *${.:name}
+  target: testsuites/validation/tr-tq-enqueue-deadlock.h
+test-includes: []
+test-local-includes:
+- tr-tq-enqueue-deadlock.h
+test-prepare: null
+test-setup: null
+test-stop: null
+test-support: null
+test-target: testsuites/validation/tr-tq-enqueue-deadlock.c
+test-teardown: null
+text: |
+  When the calling thread attempts to be enqueued on the thread queue.
+transition-map:
+- enabled-by: true
+  post-conditions:
+    Status: Deadlock
+  pre-conditions:
+    Deadlock: all
+type: requirement
diff --git a/spec/score/tq/req/enqueue-fifo.yml b/spec/score/tq/req/enqueue-fifo.yml
index 6bd62f4..87c493f 100644
--- a/spec/score/tq/req/enqueue-fifo.yml
+++ b/spec/score/tq/req/enqueue-fifo.yml
@@ -9,14 +9,14 @@ post-conditions:
   states:
   - name: First
     test-code: |
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The thread shall be the first thread in the queue.
   - name: Last
     test-code: |
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The thread shall be the last thread in the queue.
@@ -26,26 +26,20 @@ post-conditions:
 
     i = 0;
 
-    /* Event receive */
+    /* Event receives */
+    T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
     T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
-
-    /* Enqueue */
-    T_eq_ptr( GetBlock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
 pre-conditions:
 - name: Queue
   states:
   - name: Empty
     test-code: |
-      /* This is the default */
+      ctx->tq_ctx->how_many = 1;
     text: |
       While the queue is empty.
   - name: NonEmpty
     test-code: |
-      TQSend(
-        ctx->tq_ctx,
-        TQ_BLOCKER_B,
-        TQ_EVENT_ENQUEUE | TQ_EVENT_DEQUEUE_ONE
-      );
+      ctx->tq_ctx->how_many = 2;
     text: |
       While the queue is non-empty.
   test-epilogue: null
@@ -55,13 +49,18 @@ references: []
 requirement-type: functional
 skip-reasons: {}
 test-action: |
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
+  if ( ctx->tq_ctx->how_many >= 2 ) {
+    TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+  }
+
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE | TQ_EVENT_DEQUEUE_ONE );
-  TQDequeueAll( ctx->tq_ctx );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_C, TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
   TQSchedulerRecordStop( ctx->tq_ctx );
 test-brief: null
-test-cleanup: |
-  TQCleanup( ctx->tq_ctx );
+test-cleanup: null
 test-context: []
 test-context-support: null
 test-description: null
@@ -72,7 +71,7 @@ test-header:
   - tx-thread-queue.h
   run-params:
   - description: |
-      is the thread queue context.
+      is the thread queue test context.
     dir: inout
     name: tq_ctx
     specifier: TQContext *${.:name}
@@ -80,18 +79,18 @@ test-header:
 test-includes: []
 test-local-includes:
 - tr-tq-enqueue-fifo.h
-test-prepare: |
-  TQPrepare( ctx->tq_ctx );
-test-setup: null
+test-prepare: null
+test-setup:
+  brief: null
+  code: |
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_VERY_HIGH );
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_HIGH );
+  description: null
 test-stop: null
 test-support: |
   typedef ScoreTqReqEnqueueFifo_Context Context;
 
-  static const rtems_tcb *GetBlock( Context *ctx, size_t *index )
-  {
-    return TQGetNextBlock( ctx->tq_ctx, index )->thread;
-  }
-
   static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
   {
     return TQGetNextUnblock( ctx->tq_ctx, index )->thread;
@@ -102,9 +101,13 @@ test-support: |
     return ctx->tq_ctx->worker_tcb[ worker ];
   }
 test-target: testsuites/validation/tr-tq-enqueue-fifo.c
-test-teardown: null
+test-teardown:
+  brief: null
+  code: |
+    TQReset( ctx->tq_ctx );
+  description: null
 text: |
-  When the calling task is enqueued on the thread queue.
+  When the calling thread is enqueued on the thread queue.
 transition-map:
 - enabled-by: true
   post-conditions:
diff --git a/spec/score/tq/req/enqueue-mrsp.yml b/spec/score/tq/req/enqueue-mrsp.yml
new file mode 100644
index 0000000..77d388a
--- /dev/null
+++ b/spec/score/tq/req/enqueue-mrsp.yml
@@ -0,0 +1,439 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+functional-type: action
+links: []
+post-conditions:
+- name: Position
+  states:
+  - name: InitialFirst
+    test-code: |
+      T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+      T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+    text: |
+      A priority queue associated with the scheduler which contains exactly the
+      enqueueing thread shall be created as the first priority queue of the
+      thread queue.
+  - name: InitialLast
+    test-code: |
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_C ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 2, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+    text: |
+      A priority queue associated with the scheduler which contains exactly the
+      enqueueing thread shall be created as the last priority queue of the
+      thread queue.
+  - name: Second
+    test-code: |
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 2, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+  - name: SecondFirst
+    test-code: |
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_C ) );
+        T_eq_u32( 3, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 3, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+
+      The position of the priority queue in the thread queue shall not change.
+  - name: SecondLast
+    test-code: |
+      if ( CanDoFullValidation() ) {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_C ) );
+        T_eq_u32( 2, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_B ) );
+        T_eq_u32( 3, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 3, TQGetCounter( ctx->tq_ctx ) );
+      } else {
+        T_eq_u32( 1, TQGetWorkerCounter( ctx->tq_ctx, TQ_BLOCKER_A ) );
+        T_eq_u32( 1, TQGetCounter( ctx->tq_ctx ) );
+      }
+    text: |
+      The enqueueing thread shall be enqueued in the priority queue associated
+      with the scheduler.
+
+      The position of the priority queue in the thread queue shall not change.
+  test-epilogue: null
+  test-prologue: |
+    size_t i;
+
+    i = 0;
+
+    /* The enqueue is sticky, so no enqueued thread is blocked by the scheduler */
+    T_null( TQGetNextUnblock( ctx->tq_ctx, &i )->thread );
+pre-conditions:
+- name: EligibleScheduler
+  states:
+  - name: Home
+    test-code: |
+      ctx->helping = false;
+    text: |
+      While the enqueueing thread has no helping scheduler.
+  - name: Helping
+    test-code: |
+      ctx->helping = true;
+    text: |
+      While the enqueueing thread has at least one helping scheduler.
+  test-epilogue: null
+  test-prologue: null
+- name: QueueEligible
+  states:
+  - name: None
+    test-code: |
+      ctx->priority = PRIO_PSEUDO_ISR;
+    text: |
+      While the priority queue of the thread queue associated with an eligible
+      scheduler of the enqueueing thread is empty.
+  - name: EQ
+    test-code: |
+      ctx->priority = PRIO_VERY_HIGH;
+    text: |
+      While the priority queue of the thread queue associated with an eligible
+      scheduler of the enqueueing thread is non-empty, while at least one
+      thread of this priority queue has a priority equal to the priority of the
+      enqueueing thread with respect to this scheduler.
+  - name: GT
+    test-code: |
+      ctx->priority = PRIO_HIGH;
+    text: |
+      While the priority queue of the thread queue associated with an eligible
+      scheduler of the enqueueing thread is non-empty, while at least one
+      thread of this priority queue has a priority greater than the priority of
+      the enqueueing thread with respect to this scheduler.
+  test-epilogue: null
+  test-prologue: null
+- name: QueueIneligible
+  states:
+  - name: None
+    test-code: |
+      ctx->other_before = false;
+      ctx->other_after = false;
+    text: |
+      While no priority queue of the thread queue exists which is not
+      associated with an eligible scheduler of the enqueueing thread.
+  - name: Only
+    test-code: |
+      ctx->other_before = true;
+      ctx->other_after = false;
+    text: |
+      While exactly one priority queue of the thread queue exists which is not
+      associated with an eligible scheduler of the enqueueing thread.
+  - name: Before
+    test-code: |
+      ctx->other_before = true;
+      ctx->other_after = false;
+    text: |
+      While a priority queue of the thread queue exists which is not
+      associated with an eligible scheduler of the enqueueing thread, while
+      this priority queue is positioned before all priority queues which are
+      associated with eligible schedulers of the enqueueing thread.
+  - name: After
+    test-code: |
+      ctx->other_before = false;
+      ctx->other_after = true;
+    text: |
+      While a priority queue of the thread queue exists which is not associated
+      with an eligible scheduler of the enqueueing thread, while this priority
+      queue is positioned after all priority queues which are associated with
+      eligible schedulers of the enqueueing thread.
+  test-epilogue: null
+  test-prologue: null
+rationale: null
+references: []
+requirement-type: functional
+skip-reasons:
+  Invalid: |
+    These variants are invalid due to two independent reasons.  Firstly, a
+    priority queue must be present to have another priority queue positioned
+    before or after the priority queue.  Secondly, if only one priority queue
+    shall be present, then no other priority queue can exist.
+test-action: |
+  Status_Control status;
+
+  TQResetCounter( ctx->tq_ctx );
+  TQClearDone( ctx->tq_ctx, TQ_BLOCKER_A );
+  TQClearDone( ctx->tq_ctx, TQ_BLOCKER_B );
+  TQClearDone( ctx->tq_ctx, TQ_BLOCKER_C );
+
+  status = TQEnqueue( ctx->tq_ctx, TQ_WAIT_FOREVER );
+  T_eq_int( status, TQConvertStatus( ctx->tq_ctx, STATUS_SUCCESSFUL ) );
+
+  if ( ctx->helping ) {
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_RUNNER_SYNC
+    );
+    TQSynchronizeRunner();
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE |
+        TQ_EVENT_RUNNER_SYNC_2
+    );
+  }
+
+  if ( CanDoFullValidation() ) {
+    if ( ctx->other_before ) {
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_C,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+
+    if ( ctx->priority != PRIO_PSEUDO_ISR ) {
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+
+    if ( ctx->other_after ) {
+      TQSendAndWaitForIntendToBlock(
+        ctx->tq_ctx,
+        TQ_BLOCKER_C,
+        TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+      );
+    }
+  }
+
+  TQSendAndWaitForIntendToBlock(
+    ctx->tq_ctx,
+    TQ_BLOCKER_A,
+    TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
+  );
+
+  TQSchedulerRecordStart( ctx->tq_ctx );
+  TQSurrender( ctx->tq_ctx );
+  TQWaitForDone( ctx->tq_ctx, TQ_BLOCKER_A );
+
+  if ( CanDoFullValidation() ) {
+    if ( ctx->priority != PRIO_PSEUDO_ISR ) {
+      TQWaitForDone( ctx->tq_ctx, TQ_BLOCKER_B );
+    }
+
+    if ( ctx->other_before || ctx->other_after ) {
+      TQWaitForDone( ctx->tq_ctx, TQ_BLOCKER_C );
+    }
+  }
+
+  TQSchedulerRecordStop( ctx->tq_ctx );
+
+  if ( ctx->helping ) {
+    TQSend(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      TQ_EVENT_MUTEX_A_RELEASE | TQ_EVENT_RUNNER_SYNC
+    );
+    TQSynchronizeRunner2();
+  }
+test-brief: null
+test-cleanup: null
+test-context:
+- brief: |
+    If this member is true, then the enqueueing thread shall have at least
+    one helping scheduler which is an ineligible scheduler for the already
+    enqueued threads.
+  description: null
+  member: |
+    bool helping
+- brief: |
+    This member specifies the priority of an already enqueued thread with an
+    eligible scheduler equal to an eligible scheduler of the enqueueing thread.
+  description: null
+  member: |
+    rtems_task_priority priority;
+- brief: |
+    If this member is true, then a thread whose eligible schedulers are
+    ineligible schedulers for the enqueueing task should be enqueued before a
+    thread with an eligible scheduler equal to an eligible scheduler of the
+    enqueueing thread.
+  description: null
+  member: |
+    size_t other_before;
+- brief: |
+    If this member is true, then a thread whose eligible schedulers are
+    ineligible schedulers for the enqueueing task should be enqueued after a
+    thread with an eligible scheduler equal to an eligible scheduler of the
+    enqueueing thread.
+  description: null
+  member: |
+    size_t other_after;
+test-context-support: null
+test-description: null
+test-header:
+  code: null
+  includes: []
+  local-includes:
+  - tx-thread-queue.h
+  run-params:
+  - description: |
+      is the thread queue test context.
+    dir: inout
+    name: tq_ctx
+    specifier: TQContext *${.:name}
+  target: testsuites/validation/tr-tq-enqueue-mrsp.h
+test-includes: []
+test-local-includes:
+- tr-tq-enqueue-mrsp.h
+test-prepare: null
+test-setup:
+  brief: null
+  code: |
+    if ( CanDoFullValidation() ) {
+      rtems_status_code sc;
+
+      sc = rtems_scheduler_remove_processor(
+        ctx->tq_ctx->third_scheduler_id,
+        2
+      );
+      T_rsc_success( sc );
+
+      sc = rtems_scheduler_add_processor(
+        ctx->tq_ctx->other_scheduler_id,
+        2
+      );
+      T_rsc_success( sc );
+
+      TQSetScheduler(
+        ctx->tq_ctx,
+        TQ_BLOCKER_C,
+        ctx->tq_ctx->third_scheduler_id,
+        PRIO_LOW
+      );
+    }
+
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      ctx->tq_ctx->other_scheduler_id,
+      PRIO_LOW
+    );
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_B,
+      ctx->tq_ctx->other_scheduler_id,
+      PRIO_LOW
+    );
+
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      ctx->tq_ctx->runner_scheduler_id,
+      PRIO_ULTRA_HIGH
+    );
+  description: null
+test-stop: null
+test-support: |
+  /*
+   * The MrsP locking protocol uses a sticky thread queue enqueue.  This means
+   * that threads waiting for the mutex ownership perform a busy wait and thus
+   * occupy the processor.  For a full validation we need at least four
+   * processors.
+   */
+  static bool CanDoFullValidation( void )
+  {
+    return rtems_scheduler_get_processor_maximum() >= 4;
+  }
+test-target: testsuites/validation/tr-tq-enqueue-mrsp.c
+test-teardown:
+  brief: null
+  code: |
+    if ( CanDoFullValidation() ) {
+      rtems_status_code sc;
+
+      sc = rtems_scheduler_remove_processor(
+        ctx->tq_ctx->other_scheduler_id,
+        2
+      );
+      T_rsc_success( sc );
+
+      sc = rtems_scheduler_add_processor(
+        ctx->tq_ctx->third_scheduler_id,
+        2
+      );
+      T_rsc_success( sc );
+    }
+
+    TQReset( ctx->tq_ctx );
+  description: null
+text: |
+  When the calling thread is enqueued on the thread queue.
+transition-map:
+- enabled-by: true
+  post-conditions:
+    Position: InitialFirst
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - None
+    QueueIneligible:
+    - None
+- enabled-by: true
+  post-conditions:
+    Position: InitialLast
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - None
+    QueueIneligible:
+    - Only
+- enabled-by: true
+  post-conditions:
+    Position: Second
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - EQ
+    - GT
+    QueueIneligible:
+    - None
+- enabled-by: true
+  post-conditions:
+    Position: SecondLast
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - EQ
+    - GT
+    QueueIneligible:
+    - Before
+- enabled-by: true
+  post-conditions:
+    Position: SecondFirst
+  pre-conditions:
+    EligibleScheduler: all
+    QueueEligible:
+    - EQ
+    - GT
+    QueueIneligible:
+    - After
+- enabled-by: true
+  post-conditions: Invalid
+  pre-conditions: default
+type: requirement
diff --git a/spec/score/tq/req/enqueue-priority.yml b/spec/score/tq/req/enqueue-priority.yml
index 8e0d7fe..d7c0463 100644
--- a/spec/score/tq/req/enqueue-priority.yml
+++ b/spec/score/tq/req/enqueue-priority.yml
@@ -9,7 +9,7 @@ post-conditions:
   states:
   - name: InitialFirst
     test-code: |
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       A priority queue associated with the scheduler which contains exactly the
@@ -17,8 +17,8 @@ post-conditions:
       thread queue.
   - name: InitialLast
     test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       A priority queue associated with the scheduler which contains exactly the
@@ -26,25 +26,25 @@ post-conditions:
       thread queue.
   - name: First
     test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The enqueueing thread shall be enqueued in the priority queue associated
       with the scheduler.
   - name: Second
     test-code: |
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The enqueueing thread shall be enqueued in the priority queue associated
       with the scheduler.
   - name: FirstFirst
     test-code: |
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The enqueueing thread shall be enqueued in the priority queue associated
@@ -53,9 +53,9 @@ post-conditions:
       The position of the priority queue in the thread queue shall not change.
   - name: SecondFirst
     test-code: |
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The enqueueing thread shall be enqueued in the priority queue associated
@@ -64,9 +64,9 @@ post-conditions:
       The position of the priority queue in the thread queue shall not change.
   - name: FirstLast
     test-code: |
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The enqueueing thread shall be enqueued in the priority queue associated
@@ -75,9 +75,9 @@ post-conditions:
       The position of the priority queue in the thread queue shall not change.
   - name: SecondLast
     test-code: |
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
-      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_D ) );
       T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+      T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
       T_eq_ptr( GetUnblock( ctx, &i ), NULL );
     text: |
       The enqueueing thread shall be enqueued in the priority queue associated
@@ -90,11 +90,9 @@ post-conditions:
 
     i = 0;
 
-    /* Event receive */
-    T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
-
-    /* Enqueue */
-    T_eq_ptr( GetBlock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_B ) );
+    /* Event receives */
+    T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_C ) );
+    T_eq_ptr( GetUnblock( ctx, &i ), GetTCB( ctx, TQ_BLOCKER_A ) );
 pre-conditions:
 - name: EligibleScheduler
   states:
@@ -114,12 +112,13 @@ pre-conditions:
   states:
   - name: None
     test-code: |
-      /* This is the default */
+      ctx->priority = PRIO_PSEUDO_ISR;
     text: |
       While the priority queue of the thread queue associated with an eligible
       scheduler of the enqueueing thread is empty.
   - name: LT
     test-code: |
+      ++ctx->tq_ctx->how_many;
       ctx->priority = PRIO_ULTRA_HIGH;
     text: |
       While the priority queue of the thread queue associated with an eligible
@@ -128,6 +127,7 @@ pre-conditions:
       the enqueueing thread with respect to this scheduler.
   - name: EQ
     test-code: |
+      ++ctx->tq_ctx->how_many;
       ctx->priority = PRIO_VERY_HIGH;
     text: |
       While the priority queue of the thread queue associated with an eligible
@@ -136,6 +136,7 @@ pre-conditions:
       enqueueing thread with respect to this scheduler.
   - name: GT
     test-code: |
+      ++ctx->tq_ctx->how_many;
       ctx->priority = PRIO_HIGH;
     text: |
       While the priority queue of the thread queue associated with an eligible
@@ -148,19 +149,24 @@ pre-conditions:
   states:
   - name: None
     test-code: |
-      /* This is the default */
+      ctx->other_before = false;
+      ctx->other_after = false;
     text: |
       While no priority queue of the thread queue exists which is not
       associated with an eligible scheduler of the enqueueing thread.
   - name: Only
     test-code: |
+      ++ctx->tq_ctx->how_many;
       ctx->other_before = true;
+      ctx->other_after = false;
     text: |
       While exactly one priority queue of the thread queue exists which is not
       associated with an eligible scheduler of the enqueueing thread.
   - name: Before
     test-code: |
+      ++ctx->tq_ctx->how_many;
       ctx->other_before = true;
+      ctx->other_after = false;
     text: |
       While a priority queue of the thread queue exists which is not
       associated with an eligible scheduler of the enqueueing thread, while
@@ -168,6 +174,8 @@ pre-conditions:
       associated with eligible schedulers of the enqueueing thread.
   - name: After
     test-code: |
+      ++ctx->tq_ctx->how_many;
+      ctx->other_before = false;
       ctx->other_after = true;
     text: |
       While a priority queue of the thread queue exists which is not associated
@@ -184,72 +192,58 @@ skip-reasons:
     These variants are invalid due to three independent reasons.  Firstly,
     where the system was built with SMP support disabled, no other scheduler
     can exist.  Secondly, a priority queue must be present to have another
-    priority positioned before or after the priority queue.  Thirdly, if only
-    one priority queue shall be present, then on other priority queue can
+    priority queue positioned before or after the priority queue.  Thirdly, if
+    only one priority queue shall be present, then no other priority queue can
     exist.
 test-action: |
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_PREPARE );
+
   if ( ctx->other_before ) {
-    TQSetScheduler(
+    TQSendAndWaitForExecutionStop(
       ctx->tq_ctx,
-      TQ_BLOCKER_C,
-      ctx->tq_ctx->other_scheduler_id,
-      PRIO_LOW
-    );
-    TQSend(
-      ctx->tq_ctx,
-      TQ_BLOCKER_C,
-      TQ_EVENT_HELPER_OTHER_SYNC | TQ_EVENT_ENQUEUE | TQ_EVENT_DEQUEUE_ONE | TQ_EVENT_RUNNER_SYNC
+      TQ_BLOCKER_D,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC_2
     );
-    TQSynchronizeRunner();
   }
 
   if ( ctx->priority != PRIO_PSEUDO_ISR ) {
-    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A , ctx->priority );
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B , ctx->priority );
     TQSend(
       ctx->tq_ctx,
-      TQ_BLOCKER_A,
-      TQ_EVENT_ENQUEUE | TQ_EVENT_DEQUEUE_ONE
+      TQ_BLOCKER_B,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER
     );
   }
 
   if ( ctx->other_after ) {
-    TQSetScheduler(
+    TQSendAndWaitForExecutionStop(
       ctx->tq_ctx,
-      TQ_BLOCKER_C,
-      ctx->tq_ctx->other_scheduler_id,
-      PRIO_LOW
-    );
-    TQSend(
-      ctx->tq_ctx,
-      TQ_BLOCKER_C,
-      TQ_EVENT_HELPER_OTHER_SYNC | TQ_EVENT_ENQUEUE | TQ_EVENT_DEQUEUE_ONE | TQ_EVENT_RUNNER_SYNC
+      TQ_BLOCKER_D,
+      TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC_2
     );
-    TQSynchronizeRunner();
   }
 
   if ( ctx->helping ) {
     if ( ctx->other_before || ctx->other_after ) {
       if ( rtems_scheduler_get_processor_maximum() > 2 ) {
-        AddHelper(
-          ctx->tq_ctx,
-          ctx->tq_ctx->third_scheduler_id,
-          TQ_EVENT_HELPER_THIRD_SYNC
-        );
+        AddHelper( ctx->tq_ctx, ctx->tq_ctx->third_scheduler_id );
       }
     } else {
-      AddHelper(
-        ctx->tq_ctx,
-        ctx->tq_ctx->other_scheduler_id,
-        TQ_EVENT_HELPER_OTHER_SYNC
-      );
+      AddHelper( ctx->tq_ctx, ctx->tq_ctx->other_scheduler_id );
     }
   }
 
   TQSchedulerRecordStart( ctx->tq_ctx );
-  TQSend( ctx->tq_ctx, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE | TQ_EVENT_DEQUEUE_ONE );
-  TQDequeueAll( ctx->tq_ctx );
+  TQSend(
+    ctx->tq_ctx,
+    TQ_BLOCKER_C,
+    TQ_EVENT_ENQUEUE | TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+  );
+  TQSend( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE_DONE );
 
   if ( ctx->other_before || ctx->other_after ) {
+    TQSynchronizeRunner2();
+  } else {
     TQSynchronizeRunner();
   }
 
@@ -265,9 +259,7 @@ test-action: |
     }
   }
 test-brief: null
-test-cleanup: |
-  TQCleanup( ctx->tq_ctx );
-  TQReset( ctx->tq_ctx );
+test-cleanup: null
 test-context:
 - brief: |
     If this member is true, then the enqueueing thread shall have at least
@@ -315,39 +307,27 @@ test-includes: []
 test-local-includes:
 - tr-tq-enqueue-priority.h
 test-prepare:
-  ctx->priority = PRIORITY_PSEUDO_ISR;
-  ctx->other_before = false;
-  ctx->other_after = false;
-  TQPrepare( ctx->tq_ctx );
+  ctx->tq_ctx->how_many = 1;
 test-setup:
   brief: null
   code: |
-    if ( rtems_scheduler_get_processor_maximum() > 3 ) {
-      rtems_status_code sc;
-      rtems_id          scheduler_id;
-
-      sc = rtems_scheduler_ident_by_processor( 3, &scheduler_id );
-      T_rsc_success( sc );
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_A, PRIO_ULTRA_HIGH );
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_B, PRIO_LOW );
+    TQSetPriority( ctx->tq_ctx, TQ_BLOCKER_C, PRIO_VERY_HIGH );
 
-      sc = rtems_scheduler_remove_processor( scheduler_id, 3 );
-      T_rsc_success( sc );
-    }
+    #if defined( RTEMS_SMP )
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_D,
+      ctx->tq_ctx->other_scheduler_id,
+      PRIO_LOW
+    );
+    #endif
   description: null
 test-stop: null
 test-support: |
   typedef ScoreTqReqEnqueuePriority_Context Context;
 
-  static const rtems_tcb *GetBlock( Context *ctx, size_t *index )
-  {
-    const rtems_tcb *thread;
-
-    do {
-      thread = TQGetNextBlock( ctx->tq_ctx, index )->thread;
-    } while ( thread == ctx->tq_ctx->worker_tcb[ TQ_HELPER_THIRD ] );
-
-    return thread;
-  }
-
   static const rtems_tcb *GetUnblock( Context *ctx, size_t *index )
   {
     const rtems_tcb *thread;
@@ -364,42 +344,28 @@ test-support: |
     return ctx->tq_ctx->worker_tcb[ worker ];
   }
 
-  static void AddHelper(
-    TQContext *tq_ctx,
-    rtems_id   scheduler_id,
-    TQEvent    sync
-  )
+  static void AddHelper( TQContext *tq_ctx, rtems_id scheduler_id )
   {
-    TQSend( tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_OBTAIN );
-    TQSetScheduler( tq_ctx, TQ_BLOCKER_D, scheduler_id, PRIO_LOW );
-    TQSend(
+    TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_OBTAIN );
+    TQSetScheduler( tq_ctx, TQ_BLOCKER_E, scheduler_id, PRIO_LOW );
+    TQSendAndWaitForExecutionStop(
       tq_ctx,
-      TQ_BLOCKER_D,
-      sync | TQ_EVENT_MUTEX_OBTAIN | TQ_EVENT_MUTEX_RELEASE
+      TQ_BLOCKER_E,
+      TQ_EVENT_MUTEX_A_OBTAIN | TQ_EVENT_MUTEX_A_RELEASE
     );
-    TQSynchronizeRunner();
   }
 
   static void RemoveHelper( TQContext *tq_ctx )
   {
-    TQSend( tq_ctx, TQ_BLOCKER_B, TQ_EVENT_MUTEX_RELEASE );
-    TQMutexObtain( tq_ctx );
-    TQMutexRelease( tq_ctx );
+    TQSend( tq_ctx, TQ_BLOCKER_C, TQ_EVENT_MUTEX_A_RELEASE );
+    TQMutexObtain( tq_ctx, TQ_MUTEX_A );
+    TQMutexRelease( tq_ctx, TQ_MUTEX_A );
   }
 test-target: testsuites/validation/tr-tq-enqueue-priority.c
 test-teardown:
   brief: null
   code: |
-    if ( rtems_scheduler_get_processor_maximum() > 3 ) {
-      rtems_status_code sc;
-      rtems_id          scheduler_id;
-
-      sc = rtems_scheduler_ident_by_processor( 2, &scheduler_id );
-      T_rsc_success( sc );
-
-      sc = rtems_scheduler_add_processor( scheduler_id, 3 );
-      T_rsc_success( sc );
-    }
+    TQReset( ctx->tq_ctx );
   description: null
 text: |
   When the calling task is enqueued on the thread queue.



More information about the vc mailing list