[rtems-central commit] spec: Improve mutex seize specification

Sebastian Huber sebh at rtems.org
Wed Sep 15 06:02:36 UTC 2021


Module:    rtems-central
Branch:    master
Commit:    b048c7537b06844027243f29743f1623e46983a6
Changeset: http://git.rtems.org/rtems-central/commit/?id=b048c7537b06844027243f29743f1623e46983a6

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Mon Sep 13 09:53:31 2021 +0200

spec: Improve mutex seize specification

---

 spec/rtems/sem/req/obtain.yml          |  30 +-
 spec/score/mtx/req/seize-try.yml       |  20 +-
 spec/score/mtx/req/seize-wait.yml      | 754 +++++++++++++++++++++++++--------
 spec/score/tq/req/enqueue-deadlock.yml |   7 +
 4 files changed, 605 insertions(+), 206 deletions(-)
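
For context: the old tri-valued TQ_MTX_RECURSIVE_YES/NO_STATUS/NO_FATAL
state is split into two orthogonal axes below, the recursive behaviour
(TQ_MTX_RECURSIVE_ALLOWED or TQ_MTX_RECURSIVE_DEADLOCK) and the deadlock
result (TQ_MTX_DEADLOCK_STATUS or TQ_MTX_DEADLOCK_FATAL).  A minimal
Classic API sketch of the two recursive behaviours follows; semaphore
creation is omitted and the exact Classic status code for a detected
deadlock is deliberately not asserted, so treat this as an illustration,
not as test code:

  #include <rtems.h>
  #include <assert.h>

  static void recursive_seize_demo( rtems_id classic_mtx, rtems_id mrsp_mtx )
  {
    rtems_status_code sc;

    /* TQ_MTX_RECURSIVE_ALLOWED: a default binary semaphore mutex counts
     * nested obtains by the owner and needs a matching release for each.
     */
    sc = rtems_semaphore_obtain( classic_mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    assert( sc == RTEMS_SUCCESSFUL );
    sc = rtems_semaphore_obtain( classic_mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    assert( sc == RTEMS_SUCCESSFUL );
    sc = rtems_semaphore_release( classic_mtx );
    assert( sc == RTEMS_SUCCESSFUL );
    sc = rtems_semaphore_release( classic_mtx );
    assert( sc == RTEMS_SUCCESSFUL );

    /* TQ_MTX_RECURSIVE_DEADLOCK: a second obtain by the owner of an MrsP
     * semaphore is a self-deadlock; with TQ_MTX_DEADLOCK_STATUS it is
     * reported by an error status instead of blocking the caller forever.
     */
    sc = rtems_semaphore_obtain( mrsp_mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    assert( sc == RTEMS_SUCCESSFUL );
    sc = rtems_semaphore_obtain( mrsp_mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    assert( sc != RTEMS_SUCCESSFUL );
    sc = rtems_semaphore_release( mrsp_mtx );
    assert( sc == RTEMS_SUCCESSFUL );
  }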

diff --git a/spec/rtems/sem/req/obtain.yml b/spec/rtems/sem/req/obtain.yml
index a48fe07..6fed26f 100644
--- a/spec/rtems/sem/req/obtain.yml
+++ b/spec/rtems/sem/req/obtain.yml
@@ -35,7 +35,8 @@ post-conditions:
   - name: MtxSeizeTry
     test-code: |
       ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
-      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
+      ctx->tq_mtx_ctx.deadlock = TQ_MTX_DEADLOCK_STATUS;
       ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
       ${/score/mtx/req/seize-try:/test-run}( &ctx->tq_mtx_ctx );
     text: |
@@ -45,7 +46,8 @@ post-conditions:
   - name: MtxSeizeWait
     test-code: |
       ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
-      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.deadlock = TQ_MTX_DEADLOCK_STATUS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
       ctx->tq_mtx_ctx.priority_ceiling = PRIO_INVALID;
       ${/score/mtx/req/seize-wait:/test-run}( &ctx->tq_mtx_ctx );
     text: |
@@ -55,7 +57,8 @@ post-conditions:
   - name: CeilingMtxSeizeTry
     test-code: |
       ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
-      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.deadlock = TQ_MTX_DEADLOCK_STATUS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
       ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
       ${/score/mtx/req/seize-try:/test-run}( &ctx->tq_mtx_ctx );
     text: |
@@ -65,7 +68,8 @@ post-conditions:
   - name: CeilingMtxSeizeWait
     test-code: |
       ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_BLOCKS;
-      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_YES;
+      ctx->tq_mtx_ctx.deadlock = TQ_MTX_DEADLOCK_STATUS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_ALLOWED;
       ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
       ${/score/mtx/req/seize-wait:/test-run}( &ctx->tq_mtx_ctx );
     text: |
@@ -75,7 +79,8 @@ post-conditions:
   - name: MrsPMtxSeizeTry
     test-code: |
       ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
-      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_NO_STATUS;
+      ctx->tq_mtx_ctx.deadlock = TQ_MTX_DEADLOCK_STATUS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
       ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
       ${/score/mtx/req/seize-try:/test-run}( &ctx->tq_mtx_ctx );
     text: |
@@ -85,7 +90,8 @@ post-conditions:
   - name: MrsPMtxSeizeWait
     test-code: |
       ctx->tq_mtx_ctx.base.enqueue_variant = TQ_ENQUEUE_STICKY;
-      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_NO_STATUS;
+      ctx->tq_mtx_ctx.deadlock = TQ_MTX_DEADLOCK_STATUS;
+      ctx->tq_mtx_ctx.recursive = TQ_MTX_RECURSIVE_DEADLOCK;
       ctx->tq_mtx_ctx.priority_ceiling = PRIO_VERY_HIGH;
       ${/score/mtx/req/seize-wait:/test-run}( &ctx->tq_mtx_ctx );
     text: |
@@ -210,6 +216,18 @@ test-action: |
     &ctx->tq_ctx.thread_queue_id
   );
   T_rsc_success( sc );
+
+  if ( ( ctx->attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0 ) {
+    rtems_task_priority prio;
+
+    sc = rtems_semaphore_set_priority(
+      ctx->tq_ctx.thread_queue_id,
+      ctx->tq_ctx.other_scheduler_id,
+      PRIO_VERY_HIGH,
+      &prio
+    );
+    T_rsc_success( sc );
+  }
 test-brief: null
 test-cleanup:
   rtems_status_code sc;
diff --git a/spec/score/mtx/req/seize-try.yml b/spec/score/mtx/req/seize-try.yml
index f9bab38..fb7ad6f 100644
--- a/spec/score/mtx/req/seize-try.yml
+++ b/spec/score/mtx/req/seize-try.yml
@@ -35,32 +35,24 @@ post-conditions:
   - name: Recursive
     test-code: |
       switch ( ctx->tq_ctx->recursive ) {
-        case TQ_MTX_RECURSIVE_YES:
+        case TQ_MTX_RECURSIVE_ALLOWED:
           T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
           TQSurrender( &ctx->tq_ctx->base );
           break;
-        case TQ_MTX_RECURSIVE_NO_STATUS:
+        case TQ_MTX_RECURSIVE_DEADLOCK:
           T_eq_int( ctx->status, Status( ctx, STATUS_DEADLOCK ) );
           break;
-        case TQ_MTX_RECURSIVE_NO_FATAL:
-          /* TODO */
-          T_unreachable();
-          break;
         default:
           T_unreachable();
           break;
       }
     text: |
-      Where the mutex supports a recursive seize, the return status of the
+      Where recursive seize of the mutex is allowed, the return status of the
       directive call shall be derived from ${../../status/if/successful:/name}.
 
-      Where the mutex does not support a recursive seize, where a deadlock is
-      indicated by a status code, the return status of the directive call shall
-      be derived from ${../../status/if/deadlock:/name}.
-
-      Where the mutex does not support a recursive seize, where a deadlock is
-      indicated by a fatal error, the thread queue deadlock internal error
-      shall occur.
+      Where recursive seize of the mutex results in a deadlock, the return
+      status of the directive call shall be derived from
+      ${../../status/if/deadlock:/name}.
   - name: Unsat
     test-code: |
       T_eq_int( ctx->status, Status( ctx, STATUS_UNSATISFIED ) );
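
The try-seize path above corresponds to a no-wait obtain in the Classic
API: when another thread owns the mutex, the call fails immediately with
an unsatisfied status instead of enqueueing the caller.  A sketch, with
mutex_id standing in for a previously created semaphore:

  rtems_status_code sc;

  /* Seize try: with RTEMS_NO_WAIT the timeout argument is irrelevant */
  sc = rtems_semaphore_obtain( mutex_id, RTEMS_NO_WAIT, 0 );

  if ( sc == RTEMS_UNSATISFIED ) {
    /* Unsat post-condition: the mutex is owned by another thread */
  }
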
diff --git a/spec/score/mtx/req/seize-wait.yml b/spec/score/mtx/req/seize-wait.yml
index 8bf08aa..ae7dbbd 100644
--- a/spec/score/mtx/req/seize-wait.yml
+++ b/spec/score/mtx/req/seize-wait.yml
@@ -11,90 +11,61 @@ post-conditions:
   states:
   - name: Ok
     test-code: |
-      T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
-      TQSurrender( &ctx->tq_ctx->base );
+      T_true( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) );
     text: |
       The return status of the directive call shall be derived from
       ${../../status/if/successful:/name}.
-  - name: OkOrMutexCeilingViolated
+  - name: MutexCeilingViolated
     test-code: |
-      if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
-        T_eq_int( ctx->status, Status( ctx, STATUS_MUTEX_CEILING_VIOLATED ) );
-      } else {
-        T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
-        TQSurrender( &ctx->tq_ctx->base );
-      }
+      T_true( IsEnqueueStatus( ctx, STATUS_MUTEX_CEILING_VIOLATED ) );
     text: |
-      Where the mutex provides a priority ceiling, the return status of the
-      directive call shall be derived from
+      The return status of the directive call shall be derived from
       ${../../status/if/mutex-ceiling-violated:/name}.
-
-      Where the mutex does not provide a priority ceiling, the return status of
-      the directive call shall be derived from
-      ${../../status/if/successful:/name}.
-  - name: Recursive
-    test-code: |
-      switch ( ctx->tq_ctx->recursive ) {
-        case TQ_MTX_RECURSIVE_YES:
-          T_eq_int( ctx->status, Status( ctx, STATUS_SUCCESSFUL ) );
-          TQSurrender( &ctx->tq_ctx->base );
-          break;
-        case TQ_MTX_RECURSIVE_NO_STATUS:
-          T_eq_int( ctx->status, Status( ctx, STATUS_DEADLOCK ) );
-          break;
-        case TQ_MTX_RECURSIVE_NO_FATAL:
-          /* TODO */
-          T_unreachable();
-          break;
-        default:
-          T_unreachable();
-          break;
-      }
-    text: |
-      Where the mutex supports a recursive seize, the return status of the
-      directive call shall be derived from ${../../status/if/successful:/name}.
-
-      Where the mutex does not support a recursive seize, where a deadlock is
-      indicated by a status code, the return status of the directive call shall
-      be derived from ${../../status/if/deadlock:/name}.
-
-      Where the mutex does not support a recursive seize, where a deadlock is
-      indicated by a fatal error, the thread queue deadlock internal error
-      shall occur.
-  - name: Deadlock
+  - name: DeadlockStatus
     test-code: |
+      T_true( IsEnqueueStatus( ctx, STATUS_DEADLOCK ) );
       ${../../tq/req/enqueue-deadlock:/test-run}( &ctx->tq_ctx->base );
     text: |
       The return status of the directive call shall be derived from
-      ${../../status/if/deadlock:/name} for deadlock scenarios specified by
-      ${../../tq/req/enqueue-deadlock}.
-  - name: Enqueued
+      ${../../status/if/deadlock:/name}.
+  - name: DeadlockFatal
     test-code: |
-      switch ( ctx->tq_ctx->base.discipline ) {
-        case TQ_FIFO:
-          ${../../tq/req/enqueue-fifo:/test-run}( &ctx->tq_ctx->base );
-          break;
-        case TQ_PRIORITY:
-          if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
-            if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
-              ${../../tq/req/enqueue-mrsp:/test-run}( &ctx->tq_ctx->base );
-            } else {
-              ${../../tq/req/enqueue-ceiling:/test-run}( &ctx->tq_ctx->base );
-            }
-          } else {
-            ${../../tq/req/enqueue-priority:/test-run}( &ctx->tq_ctx->base );
-          }
-          break;
-        default:
-          T_unreachable();
-          break;
-      }
+      T_unreachable();
     text: |
-      Where the thread queue uses the FIFO discipline, the calling thread shall
-      be enqueued in FIFO order.
-
-      Where the thread queue uses the priority discipline, the calling thread
-      shall be enqueued in priority order.
+      The system shall terminate with the
+      ${/score/interr/if/internal-error-core:/name} fatal source and the
+      ${/score/interr/if/thread-queue-deadlock:/name} fatal code.
+  test-epilogue: null
+  test-prologue: null
+- name: Enqueued
+  states:
+  - name: 'No'
+    test-code: |
+      /* The test runner would block if the worker is enqueued */
+    text: |
+      The calling thread shall not be enqueued on the thread queue of the mutex.
+  - name: FIFO
+    test-code: |
+      ${../../tq/req/enqueue-fifo:/test-run}( &ctx->tq_ctx->base );
+    text: |
+      The calling thread shall be enqueued in FIFO order.
+  - name: Priority
+    test-code: |
+      ${../../tq/req/enqueue-priority:/test-run}( &ctx->tq_ctx->base );
+    text: |
+      The calling thread shall be enqueued in priority order.
+  - name: PriorityCeiling
+    test-code: |
+      ${../../tq/req/enqueue-ceiling:/test-run}( &ctx->tq_ctx->base );
+    text: |
+      The calling thread shall be enqueued in priority order according to the
+      priority ceiling locking protocol.
+  - name: PriorityMrsP
+    test-code: |
+      ${../../tq/req/enqueue-mrsp:/test-run}( &ctx->tq_ctx->base );
+    text: |
+      The calling thread shall be enqueued in priority order according to the
+      MrsP locking protocol.
   test-epilogue: null
   test-prologue: null
 - name: Owner
@@ -103,32 +74,23 @@ post-conditions:
     test-code: |
       T_eq_ptr(
         ctx->owner_after,
-        ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+        ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_B ]
       );
     text: |
       The owner of the mutex shall not be modified.
   - name: Caller
     test-code: |
-      T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
-    text: |
-      The owner of the mutex shall be the calling thread.
-  - name: CallerOrNoOwner
-    test-code: |
-      T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
+      T_eq_ptr(
+        ctx->owner_after,
+        ctx->tq_ctx->base.worker_tcb[ TQ_BLOCKER_A ]
+      );
     text: |
       The owner of the mutex shall be the calling thread.
+  - name: None
     test-code: |
-      if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
-        T_null( ctx->owner_after );
-      } else {
-        T_eq_ptr( ctx->owner_after, ctx->tq_ctx->base.runner_tcb );
-      }
+      T_null( ctx->owner_after );
     text: |
-      Where the mutex provides a priority ceiling, the mutex shall have no
-      owner.
-
-      Where the mutex does not provide a priority ceiling, the owner of the
-      mutex shall be the calling thread.
+      The mutex shall have no owner.
   test-epilogue: null
   test-prologue: null
 - name: Priority
@@ -137,23 +99,101 @@ post-conditions:
     test-code: |
       T_eq_u32( ctx->priority_after, ctx->priority_before );
     text: |
-      The current priority of the calling thread shall not be modified.
+      The priorities of the calling thread shall not be modified.
+  - name: Ceiling
+    test-code: |
+      T_eq_u32( ctx->priority_after, ctx->tq_ctx->priority_ceiling );
+    text: |
+      The calling thread shall use the priority ceiling of the mutex.
+  test-epilogue: null
+  test-prologue: null
+pre-conditions:
+- name: Protocol
+  states:
   - name: Ceiling
     test-code: |
+      if (
+        ctx->tq_ctx->priority_ceiling == PRIO_INVALID ||
+        ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY
+      ) {
+        ${.:skip}
+      }
+    text: |
+      Where the mutex uses the priority ceiling locking protocol.
+  - name: MrsP
+    test-code: |
+      if (
+        ctx->tq_ctx->priority_ceiling == PRIO_INVALID ||
+        ctx->tq_ctx->base.enqueue_variant != TQ_ENQUEUE_STICKY
+      ) {
+        ${.:skip}
+      }
+    text: |
+      Where the mutex uses the MrsP locking protocol.
+  - name: Other
+    test-code: |
       if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
-        T_eq_u32( ctx->priority_after, ctx->tq_ctx->priority_ceiling );
-      } else {
-        T_eq_u32( ctx->priority_after, ctx->priority_before );
+        ${.:skip}
       }
     text: |
-      Where the mutex provides a priority ceiling, the calling thread shall use
-      the priority ceiling of the mutex.
-
-      Where the mutex does not provide a priority ceiling, the current priority
-      of the calling thread shall not be modified.
+      Where the mutex does not use the priority ceiling or MrsP locking
+      protocol.
+  test-epilogue: null
+  test-prologue: null
+- name: Discipline
+  states:
+  - name: FIFO
+    test-code: |
+      if ( ctx->tq_ctx->base.discipline != TQ_FIFO ) {
+        ${.:skip}
+      }
+    text: |
+      Where the thread queue of the mutex uses the FIFO discipline.
+  - name: Priority
+    test-code: |
+      if ( ctx->tq_ctx->base.discipline != TQ_PRIORITY ) {
+        ${.:skip}
+      }
+    text: |
+      Where the thread queue of the mutex uses the priority discipline.
+  test-epilogue: null
+  test-prologue: null
+- name: DeadlockResult
+  states:
+  - name: Status
+    test-code: |
+      if ( ctx->tq_ctx->deadlock != TQ_MTX_DEADLOCK_STATUS ) {
+        ${.:skip}
+      }
+    text: |
+      Where a detected deadlock results in a return with a status code.
+  - name: Fatal
+    test-code: |
+      if ( ctx->tq_ctx->deadlock != TQ_MTX_DEADLOCK_FATAL ) {
+        ${.:skip}
+      }
+    text: |
+      Where a detected deadlock results in a fatal error.
+  test-epilogue: null
+  test-prologue: null
+- name: Recursive
+  states:
+  - name: Allowed
+    test-code: |
+      if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_ALLOWED ) {
+        ${.:skip}
+      }
+    text: |
+      Where a recursive seize of the mutex is allowed.
+  - name: Deadlock
+    test-code: |
+      if ( ctx->tq_ctx->recursive != TQ_MTX_RECURSIVE_DEADLOCK ) {
+        ${.:skip}
+      }
+    text: |
+      Where a recursive seize of the mutex results in a deadlock.
   test-epilogue: null
   test-prologue: null
-pre-conditions:
 - name: Owner
   states:
   - name: 'No'
@@ -173,6 +213,7 @@ pre-conditions:
       While the owner of the mutex is a thread other than the calling thread.
   - name: Deadlock
     test-code: |
+      ctx->owner_other = true;
       ctx->deadlock = true;
     text: |
       While the attempt to seize the mutex results in a deadlock.
@@ -180,24 +221,24 @@ pre-conditions:
   test-prologue: null
 - name: Priority
   states:
-  - name: LT
+  - name: High
     test-code: |
       ctx->priority_before = ctx->tq_ctx->priority_ceiling - 1;
     text: |
-      Where the mutex provides a priority ceiling, while the calling thread has
-      a ${/glossary/priority-current:/term} less than the priority ceiling.
-  - name: EQ
+      While the calling thread has a ${/glossary/priority-current:/term} higher
+      than the priority ceiling.
+  - name: Equal
     test-code: |
       ctx->priority_before = ctx->tq_ctx->priority_ceiling;
     text: |
-      Where the mutex provides a priority ceiling, while the calling thread has
-      a ${/glossary/priority-current:/term} equal to the priority ceiling.
-  - name: GT
+      While the calling thread has a ${/glossary/priority-current:/term} equal
+      to the priority ceiling.
+  - name: Low
     test-code: |
       ctx->priority_before = ctx->tq_ctx->priority_ceiling + 1;
     text: |
-      Where the mutex provides a priority ceiling, while the calling thread has
-      a ${/glossary/priority-current:/term} greater than the priority ceiling.
+      While the calling thread has a ${/glossary/priority-current:/term} lower
+      than the priority ceiling.
   test-epilogue: null
   test-prologue: null
 rationale: null
@@ -206,10 +247,22 @@ requirement-type: functional
 skip-reasons:
   CeilingOwner: |
     Where the mutex provides a priority ceiling, the owner of the mutex cannot
-    have a ${/glossary/priority-current:/term} greater than the priority ceiling.
+    have a ${/glossary/priority-current:/term} lower than the priority ceiling.
+  PriorityDisciplineByProtocol: |
+    The priority ceiling and MrsP locking protocols require a priority
+    discipline.
 test-action: |
-  if ( !ctx->deadlock ) {
-    NonDeadlockAction( ctx );
+  TQSetScheduler(
+    &ctx->tq_ctx->base,
+    TQ_BLOCKER_B,
+    ctx->tq_ctx->base.runner_scheduler_id,
+    PRIO_VERY_HIGH
+  );
+
+  if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
+    ActionSticky( ctx );
+  } else {
+    Action( ctx );
   }
 test-brief: null
 test-cleanup: null
@@ -238,11 +291,6 @@ test-context:
   member: |
     rtems_task_priority priority_before;
 - brief: |
-    This member contains the return status of the directive call.
-  description: null
-  member: |
-    Status_Control status
-- brief: |
     This member contains the owner of the mutex after the directive call.
   description: null
   member: |
@@ -268,8 +316,7 @@ test-header:
     name: tq_ctx
     specifier: TQMtxContext *${.:name}
   target: testsuites/validation/tr-mtx-seize-wait.h
-test-includes:
-- rtems/score/statesimpl.h
+test-includes: []
 test-local-includes:
 - tr-mtx-seize-wait.h
 - tr-tq-enqueue-ceiling.h
@@ -280,10 +327,10 @@ test-local-includes:
 test-prepare: |
   ctx->tq_ctx->base.enqueue_prepare = TQEnqueuePrepareClassicSem;
   ctx->tq_ctx->base.enqueue_done = TQSurrenderClassicSem;
-  ctx->tq_ctx->base.get_properties = GetProperties;
   ctx->owner_caller = false;
   ctx->owner_other = false;
   ctx->deadlock = false;
+  ctx->priority_before = PRIO_VERY_HIGH;
 test-setup: null
 test-stop: null
 test-support: |
@@ -294,90 +341,180 @@ test-support: |
     return TQConvertStatus( &ctx->tq_ctx->base, status );
   }
 
-  static void GetProperties( TQContext *base, TQWorkerKind enqueued_worker )
+  static bool IsEnqueueStatus( const Context *ctx, Status_Control expected )
   {
-    TQMtxContext        *ctx;
-    T_thread_timer_state timer_state;
+    return ctx->tq_ctx->base.status[ TQ_BLOCKER_A ] == Status( ctx, expected );
+  }
 
-    ctx = (TQMtxContext *) base;
-    T_eq_u32(
-      ctx->base.worker_tcb[ enqueued_worker ]->current_state,
-      STATES_WAITING_FOR_MUTEX
+  static void Action( Context *ctx )
+  {
+    TQSetScheduler(
+      &ctx->tq_ctx->base,
+      TQ_BLOCKER_A,
+      ctx->tq_ctx->base.runner_scheduler_id,
+      PRIO_VERY_HIGH
     );
 
-    timer_state = T_get_thread_timer_state(
-      ctx->base.worker_id[ enqueued_worker ]
-    );
+    if ( ctx->owner_caller ) {
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+    } else if ( ctx->owner_other ) {
+      if ( ctx->deadlock ) {
+        TQSend(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_A,
+          TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+        );
+      }
 
-    if ( base->wait == TQ_WAIT_TICKS ) {
-      T_eq_int( timer_state, T_THREAD_TIMER_SCHEDULED );
-    } else {
-      T_eq_int( timer_state, T_THREAD_TIMER_INACTIVE );
-    }
-  }
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_B, TQ_EVENT_ENQUEUE );
 
-  static void NonDeadlockAction( Context *ctx )
-  {
-    rtems_task_priority priority;
+      if ( ctx->deadlock ) {
+        TQSend(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_B,
+          TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+        );
+      }
+    }
 
-    priority = GetSelfPriority();
+    TQSetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A, ctx->priority_before );
+    TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
+    ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+    ctx->priority_after = TQGetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A );
 
     if ( ctx->owner_caller ) {
-      Status_Control status;
-
-      status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
-      T_eq_int( status, Status( ctx, STATUS_SUCCESSFUL ) );
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
     } else if ( ctx->owner_other ) {
-      if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
-        TQSetScheduler(
+      if ( ctx->deadlock ) {
+        TQSend(
           &ctx->tq_ctx->base,
           TQ_BLOCKER_A,
-          ctx->tq_ctx->base.other_scheduler_id,
-          PRIO_HIGH
+          TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
         );
+      }
+
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_B, TQ_EVENT_SURRENDER );
+
+      if ( ctx->deadlock ) {
         TQSend(
           &ctx->tq_ctx->base,
-          TQ_BLOCKER_A,
-          TQ_EVENT_ENQUEUE | TQ_EVENT_RUNNER_SYNC
+          TQ_BLOCKER_B,
+          TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
         );
-        TQSynchronizeRunner();
-      } else {
-        TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
       }
     }
 
-    if ( ctx->tq_ctx->priority_ceiling != PRIO_INVALID ) {
-      SetSelfPriority( ctx->priority_before );
-    } else {
-      ctx->priority_before = GetSelfPriority();
+    if ( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) ) {
+      TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
     }
+  }
 
-    ctx->status = TQEnqueue( &ctx->tq_ctx->base, TQ_NO_WAIT );
-    ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
-    ctx->priority_after = GetSelfPriority();
+  static void ActionSticky( Context *ctx )
+  {
+    TQSetScheduler(
+      &ctx->tq_ctx->base,
+      TQ_BLOCKER_A,
+      ctx->tq_ctx->base.other_scheduler_id,
+      PRIO_VERY_HIGH
+    );
 
     if ( ctx->owner_caller ) {
-      TQSurrender( &ctx->tq_ctx->base );
+      TQSendAndSynchronizeRunner(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        TQ_EVENT_ENQUEUE
+      );
     } else if ( ctx->owner_other ) {
-      if ( ctx->tq_ctx->base.enqueue_variant == TQ_ENQUEUE_STICKY ) {
-        TQSend(
+      if ( ctx->deadlock ) {
+        TQSendAndSynchronizeRunner(
           &ctx->tq_ctx->base,
           TQ_BLOCKER_A,
-          TQ_EVENT_SURRENDER | TQ_EVENT_RUNNER_SYNC
+          TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
         );
-        TQSynchronizeRunner();
-        TQSetScheduler(
+      }
+
+      SetSelfScheduler(
+        ctx->tq_ctx->base.other_scheduler_id,
+        PRIO_ULTRA_HIGH
+      );
+      TQSendAndSynchronizeRunner(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_B,
+        TQ_EVENT_ENQUEUE
+      );
+
+      if ( ctx->deadlock ) {
+        TQSendAndWaitForExecutionStop(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_B,
+          TQ_EVENT_MUTEX_NO_PROTOCOL_OBTAIN
+        );
+      }
+
+      SetSelfScheduler(
+        ctx->tq_ctx->base.runner_scheduler_id,
+        PRIO_ULTRA_HIGH
+      );
+    }
+
+    TQSetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A, ctx->priority_before );
+    TQClearDone( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+    TQSendAndWaitForExecutionStopOrIntendToBlock(
+      &ctx->tq_ctx->base,
+      TQ_BLOCKER_A,
+      TQ_EVENT_ENQUEUE
+    );
+    ctx->owner_after = TQGetOwner( &ctx->tq_ctx->base );
+    ctx->priority_after = TQGetPriority( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+    if ( ctx->owner_caller ) {
+      TQSendAndSynchronizeRunner(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        TQ_EVENT_SURRENDER
+      );
+    } else if ( ctx->owner_other ) {
+      if ( ctx->deadlock ) {
+        TQSendAndSynchronizeRunner(
           &ctx->tq_ctx->base,
           TQ_BLOCKER_A,
-          ctx->tq_ctx->base.runner_scheduler_id,
-          PRIO_HIGH
+          TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
         );
-      } else {
-        TQSend( &ctx->tq_ctx->base, TQ_BLOCKER_A, TQ_EVENT_SURRENDER );
       }
+
+      SetSelfScheduler(
+        ctx->tq_ctx->base.other_scheduler_id,
+        PRIO_ULTRA_HIGH
+      );
+      TQSendAndSynchronizeRunner(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_B,
+        TQ_EVENT_SURRENDER
+      );
+
+      if ( ctx->deadlock ) {
+        TQSendAndSynchronizeRunner(
+          &ctx->tq_ctx->base,
+          TQ_BLOCKER_B,
+          TQ_EVENT_MUTEX_NO_PROTOCOL_RELEASE
+        );
+      }
+
+      SetSelfScheduler(
+        ctx->tq_ctx->base.runner_scheduler_id,
+        PRIO_NORMAL
+      );
     }
 
-    SetSelfPriority( priority );
+    TQWaitForDone( &ctx->tq_ctx->base, TQ_BLOCKER_A );
+
+    if ( IsEnqueueStatus( ctx, STATUS_SUCCESSFUL ) ) {
+      TQSendAndSynchronizeRunner(
+        &ctx->tq_ctx->base,
+        TQ_BLOCKER_A,
+        TQ_EVENT_SURRENDER
+      );
+    }
   }
 test-target: testsuites/validation/tr-mtx-seize-wait.c
 test-teardown: null
@@ -387,58 +524,303 @@ transition-map:
 - enabled-by: true
   post-conditions:
     Status: Ok
+    Enqueued: 'No'
     Owner: Caller
-    Priority: Ceiling
+    Priority: Nop
   pre-conditions:
+    Protocol:
+    - Other
+    Discipline: all
+    DeadlockResult: all
+    Recursive: all
     Owner:
     - 'No'
-    Priority:
-    - EQ
-    - GT
+    Priority: N/A
 - enabled-by: true
   post-conditions:
-    Status: OkOrMutexCeilingViolated
-    Owner: CallerOrNoOwner
+    Status: N/A
+    Enqueued:
+    - specified-by: Discipline
+    Owner: Other
     Priority: Nop
   pre-conditions:
+    Protocol:
+    - Other
+    Discipline: all
+    DeadlockResult: all
+    Recursive: all
     Owner:
-    - 'No'
+    - Other
+    Priority: N/A
+- enabled-by: true
+  post-conditions:
+    Status: Ok
+    Enqueued: 'No'
+    Owner: Caller
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - Other
+    Discipline: all
+    DeadlockResult: all
+    Recursive:
+    - Allowed
+    Owner:
+    - Caller
+    Priority: N/A
+- enabled-by: true
+  post-conditions:
+    Status: Ok
+    Enqueued: 'No'
+    Owner: Caller
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - Ceiling
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive:
+    - Allowed
+    Owner:
+    - Caller
     Priority:
-    - LT
+    - High
+    - Equal
+- enabled-by: true
+  post-conditions:
+    Status:
+    - if:
+        pre-conditions:
+           DeadlockResult: Fatal
+      then: DeadlockFatal
+    - else: DeadlockStatus
+    Enqueued: 'No'
+    Owner: Caller
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - Other
+    Discipline: all
+    DeadlockResult: all
+    Recursive:
+    - Deadlock
+    Owner:
+    - Caller
+    Priority: N/A
 - enabled-by: true
   post-conditions:
-    Status: Recursive
+    Status:
+    - if:
+        pre-conditions:
+           DeadlockResult: Fatal
+      then: DeadlockFatal
+    - else: DeadlockStatus
+    Enqueued: 'No'
     Owner: Caller
     Priority: Nop
   pre-conditions:
+    Protocol:
+    - Ceiling
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive:
+    - Deadlock
     Owner:
     - Caller
     Priority:
-    - LT
-    - EQ
+    - High
+    - Equal
+- enabled-by: true
+  post-conditions:
+    Status:
+    - if:
+        pre-conditions:
+           DeadlockResult: Fatal
+      then: DeadlockFatal
+    - else: DeadlockStatus
+    Enqueued: 'No'
+    Owner: Other
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - Other
+    Discipline: all
+    DeadlockResult: all
+    Recursive: all
+    Owner:
+    - Deadlock
+    Priority: N/A
 - enabled-by: true
   post-conditions:
-    Status: Enqueued
+    Status:
+    - if:
+        pre-conditions:
+           DeadlockResult: Fatal
+      then: DeadlockFatal
+    - else: DeadlockStatus
+    Enqueued: 'No'
     Owner: Other
     Priority: Nop
   pre-conditions:
+    Protocol:
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
+    Owner:
+    - Deadlock
+    Priority:
+    - Low
+    - Equal
+- enabled-by: true
+  post-conditions:
+    Status: Ok
+    Enqueued: 'No'
+    Owner: Caller
+    Priority: Ceiling
+  pre-conditions:
+    Protocol:
+    - Ceiling
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
+    Owner:
+    - 'No'
+    Priority:
+    - Low
+    - Equal
+- enabled-by: true
+  post-conditions:
+    Status: N/A
+    Enqueued: PriorityCeiling
+    Owner: Other
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - Ceiling
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
     Owner:
     - Other
     Priority: all
 - enabled-by: true
   post-conditions:
-    Status: Deadlock
-    Owner: N/A
-    Priority: N/A
+    Status:
+    - if:
+        pre-conditions:
+           DeadlockResult: Fatal
+      then: DeadlockFatal
+    - else: DeadlockStatus
+    Enqueued: 'No'
+    Owner: Other
+    Priority: Nop
   pre-conditions:
+    Protocol:
+    - Ceiling
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
     Owner:
     - Deadlock
     Priority: all
 - enabled-by: true
+  post-conditions:
+    Status: N/A
+    Enqueued: PriorityMrsP
+    Owner: Other
+    Priority: Ceiling
+  pre-conditions:
+    Protocol:
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
+    Owner:
+    - Other
+    Priority:
+    - Low
+    - Equal
+- enabled-by: true
+  post-conditions:
+    Status: MutexCeilingViolated
+    Enqueued: 'No'
+    Owner:
+    - if:
+        pre-conditions:
+          Owner: 'No'
+      then: None
+    - else: Other
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - Ceiling
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
+    Owner:
+    - 'No'
+    Priority:
+    - High
+- enabled-by: true
+  post-conditions:
+    Status: MutexCeilingViolated
+    Enqueued: 'No'
+    Owner:
+    - if:
+        pre-conditions:
+          Owner: 'No'
+      then: None
+    - else: Other
+    Priority: Nop
+  pre-conditions:
+    Protocol:
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
+    Owner:
+    - 'No'
+    - Other
+    - Deadlock
+    Priority:
+    - High
+- enabled-by: true
   post-conditions: CeilingOwner
   pre-conditions:
+    Protocol:
+    - Ceiling
+    - MrsP
+    Discipline:
+    - Priority
+    DeadlockResult: all
+    Recursive: all
     Owner:
     - Caller
     Priority:
-    - GT
+    - Low
+- enabled-by: true
+  post-conditions: PriorityDisciplineByProtocol
+  pre-conditions:
+    Protocol:
+    - Ceiling
+    - MrsP
+    Discipline:
+    - FIFO
+    DeadlockResult: all
+    Recursive: all
+    Owner: all
+    Priority: all
 type: requirement
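
The new MutexCeilingViolated status and the Priority High/Equal/Low
pre-condition states above follow RTEMS priority semantics, where a
numerically lower value is a higher priority.  A hedged Classic API sketch
of the violation case (the concrete status code returned for the violation
is not asserted here):

  #include <rtems.h>
  #include <assert.h>

  static void ceiling_violation_demo( void )
  {
    rtems_status_code sc;
    rtems_id          ceiling_mtx;

    /* Priority ceiling mutex with a ceiling of 10 */
    sc = rtems_semaphore_create(
      rtems_build_name( 'M', 'U', 'T', 'X' ),
      1,
      RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
      10,
      &ceiling_mtx
    );
    assert( sc == RTEMS_SUCCESSFUL );

    /* When called from a task running at priority 5, i.e. higher than the
     * ceiling, the seize is refused with a ceiling-violated status rather
     * than breaking the ceiling invariant.
     */
    sc = rtems_semaphore_obtain( ceiling_mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
    assert( sc != RTEMS_SUCCESSFUL );
  }
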
diff --git a/spec/score/tq/req/enqueue-deadlock.yml b/spec/score/tq/req/enqueue-deadlock.yml
index f2a9600..2b44165 100644
--- a/spec/score/tq/req/enqueue-deadlock.yml
+++ b/spec/score/tq/req/enqueue-deadlock.yml
@@ -52,6 +52,13 @@ test-action: |
       ctx->tq_ctx->other_scheduler_id,
       PRIO_HIGH
     );
+  } else {
+    TQSetScheduler(
+      ctx->tq_ctx,
+      TQ_BLOCKER_A,
+      ctx->tq_ctx->runner_scheduler_id,
+      PRIO_HIGH
+    );
   }
 
   TQSendAndWaitForExecutionStop( ctx->tq_ctx, TQ_BLOCKER_A, TQ_EVENT_ENQUEUE );
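
The new else branch keeps TQ_BLOCKER_A on the runner's scheduler in the
single-scheduler variant of the scenario.  The deadlock exercised by this
spec item is the usual two-party cycle; schematically (thread and mutex
names are illustrative only):

  /*
   *   runner                    TQ_BLOCKER_A
   *   ------                    ------------
   *   obtain( M1 )
   *                             obtain( M2 )
   *                             obtain( M1 )   <- blocks, owner is runner
   *   obtain( M2 )              <- closes the cycle; deadlock detection
   *                                yields STATUS_DEADLOCK or the
   *                                thread-queue-deadlock fatal error,
   *                                per the DeadlockResult pre-condition
   */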
