[rtems-central commit] spec: Specify thread queue deadlock details

Sebastian Huber sebh at rtems.org
Tue Sep 28 09:14:44 UTC 2021


Module:    rtems-central
Branch:    master
Commit:    3eeb04c51547f577c6da75a1d9ffd5e525fcd7da
Changeset: http://git.rtems.org/rtems-central/commit/?id=3eeb04c51547f577c6da75a1d9ffd5e525fcd7da

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Mon Sep 27 07:13:50 2021 +0200

spec: Specify thread queue deadlock details

---

 spec/score/tq/req/deadlock-concurrent.yml |  17 ++
 spec/score/tq/req/deadlock.yml            |  14 ++
 spec/score/tq/req/enqueue-deadlock.yml    |   2 +-
 spec/score/tq/val/smp.yml                 | 307 ++++++++++++++++++++++++++----
 4 files changed, 299 insertions(+), 41 deletions(-)

diff --git a/spec/score/tq/req/deadlock-concurrent.yml b/spec/score/tq/req/deadlock-concurrent.yml
new file mode 100644
index 0000000..f959c09
--- /dev/null
+++ b/spec/score/tq/req/deadlock-concurrent.yml
@@ -0,0 +1,17 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+  uid: deadlock
+functional-type: function
+rationale: null
+references: []
+requirement-type: functional
+text: |
+  While a thread ``A`` tries to enqueue on a thread queue ``X``, while a thread
+  ``B`` tries to enqueue on a thread queue ``Y``, while the thread queue
+  dependency graph built up by threads ``A`` and ``B`` contains a cycle, the
+  deadlock shall be detected.
+type: requirement
diff --git a/spec/score/tq/req/deadlock.yml b/spec/score/tq/req/deadlock.yml
new file mode 100644
index 0000000..74fd429
--- /dev/null
+++ b/spec/score/tq/req/deadlock.yml
@@ -0,0 +1,14 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: true
+links:
+- role: requirement-refinement
+  uid: ../if/group
+non-functional-type: design
+rationale: null
+references: []
+requirement-type: non-functional
+text: |
+  The thread queue operations shall support deadlock detection.
+type: requirement
diff --git a/spec/score/tq/req/enqueue-deadlock.yml b/spec/score/tq/req/enqueue-deadlock.yml
index 057b905..a7df5f9 100644
--- a/spec/score/tq/req/enqueue-deadlock.yml
+++ b/spec/score/tq/req/enqueue-deadlock.yml
@@ -5,7 +5,7 @@ enabled-by: true
 functional-type: action
 links:
 - role: requirement-refinement
-  uid: ../if/group
+  uid: deadlock
 post-conditions:
 - name: Result
   states:
diff --git a/spec/score/tq/val/smp.yml b/spec/score/tq/val/smp.yml
index 365f07e..98317dd 100644
--- a/spec/score/tq/val/smp.yml
+++ b/spec/score/tq/val/smp.yml
@@ -8,13 +8,12 @@ test-actions:
     Create two worker threads and a mutex.  Use the mutex and the worker to do
     a thread priority change in parallel with a thread queue extraction.
   action-code: |
-    SetSelfPriority( PRIO_NORMAL );
     _SMP_barrier_Control_initialize( &ctx->barrier );
     _SMP_barrier_State_initialize( &ctx->barrier_state );
     WrapThreadQueueInitialize( &ctx->wrap, Extract, ctx );
 
-    ctx->mutex_id = CreateMutex();
-    ctx->thread_queue = GetMutexThreadQueue( ctx->mutex_id );
+    ctx->mutex_a_id = CreateMutex();
+    ctx->thread_queue = GetMutexThreadQueue( ctx->mutex_a_id );
   checks:
   - brief: |
       Create and start worker A on a second processor.  Let it obtain the
@@ -22,9 +21,9 @@ test-actions:
     code: |
       ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
       SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
-      StartTask( ctx->worker_a_id, WorkerA, ctx );
+      StartTask( ctx->worker_a_id, PriorityChangeWorkerA, ctx );
 
-      /* B0 */
+      /* PC0 */
       _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );
     links: []
   - brief: |
@@ -34,7 +33,7 @@ test-actions:
       change carried out by worker A.
     code: |
       ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
-      StartTask( ctx->worker_b_id, WorkerB, ctx );
+      StartTask( ctx->worker_b_id, PriorityChangeWorkerB, ctx );
       WrapThreadQueueExtractDirect( &ctx->wrap, GetThread( ctx->worker_b_id ) );
       DeleteTask( ctx->worker_b_id );
     links:
@@ -45,19 +44,143 @@ test-actions:
   - brief: |
       Clean up all used resources.
     code: |
-      /* B2 */
+      /* PC2 */
       _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );
 
+      WaitForExecutionStop( ctx->worker_a_id );
       DeleteTask( ctx->worker_a_id );
-      DeleteMutex( ctx->mutex_id );
+      DeleteMutex( ctx->mutex_a_id );
       WrapThreadQueueDestroy( &ctx->wrap );
-      RestoreRunnerPriority();
+    links: []
+  links: []
+- action-brief: |
+    Build a cyclic dependency graph using several worker threads and mutexes.
+    Use the mutexes and the workers to construct a thread queue deadlock which
+    is detected on one processor while it uses thread queue links inserted by
+    another processor.  The runner thread controls the test scenario via the
+    two thread queue locks.  This is an important test scenario which shows why
+    the thread queue implementation is a bit more complicated in SMP
+    configurations.
+  action-code: |
+    Thread_queue_Queue *queue_b;
+    Thread_queue_Queue *queue_c;
+    ISR_lock_Context    lock_context;
+    SMP_barrier_State   state;
+
+    if ( rtems_scheduler_get_processor_maximum() <= 2 ) {
+      /*
+       * We can only run this validation test on systems with three or more
+       * processors.  The sequence under test can happen on systems with only two
+       * processors, however, we need a third processor to control the other two
+       * processors via ISR locks to get a deterministic test scenario.
+       */
+      return;
+    }
+
+    ctx->runner_id = rtems_task_self();
+
+    _SMP_barrier_Control_initialize( &ctx->barrier );
+    _SMP_barrier_State_initialize( &state );
+
+    ctx->mutex_a_id = CreateMutexNoProtocol();
+    ctx->mutex_b_id = CreateMutexNoProtocol();
+    ctx->mutex_c_id = CreateMutexNoProtocol();
+    ctx->mutex_d_id = CreateMutexNoProtocol();
+
+    queue_b = GetMutexThreadQueue( ctx->mutex_b_id );
+    queue_c = GetMutexThreadQueue( ctx->mutex_c_id );
+
+    ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
+    ctx->worker_b_id = CreateTask( "WRKB", PRIO_NORMAL );
+    ctx->worker_c_id = CreateTask( "WRKC", PRIO_NORMAL );
+    ctx->worker_d_id = CreateTask( "WRKD", PRIO_NORMAL );
+    ctx->worker_e_id = CreateTask( "WRKE", PRIO_NORMAL );
+
+    SetScheduler( ctx->worker_a_id, SCHEDULER_B_ID, PRIO_NORMAL );
+    SetScheduler( ctx->worker_b_id, SCHEDULER_B_ID, PRIO_HIGH );
+    SetScheduler( ctx->worker_c_id, SCHEDULER_B_ID, PRIO_HIGH );
+    SetScheduler( ctx->worker_d_id, SCHEDULER_B_ID, PRIO_HIGH );
+    SetScheduler( ctx->worker_e_id, SCHEDULER_C_ID, PRIO_NORMAL );
+  checks:
+  - brief: |
+      Let worker D wait for mutex A.  Let worker C wait for mutex D.  Let
+      worker B wait for mutex C.
+    code: |
+      StartTask( ctx->worker_a_id, DeadlockWorkerA, ctx );
+
+      /* D0 */
+      _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+      StartTask( ctx->worker_d_id, DeadlockWorkerD, ctx );
+      StartTask( ctx->worker_c_id, DeadlockWorkerC, ctx );
+      StartTask( ctx->worker_b_id, DeadlockWorkerB, ctx );
+      ReceiveAllEvents( RTEMS_EVENT_5 );
+      WaitForExecutionStop( ctx->worker_b_id );
+    links: []
+  - brief: |
+      Let worker A attempt to obtain mutex B.  Let worker A wait on the lock of
+      mutex C.  Worker A will insert two thread queue links.
+    code: |
+      _ISR_lock_ISR_disable( &lock_context );
+      _Thread_queue_Queue_acquire_critical(
+        queue_c,
+        &_Thread_Executing->Potpourri_stats,
+        &lock_context
+      );
+      _ISR_lock_ISR_enable( &lock_context );
+
+      /* D1 */
+      _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+      TicketLockWaitForOthers( &queue_c->Lock, 1 );
+    links: []
+  - brief: |
+      Let worker E try to obtain mutex D.  Worker E will add a thread queue
+      link which is later used by worker A to detect the deadlock.
+    code: |
+      StartTask( ctx->worker_e_id, DeadlockWorkerE, ctx );
+      TicketLockWaitForOthers( &queue_b->Lock, 1 );
+    links: []
+  - brief: |
+      Let worker A continue the obtain sequence.  It will detect a deadlock.
+    code: |
+      _ISR_lock_ISR_disable( &lock_context );
+      _Thread_queue_Queue_release( queue_c, &lock_context );
+    links:
+    - role: validation
+      uid: ../req/deadlock-concurrent
+  - brief: |
+      Clean up all used resources.
+    code: |
+      ReceiveAllEvents(
+        RTEMS_EVENT_0 | RTEMS_EVENT_1 | RTEMS_EVENT_2 | RTEMS_EVENT_3 |
+        RTEMS_EVENT_4
+      );
+      WaitForExecutionStop( ctx->worker_a_id );
+      WaitForExecutionStop( ctx->worker_b_id );
+      WaitForExecutionStop( ctx->worker_c_id );
+      WaitForExecutionStop( ctx->worker_d_id );
+      WaitForExecutionStop( ctx->worker_e_id );
+      DeleteTask( ctx->worker_a_id );
+      DeleteTask( ctx->worker_b_id );
+      DeleteTask( ctx->worker_c_id );
+      DeleteTask( ctx->worker_d_id );
+      DeleteTask( ctx->worker_e_id );
+      DeleteMutex( ctx->mutex_a_id );
+      DeleteMutex( ctx->mutex_b_id );
+      DeleteMutex( ctx->mutex_c_id );
+      DeleteMutex( ctx->mutex_d_id );
     links: []
   links: []
 test-brief: |
   Tests SMP-specific thread queue behaviour.
 test-context:
 - brief: |
+    This member contains the runner identifier.
+  description: null
+  member: |
+    rtems_id runner_id
+- brief: |
     This member contains the worker A identifier.
   description: null
   member: |
@@ -68,10 +191,40 @@ test-context:
   member: |
     rtems_id worker_b_id
 - brief: |
-    This member contains the mutex identifier.
+    This member contains the worker C identifier.
   description: null
   member: |
-    rtems_id mutex_id
+    rtems_id worker_c_id
+- brief: |
+    This member contains the worker D identifier.
+  description: null
+  member: |
+    rtems_id worker_d_id
+- brief: |
+    This member contains the worker E identifier.
+  description: null
+  member: |
+    rtems_id worker_e_id
+- brief: |
+    This member contains the mutex A identifier.
+  description: null
+  member: |
+    rtems_id mutex_a_id
+- brief: |
+    This member contains the mutex B identifier.
+  description: null
+  member: |
+    rtems_id mutex_b_id
+- brief: |
+    This member contains the mutex C identifier.
+  description: null
+  member: |
+    rtems_id mutex_c_id
+- brief: |
+    This member contains the mutex D identifier.
+  description: null
+  member: |
+    rtems_id mutex_d_id
 - brief: |
     This member contains the thread queue of the mutex.
   description: null
@@ -83,8 +236,7 @@ test-context:
   member: |
     WrapThreadQueueContext wrap
 - brief: |
-    This member contains the barrier to synchronize the runner, worker A, and
-    worker B.
+    This member contains the barrier to synchronize the runner and the workers.
   description: null
   member: |
     SMP_barrier_Control barrier
@@ -98,39 +250,32 @@ test-description: null
 test-header: null
 test-includes:
 - rtems/score/smpbarrier.h
-- rtems/score/threadq.h
+- rtems/score/threadqimpl.h
+- rtems/score/threadimpl.h
 test-local-includes:
 - tx-support.h
-test-setup: null
+test-setup:
+  brief: null
+  code: |
+    SetSelfPriority( PRIO_NORMAL );
+  description: null
 test-stop: null
 test-support: |
   typedef ${.:/test-context-type} Context;
 
   static void Extract( void *arg )
   {
-    Context     *ctx;
-    unsigned int ticket_0;
-    unsigned int ticket_1;
+    Context *ctx;
 
     ctx = arg;
 
-    ticket_0 = _Atomic_Load_uint(
-      &ctx->thread_queue->Lock.next_ticket,
-      ATOMIC_ORDER_RELAXED
-    );
-
-    /* B1 */
+    /* PC1 */
     _SMP_barrier_Wait( &ctx->barrier, &ctx->barrier_state, 2 );
 
     /*
      * Ensure that worker A acquired the thread wait lock of worker B.
      */
-    do {
-      ticket_1 = _Atomic_Load_uint(
-        &ctx->thread_queue->Lock.next_ticket,
-        ATOMIC_ORDER_RELAXED
-      );
-    } while ( ticket_0 == ticket_1 );
+    TicketLockWaitForOthers( &ctx->thread_queue->Lock, 1 );
 
     /*
      * Continue with the thread queue extraction.  The thread wait lock of
@@ -143,7 +288,7 @@ test-support: |
      */
   }
 
-  static void WorkerA( rtems_task_argument arg )
+  static void PriorityChangeWorkerA( rtems_task_argument arg )
   {
     Context          *ctx;
     SMP_barrier_State state;
@@ -151,31 +296,113 @@ test-support: |
     ctx = (Context *) arg;
     _SMP_barrier_State_initialize( &state );
 
-    ObtainMutex( ctx->mutex_id );
+    ObtainMutex( ctx->mutex_a_id );
 
-    /* B0 */
+    /* PC0 */
     _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
 
-    /* B1 */
+    /* PC1 */
     _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
 
     SetPriority( ctx->worker_b_id, PRIO_VERY_HIGH );
-    ReleaseMutex( ctx->mutex_id );
+    ReleaseMutex( ctx->mutex_a_id );
+
+    /* PC2 */
+    _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
+
+    (void) ReceiveAnyEvents();
+  }
+
+  static void PriorityChangeWorkerB( rtems_task_argument arg )
+  {
+    Context *ctx;
+
+    ctx = (Context *) arg;
+
+    ObtainMutex( ctx->mutex_a_id );
+  }
+
+  static void DeadlockWorkerA( rtems_task_argument arg )
+  {
+    Context          *ctx;
+    SMP_barrier_State state;
+
+    ctx = (Context *) arg;
+    _SMP_barrier_State_initialize( &state );
+
+    ObtainMutex( ctx->mutex_a_id );
+
+    /* D0 */
+    _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
 
-    /* B2 */
+    /* D1 */
     _SMP_barrier_Wait( &ctx->barrier, &state, 2 );
 
-    SuspendSelf();
+    ObtainMutexDeadlock( ctx->mutex_b_id );
+
+    ReleaseMutex( ctx->mutex_a_id );
+    SendEvents( ctx->runner_id, RTEMS_EVENT_0 );
+    (void) ReceiveAnyEvents();
+  }
+
+  static void DeadlockWorkerB( rtems_task_argument arg )
+  {
+    Context *ctx;
+
+    ctx = (Context *) arg;
+
+    ObtainMutex( ctx->mutex_b_id );
+    SendEvents( ctx->runner_id, RTEMS_EVENT_5 );
+    ObtainMutex( ctx->mutex_c_id );
+    ReleaseMutex( ctx->mutex_c_id );
+    ReleaseMutex( ctx->mutex_b_id );
+    SendEvents( ctx->runner_id, RTEMS_EVENT_1 );
+    (void) ReceiveAnyEvents();
   }
 
-  static void WorkerB( rtems_task_argument arg )
+  static void DeadlockWorkerC( rtems_task_argument arg )
   {
     Context *ctx;
 
     ctx = (Context *) arg;
 
-    ObtainMutex( ctx->mutex_id );
+    ObtainMutex( ctx->mutex_c_id );
+    ObtainMutex( ctx->mutex_d_id );
+    ReleaseMutex( ctx->mutex_d_id );
+    ReleaseMutex( ctx->mutex_c_id );
+    SendEvents( ctx->runner_id, RTEMS_EVENT_2 );
+    (void) ReceiveAnyEvents();
+  }
+
+  static void DeadlockWorkerD( rtems_task_argument arg )
+  {
+    Context *ctx;
+
+    ctx = (Context *) arg;
+
+    ObtainMutex( ctx->mutex_d_id );
+    ObtainMutex( ctx->mutex_a_id );
+    ReleaseMutex( ctx->mutex_a_id );
+    ReleaseMutex( ctx->mutex_d_id );
+    SendEvents( ctx->runner_id, RTEMS_EVENT_3 );
+    (void) ReceiveAnyEvents();
+  }
+
+  static void DeadlockWorkerE( rtems_task_argument arg )
+  {
+    Context *ctx;
+
+    ctx = (Context *) arg;
+
+    ObtainMutex( ctx->mutex_d_id );
+    ReleaseMutex( ctx->mutex_d_id );
+    SendEvents( ctx->runner_id, RTEMS_EVENT_4 );
+    (void) ReceiveAnyEvents();
   }
 test-target: testsuites/validation/tc-score-tq-smp.c
-test-teardown: null
+test-teardown:
+  brief: null
+  code: |
+    RestoreRunnerPriority();
+  description: null
 type: test-case



More information about the vc mailing list