[rtems-central commit] spec: Remove processor special case

Sebastian Huber sebh at rtems.org
Tue Nov 9 14:47:48 UTC 2021


Module:    rtems-central
Branch:    master
Commit:    d2c27b85d24a982059d937465a913a14b2c2f7dc
Changeset: http://git.rtems.org/rtems-central/commit/?id=d2c27b85d24a982059d937465a913a14b2c2f7dc

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Mon Nov  8 09:58:04 2021 +0100

spec: Remove processor special case

---

 .../smp/req/remove-last-processor-helping.yml      |  17 ++
 spec/score/sched/smp/val/smp.yml                   | 227 +++++++++++++++++++++
 2 files changed, 244 insertions(+)

diff --git a/spec/score/sched/smp/req/remove-last-processor-helping.yml b/spec/score/sched/smp/req/remove-last-processor-helping.yml
new file mode 100644
index 0000000..db071e0
--- /dev/null
+++ b/spec/score/sched/smp/req/remove-last-processor-helping.yml
@@ -0,0 +1,17 @@
+SPDX-License-Identifier: CC-BY-SA-4.0
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+  uid: group
+functional-type: function
+rationale: null
+references: []
+requirement-type: functional
+text: |
+  While the processor allocated to a thread is owned by a
+  ${/glossary/scheduler-helping:/term}, while the processor is the only
+  processor of the scheduler, when the processor is removed, the thread shall
+  be blocked with respect to the scheduler.
+type: requirement
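
For illustration only (not part of the commit), the scenario named by this
requirement can be sketched with the directives the validation test
exercises.  The scheduler name, processor index, and function name below
are assumptions:

  /*
   * Illustrative sketch, not part of the commit.  A worker thread runs on
   * the only processor of scheduler B through a helping scheduler, for
   * example because it holds a priority-inheritance mutex contested
   * across schedulers.
   */
  #include <rtems.h>

  static void RemoveLastProcessorOfSchedulerB( void )
  {
    rtems_status_code sc;
    rtems_id          scheduler_b_id;

    sc = rtems_scheduler_ident(
      rtems_build_name( 'B', ' ', ' ', ' ' ),
      &scheduler_b_id
    );

    if ( sc == RTEMS_SUCCESSFUL ) {
      /*
       * Removing the last processor of scheduler B shall block the
       * helped thread with respect to scheduler B.
       */
      sc = rtems_scheduler_remove_processor( scheduler_b_id, 1 );
      (void) sc;
    }
  }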
diff --git a/spec/score/sched/smp/val/smp.yml b/spec/score/sched/smp/val/smp.yml
new file mode 100644
index 0000000..46bcb0f
--- /dev/null
+++ b/spec/score/sched/smp/val/smp.yml
@@ -0,0 +1,227 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links: []
+test-actions:
+- action-brief: |
+    Use the mutex and the workers to construct a state in which the last
+    processor of a scheduler is removed while a thread is scheduled.
+  action-code: |
+    SMP_barrier_State barrier_state;
+
+    _SMP_barrier_Control_initialize( &ctx->barrier );
+    _SMP_barrier_State_initialize( &barrier_state );
+
+    SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_B_ID, PRIO_NORMAL );
+  checks:
+  - brief: |
+      Let worker B help worker A.
+    code: |
+      SendEvents( ctx->worker_id[ WORKER_A ], EVENT_OBTAIN );
+      SendAndSync( ctx, WORKER_B, EVENT_OBTAIN );
+    links: []
+  - brief: |
+      Restart worker B to withdraw the help offer and wait on the barriers.
+      Move worker B to scheduler A.  Remove the processor while worker A is
+      scheduled.
+    code: |
+      SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RESTART );
+
+      /* A */
+      _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+
+      SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+      RemoveProcessor( SCHEDULER_B_ID, 1 );
+
+      /* B */
+      _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+    links:
+    - role: validation
+      uid: ../req/remove-last-processor-helping
+  - brief: |
+      Clean up all used resources.
+    code: |
+      SetPriority( ctx->runner_id, PRIO_NORMAL );
+      AddProcessor( SCHEDULER_B_ID, 1 );
+      SendEvents( ctx->worker_id[ WORKER_A ], EVENT_RELEASE );
+      SetScheduler( ctx->worker_id[ WORKER_B ], SCHEDULER_A_ID, PRIO_HIGH );
+    links: []
+  links: []
+test-brief: |
+  Tests SMP-specific scheduler behaviour.
+test-context:
+- brief: |
+    This member contains the runner identifier.
+  description: null
+  member: |
+    rtems_id runner_id
+- brief: |
+    This member contains the worker identifiers.
+  description: null
+  member: |
+    rtems_id worker_id[ WORKER_COUNT ]
+- brief: |
+    This member contains the mutex identifier.
+  description: null
+  member: |
+    rtems_id mutex_id
+- brief: |
+    This member contains the worker busy status.
+  description: null
+  member: |
+    volatile bool busy[ WORKER_COUNT ]
+- brief: |
+    This member provides the context to wrap thread queue operations.
+  description: null
+  member: |
+    WrapThreadQueueContext wrap_tq_ctx
+- brief: |
+    This member contains the call within ISR request.
+  description: null
+  member: |
+    CallWithinISRRequest request
+- brief: |
+    This member contains the barrier to synchronize the runner and the workers.
+  description: null
+  member: |
+    SMP_barrier_Control barrier
+test-context-support: |
+  typedef enum {
+    WORKER_A,
+    WORKER_B,
+    WORKER_COUNT
+  } WorkerIndex;
+test-description: null
+test-header: null
+test-includes:
+- rtems.h
+- rtems/test-scheduler.h
+- rtems/score/smpbarrier.h
+test-local-includes:
+- tx-support.h
+test-setup:
+  brief: null
+  code: |
+    ctx->runner_id = rtems_task_self();
+    ctx->mutex_id = CreateMutex();
+
+    SetSelfPriority( PRIO_NORMAL );
+
+    ctx->worker_id[ WORKER_A ] = CreateTask( "WRKA", PRIO_HIGH );
+    StartTask( ctx->worker_id[ WORKER_A ], WorkerA, ctx );
+
+    ctx->worker_id[ WORKER_B ] = CreateTask( "WRKB", PRIO_HIGH );
+    StartTask( ctx->worker_id[ WORKER_B ], WorkerB, ctx );
+
+    WrapThreadQueueInitialize( &ctx->wrap_tq_ctx, RequestISR, ctx );
+  description: null
+test-stop: null
+test-support: |
+  typedef ${.:/test-context-type} Context;
+
+  typedef enum {
+    EVENT_OBTAIN = RTEMS_EVENT_0,
+    EVENT_RELEASE = RTEMS_EVENT_1,
+    EVENT_SYNC_RUNNER = RTEMS_EVENT_2,
+    EVENT_RESTART = RTEMS_EVENT_3
+  } Event;
+
+  static void Barriers( void *arg )
+  {
+    Context          *ctx;
+    SMP_barrier_State barrier_state;
+
+    ctx = arg;
+    _SMP_barrier_State_initialize( &barrier_state );
+
+    /* A */
+    _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+
+    /* B */
+    _SMP_barrier_Wait( &ctx->barrier, &barrier_state, 2 );
+  }
+
+  static void RequestISR( void *arg )
+  {
+    Context *ctx;
+
+    ctx = arg;
+    ctx->request.handler = Barriers;
+    ctx->request.arg = ctx;
+    CallWithinISRSubmit( &ctx->request );
+  }
+
+  static void SendAndSync( Context *ctx, WorkerIndex worker, Event event )
+  {
+    SendEvents( ctx->worker_id[ worker ], EVENT_SYNC_RUNNER | event );
+    ReceiveAllEvents( EVENT_SYNC_RUNNER );
+    WaitForExecutionStop( ctx->worker_id[ worker ] );
+  }
+
+  static void Worker( rtems_task_argument arg, WorkerIndex worker )
+  {
+    Context *ctx;
+
+    ctx = (Context *) arg;
+
+    while ( true ) {
+      rtems_event_set events;
+
+      events = ReceiveAnyEvents();
+
+      if ( ( events & EVENT_SYNC_RUNNER ) != 0 ) {
+        SendEvents( ctx->runner_id, EVENT_SYNC_RUNNER );
+      }
+
+      if ( ( events & EVENT_OBTAIN ) != 0 ) {
+        ObtainMutex( ctx->mutex_id );
+      }
+
+      if ( ( events & EVENT_RELEASE ) != 0 ) {
+        ReleaseMutex( ctx->mutex_id );
+      }
+
+      if ( ( events & EVENT_RESTART ) != 0 ) {
+        rtems_status_code sc;
+
+        T_eq_u32( rtems_scheduler_get_processor(), 0 );
+        SetPriority( ctx->runner_id, PRIO_VERY_HIGH );
+        T_eq_u32( rtems_scheduler_get_processor(), 1 );
+
+        WrapThreadQueueExtract(
+          &ctx->wrap_tq_ctx,
+          GetThread( ctx->worker_id[ WORKER_B ] )
+        );
+
+        sc = rtems_task_restart(
+          ctx->worker_id[ WORKER_B ],
+          (rtems_task_argument) ctx
+        );
+        T_rsc_success( sc );
+
+        T_eq_u32( rtems_scheduler_get_processor(), 0 );
+      }
+    }
+  }
+
+  static void WorkerA( rtems_task_argument arg )
+  {
+    Worker( arg, WORKER_A );
+  }
+
+  static void WorkerB( rtems_task_argument arg )
+  {
+    Worker( arg, WORKER_B );
+  }
+test-target: testsuites/validation/tc-sched-smp.c
+test-teardown:
+  brief: null
+  code: |
+    DeleteTask( ctx->worker_id[ WORKER_A ] );
+    DeleteTask( ctx->worker_id[ WORKER_B ] );
+    DeleteMutex( ctx->mutex_id );
+    RestoreRunnerPriority();
+    WrapThreadQueueDestroy( &ctx->wrap_tq_ctx );
+  description: null
+type: test-case
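
The runner and the Barriers() handler, which CallWithinISRSubmit() issues
from interrupt context, meet at the two sync points marked /* A */ and
/* B */ above.  A standalone sketch of this two-party SMP barrier
rendezvous, with illustrative names, looks like this:

  #include <rtems/score/smpbarrier.h>

  static SMP_barrier_Control barrier;

  static void RendezvousInit( void )
  {
    _SMP_barrier_Control_initialize( &barrier );
  }

  static void Rendezvous( void )
  {
    SMP_barrier_State state;

    _SMP_barrier_State_initialize( &state );

    /* Busy-waits until both participants have reached the barrier */
    _SMP_barrier_Wait( &barrier, &state, 2 );
  }

Each of the two participants calls Rendezvous() once per sync point; the
wait returns only after both have arrived, which ensures the processor is
removed at a well-defined point within the restart sequence.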


