[rtems-central commit] spec: Specify thread pinning
Sebastian Huber
sebh at rtems.org
Mon Sep 6 12:20:30 UTC 2021
Module: rtems-central
Branch: master
Commit: e7e15c38e902774d00ff5f58d0b3ab156f0114e5
Changeset: http://git.rtems.org/rtems-central/commit/?id=e7e15c38e902774d00ff5f58d0b3ab156f0114e5
Author: Sebastian Huber <sebastian.huber at embedded-brains.de>
Date: Fri Sep 3 17:36:44 2021 +0200
spec: Specify thread pinning
---
spec/score/thread/req/pinning-helping.yml | 16 ++
spec/score/thread/req/pinning-nested.yml | 14 ++
spec/score/thread/req/pinning-preemptible.yml | 15 ++
spec/score/thread/req/pinning-unpin-suspended.yml | 17 ++
spec/score/thread/req/pinning.yml | 19 ++
spec/score/thread/val/smp.yml | 245 ++++++++++++++++++++++
6 files changed, 326 insertions(+)
diff --git a/spec/score/thread/req/pinning-helping.yml b/spec/score/thread/req/pinning-helping.yml
new file mode 100644
index 0000000..8f5a160
--- /dev/null
+++ b/spec/score/thread/req/pinning-helping.yml
@@ -0,0 +1,16 @@
+SPDX-License-Identifier: CC-BY-SA-4.0
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+ uid: pinning
+functional-type: function
+rationale: null
+references: []
+requirement-type: functional
+text: |
+ While a thread is pinned, while the thread executes on a processor which is
+ not owned by its ${/glossary/scheduler-home:/term}, the thread shall only use
+ the ${/glossary/scheduler:/term} of the processor to which it is pinned.
+type: requirement
diff --git a/spec/score/thread/req/pinning-nested.yml b/spec/score/thread/req/pinning-nested.yml
new file mode 100644
index 0000000..8a6baa4
--- /dev/null
+++ b/spec/score/thread/req/pinning-nested.yml
@@ -0,0 +1,14 @@
+SPDX-License-Identifier: CC-BY-SA-4.0
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+ uid: pinning
+functional-type: function
+rationale: null
+references: []
+requirement-type: functional
+text: |
+ A thread may be pinned more than once at a time.
+type: requirement
diff --git a/spec/score/thread/req/pinning-preemptible.yml b/spec/score/thread/req/pinning-preemptible.yml
new file mode 100644
index 0000000..8fbea97
--- /dev/null
+++ b/spec/score/thread/req/pinning-preemptible.yml
@@ -0,0 +1,15 @@
+SPDX-License-Identifier: CC-BY-SA-4.0
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+ uid: pinning
+functional-type: function
+rationale: null
+references: []
+requirement-type: functional
+text: |
+ While a thread is pinned, the thread may be preempted by a
+ ${/glossary/scheduler:/term}.
+type: requirement
diff --git a/spec/score/thread/req/pinning-unpin-suspended.yml b/spec/score/thread/req/pinning-unpin-suspended.yml
new file mode 100644
index 0000000..dc9f47b
--- /dev/null
+++ b/spec/score/thread/req/pinning-unpin-suspended.yml
@@ -0,0 +1,17 @@
+SPDX-License-Identifier: CC-BY-SA-4.0
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+ uid: pinning
+functional-type: function
+rationale: |
+ Unpinning the thread is done with thread dispatching disabled. Other
+ processors or interrupts may suspend the thread while it runs with thread
+ dispatching disabled.
+references: []
+requirement-type: functional
+text: |
+ While a thread is suspended, the thread may be unpinned.
+type: requirement
diff --git a/spec/score/thread/req/pinning.yml b/spec/score/thread/req/pinning.yml
new file mode 100644
index 0000000..a465ad3
--- /dev/null
+++ b/spec/score/thread/req/pinning.yml
@@ -0,0 +1,19 @@
+SPDX-License-Identifier: CC-BY-SA-4.0
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links:
+- role: requirement-refinement
+ uid: ../if/group
+non-functional-type: design
+rationale: |
+ The pinning of threads to a processor is an important feature to support
+ dynamically allocated lock-free data structures. It allows efficient and
+ safe access to processor-specific data structures. It is used for example to
+ implement the Epoch Based Reclamation in libbsd. The libbsd is a port of
+ FreeBSD kernel modules to RTEMS.
+references: []
+requirement-type: non-functional
+text: |
+ Pinning of threads to a processor shall be supported.
+type: requirement
diff --git a/spec/score/thread/val/smp.yml b/spec/score/thread/val/smp.yml
new file mode 100644
index 0000000..61ad017
--- /dev/null
+++ b/spec/score/thread/val/smp.yml
@@ -0,0 +1,245 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+copyrights:
+- Copyright (C) 2021 embedded brains GmbH (http://www.embedded-brains.de)
+enabled-by: RTEMS_SMP
+links: []
+test-actions:
+- action-brief: |
+    Create three worker threads and a mutex.  Use the mutex and worker A to
+    move the runner to a helping scheduler.
+ action-code: |
+ rtems_status_code sc;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+
+ executing = _Thread_Get_executing();
+ SetSelfPriority( PRIO_NORMAL );
+ ctx->counter = 0;
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_A_NAME, &ctx->scheduler_a_id );
+ T_rsc_success( sc );
+
+ sc = rtems_scheduler_ident( TEST_SCHEDULER_B_NAME, &ctx->scheduler_b_id );
+ T_rsc_success( sc );
+
+ ctx->mutex_id = CreateMutex();
+
+ ctx->worker_a_id = CreateTask( "WRKA", PRIO_NORMAL );
+ SetScheduler( ctx->worker_a_id, ctx->scheduler_b_id, PRIO_NORMAL );
+ StartTask( ctx->worker_a_id, WorkerTask, ctx );
+
+ ctx->worker_b_id = CreateTask( "WRKB", PRIO_HIGH );
+ StartTask( ctx->worker_b_id, WorkerTask, ctx );
+
+ ctx->worker_c_id = CreateTask( "WRKC", PRIO_LOW );
+ StartTask( ctx->worker_c_id, WorkerTask, ctx );
+
+ ObtainMutex( ctx->mutex_id );
+ SendEvents( ctx->worker_a_id, EVENT_OBTAIN | EVENT_RELEASE );
+
+ ctx->busy = true;
+ SendEvents( ctx->worker_b_id, EVENT_BUSY );
+ checks:
+ - brief: |
+ Pin the runner thread while it executes on a processor owned by a
+ helping scheduler.
+ code: |
+ T_eq_u32( rtems_scheduler_get_processor(), 1 );
+ _Thread_Pin( executing );
+ links:
+ - role: validation
+ uid: ../req/pinning-helping
+ - brief: |
+ Pin and unpin the runner thread. This is a nested operation.
+ code: |
+ T_eq_u32( rtems_scheduler_get_processor(), 1 );
+ _Thread_Pin( executing );
+ _Thread_Unpin( executing, _Per_CPU_Get_snapshot() );
+ links:
+ - role: validation
+ uid: ../req/pinning-nested
+  - brief: |
+      Preempt the pinned runner thread.  Workers B and C execute at the same
+      time on processors 0 and 1 respectively for some period of time.  This
+      shows that the pinning of the runner thread is maintained.
+ code: |
+ ctx->busy = false;
+ SetScheduler( ctx->worker_b_id, ctx->scheduler_b_id, PRIO_HIGH );
+ SendEvents( ctx->worker_b_id, EVENT_LET_WORKER_C_COUNT );
+
+ T_eq_u32( rtems_scheduler_get_processor(), 1 );
+ T_eq_u32( ctx->counter, 1 );
+ links:
+ - role: validation
+ uid: ../req/pinning-preemptible
+ - brief: |
+ Unpin the runner thread. The runner moves back to its home scheduler.
+ code: |
+ cpu_self = _Thread_Dispatch_disable();
+ _Thread_Unpin( executing, cpu_self );
+ _Thread_Dispatch_direct( cpu_self );
+
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+ links:
+ - role: validation
+ uid: ../req/pinning-nested
+ - brief: |
+ Release the mutex.
+ code: |
+ ReleaseMutex( ctx->mutex_id);
+ T_eq_u32( rtems_scheduler_get_processor(), 0 );
+ links: []
+ - brief: |
+ Pin the runner thread. Unpin the runner thread while it is suspended.
+ code: |
+ _Thread_Pin( executing );
+
+ cpu_self = _Thread_Dispatch_disable();
+ CallWithinISR( Suspend, executing );
+ _Thread_Unpin( executing, cpu_self );
+ CallWithinISR( Resume, executing );
+ _Thread_Dispatch_direct( cpu_self );
+ links:
+ - role: validation
+ uid: ../req/pinning-unpin-suspended
+ - brief: |
+ Make sure the worker released the mutex.
+ code: |
+ SetSelfScheduler( ctx->scheduler_b_id, PRIO_LOW );
+ SetSelfScheduler( ctx->scheduler_a_id, PRIO_NORMAL );
+ links: []
+ - brief: |
+ Clean up all used resources.
+ code: |
+ DeleteTask( ctx->worker_a_id );
+ DeleteTask( ctx->worker_b_id );
+ DeleteTask( ctx->worker_c_id );
+ DeleteMutex( ctx->mutex_id );
+ RestoreRunnerPriority();
+ links: []
+ links: []
+test-brief: |
+ Tests SMP-specific thread behaviour.
+test-context:
+- brief: |
+ This member contains the scheduler A identifier.
+ description: null
+ member: |
+ rtems_id scheduler_a_id
+- brief: |
+ This member contains the scheduler B identifier.
+ description: null
+ member: |
+ rtems_id scheduler_b_id
+- brief: |
+ This member contains the worker A identifier.
+ description: null
+ member: |
+ rtems_id worker_a_id
+- brief: |
+ This member contains the worker B identifier.
+ description: null
+ member: |
+ rtems_id worker_b_id
+- brief: |
+ This member contains the worker C identifier.
+ description: null
+ member: |
+ rtems_id worker_c_id
+- brief: |
+ This member contains the mutex identifier.
+ description: null
+ member: |
+ rtems_id mutex_id
+- brief: |
+ If this member is true, then the worker shall busy wait.
+ description: null
+ member: |
+ volatile bool busy
+- brief: |
+ This member contains a counter for EVENT_COUNT.
+ description: null
+ member: |
+ volatile uint32_t counter
+test-context-support: null
+test-description: null
+test-header: null
+test-includes:
+- rtems.h
+- rtems/score/threadimpl.h
+test-local-includes:
+- ts-config.h
+- tx-support.h
+test-setup: null
+test-stop: null
+test-support: |
+ typedef ${.:/test-context-type} Context;
+
+ typedef enum {
+ EVENT_OBTAIN = RTEMS_EVENT_0,
+ EVENT_RELEASE = RTEMS_EVENT_1,
+ EVENT_BUSY = RTEMS_EVENT_2,
+ EVENT_COUNT = RTEMS_EVENT_3,
+ EVENT_LET_WORKER_C_COUNT = RTEMS_EVENT_4
+ } Event;
+
+ static void WorkerTask( rtems_task_argument arg )
+ {
+ Context *ctx;
+
+ ctx = (Context *) arg;
+
+ while ( true ) {
+ rtems_event_set events;
+
+ events = ReceiveAnyEvents();
+
+ if ( ( events & EVENT_OBTAIN ) != 0 ) {
+ ObtainMutex( ctx->mutex_id );
+ }
+
+ if ( ( events & EVENT_RELEASE ) != 0 ) {
+ ReleaseMutex( ctx->mutex_id );
+ }
+
+ if ( ( events & EVENT_BUSY ) != 0 ) {
+ while ( ctx->busy ) {
+ /* Do nothing */
+ }
+ }
+
+ if ( ( events & EVENT_COUNT ) != 0 ) {
+ ++ctx->counter;
+ }
+
+ if ( ( events & EVENT_LET_WORKER_C_COUNT ) != 0 ) {
+ uint32_t counter;
+
+ counter = ctx->counter;
+ SendEvents( ctx->worker_c_id, EVENT_COUNT );
+
+ while ( ctx->counter == counter ) {
+ /* Wait */
+ }
+ }
+ }
+ }
+
+ static void Suspend( void *arg )
+ {
+ Thread_Control *thread;
+
+ thread = arg;
+ SuspendTask( thread->Object.id );
+ }
+
+ static void Resume( void *arg )
+ {
+ Thread_Control *thread;
+
+ thread = arg;
+ ResumeTask( thread->Object.id );
+ }
+test-target: testsuites/validation/tc-score-smp-thread.c
+test-teardown: null
+type: test-case
More information about the vc
mailing list