[PATCH 3/4] score: Implement SMP specific priority queue
Sebastian Huber
sebastian.huber at embedded-brains.de
Thu Sep 3 12:01:22 UTC 2015
---
cpukit/sapi/include/confdefs.h | 7 +-
cpukit/score/include/rtems/score/threadq.h | 44 +++-
cpukit/score/include/rtems/score/threadqimpl.h | 16 ++
cpukit/score/src/thread.c | 3 +-
cpukit/score/src/threadinitialize.c | 4 +-
cpukit/score/src/threadqops.c | 70 +++++-
doc/user/smp.t | 54 ++++
testsuites/smptests/Makefile.am | 1 +
testsuites/smptests/configure.ac | 1 +
testsuites/smptests/smpmutex01/Makefile.am | 19 ++
testsuites/smptests/smpmutex01/init.c | 326 +++++++++++++++++++++++++
testsuites/smptests/smpmutex01/smpmutex01.doc | 14 ++
testsuites/smptests/smpmutex01/smpmutex01.scn | 2 +
13 files changed, 545 insertions(+), 16 deletions(-)
create mode 100644 testsuites/smptests/smpmutex01/Makefile.am
create mode 100644 testsuites/smptests/smpmutex01/init.c
create mode 100644 testsuites/smptests/smpmutex01/smpmutex01.doc
create mode 100644 testsuites/smptests/smpmutex01/smpmutex01.scn
diff --git a/cpukit/sapi/include/confdefs.h b/cpukit/sapi/include/confdefs.h
index 4b438ff..66c8c7e 100644
--- a/cpukit/sapi/include/confdefs.h
+++ b/cpukit/sapi/include/confdefs.h
@@ -1008,9 +1008,10 @@ const rtems_libio_helper rtems_fs_init_helper =
CONFIGURE_SCHEDULER_CONTROLS
};
+ #define CONFIGURE_SCHEDULER_COUNT RTEMS_ARRAY_SIZE( _Scheduler_Table )
+
#if defined(RTEMS_SMP)
- const size_t _Scheduler_Count =
- RTEMS_ARRAY_SIZE( _Scheduler_Table );
+ const size_t _Scheduler_Count = CONFIGURE_SCHEDULER_COUNT;
const Scheduler_Assignment _Scheduler_Assignments[] = {
#if defined(CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS)
@@ -2970,7 +2971,7 @@ const rtems_libio_helper rtems_fs_init_helper =
( \
_Configure_Object_RAM(_tasks, sizeof(Configuration_Thread_control)) \
+ _Configure_From_workspace(_Configure_Max_Objects(_tasks) \
- * sizeof(Thread_queue_Heads)) \
+ * THREAD_QUEUE_HEADS_SIZE(CONFIGURE_SCHEDULER_COUNT)) \
+ _Configure_Max_Objects(_number_FP_tasks) \
* _Configure_From_workspace(CONTEXT_FP_SIZE) \
)
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index 2b58310..a877c4e 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -42,6 +42,26 @@ extern "C" {
typedef struct _Thread_Control Thread_Control;
/**
+ * @brief Thread priority queue.
+ */
+typedef struct {
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Node to enqueue this queue in the FIFO chain of the corresponding
+ * heads structure.
+ *
+ * @see Thread_queue_Heads::Heads::Fifo.
+ */
+ Chain_Node Node;
+#endif
+
+ /**
+ * @brief The actual thread priority queue.
+ */
+ RBTree_Control Queue;
+} Thread_queue_Priority_queue;
+
+/**
* @brief Thread queue heads.
*
* Each thread is equipped with spare thread queue heads in case it is not
@@ -61,13 +81,19 @@ typedef struct _Thread_queue_Heads {
union {
/**
* @brief This is the FIFO discipline list.
+ *
+ * On SMP configurations this FIFO is used to enqueue the per scheduler
+ * instance priority queues of this structure. This ensures FIFO fairness
+ * among the highest priority thread of each scheduler instance.
*/
Chain_Control Fifo;
+#if !defined(RTEMS_SMP)
/**
* @brief This is the set of threads for priority discipline waiting.
*/
- RBTree_Control Priority;
+ Thread_queue_Priority_queue Priority;
+#endif
} Heads;
/**
@@ -81,8 +107,24 @@ typedef struct _Thread_queue_Heads {
* the thread queue heads dedicated to the thread queue of an object.
*/
Chain_Node Free_node;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief One priority queue per scheduler instance.
+ */
+ Thread_queue_Priority_queue Priority[ RTEMS_ZERO_LENGTH_ARRAY ];
+#endif
} Thread_queue_Heads;
+#if defined(RTEMS_SMP)
+ #define THREAD_QUEUE_HEADS_SIZE( scheduler_count ) \
+ ( sizeof( Thread_queue_Heads ) \
+ + ( scheduler_count ) * sizeof( Thread_queue_Priority_queue ) )
+#else
+ #define THREAD_QUEUE_HEADS_SIZE( scheduler_count ) \
+ sizeof( Thread_queue_Heads )
+#endif
+
typedef struct {
/**
* @brief The thread queue heads.
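
For illustration only (not part of the patch): the THREAD_QUEUE_HEADS_SIZE()
macro sizes the heads structure so that the Priority[ RTEMS_ZERO_LENGTH_ARRAY ]
member provides one Thread_queue_Priority_queue per scheduler instance.  Below
is a standalone sketch of the same sizing pattern; the types priority_queue and
queue_heads are invented simplifications, not the RTEMS types.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustration only: simplified stand-ins for the RTEMS types.  A real
 * Thread_queue_Priority_queue holds an RBTree_Control (and a Chain_Node on
 * SMP configurations); a single pointer is enough to show the sizing.
 */
typedef struct {
  void *queue_root;
} priority_queue;

typedef struct {
  void *free_chain;          /* stands in for the Free_chain/Free_node part */
  priority_queue Priority[]; /* one entry per scheduler instance            */
} queue_heads;

/* Mirrors THREAD_QUEUE_HEADS_SIZE( scheduler_count ) on SMP configurations */
#define QUEUE_HEADS_SIZE( scheduler_count ) \
  ( sizeof( queue_heads ) + ( scheduler_count ) * sizeof( priority_queue ) )

int main( void )
{
  size_t scheduler_count = 2;  /* e.g. the two scheduler instances in smpmutex01 */
  queue_heads *heads = calloc( 1, QUEUE_HEADS_SIZE( scheduler_count ) );
  size_t i;

  if ( heads == NULL ) {
    return 1;
  }

  /* the per scheduler instance priority queues start out empty */
  for ( i = 0; i < scheduler_count; ++i ) {
    heads->Priority[ i ].queue_root = NULL;
  }

  printf(
    "%zu bytes for %zu scheduler instances\n",
    QUEUE_HEADS_SIZE( scheduler_count ),
    scheduler_count
  );

  free( heads );
  return 0;
}

This is also why the confdefs.h hunk above reserves
THREAD_QUEUE_HEADS_SIZE(CONFIGURE_SCHEDULER_COUNT) bytes per task instead of
sizeof(Thread_queue_Heads).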
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 3828f41..bf01eb7 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -22,6 +22,7 @@
#include <rtems/score/threadq.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/rbtreeimpl.h>
+#include <rtems/score/scheduler.h>
#include <rtems/score/thread.h>
#ifdef __cplusplus
@@ -51,6 +52,21 @@ typedef struct {
#endif
} Thread_queue_Syslock_queue;
+RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
+ Thread_queue_Heads *heads
+)
+{
+#if defined(RTEMS_SMP)
+ size_t i;
+
+ for ( i = 0; i < _Scheduler_Count; ++i ) {
+ _RBTree_Initialize_empty( &heads->Priority[ i ].Queue );
+ }
+#endif
+
+ _Chain_Initialize_empty( &heads->Free_chain );
+}
+
RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_initialize(
Thread_queue_Queue *queue
)
diff --git a/cpukit/score/src/thread.c b/cpukit/score/src/thread.c
index e1d6d5c..1ad7a59 100644
--- a/cpukit/score/src/thread.c
+++ b/cpukit/score/src/thread.c
@@ -20,6 +20,7 @@
#include <rtems/score/threadimpl.h>
#include <rtems/score/interr.h>
+#include <rtems/score/scheduler.h>
#include <rtems/score/wkspace.h>
#define THREAD_OFFSET_ASSERT( field ) \
@@ -73,7 +74,7 @@ void _Thread_Initialize_information(
&information->Free_thread_queue_heads,
_Workspace_Allocate_or_fatal_error,
_Objects_Maximum_per_allocation( maximum ),
- sizeof( Thread_queue_Heads )
+ THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
);
}
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index 9a796e9..bdb4370 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -142,12 +142,12 @@ bool _Thread_Initialize(
&information->Free_thread_queue_heads,
_Workspace_Allocate,
_Objects_Extend_size( &information->Objects ),
- sizeof( *the_thread->Wait.spare_heads )
+ THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
);
if ( the_thread->Wait.spare_heads == NULL ) {
goto failed;
}
- _Chain_Initialize_empty( &the_thread->Wait.spare_heads->Free_chain );
+ _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );
/*
* Initialize the thread timer
diff --git a/cpukit/score/src/threadqops.c b/cpukit/score/src/threadqops.c
index d9dc944..07473f5 100644
--- a/cpukit/score/src/threadqops.c
+++ b/cpukit/score/src/threadqops.c
@@ -20,6 +20,7 @@
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/rbtreeimpl.h>
+#include <rtems/score/schedulerimpl.h>
static void _Thread_queue_Do_nothing_priority_change(
Thread_Control *the_thread,
@@ -150,22 +151,41 @@ static Thread_Control *_Thread_queue_FIFO_first(
return THREAD_CHAIN_NODE_TO_THREAD( first );
}
+static Thread_queue_Priority_queue *_Thread_queue_Priority_queue(
+ Thread_queue_Heads *heads,
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ return &heads->Priority[
+ _Scheduler_Get_index( _Scheduler_Get_own( the_thread ) )
+ ];
+#else
+ (void) the_thread;
+
+ return &heads->Heads.Priority;
+#endif
+}
+
static void _Thread_queue_Priority_priority_change(
Thread_Control *the_thread,
Priority_Control new_priority,
Thread_queue_Queue *queue
)
{
- Thread_queue_Heads *heads = queue->heads;
+ Thread_queue_Heads *heads = queue->heads;
+ Thread_queue_Priority_queue *priority_queue;
_Assert( heads != NULL );
+ priority_queue = _Thread_queue_Priority_queue( heads, the_thread );
+
_RBTree_Extract(
- &heads->Heads.Priority,
+ &priority_queue->Queue,
&the_thread->Wait.Node.RBTree
);
_RBTree_Insert(
- &heads->Heads.Priority,
+ &priority_queue->Queue,
&the_thread->Wait.Node.RBTree,
_Thread_queue_Compare_priority,
false
@@ -176,7 +196,11 @@ static void _Thread_queue_Priority_do_initialize(
Thread_queue_Heads *heads
)
{
+#if defined(RTEMS_SMP)
+ _Chain_Initialize_empty( &heads->Heads.Fifo );
+#else
_RBTree_Initialize_empty( &heads->Heads.Priority );
+#endif
}
static void _Thread_queue_Priority_do_enqueue(
@@ -184,8 +208,17 @@ static void _Thread_queue_Priority_do_enqueue(
Thread_Control *the_thread
)
{
+ Thread_queue_Priority_queue *priority_queue =
+ _Thread_queue_Priority_queue( heads, the_thread );
+
+#if defined(RTEMS_SMP)
+ if ( _RBTree_Is_empty( &priority_queue->Queue ) ) {
+ _Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
+ }
+#endif
+
_RBTree_Insert(
- &heads->Heads.Priority,
+ &priority_queue->Queue,
&the_thread->Wait.Node.RBTree,
_Thread_queue_Compare_priority,
false
@@ -197,10 +230,21 @@ static void _Thread_queue_Priority_do_extract(
Thread_Control *the_thread
)
{
+ Thread_queue_Priority_queue *priority_queue =
+ _Thread_queue_Priority_queue( heads, the_thread );
+
_RBTree_Extract(
- &heads->Heads.Priority,
+ &priority_queue->Queue,
&the_thread->Wait.Node.RBTree
);
+
+#if defined(RTEMS_SMP)
+ _Chain_Extract_unprotected( &priority_queue->Node );
+
+ if ( !_RBTree_Is_empty( &priority_queue->Queue ) ) {
+ _Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
+ }
+#endif
}
static void _Thread_queue_Priority_enqueue(
@@ -232,11 +276,19 @@ static Thread_Control *_Thread_queue_Priority_first(
Thread_queue_Heads *heads
)
{
- RBTree_Control *priority_queue = &heads->Heads.Priority;
- RBTree_Node *first;
+ Thread_queue_Priority_queue *priority_queue;
+ RBTree_Node *first;
+
+#if defined(RTEMS_SMP)
+ _Assert( !_Chain_Is_empty( &heads->Heads.Fifo ) );
+ priority_queue = (Thread_queue_Priority_queue *)
+ _Chain_First( &heads->Heads.Fifo );
+#else
+ priority_queue = &heads->Heads.Priority;
+#endif
- _Assert( !_RBTree_Is_empty( priority_queue ) );
- first = _RBTree_Minimum( priority_queue );
+ _Assert( !_RBTree_Is_empty( &priority_queue->Queue ) );
+ first = _RBTree_Minimum( &priority_queue->Queue );
return THREAD_RBTREE_NODE_TO_THREAD( first );
}
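
For illustration only (not part of the patch): the two-level discipline
implemented by _Thread_queue_Priority_do_enqueue(),
_Thread_queue_Priority_do_extract() and _Thread_queue_Priority_first() above
can be modelled with a small standalone program.  The names and data
structures below (priority_queue, fifo, enqueue(), dequeue()) are invented;
RTEMS uses a Chain_Control FIFO of red-black trees, and the real extract
operation also handles waiters that are not the head of their priority queue.
The program dequeues the waiters in the order 1, 4, 2, 5, i.e. the two
scheduler instances alternate and each contributes its highest priority waiter
first.

#include <stdio.h>

/* Toy model: one FIFO of per scheduler instance priority queues. */

#define SCHEDULER_COUNT 2
#define MAX_WAITERS 8

typedef struct {
  int priority[ MAX_WAITERS ]; /* sorted ascending: lower value = higher priority */
  size_t count;
} priority_queue;

static priority_queue queues[ SCHEDULER_COUNT ];
static size_t fifo[ SCHEDULER_COUNT ]; /* FIFO of non-empty queue indices */
static size_t fifo_count;

static void enqueue( size_t scheduler, int priority )
{
  priority_queue *q = &queues[ scheduler ];
  size_t i = q->count; /* demo only: no bounds checking against MAX_WAITERS */

  if ( q->count == 0 ) {
    fifo[ fifo_count++ ] = scheduler; /* first waiter: append the queue to the FIFO */
  }

  while ( i > 0 && q->priority[ i - 1 ] > priority ) {
    q->priority[ i ] = q->priority[ i - 1 ];
    --i;
  }

  q->priority[ i ] = priority;
  ++q->count;
}

static int dequeue( void )
{
  size_t scheduler = fifo[ 0 ];
  priority_queue *q = &queues[ scheduler ];
  int first = q->priority[ 0 ];
  size_t i;

  /* remove the highest priority waiter of the first queue in the FIFO */
  --q->count;
  for ( i = 0; i < q->count; ++i ) {
    q->priority[ i ] = q->priority[ i + 1 ];
  }

  /* rotate: drop the queue from the FIFO, re-append it if still non-empty */
  --fifo_count;
  for ( i = 0; i < fifo_count; ++i ) {
    fifo[ i ] = fifo[ i + 1 ];
  }

  if ( q->count > 0 ) {
    fifo[ fifo_count++ ] = scheduler;
  }

  return first;
}

int main( void )
{
  /* waiters arrive as (scheduler instance, priority) pairs */
  enqueue( 0, 2 );
  enqueue( 1, 5 );
  enqueue( 1, 4 );
  enqueue( 0, 1 );

  while ( fifo_count > 0 ) {
    printf( "dequeued priority %d\n", dequeue() );
  }

  return 0;
}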
diff --git a/doc/user/smp.t b/doc/user/smp.t
index ba700e0..53fd7b64 100644
--- a/doc/user/smp.t
+++ b/doc/user/smp.t
@@ -185,6 +185,60 @@ To set the scheduler of a task see @ref{Symmetric Multiprocessing Services
SCHEDULER_IDENT - Get ID of a scheduler} and @ref{Symmetric Multiprocessing
Services TASK_SET_SCHEDULER - Set scheduler of a task}.
+@subsection Task Priority Queues
+
+Due to the support for clustered/partitioned scheduling the task priority
+queues need special attention. It makes no sense to compare the priority
+values of two different scheduler instances. Thus, it is not possible to
+simply use one plain priority queue for tasks of different scheduler instances.
+
+One solution to this problem is to use two levels of queues. The top level
+queue provides FIFO ordering and contains priority queues. Each priority
+queue is associated with a scheduler instance and contains only tasks of this
+scheduler instance. Tasks are enqueued in the priority queue corresponding to
+their scheduler instance. If this priority queue was empty before the
+enqueue, it is appended to the FIFO. To dequeue a task, the highest priority
+task of the first priority queue in the FIFO is selected and this priority
+queue is removed from the FIFO. If the priority queue is still not empty, it
+is appended to the FIFO again. This yields FIFO fairness with respect to the
+highest priority task of each scheduler instance. See also @cite{Brandenburg,
+Björn B.: A fully preemptive multiprocessor semaphore protocol for
+latency-sensitive real-time applications. In Proceedings of the 25th Euromicro
+Conference on Real-Time Systems (ECRTS 2013), pages 292-302, 2013.
+@uref{http://www.mpi-sws.org/~bbb/papers/pdf/ecrts13b.pdf}}.
+
+Such a two-level queue may need a considerable amount of memory if fast
+enqueue and dequeue operations are desired (the demand depends on the
+scheduler instance count). To mitigate this problem an approach from the
+FreeBSD kernel was implemented in RTEMS. We have the invariant that a task
+can be enqueued on at most one task queue. Thus, we need only as many queues
+as we have tasks. Each task is equipped with a spare task queue which it can
+give to an object on demand. The task queue uses a dedicated memory space
+independent of the other memory used for the task itself. In case a task
+needs to block, there are two options
+
+@itemize @bullet
+@item the object already has a task queue, then the task enqueues itself to
+this already present queue and the spare task queue of the task is added to a
+list of free queues for this object, or
+@item otherwise the queue of the task is given to the object and the task
+enqueues itself to this queue.
+@end itemize
+
+In case the task is dequeued, there are two options
+
+@itemize @bullet
+@item the task is the last task on the queue, then it removes this queue from
+the object and reclaims it for its own purpose, or
+@item otherwise the task removes one queue from the free list of the object
+and reclaims it for its own purpose.
+@end itemize
+
+Since there are usually more objects than tasks, this actually reduces the
+memory demands. In addition the objects contain only a pointer to the task
+queue structure. This helps to hide implementation details and makes it
+possible to use self-contained synchronization objects in Newlib and GCC (C++
+and OpenMP run-time support).
+
@subsection Scheduler Helping Protocol
The scheduler provides a helping protocol to support locking protocols like
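
For illustration only (not part of the patch): the give and reclaim protocol
for the spare task queues described in the new documentation section can be
sketched as follows.  All names (heads, thread, object, block(), unblock())
are invented for this sketch; in RTEMS the free list is the Free_chain of the
Thread_queue_Heads and the spare heads pointer is the_thread->Wait.spare_heads.
The sketch only tracks ownership of the heads structures, not the actual wait
queues.

#include <assert.h>
#include <stddef.h>

typedef struct heads {
  struct heads *free_list;   /* singly linked list of donated spare heads */
  size_t waiter_count;       /* stands in for the actual wait queues      */
} heads;

typedef struct {
  heads *spare;              /* every ready thread owns exactly one spare */
} thread;

typedef struct {
  heads *heads_ptr;          /* objects store only a pointer */
} object;

static void block( object *obj, thread *t )
{
  if ( obj->heads_ptr == NULL ) {
    /* first waiter: donate the spare heads to the object */
    obj->heads_ptr = t->spare;
  } else {
    /* otherwise park the spare heads on the object's free list */
    t->spare->free_list = obj->heads_ptr->free_list;
    obj->heads_ptr->free_list = t->spare;
  }

  t->spare = NULL;
  ++obj->heads_ptr->waiter_count;
}

static void unblock( object *obj, thread *t )
{
  heads *h = obj->heads_ptr;

  --h->waiter_count;

  if ( h->waiter_count == 0 ) {
    /* last waiter reclaims the heads attached to the object */
    t->spare = h;
    obj->heads_ptr = NULL;
  } else {
    /* otherwise reclaim one heads structure from the free list */
    t->spare = h->free_list;
    h->free_list = t->spare->free_list;
  }

  t->spare->free_list = NULL;
}

int main( void )
{
  heads h0 = { NULL, 0 }, h1 = { NULL, 0 };
  thread t0 = { &h0 }, t1 = { &h1 };
  object mtx = { NULL };

  block( &mtx, &t0 );    /* t0 donates h0 to the object       */
  block( &mtx, &t1 );    /* h1 goes onto the free list        */
  unblock( &mtx, &t0 );  /* t0 reclaims a spare (h1)          */
  unblock( &mtx, &t1 );  /* last waiter takes the heads back  */

  assert( mtx.heads_ptr == NULL && t0.spare != NULL && t1.spare != NULL );
  return 0;
}

At the end the object owns no heads structure and each thread owns exactly one
spare again, possibly a different one than it started with.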
diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
index 4b81a20..92f4528 100644
--- a/testsuites/smptests/Makefile.am
+++ b/testsuites/smptests/Makefile.am
@@ -26,6 +26,7 @@ SUBDIRS += smplock01
SUBDIRS += smpmigration01
SUBDIRS += smpmigration02
SUBDIRS += smpmrsp01
+SUBDIRS += smpmutex01
SUBDIRS += smpschedaffinity01
SUBDIRS += smpschedaffinity02
SUBDIRS += smpschedaffinity03
diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
index 5aee6ec..27e8f9c 100644
--- a/testsuites/smptests/configure.ac
+++ b/testsuites/smptests/configure.ac
@@ -81,6 +81,7 @@ smplock01/Makefile
smpmigration01/Makefile
smpmigration02/Makefile
smpmrsp01/Makefile
+smpmutex01/Makefile
smppsxaffinity01/Makefile
smppsxaffinity02/Makefile
smppsxsignal01/Makefile
diff --git a/testsuites/smptests/smpmutex01/Makefile.am b/testsuites/smptests/smpmutex01/Makefile.am
new file mode 100644
index 0000000..1b9e01c
--- /dev/null
+++ b/testsuites/smptests/smpmutex01/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpmutex01
+smpmutex01_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpmutex01.scn smpmutex01.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpmutex01_OBJECTS)
+LINK_LIBS = $(smpmutex01_LDLIBS)
+
+smpmutex01$(EXEEXT): $(smpmutex01_OBJECTS) $(smpmutex01_DEPENDENCIES)
+ @rm -f smpmutex01$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpmutex01/init.c b/testsuites/smptests/smpmutex01/init.c
new file mode 100644
index 0000000..1b2a189
--- /dev/null
+++ b/testsuites/smptests/smpmutex01/init.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include "tmacros.h"
+
+const char rtems_test_name[] = "SMPMUTEX 1";
+
+#define SCHED_A rtems_build_name(' ', ' ', ' ', 'A')
+
+#define SCHED_B rtems_build_name(' ', ' ', ' ', 'B')
+
+#define PART_COUNT 2
+
+#define TASK_COUNT 8
+
+typedef enum {
+ REQ_WAKE_UP_MASTER = RTEMS_EVENT_0,
+ REQ_WAKE_UP_HELPER = RTEMS_EVENT_1,
+ REQ_MTX_OBTAIN = RTEMS_EVENT_2,
+ REQ_MTX_RELEASE = RTEMS_EVENT_3
+} request_id;
+
+typedef enum {
+ A_1,
+ A_2_0,
+ A_2_1,
+ M,
+ B_4,
+ B_5_0,
+ B_5_1,
+ H,
+ NONE
+} task_id;
+
+typedef struct {
+ rtems_id mtx;
+ rtems_id tasks[TASK_COUNT];
+ int generation[TASK_COUNT];
+ int expected_generation[TASK_COUNT];
+} test_context;
+
+static test_context test_instance;
+
+static void start_task(
+ test_context *ctx,
+ task_id id,
+ rtems_task_entry entry,
+ rtems_task_priority prio,
+ rtems_name scheduler
+)
+{
+ rtems_status_code sc;
+ rtems_id scheduler_id;
+
+ sc = rtems_task_create(
+ rtems_build_name('T', 'A', 'S', 'K'),
+ prio,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->tasks[id]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_scheduler_ident(scheduler, &scheduler_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->tasks[id], scheduler_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(ctx->tasks[id], entry, id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void send_event(test_context *ctx, task_id id, rtems_event_set events)
+{
+ rtems_status_code sc;
+
+ sc = rtems_event_send(ctx->tasks[id], events);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static rtems_event_set wait_for_events(void)
+{
+ rtems_event_set events;
+ rtems_status_code sc;
+
+ sc = rtems_event_receive(
+ RTEMS_ALL_EVENTS,
+ RTEMS_EVENT_ANY | RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT,
+ &events
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ return events;
+}
+
+static void sync_with_helper(test_context *ctx)
+{
+ rtems_event_set events;
+
+ send_event(ctx, H, REQ_WAKE_UP_HELPER);
+ events = wait_for_events();
+ rtems_test_assert(events == REQ_WAKE_UP_MASTER);
+}
+
+static void request(test_context *ctx, task_id id, request_id req)
+{
+ send_event(ctx, id, req);
+ sync_with_helper(ctx);
+}
+
+static void obtain(test_context *ctx)
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void release(test_context *ctx)
+{
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release(ctx->mtx);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void check_generations(test_context *ctx, task_id a, task_id b)
+{
+ size_t i;
+
+ if (a != NONE) {
+ ++ctx->expected_generation[a];
+ }
+
+ if (b != NONE) {
+ ++ctx->expected_generation[b];
+ }
+
+ for (i = 0; i < TASK_COUNT; ++i) {
+ rtems_test_assert(ctx->generation[i] == ctx->expected_generation[i]);
+ }
+}
+
+static void helper(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+
+ while (true) {
+ rtems_event_set events = wait_for_events();
+ rtems_test_assert(events == REQ_WAKE_UP_HELPER);
+ send_event(ctx, M, REQ_WAKE_UP_MASTER);
+ }
+}
+
+static void worker(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+ task_id id = arg;
+
+ while (true) {
+ rtems_event_set events = wait_for_events();
+
+ if ((events & REQ_MTX_OBTAIN) != 0) {
+ obtain(ctx);
+ ++ctx->generation[id];
+ }
+
+ if ((events & REQ_MTX_RELEASE) != 0) {
+ release(ctx);
+ ++ctx->generation[id];
+ }
+ }
+}
+
+static void test(void)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+
+ ctx->tasks[M] = rtems_task_self();
+ start_task(ctx, A_1, worker, 1, SCHED_A);
+ start_task(ctx, A_2_0, worker, 2, SCHED_A);
+ start_task(ctx, A_2_1, worker, 2, SCHED_A);
+ start_task(ctx, B_4, worker, 4, SCHED_B);
+ start_task(ctx, B_5_0, worker, 5, SCHED_B);
+ start_task(ctx, B_5_1, worker, 5, SCHED_B);
+ start_task(ctx, H, helper, 6, SCHED_B);
+
+ sc = rtems_semaphore_create(
+ rtems_build_name(' ', 'M', 'T', 'X'),
+ 1,
+ RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
+ 0,
+ &ctx->mtx
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ obtain(ctx);
+ request(ctx, A_1, REQ_MTX_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ release(ctx);
+ check_generations(ctx, A_1, NONE);
+ request(ctx, A_1, REQ_MTX_RELEASE);
+ check_generations(ctx, A_1, NONE);
+
+ obtain(ctx);
+ request(ctx, A_2_0, REQ_MTX_OBTAIN);
+ request(ctx, A_1, REQ_MTX_OBTAIN);
+ request(ctx, A_2_1, REQ_MTX_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ release(ctx);
+ check_generations(ctx, A_1, NONE);
+ request(ctx, A_1, REQ_MTX_RELEASE);
+ check_generations(ctx, A_1, A_2_0);
+ request(ctx, A_2_0, REQ_MTX_RELEASE);
+ check_generations(ctx, A_2_0, A_2_1);
+ request(ctx, A_2_1, REQ_MTX_RELEASE);
+ check_generations(ctx, A_2_1, NONE);
+
+ obtain(ctx);
+ request(ctx, B_5_0, REQ_MTX_OBTAIN);
+ request(ctx, B_4, REQ_MTX_OBTAIN);
+ request(ctx, B_5_1, REQ_MTX_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ release(ctx);
+ sync_with_helper(ctx);
+ check_generations(ctx, B_4, NONE);
+ request(ctx, B_4, REQ_MTX_RELEASE);
+ check_generations(ctx, B_4, B_5_0);
+ request(ctx, B_5_0, REQ_MTX_RELEASE);
+ check_generations(ctx, B_5_0, B_5_1);
+ request(ctx, B_5_1, REQ_MTX_RELEASE);
+ check_generations(ctx, B_5_1, NONE);
+
+ obtain(ctx);
+ request(ctx, A_2_0, REQ_MTX_OBTAIN);
+ request(ctx, B_5_0, REQ_MTX_OBTAIN);
+ request(ctx, B_5_1, REQ_MTX_OBTAIN);
+ request(ctx, B_4, REQ_MTX_OBTAIN);
+ request(ctx, A_2_1, REQ_MTX_OBTAIN);
+ request(ctx, A_1, REQ_MTX_OBTAIN);
+ check_generations(ctx, NONE, NONE);
+ release(ctx);
+ check_generations(ctx, A_1, NONE);
+ request(ctx, A_1, REQ_MTX_RELEASE);
+ check_generations(ctx, A_1, B_4);
+ request(ctx, B_4, REQ_MTX_RELEASE);
+ check_generations(ctx, B_4, A_2_0);
+ request(ctx, A_2_0, REQ_MTX_RELEASE);
+ check_generations(ctx, A_2_0, B_5_0);
+ request(ctx, B_5_0, REQ_MTX_RELEASE);
+ check_generations(ctx, B_5_0, A_2_1);
+ request(ctx, A_2_1, REQ_MTX_RELEASE);
+ check_generations(ctx, A_2_1, B_5_1);
+ request(ctx, B_5_1, REQ_MTX_RELEASE);
+ check_generations(ctx, B_5_1, NONE);
+}
+
+static void Init(rtems_task_argument arg)
+{
+ TEST_BEGIN();
+
+ if (rtems_get_processor_count() >= PART_COUNT) {
+ test();
+ }
+
+ TEST_END();
+ rtems_test_exit(0);
+}
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_USE_IMFS_AS_BASE_FILESYSTEM
+
+#define CONFIGURE_SMP_APPLICATION
+
+#define CONFIGURE_SMP_MAXIMUM_PROCESSORS PART_COUNT
+
+#define CONFIGURE_SCHEDULER_SIMPLE_SMP
+
+#include <rtems/scheduler.h>
+
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(a);
+
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);
+
+#define CONFIGURE_SCHEDULER_CONTROLS \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(a, SCHED_A), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_B)
+
+#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
+ RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
+ RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
+
+#define CONFIGURE_MAXIMUM_TASKS TASK_COUNT
+
+#define CONFIGURE_MAXIMUM_SEMAPHORES 1
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_INIT_TASK_PRIORITY 3
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpmutex01/smpmutex01.doc b/testsuites/smptests/smpmutex01/smpmutex01.doc
new file mode 100644
index 0000000..117d952
--- /dev/null
+++ b/testsuites/smptests/smpmutex01/smpmutex01.doc
@@ -0,0 +1,14 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpmutex01
+
+directives:
+
+ - _Thread_queue_Priority_do_enqueue()
+ - _Thread_queue_Priority_do_extract()
+ - _Thread_queue_Priority_first()
+
+concepts:
+
+ - Ensure that the thread queue priority discipline enforces FIFO fairness
+ among the highest priority thread of each scheduler instance.
diff --git a/testsuites/smptests/smpmutex01/smpmutex01.scn b/testsuites/smptests/smpmutex01/smpmutex01.scn
new file mode 100644
index 0000000..aad67a7
--- /dev/null
+++ b/testsuites/smptests/smpmutex01/smpmutex01.scn
@@ -0,0 +1,2 @@
+*** BEGIN OF TEST SMPMUTEX 1 ***
+*** END OF TEST SMPMUTEX 1 ***
--
1.8.4.5