[PATCH] smp: Fix scheduler helping protocol
Sebastian Huber
sebastian.huber at embedded-brains.de
Fri Nov 21 16:07:29 UTC 2014
From: Luca Bonato <lohathe at gmail.com>
New test case for smptests/smpmrsp01.
It is possible that a state change from SCHEDULER_SMP_NODE_READY to
SCHEDULER_SMP_NODE_READY happens in _Scheduler_SMP_Unblock(): due to the
scheduler helping protocol a thread may be unblocked while its scheduler
node is already in the ready state, for example a MrsP semaphore owner
whose node remains in use while the owner itself is blocked.  In this
case the node must not be enqueued a second time.
---
cpukit/score/include/rtems/score/schedulerimpl.h | 28 ++--
.../score/include/rtems/score/schedulersmpimpl.h | 25 ++-
testsuites/smptests/smpmrsp01/init.c | 174 +++++++++++++++++++++
testsuites/smptests/smpmrsp01/smpmrsp01.scn | 1 +
4 files changed, 210 insertions(+), 18 deletions(-)
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 45a2f8d..b262b91 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -1081,6 +1081,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
*/
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
Scheduler_Context *context,
+ Thread_Control *thread,
Scheduler_Node *node,
bool is_scheduled,
Scheduler_Get_idle_thread get_idle_thread
@@ -1088,25 +1089,24 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
{
bool block;
Thread_Control *old_user = _Scheduler_Node_get_user( node );
- Thread_Control *new_user;
+ Thread_Control *new_user = NULL;
_Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
- if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
- new_user = _Scheduler_Node_get_owner( node );
-
- _Assert( new_user != old_user );
- _Scheduler_Node_set_user( node, new_user );
- } else if (
- node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
- && is_scheduled
- ) {
- new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
- } else {
- new_user = NULL;
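+ /*
+ * A replacement user is needed only if the node is scheduled: an
+ * active owner keeps the node scheduled through an idle thread, while
+ * an active rival hands the node back to its owner.
+ */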
+ if ( is_scheduled ) {
+ if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+ new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+ } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+ Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
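+ /*
+ * Hand the node back to its owner, but only if the blocking thread is
+ * the current user and the owner does not use the node already.
+ */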
+ if ( thread == old_user && owner != old_user ) {
+ new_user = owner;
+ _Scheduler_Node_set_user( node, new_user );
+ }
+ }
}
- if ( new_user != NULL && is_scheduled ) {
+ if ( new_user != NULL ) {
Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
_Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 156307d..0ddfce0 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -793,13 +793,17 @@ static inline void _Scheduler_SMP_Block(
{
Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
- bool block = _Scheduler_Block_node(
+ bool block;
+
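+ /* Only scheduled or ready scheduler nodes can be blocked */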
+ _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );
+
+ block = _Scheduler_Block_node(
context,
+ thread,
&node->Base,
is_scheduled,
_Scheduler_SMP_Get_idle_thread
);
-
if ( block ) {
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
@@ -838,9 +842,22 @@ static inline Thread_Control *_Scheduler_SMP_Unblock(
Thread_Control *needs_help;
if ( unblock ) {
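+ /*
+ * Enqueue the node only on the transition from the blocked to the
+ * ready state.  Due to the helping protocol the node may be ready
+ * already, in which case it must not be enqueued a second time.
+ */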
- _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+ if ( node->state != SCHEDULER_SMP_NODE_READY ) {
+ _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );

- needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+ needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+ } else {
+ _Assert( node->state == SCHEDULER_SMP_NODE_READY );
+ _Assert( node->Base.idle == NULL );
+
+ if ( node->Base.accepts_help == thread ) {
+ _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+ needs_help = thread;
+ } else {
+ _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+ needs_help = NULL;
+ }
+ }
} else {
needs_help = NULL;
}
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
index bfa5d98..f6a98e2 100644
--- a/testsuites/smptests/smpmrsp01/init.c
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -54,6 +54,7 @@ typedef struct {
typedef struct {
rtems_id main_task_id;
rtems_id migration_task_id;
+ rtems_id high_task_id;
rtems_id counting_sem_id;
rtems_id mrsp_ids[MRSP_COUNT];
rtems_id scheduler_ids[CPU_COUNT];
@@ -66,6 +67,7 @@ typedef struct {
SMP_lock_Control switch_lock;
size_t switch_index;
switch_event switch_events[32];
+ volatile bool run;
} test_context;
static test_context test_instance = {
@@ -728,6 +730,177 @@ static void run_task(rtems_task_argument arg)
}
}
+static void ready_unlock_worker(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+ assert_prio(RTEMS_SELF, 4);
+
+ /* Obtain (F) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 4);
+
+ /* Done (G) */
+ barrier(ctx, &barrier_state);
+
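+ /* Suspend forever; the worker is deleted by the main task */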
+ rtems_task_suspend(RTEMS_SELF);
+ rtems_test_assert(0);
+}
+
+static void ready_unblock_timer(rtems_id timer_id, void *arg)
+{
+ test_context *ctx = arg;
+ rtems_status_code sc;
+
+ sc = rtems_task_start(
+ ctx->high_task_id,
+ run_task,
+ (rtems_task_argument) &ctx->run
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
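+ /*
+ * The main task owns the MrsP semaphore and waits for a transient
+ * event, so its scheduler node stays in use through an idle thread.
+ * Suspending and resuming the high priority task moves this node
+ * between the scheduled and ready states; it is ready afterwards.
+ */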
+ sc = rtems_task_suspend(ctx->high_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_resume(ctx->high_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /*
+ * At this point the scheduler node of the main thread is in the
+ * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
+ * performed.
+ */
+ sc = rtems_event_transient_send(ctx->main_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_suspend(ctx->high_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_ready_unblock(test_context *ctx)
+{
+ rtems_status_code sc;
+ rtems_id timer_id;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+ puts("test MrsP ready unblock");
+
+ ctx->run = false;
+
+ change_prio(RTEMS_SELF, 4);
+
+ sc = rtems_semaphore_create(
+ rtems_build_name(' ', ' ', ' ', 'A'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 3,
+ &ctx->mrsp_ids[0]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 4);
+
+ sc = rtems_task_create(
+ rtems_build_name('H', 'I', 'G', 'H'),
+ 2,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->high_task_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_create(
+ rtems_build_name('W', 'O', 'R', 'K'),
+ 4,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[0]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_timer_create(rtems_build_name('T', 'I', 'M', 'R'), &timer_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 3);
+
+ sc = rtems_timer_fire_after(timer_id, 2, ready_unblock_timer, ctx);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_test_assert(!ctx->run);
+
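+ /*
+ * Start the worker on the second scheduler.  It contends for the MrsP
+ * semaphore and thereby helps the main task as an active rival.
+ */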
+ sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* Worker obtain (F) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_suspend(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_resume(ctx->high_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
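+ /* Wait until the high priority task runs on the second processor */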
+ while (!ctx->run) {
+ /* Do nothing */
+ }
+
+ sc = rtems_task_resume(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_suspend(ctx->high_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 4);
+
+ /* Worker done (G) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_timer_delete(timer_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_delete(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_delete(ctx->high_task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ change_prio(RTEMS_SELF, 2);
+}
+
static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
{
rtems_status_code sc;
@@ -1232,6 +1405,7 @@ static void Init(rtems_task_argument arg)
test_mrsp_unlock_order_error();
test_mrsp_deadlock_error(ctx);
test_mrsp_multiple_obtain();
+ test_mrsp_ready_unblock(ctx);
test_mrsp_obtain_and_sleep_and_release(ctx);
test_mrsp_obtain_and_release_with_help(ctx);
test_mrsp_obtain_and_release(ctx);
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.scn b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
index 3315db1..2e1f32c 100644
--- a/testsuites/smptests/smpmrsp01/smpmrsp01.scn
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
@@ -5,6 +5,7 @@ test MrsP nested obtain error
test MrsP unlock order error
test MrsP deadlock error
test MrsP multiple obtain
+test MrsP ready unblock
test MrsP obtain and sleep and release
[0] MAIN -> RUN (prio 2, node RUN)
[0] RUN -> MAIN (prio 1, node MAIN)
--
1.8.4.5