[rtems commit] smp: Fix scheduler helping protocol

Sebastian Huber sebh at rtems.org
Mon Nov 24 10:58:12 UTC 2014


Module:    rtems
Branch:    master
Commit:    cceb19f4e50d7b52e02b39141a433cab75b90423
Changeset: http://git.rtems.org/rtems/commit/?id=cceb19f4e50d7b52e02b39141a433cab75b90423

Author:    Luca Bonato <lohathe at gmail.com>
Date:      Fri Nov 21 11:01:34 2014 +0100

smp: Fix scheduler helping protocol

New test case for smptests/smpmrsp01.

Fix _Scheduler_Block_node() in case the node is in the
SCHEDULER_HELP_ACTIVE_RIVAL helping state, for example after a
rtems_task_suspend() on a task waiting for a MrsP semaphore.
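
A minimal sketch of this rival case, using only Classic API calls that also
appear in the new test case; the task id and the surrounding setup (a rival
already blocked in rtems_semaphore_obtain() on a MrsP semaphore held by a
task on another processor) are assumptions for illustration:

#include <assert.h>
#include <rtems.h>

/*
 * Sketch only: "rival_id" is assumed to name a task that is already
 * blocked in rtems_semaphore_obtain() on a MrsP semaphore owned by a
 * task on another processor, so its scheduler node is in the
 * SCHEDULER_HELP_ACTIVE_RIVAL helping state.
 */
static void suspend_waiting_rival(rtems_id rival_id)
{
  rtems_status_code sc;

  /*
   * Blocking the waiting rival: if the rival is the current node user,
   * the node is handed over to the semaphore owner.
   */
  sc = rtems_task_suspend(rival_id);
  assert(sc == RTEMS_SUCCESSFUL);

  /* Unblocking must make the rival the node user again. */
  sc = rtems_task_resume(rival_id);
  assert(sc == RTEMS_SUCCESSFUL);
}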

Fix _Scheduler_Unblock_node() in case the node is in the
SCHEDULER_SMP_NODE_READY state, for example after a rtems_task_resume() on
a task owning or waiting for a MrsP semaphore.
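
A corresponding sketch for the ready case; "owner_id" and the setup (a
suspended task that owns a MrsP semaphore and whose scheduler node is
already in the SCHEDULER_SMP_NODE_READY state) are assumptions for
illustration, see unblock_ready_owner() in the new test case for the
timer-driven variant of this scenario:

#include <assert.h>
#include <rtems.h>

/*
 * Sketch only: "owner_id" is assumed to name a suspended task that owns a
 * MrsP semaphore and whose scheduler node is already in the
 * SCHEDULER_SMP_NODE_READY state, e.g. because a higher priority task
 * runs on its processor.
 */
static void resume_ready_owner(rtems_id owner_id)
{
  rtems_status_code sc;

  /*
   * The unblock must not enqueue the already ready node a second time;
   * it only reports that the owner may need help again.
   */
  sc = rtems_task_resume(owner_id);
  assert(sc == RTEMS_SUCCESSFUL);
}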

---

 cpukit/score/include/rtems/score/schedulerimpl.h   |  28 +--
 .../score/include/rtems/score/schedulersmpimpl.h   |  25 ++-
 testsuites/smptests/smpmrsp01/init.c               | 190 +++++++++++++++++++++
 testsuites/smptests/smpmrsp01/smpmrsp01.scn        |   1 +
 4 files changed, 226 insertions(+), 18 deletions(-)

diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 45a2f8d..b262b91 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -1081,6 +1081,7 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  */
 RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
   Scheduler_Context         *context,
+  Thread_Control            *thread,
   Scheduler_Node            *node,
   bool                       is_scheduled,
   Scheduler_Get_idle_thread  get_idle_thread
@@ -1088,25 +1089,24 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
 {
   bool block;
   Thread_Control *old_user = _Scheduler_Node_get_user( node );
-  Thread_Control *new_user;
+  Thread_Control *new_user = NULL;
 
   _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );
 
-  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
-    new_user = _Scheduler_Node_get_owner( node );
-
-    _Assert( new_user != old_user );
-    _Scheduler_Node_set_user( node, new_user );
-  } else if (
-    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
-      && is_scheduled
-  ) {
-    new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-  } else {
-    new_user = NULL;
+  if ( is_scheduled ) {
+    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
+      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+    } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
+      Thread_Control *owner = _Scheduler_Node_get_owner( node );
+
+      if ( thread == old_user && owner != old_user ) {
+        new_user = owner;
+        _Scheduler_Node_set_user( node, new_user );
+      }
+    }
   }
 
-  if ( new_user != NULL && is_scheduled ) {
+  if ( new_user != NULL ) {
     Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
 
     _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 156307d..0ddfce0 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -793,13 +793,17 @@ static inline void _Scheduler_SMP_Block(
 {
   Scheduler_SMP_Node *node = _Scheduler_SMP_Thread_get_node( thread );
   bool is_scheduled = node->state == SCHEDULER_SMP_NODE_SCHEDULED;
-  bool block = _Scheduler_Block_node(
+  bool block;
+
+  _Assert( is_scheduled || node->state == SCHEDULER_SMP_NODE_READY );
+
+  block = _Scheduler_Block_node(
     context,
+    thread,
     &node->Base,
     is_scheduled,
     _Scheduler_SMP_Get_idle_thread
   );
-
   if ( block ) {
     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
 
@@ -838,9 +842,22 @@ static inline Thread_Control *_Scheduler_SMP_Unblock(
   Thread_Control *needs_help;
 
   if ( unblock ) {
-    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+    if ( node->state != SCHEDULER_SMP_NODE_READY ) {
+      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+
+      needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+    } else {
+      _Assert( node->state == SCHEDULER_SMP_NODE_READY );
+      _Assert( node->Base.idle == NULL );
 
-    needs_help = ( *enqueue_fifo )( context, &node->Base, thread );
+      if ( node->Base.accepts_help == thread ) {
+        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_OWNER );
+        needs_help = thread;
+      } else {
+        _Assert( node->Base.help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
+        needs_help = NULL;
+      }
+    }
   } else {
     needs_help = NULL;
   }
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
index bfa5d98..b672728 100644
--- a/testsuites/smptests/smpmrsp01/init.c
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -54,6 +54,8 @@ typedef struct {
 typedef struct {
   rtems_id main_task_id;
   rtems_id migration_task_id;
+  rtems_id high_task_id;
+  rtems_id timer_id;
   rtems_id counting_sem_id;
   rtems_id mrsp_ids[MRSP_COUNT];
   rtems_id scheduler_ids[CPU_COUNT];
@@ -66,6 +68,7 @@ typedef struct {
   SMP_lock_Control switch_lock;
   size_t switch_index;
   switch_event switch_events[32];
+  volatile bool run;
 } test_context;
 
 static test_context test_instance = {
@@ -728,6 +731,192 @@ static void run_task(rtems_task_argument arg)
   }
 }
 
+static void ready_unlock_worker(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+  assert_prio(RTEMS_SELF, 4);
+
+  /* Obtain (F) */
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 4);
+
+  /* Done (G) */
+  barrier(ctx, &barrier_state);
+
+  rtems_task_suspend(RTEMS_SELF);
+  rtems_test_assert(0);
+}
+
+static void unblock_ready_timer(rtems_id timer_id, void *arg)
+{
+  test_context *ctx = arg;
+  rtems_status_code sc;
+
+  sc = rtems_task_start(
+    ctx->high_task_id,
+    run_task,
+    (rtems_task_argument) &ctx->run
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /*
+   * At this point the scheduler node of the main thread is in the
+   * SCHEDULER_SMP_NODE_READY state and a _Scheduler_SMP_Unblock() operation is
+   * performed.
+   */
+  sc = rtems_event_transient_send(ctx->main_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void unblock_ready_owner(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 3);
+
+  sc = rtems_timer_fire_after(ctx->timer_id, 2, unblock_ready_timer, ctx);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(!ctx->run);
+}
+
+static void unblock_ready_rival(test_context *ctx)
+{
+  rtems_status_code sc;
+  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+  sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* Worker obtain (F) */
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_task_wake_after(2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  while (!ctx->run) {
+    /* Do nothing */
+  }
+
+  sc = rtems_task_resume(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_suspend(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 4);
+
+  /* Worker done (G) */
+  barrier(ctx, &barrier_state);
+}
+
+static void test_mrsp_unblock_ready(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  puts("test MrsP unblock ready");
+
+  ctx->run = false;
+
+  change_prio(RTEMS_SELF, 4);
+
+  sc = rtems_semaphore_create(
+    rtems_build_name(' ', ' ', ' ', 'A'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    3,
+    &ctx->mrsp_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 4);
+
+  sc = rtems_task_create(
+    rtems_build_name('H', 'I', 'G', 'H'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->high_task_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_create(
+    rtems_build_name('W', 'O', 'R', 'K'),
+    4,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->worker_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_timer_create(
+    rtems_build_name('T', 'I', 'M', 'R'),
+    &ctx->timer_id
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  unblock_ready_owner(ctx);
+  unblock_ready_rival(ctx);
+
+  sc = rtems_timer_delete(ctx->timer_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->high_task_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  change_prio(RTEMS_SELF, 2);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
 static void test_mrsp_obtain_and_sleep_and_release(test_context *ctx)
 {
   rtems_status_code sc;
@@ -1232,6 +1421,7 @@ static void Init(rtems_task_argument arg)
   test_mrsp_unlock_order_error();
   test_mrsp_deadlock_error(ctx);
   test_mrsp_multiple_obtain();
+  test_mrsp_unblock_ready(ctx);
   test_mrsp_obtain_and_sleep_and_release(ctx);
   test_mrsp_obtain_and_release_with_help(ctx);
   test_mrsp_obtain_and_release(ctx);
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.scn b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
index 3315db1..a32d71f 100644
--- a/testsuites/smptests/smpmrsp01/smpmrsp01.scn
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
@@ -5,6 +5,7 @@ test MrsP nested obtain error
 test MrsP unlock order error
 test MrsP deadlock error
 test MrsP multiple obtain
+test MrsP unblock ready
 test MrsP obtain and sleep and release
 [0] MAIN ->  RUN (prio   2, node  RUN)
 [0]  RUN -> MAIN (prio   1, node MAIN)


