[rtems commit] score: Fix scheduler helping protocol

Sebastian Huber sebh at rtems.org
Mon May 11 07:21:06 UTC 2015


Module:    rtems
Branch:    master
Commit:    be0366bb62ed4a804725a484ffd73242cd4f1d7b
Changeset: http://git.rtems.org/rtems/commit/?id=be0366bb62ed4a804725a484ffd73242cd4f1d7b

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Sun May 10 21:30:26 2015 +0200

score: Fix scheduler helping protocol

Account for priority changes of threads executing in a foreign
partition.  Exchange idle threads in case the victim node uses an idle
thread and the newly scheduled node needs an idle thread as well.
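
A small standalone sketch (not RTEMS code) of the idle thread exchange:
the structures below are simplified stand-ins for Scheduler_Node and
Thread_Control, and exchange_idle_thread() merely mirrors what the new
_Scheduler_Exchange_idle_thread() helper does.

    #include <assert.h>
    #include <stdio.h>

    typedef struct { const char *name; } thread;

    typedef struct {
      thread *owner; /* thread owning this scheduler node */
      thread *user;  /* thread currently acting on behalf of the node */
      thread *idle;  /* borrowed idle thread, or NULL */
    } node;

    /* Hand the idle thread over from the victim node to the node which
       needs it, instead of releasing and re-acquiring an idle thread. */
    static void exchange_idle_thread(node *needs_idle, node *uses_idle)
    {
      thread *idle = uses_idle->idle;

      assert(idle != NULL);
      assert(needs_idle->idle == NULL);

      uses_idle->idle = NULL;
      uses_idle->user = uses_idle->owner; /* victim falls back to its owner */
      needs_idle->idle = idle;
      needs_idle->user = idle;            /* new node now runs the idle thread */
    }

    int main(void)
    {
      thread a = { "A" }, b = { "B" }, idle = { "IDLE" };
      node victim = { &a, &idle, &idle };  /* victim node uses the idle thread */
      node incoming = { &b, &b, NULL };    /* newly scheduled node needs it */

      exchange_idle_thread(&incoming, &victim);

      printf("victim runs %s, incoming runs %s\n",
             victim.user->name, incoming.user->name);
      return 0;
    }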

---

 cpukit/score/include/rtems/score/mrspimpl.h        |   4 +-
 cpukit/score/include/rtems/score/schedulerimpl.h   |  84 +++-
 .../score/include/rtems/score/schedulersmpimpl.h   | 218 +++++----
 testsuites/smptests/smpmrsp01/init.c               | 224 ++++++++-
 testsuites/smptests/smpmrsp01/smpmrsp01.scn        | 504 +++++++++++----------
 5 files changed, 680 insertions(+), 354 deletions(-)
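
The reworked enqueue and schedule loops dispatch on a three-way
try-to-schedule action instead of a boolean and retry with the next ready
node while the action asks for blocking.  A compact standalone
illustration of that retry pattern (simplified names, not the actual
RTEMS types):

    #include <stdio.h>

    typedef enum { DO_SCHEDULE, DO_IDLE_EXCHANGE, DO_BLOCK } try_action;

    /* Stand-in for _Scheduler_Try_to_schedule_node(): pretend the first
       two candidates cannot be scheduled and the third one can. */
    static try_action try_to_schedule(int candidate)
    {
      return candidate < 2 ? DO_BLOCK : DO_SCHEDULE;
    }

    int main(void)
    {
      int candidate = 0;
      try_action action;

      do {
        action = try_to_schedule(candidate);

        if (action == DO_SCHEDULE) {
          printf("schedule candidate %d\n", candidate);
        } else {
          printf("block candidate %d and retry\n", candidate);
          ++candidate;
        }
      } while (action == DO_BLOCK);

      return 0;
    }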

diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index c1e05e4..c40f41f 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
  *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -230,7 +230,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain(
 )
 {
   MRSP_Status status;
-  const Scheduler_Control *scheduler = _Scheduler_Get( executing );
+  const Scheduler_Control *scheduler = _Scheduler_Get_own( executing );
   uint32_t scheduler_index = _Scheduler_Get_index( scheduler );
   Priority_Control initial_priority = executing->current_priority;
   Priority_Control ceiling_priority =
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index d11b2c5..212bace 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -10,7 +10,7 @@
 /*
  *  Copyright (C) 2010 Gedare Bloom.
  *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
- *  Copyright (c) 2014 embedded brains GmbH
+ *  Copyright (c) 2014-2015 embedded brains GmbH
  *
  *  The license and distribution terms for this file may be
  *  found in the file LICENSE in this distribution or at
@@ -950,6 +950,26 @@ void _Scheduler_Thread_change_resource_root(
   Thread_Control *root
 );
 
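+/**
+ * @brief Sets the idle thread and makes it the user of this scheduler node.
+ */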
+RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
+  Scheduler_Node *node,
+  Thread_Control *idle
+)
+{
+  _Assert(
+    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
+      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
+  );
+  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
+  _Assert(
+    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
+  );
+
+  _Scheduler_Thread_set_node( idle, node );
+
+  _Scheduler_Node_set_user( node, idle );
+  node->idle = idle;
+}
+
 /**
  * @brief Use an idle thread for this scheduler node.
  *
@@ -970,45 +990,44 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
 {
   Thread_Control *idle = ( *get_idle_thread )( context );
 
-  _Assert(
-    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
-      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
-  );
-  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
-  _Assert(
-    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
-  );
-
-  _Scheduler_Thread_set_node( idle, node );
-
-  _Scheduler_Node_set_user( node, idle );
-  node->idle = idle;
+  _Scheduler_Set_idle_thread( node, idle );
 
   return idle;
 }
 
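+/**
+ * @brief Actions a caller of _Scheduler_Try_to_schedule_node() has to
+ * carry out.
+ */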
+typedef enum {
+  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
+  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
+  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
+} Scheduler_Try_to_schedule_action;
+
 /**
  * @brief Try to schedule this scheduler node.
  *
  * @param[in] context The scheduler instance context.
  * @param[in] node The node which wants to get scheduled.
+ * @param[in] idle An idle thread in use by a potential victim node, or NULL.
  * @param[in] get_idle_thread Function to get an idle thread.
  *
- * @retval true This node can be scheduled.
- * @retval false Otherwise.
+ * @return The action which indicates how the caller shall deal with this
+ *   node.
  */
-RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
+RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
+_Scheduler_Try_to_schedule_node(
   Scheduler_Context         *context,
   Scheduler_Node            *node,
+  Thread_Control            *idle,
   Scheduler_Get_idle_thread  get_idle_thread
 )
 {
-  bool schedule;
+  Scheduler_Try_to_schedule_action action;
   Thread_Control *owner;
   Thread_Control *user;
 
+  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
+
   if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
-    return true;
+    return action;
   }
 
   owner = _Scheduler_Node_get_owner( node );
@@ -1018,32 +1037,33 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
     if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
       _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
     } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
-      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+      if ( idle != NULL ) {
+        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
+      } else {
+        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
+      }
     } else {
       _Scheduler_Node_set_user( node, owner );
     }
-
-    schedule = true;
   } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
     if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
       _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
+    } else if ( idle != NULL ) {
+      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
     } else {
       _Scheduler_Use_idle_thread( context, node, get_idle_thread );
     }
-
-    schedule = true;
   } else {
     _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
 
     if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
       _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
-      schedule = true;
     } else {
-      schedule = false;
+      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
     }
   }
 
-  return schedule;
+  return action;
 }
 
 /**
@@ -1078,6 +1098,20 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
   return idle;
 }
 
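+/**
+ * @brief Exchanges the idle thread between the scheduler node which uses it
+ * and the scheduler node which needs it.
+ *
+ * @param[in] needs_idle The scheduler node which needs the idle thread.
+ * @param[in] uses_idle The scheduler node which currently uses the idle
+ *   thread.
+ * @param[in] idle The idle thread to hand over.
+ */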
+RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
+  Scheduler_Node *needs_idle,
+  Scheduler_Node *uses_idle,
+  Thread_Control *idle
+)
+{
+  uses_idle->idle = NULL;
+  _Scheduler_Node_set_user(
+    uses_idle,
+    _Scheduler_Node_get_owner( uses_idle )
+  );
+  _Scheduler_Set_idle_thread( needs_idle, idle );
+}
+
 /**
  * @brief Block this scheduler node.
  *
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index e41c737..a395f2c 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -7,7 +7,7 @@
  */
 
 /*
- * Copyright (c) 2013-2014 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2013-2015 embedded brains GmbH.  All rights reserved.
  *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -532,41 +532,76 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_to_scheduled(
   Scheduler_SMP_Allocate_processor  allocate_processor
 )
 {
-  Thread_Control *user = _Scheduler_Node_get_user( node );
-  Thread_Control *lowest_scheduled_user =
-    _Scheduler_Node_get_user( lowest_scheduled );
   Thread_Control *needs_help;
-  Thread_Control *idle;
+  Scheduler_Try_to_schedule_action action;
 
-  _Scheduler_SMP_Node_change_state(
-    _Scheduler_SMP_Node_downcast( lowest_scheduled ),
-    SCHEDULER_SMP_NODE_READY
-  );
-  _Scheduler_Thread_change_state(
-    lowest_scheduled_user,
-    THREAD_SCHEDULER_READY
-  );
-
-  _Scheduler_Thread_set_node( user, node );
-
-  _Scheduler_SMP_Allocate_processor(
+  action = _Scheduler_Try_to_schedule_node(
     context,
     node,
-    lowest_scheduled,
-    allocate_processor
+    _Scheduler_Node_get_idle( lowest_scheduled ),
+    _Scheduler_SMP_Get_idle_thread
   );
 
-  ( *insert_scheduled )( context, node );
-  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+    Thread_Control *lowest_scheduled_user =
+      _Scheduler_Node_get_user( lowest_scheduled );
+    Thread_Control *idle;
 
-  idle = _Scheduler_Release_idle_thread(
-    context,
-    lowest_scheduled,
-    _Scheduler_SMP_Release_idle_thread
-  );
-  if ( idle == NULL ) {
-    needs_help = lowest_scheduled_user;
+    _Scheduler_SMP_Node_change_state(
+      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
+      SCHEDULER_SMP_NODE_READY
+    );
+    _Scheduler_Thread_change_state(
+      lowest_scheduled_user,
+      THREAD_SCHEDULER_READY
+    );
+
+    _Scheduler_SMP_Allocate_processor(
+      context,
+      node,
+      lowest_scheduled,
+      allocate_processor
+    );
+
+    ( *insert_scheduled )( context, node );
+    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+    idle = _Scheduler_Release_idle_thread(
+      context,
+      lowest_scheduled,
+      _Scheduler_SMP_Release_idle_thread
+    );
+    if ( idle == NULL ) {
+      needs_help = lowest_scheduled_user;
+    } else {
+      needs_help = NULL;
+    }
+  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
+    _Scheduler_SMP_Node_change_state(
+      _Scheduler_SMP_Node_downcast( lowest_scheduled ),
+      SCHEDULER_SMP_NODE_READY
+    );
+    _Scheduler_SMP_Node_change_state(
+      _Scheduler_SMP_Node_downcast( node ),
+      SCHEDULER_SMP_NODE_SCHEDULED
+    );
+
+    ( *insert_scheduled )( context, node );
+    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+    _Scheduler_Exchange_idle_thread(
+      node,
+      lowest_scheduled,
+      _Scheduler_Node_get_idle( lowest_scheduled )
+    );
+
+    needs_help = NULL;
   } else {
+    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+    _Scheduler_SMP_Node_change_state(
+      _Scheduler_SMP_Node_downcast( node ),
+      SCHEDULER_SMP_NODE_BLOCKED
+    );
     needs_help = NULL;
   }
 
@@ -660,7 +695,7 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
 {
   Thread_Control *needs_help;
+  Scheduler_Try_to_schedule_action action;
 
-  while ( true ) {
+  do {
     Scheduler_Node *highest_ready = ( *get_highest_ready )( context, node );
 
     /*
@@ -671,55 +706,80 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
       ( *insert_scheduled )( context, node );
 
       needs_help = NULL;
+      action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
+    } else {
 
-      break;
-    } else if (
-      _Scheduler_Try_to_schedule_node(
+      action = _Scheduler_Try_to_schedule_node(
         context,
         highest_ready,
+        _Scheduler_Node_get_idle( node ),
         _Scheduler_SMP_Get_idle_thread
-      )
-    ) {
-      Thread_Control *user = _Scheduler_Node_get_user( node );
-      Thread_Control *idle;
-
-      _Scheduler_SMP_Node_change_state(
-        _Scheduler_SMP_Node_downcast( node ),
-        SCHEDULER_SMP_NODE_READY
       );
-      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
 
-      _Scheduler_SMP_Allocate_processor(
-        context,
-        highest_ready,
-        node,
-        allocate_processor
-      );
+      if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+        Thread_Control *user = _Scheduler_Node_get_user( node );
+        Thread_Control *idle;
 
-      ( *insert_ready )( context, node );
-      ( *move_from_ready_to_scheduled )( context, highest_ready );
+        _Scheduler_SMP_Node_change_state(
+          _Scheduler_SMP_Node_downcast( node ),
+          SCHEDULER_SMP_NODE_READY
+        );
+        _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_READY );
+
+        _Scheduler_SMP_Allocate_processor(
+          context,
+          highest_ready,
+          node,
+          allocate_processor
+        );
+
+        ( *insert_ready )( context, node );
+        ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+        idle = _Scheduler_Release_idle_thread(
+          context,
+          node,
+          _Scheduler_SMP_Release_idle_thread
+        );
+        if ( idle == NULL ) {
+          needs_help = user;
+        } else {
+          needs_help = NULL;
+        }
+      } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
+        _Scheduler_SMP_Node_change_state(
+          _Scheduler_SMP_Node_downcast( node ),
+          SCHEDULER_SMP_NODE_READY
+        );
+        _Scheduler_SMP_Node_change_state(
+          _Scheduler_SMP_Node_downcast( highest_ready ),
+          SCHEDULER_SMP_NODE_SCHEDULED
+        );
+
+        ( *insert_ready )( context, node );
+        ( *move_from_ready_to_scheduled )( context, highest_ready );
+
+        _Scheduler_Exchange_idle_thread(
+          highest_ready,
+          node,
+          _Scheduler_Node_get_idle( node )
+        );
 
-      idle = _Scheduler_Release_idle_thread(
-        context,
-        node,
-        _Scheduler_SMP_Release_idle_thread
-      );
-      if ( idle == NULL ) {
-        needs_help = user;
-      } else {
         needs_help = NULL;
-      }
+      } else {
+        _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
 
-      break;
-    } else {
-      _Scheduler_SMP_Node_change_state(
-        _Scheduler_SMP_Node_downcast( highest_ready ),
-        SCHEDULER_SMP_NODE_BLOCKED
-      );
+        _Scheduler_SMP_Node_change_state(
+          _Scheduler_SMP_Node_downcast( highest_ready ),
+          SCHEDULER_SMP_NODE_BLOCKED
+        );
 
-      ( *extract_from_ready )( context, highest_ready );
+        ( *extract_from_ready )( context, highest_ready );
+
+        continue;
+      }
     }
-  }
+  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
 
   return needs_help;
 }
@@ -740,16 +800,18 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
   Scheduler_SMP_Allocate_processor  allocate_processor
 )
 {
-  while ( true ) {
+  Scheduler_Try_to_schedule_action action;
+
+  do {
     Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
 
-    if (
-      _Scheduler_Try_to_schedule_node(
-        context,
-        highest_ready,
-        _Scheduler_SMP_Get_idle_thread
-      )
-    ) {
+    action = _Scheduler_Try_to_schedule_node(
+      context,
+      highest_ready,
+      NULL,
+      _Scheduler_SMP_Get_idle_thread
+    );
+
+    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
       _Scheduler_SMP_Allocate_processor(
         context,
         highest_ready,
@@ -758,17 +820,19 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
       );
 
       ( *move_from_ready_to_scheduled )( context, highest_ready );
-
-      break;
     } else {
+      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
       _Scheduler_SMP_Node_change_state(
         _Scheduler_SMP_Node_downcast( highest_ready ),
         SCHEDULER_SMP_NODE_BLOCKED
       );
 
       ( *extract_from_ready )( context, highest_ready );
+
+      continue;
     }
-  }
+  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
 }
 
 /**
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
index a1aae86..1f07904 100644
--- a/testsuites/smptests/smpmrsp01/init.c
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2014-2015 embedded brains GmbH.  All rights reserved.
  *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -74,7 +74,6 @@ typedef struct {
 } test_context;
 
 static test_context test_instance = {
-  .barrier = SMP_BARRIER_CONTROL_INITIALIZER,
   .switch_lock = SMP_LOCK_INITIALIZER("test instance switch lock")
 };
 
@@ -87,6 +86,11 @@ static void busy_wait(void)
   }
 }
 
+static void barrier_init(test_context *ctx)
+{
+  _SMP_barrier_Control_initialize(&ctx->barrier);
+}
+
 static void barrier(test_context *ctx, SMP_barrier_State *bs)
 {
   _SMP_barrier_Wait(&ctx->barrier, bs, 2);
@@ -291,6 +295,7 @@ static void test_mrsp_obtain_and_release(test_context *ctx)
 
   change_prio(RTEMS_SELF, 3);
 
+  barrier_init(ctx);
   reset_switch_events(ctx);
 
   ctx->high_run[0] = false;
@@ -467,6 +472,219 @@ static void test_mrsp_obtain_and_release(test_context *ctx)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
 
+static void obtain_after_migration_worker(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+  assert_prio(RTEMS_SELF, 3);
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* Worker done (K) */
+  barrier(ctx, &barrier_state);
+
+  while (true) {
+    /* Wait for termination */
+  }
+}
+
+static void obtain_after_migration_high(rtems_task_argument arg)
+{
+  test_context *ctx = &test_instance;
+  rtems_status_code sc;
+  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+  assert_prio(RTEMS_SELF, 2);
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[1], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* Obtain done (I) */
+  barrier(ctx, &barrier_state);
+
+  /* Ready to release (J) */
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_task_suspend(RTEMS_SELF);
+  rtems_test_assert(0);
+}
+
+static void test_mrsp_obtain_after_migration(test_context *ctx)
+{
+  rtems_status_code sc;
+  rtems_task_priority prio;
+  rtems_id scheduler_id;
+  SMP_barrier_State barrier_state;
+
+  puts("test MrsP obtain after migration");
+
+  change_prio(RTEMS_SELF, 3);
+
+  barrier_init(ctx);
+  reset_switch_events(ctx);
+
+  /* Create tasks */
+
+  sc = rtems_task_create(
+    rtems_build_name('H', 'I', 'G', '0'),
+    2,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->high_task_id[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_create(
+    rtems_build_name('W', 'O', 'R', 'K'),
+    3,
+    RTEMS_MINIMUM_STACK_SIZE,
+    RTEMS_DEFAULT_MODES,
+    RTEMS_DEFAULT_ATTRIBUTES,
+    &ctx->worker_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* Create MrsP semaphore objects */
+
+  sc = rtems_semaphore_create(
+    rtems_build_name('M', 'R', 'S', 'P'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    3,
+    &ctx->mrsp_ids[0]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_create(
+    rtems_build_name('M', 'R', 'S', 'P'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    2,
+    &ctx->mrsp_ids[1]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_create(
+    rtems_build_name('M', 'R', 'S', 'P'),
+    1,
+    RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+      | RTEMS_BINARY_SEMAPHORE,
+    1,
+    &ctx->mrsp_ids[2]
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  prio = 4;
+  sc = rtems_semaphore_set_priority(
+    ctx->mrsp_ids[2],
+    ctx->scheduler_ids[1],
+    prio,
+    &prio
+  );
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+  rtems_test_assert(prio == 1);
+
+  /* Check executing task parameters */
+
+  sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
+
+  assert_prio(RTEMS_SELF, 3);
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 3);
+
+  /* Start other tasks */
+
+  sc = rtems_task_start(ctx->worker_ids[0], obtain_after_migration_worker, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_start(ctx->high_task_id[0], obtain_after_migration_high, 0);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  /* Obtain done (I) */
+  _SMP_barrier_State_initialize(&barrier_state);
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_task_suspend(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  /*
+   * Obtain a second MrsP semaphore and ensure that we change the priority
+   * of our own scheduler node and not the one we are currently using.
+   */
+
+  sc = rtems_semaphore_obtain(ctx->mrsp_ids[2], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  assert_prio(RTEMS_SELF, 1);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[2]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_resume(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  /* Ready to release (J) */
+  barrier(ctx, &barrier_state);
+
+  rtems_test_assert(rtems_get_current_processor() == 1);
+
+  /* Prepare barrier for worker */
+  barrier_init(ctx);
+  _SMP_barrier_State_initialize(&barrier_state);
+
+  sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  rtems_test_assert(rtems_get_current_processor() == 0);
+
+  print_switch_events(ctx);
+
+  /* Worker done (K) */
+  barrier(ctx, &barrier_state);
+
+  sc = rtems_task_delete(ctx->worker_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_task_delete(ctx->high_task_id[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_delete(ctx->mrsp_ids[1]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  sc = rtems_semaphore_delete(ctx->mrsp_ids[2]);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
 static void test_mrsp_flush_error(void)
 {
   rtems_status_code sc;
@@ -1034,6 +1252,7 @@ static void test_mrsp_various_block_and_unblock(test_context *ctx)
 
   change_prio(RTEMS_SELF, 4);
 
+  barrier_init(ctx);
   reset_switch_events(ctx);
 
   ctx->low_run[0] = false;
@@ -1637,6 +1856,7 @@ static void Init(rtems_task_argument arg)
   test_mrsp_deadlock_error(ctx);
   test_mrsp_multiple_obtain();
   test_mrsp_various_block_and_unblock(ctx);
+  test_mrsp_obtain_after_migration(ctx);
   test_mrsp_obtain_and_sleep_and_release(ctx);
   test_mrsp_obtain_and_release_with_help(ctx);
   test_mrsp_obtain_and_release(ctx);
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.scn b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
index 90184b5..19a4fbb 100644
--- a/testsuites/smptests/smpmrsp01/smpmrsp01.scn
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
@@ -23,9 +23,17 @@ test MrsP various block and unblock
 [0] MAIN -> HIG0 (prio   2, node HIG0)
 [1] MAIN -> HIG1 (prio   2, node HIG1)
 [1] HIG1 -> MAIN (prio   3, node WORK)
-[0] HIG0 -> IDLE (prio   4, node MAIN)
-[1] MAIN -> WORK (prio   3, node WORK)
+[0] HIG0 -> IDLE (prio 255, node IDLE)
 [0] IDLE -> MAIN (prio   4, node MAIN)
+[1] MAIN -> WORK (prio   3, node WORK)
+test MrsP obtain after migration
+[1] IDLE -> WORK (prio   3, node WORK)
+[0] MAIN -> HIG0 (prio   2, node HIG0)
+[1] WORK -> MAIN (prio   3, node WORK)
+[0] HIG0 -> IDLE (prio   2, node HIG0)
+[0] IDLE -> HIG0 (prio   2, node HIG0)
+[1] MAIN -> WORK (prio   3, node WORK)
+[0] HIG0 -> MAIN (prio   3, node MAIN)
 test MrsP obtain and sleep and release
 [0] MAIN ->  RUN (prio   2, node  RUN)
 [0]  RUN -> MAIN (prio   1, node MAIN)
@@ -42,290 +50,290 @@ test MrsP obtain and release with help
 [0]  RUN -> IDLE (prio   2, node MAIN)
 [1] MAIN -> HELP (prio   2, node HELP)
 [1] HELP -> MAIN (prio   2, node HELP)
-[0] IDLE -> MAIN (prio   3, node MAIN)
+[0] IDLE -> MAIN (prio   1, node MAIN)
 [1] MAIN -> HELP (prio   2, node HELP)
 test MrsP obtain and release
 [1] IDLE -> WORK (prio   4, node WORK)
 [1] WORK -> MAIN (prio   3, node WORK)
 [0] MAIN -> HIG0 (prio   1, node HIG0)
-[1] MAIN -> WORK (prio   4, node WORK)
+[1] MAIN -> WORK (prio   3, node WORK)
 [0] HIG0 -> MAIN (prio   2, node MAIN)
 test MrsP load
 worker[0]
-  sleep = 53
-  timeout = 3445
-  obtain[0] = 7240
-  obtain[1] = 5484
-  obtain[2] = 12983
-  obtain[3] = 9453
-  obtain[4] = 16142
-  obtain[5] = 12509
-  obtain[6] = 16471
-  obtain[7] = 14380
-  obtain[8] = 16566
-  obtain[9] = 16192
-  obtain[10] = 14868
-  obtain[11] = 18208
-  obtain[12] = 12505
-  obtain[13] = 19995
-  obtain[14] = 11155
-  obtain[15] = 20684
-  obtain[16] = 7288
-  obtain[17] = 22252
-  obtain[18] = 6476
-  obtain[19] = 18299
-  obtain[20] = 5711
-  obtain[21] = 17063
-  obtain[22] = 4791
-  obtain[23] = 14655
-  obtain[24] = 3452
-  obtain[25] = 10565
-  obtain[26] = 2912
-  obtain[27] = 8142
-  obtain[28] = 2090
-  obtain[29] = 5086
-  obtain[30] = 1145
-  obtain[31] = 1946
-  cpu[0] = 378475
-  cpu[1] = 64814
-  cpu[2] = 132133
-  cpu[3] = 138047
+  sleep = 7
+  timeout = 1780
+  obtain[0] = 607
+  obtain[1] = 443
+  obtain[2] = 988
+  obtain[3] = 659
+  obtain[4] = 1169
+  obtain[5] = 846
+  obtain[6] = 1267
+  obtain[7] = 854
+  obtain[8] = 1016
+  obtain[9] = 1079
+  obtain[10] = 1165
+  obtain[11] = 1020
+  obtain[12] = 767
+  obtain[13] = 925
+  obtain[14] = 792
+  obtain[15] = 881
+  obtain[16] = 439
+  obtain[17] = 1007
+  obtain[18] = 243
+  obtain[19] = 853
+  obtain[20] = 210
+  obtain[21] = 445
+  obtain[22] = 247
+  obtain[23] = 497
+  obtain[24] = 102
+  obtain[25] = 580
+  obtain[26] = 90
+  obtain[27] = 186
+  obtain[28] = 74
+  obtain[29] = 139
+  obtain[30] = 68
+  obtain[31] = 98
+  cpu[0] = 27776
+  cpu[1] = 2795
+  cpu[2] = 4397
+  cpu[3] = 4551
 worker[1]
   sleep = 1
-  timeout = 6
-  obtain[0] = 19
-  obtain[1] = 8
-  obtain[2] = 15
-  obtain[3] = 24
-  obtain[4] = 20
-  obtain[5] = 19
-  obtain[6] = 14
-  obtain[7] = 40
-  obtain[8] = 45
-  obtain[9] = 20
+  timeout = 0
+  obtain[0] = 1
+  obtain[1] = 0
+  obtain[2] = 3
+  obtain[3] = 0
+  obtain[4] = 0
+  obtain[5] = 0
+  obtain[6] = 0
+  obtain[7] = 0
+  obtain[8] = 0
+  obtain[9] = 0
   obtain[10] = 0
-  obtain[11] = 48
-  obtain[12] = 13
-  obtain[13] = 57
-  obtain[14] = 30
-  obtain[15] = 48
-  obtain[16] = 36
-  obtain[17] = 36
-  obtain[18] = 19
-  obtain[19] = 20
-  obtain[20] = 42
-  obtain[21] = 44
-  obtain[22] = 23
+  obtain[11] = 0
+  obtain[12] = 0
+  obtain[13] = 0
+  obtain[14] = 0
+  obtain[15] = 0
+  obtain[16] = 0
+  obtain[17] = 0
+  obtain[18] = 0
+  obtain[19] = 0
+  obtain[20] = 0
+  obtain[21] = 0
+  obtain[22] = 0
   obtain[23] = 0
   obtain[24] = 0
-  obtain[25] = 26
+  obtain[25] = 0
   obtain[26] = 0
   obtain[27] = 0
   obtain[28] = 0
   obtain[29] = 0
   obtain[30] = 0
   obtain[31] = 0
-  cpu[0] = 650
-  cpu[1] = 92
-  cpu[2] = 379
-  cpu[3] = 212
+  cpu[0] = 9
+  cpu[1] = 0
+  cpu[2] = 0
+  cpu[3] = 0
 worker[2]
-  sleep = 51
-  timeout = 3731
-  obtain[0] = 7182
-  obtain[1] = 5663
-  obtain[2] = 12945
-  obtain[3] = 9229
-  obtain[4] = 15592
-  obtain[5] = 12125
-  obtain[6] = 16767
-  obtain[7] = 14480
-  obtain[8] = 16620
-  obtain[9] = 16098
-  obtain[10] = 16409
-  obtain[11] = 18109
-  obtain[12] = 12995
-  obtain[13] = 19452
-  obtain[14] = 10719
-  obtain[15] = 20024
-  obtain[16] = 7769
-  obtain[17] = 21913
-  obtain[18] = 6636
-  obtain[19] = 18524
-  obtain[20] = 5952
-  obtain[21] = 16411
-  obtain[22] = 5228
-  obtain[23] = 14456
-  obtain[24] = 4292
-  obtain[25] = 11143
-  obtain[26] = 3019
-  obtain[27] = 8023
-  obtain[28] = 2006
-  obtain[29] = 4664
-  obtain[30] = 1109
-  obtain[31] = 1976
-  cpu[0] = 65356
-  cpu[1] = 381723
-  cpu[2] = 133444
-  cpu[3] = 134588
+  sleep = 5
+  timeout = 2083
+  obtain[0] = 740
+  obtain[1] = 489
+  obtain[2] = 1232
+  obtain[3] = 732
+  obtain[4] = 1361
+  obtain[5] = 1070
+  obtain[6] = 1334
+  obtain[7] = 997
+  obtain[8] = 1418
+  obtain[9] = 1087
+  obtain[10] = 1005
+  obtain[11] = 1088
+  obtain[12] = 865
+  obtain[13] = 1279
+  obtain[14] = 698
+  obtain[15] = 1152
+  obtain[16] = 339
+  obtain[17] = 1347
+  obtain[18] = 340
+  obtain[19] = 723
+  obtain[20] = 295
+  obtain[21] = 933
+  obtain[22] = 223
+  obtain[23] = 633
+  obtain[24] = 236
+  obtain[25] = 405
+  obtain[26] = 140
+  obtain[27] = 261
+  obtain[28] = 70
+  obtain[29] = 157
+  obtain[30] = 89
+  obtain[31] = 71
+  cpu[0] = 1931
+  cpu[1] = 35336
+  cpu[2] = 4338
+  cpu[3] = 4018
 worker[3]
   sleep = 1
-  timeout = 11
-  obtain[0] = 11
-  obtain[1] = 6
-  obtain[2] = 33
-  obtain[3] = 20
-  obtain[4] = 10
-  obtain[5] = 10
-  obtain[6] = 28
-  obtain[7] = 18
-  obtain[8] = 27
-  obtain[9] = 40
-  obtain[10] = 33
-  obtain[11] = 36
-  obtain[12] = 26
+  timeout = 1
+  obtain[0] = 0
+  obtain[1] = 0
+  obtain[2] = 3
+  obtain[3] = 0
+  obtain[4] = 5
+  obtain[5] = 0
+  obtain[6] = 0
+  obtain[7] = 0
+  obtain[8] = 0
+  obtain[9] = 0
+  obtain[10] = 0
+  obtain[11] = 0
+  obtain[12] = 0
   obtain[13] = 0
-  obtain[14] = 15
-  obtain[15] = 16
+  obtain[14] = 0
+  obtain[15] = 0
   obtain[16] = 0
-  obtain[17] = 18
+  obtain[17] = 0
   obtain[18] = 0
-  obtain[19] = 42
+  obtain[19] = 0
   obtain[20] = 0
-  obtain[21] = 88
+  obtain[21] = 0
   obtain[22] = 0
-  obtain[23] = 24
+  obtain[23] = 0
   obtain[24] = 0
   obtain[25] = 0
   obtain[26] = 0
-  obtain[27] = 28
+  obtain[27] = 0
   obtain[28] = 0
   obtain[29] = 0
-  obtain[30] = 31
+  obtain[30] = 0
   obtain[31] = 0
-  cpu[0] = 136
-  cpu[1] = 573
-  cpu[2] = 291
-  cpu[3] = 121
+  cpu[0] = 0
+  cpu[1] = 14
+  cpu[2] = 0
+  cpu[3] = 3
 worker[4]
-  sleep = 47
-  timeout = 3278
-  obtain[0] = 7397
-  obtain[1] = 5723
-  obtain[2] = 13399
-  obtain[3] = 9018
-  obtain[4] = 16575
-  obtain[5] = 12731
-  obtain[6] = 16571
-  obtain[7] = 14376
-  obtain[8] = 16786
-  obtain[9] = 17022
-  obtain[10] = 15889
-  obtain[11] = 19338
-  obtain[12] = 13240
-  obtain[13] = 19055
-  obtain[14] = 11533
-  obtain[15] = 22667
-  obtain[16] = 7521
-  obtain[17] = 21826
-  obtain[18] = 6320
-  obtain[19] = 18522
-  obtain[20] = 6874
-  obtain[21] = 16498
-  obtain[22] = 4983
-  obtain[23] = 14210
-  obtain[24] = 4019
-  obtain[25] = 11510
-  obtain[26] = 3425
-  obtain[27] = 8809
-  obtain[28] = 2002
-  obtain[29] = 5197
-  obtain[30] = 996
-  obtain[31] = 2276
-  cpu[0] = 20729
-  cpu[1] = 19760
-  cpu[2] = 343613
-  cpu[3] = 348561
+  sleep = 9
+  timeout = 2196
+  obtain[0] = 896
+  obtain[1] = 565
+  obtain[2] = 1443
+  obtain[3] = 936
+  obtain[4] = 1506
+  obtain[5] = 1028
+  obtain[6] = 1541
+  obtain[7] = 1088
+  obtain[8] = 1683
+  obtain[9] = 1494
+  obtain[10] = 1283
+  obtain[11] = 1075
+  obtain[12] = 1101
+  obtain[13] = 1038
+  obtain[14] = 758
+  obtain[15] = 1300
+  obtain[16] = 350
+  obtain[17] = 1180
+  obtain[18] = 396
+  obtain[19] = 1171
+  obtain[20] = 232
+  obtain[21] = 767
+  obtain[22] = 336
+  obtain[23] = 470
+  obtain[24] = 196
+  obtain[25] = 461
+  obtain[26] = 148
+  obtain[27] = 394
+  obtain[28] = 68
+  obtain[29] = 259
+  obtain[30] = 80
+  obtain[31] = 54
+  cpu[0] = 725
+  cpu[1] = 1001
+  cpu[2] = 25845
+  cpu[3] = 23032
 worker[5]
-  sleep = 61
-  timeout = 3183
-  obtain[0] = 7291
-  obtain[1] = 5782
-  obtain[2] = 13633
-  obtain[3] = 9864
-  obtain[4] = 16465
-  obtain[5] = 12581
-  obtain[6] = 17135
-  obtain[7] = 14616
-  obtain[8] = 16524
-  obtain[9] = 16472
-  obtain[10] = 15194
-  obtain[11] = 18038
-  obtain[12] = 13801
-  obtain[13] = 19959
-  obtain[14] = 11693
-  obtain[15] = 20770
-  obtain[16] = 7328
-  obtain[17] = 23222
-  obtain[18] = 7186
-  obtain[19] = 19739
-  obtain[20] = 6584
-  obtain[21] = 17450
-  obtain[22] = 5241
-  obtain[23] = 14808
-  obtain[24] = 4287
-  obtain[25] = 11387
-  obtain[26] = 3367
-  obtain[27] = 8149
-  obtain[28] = 1887
-  obtain[29] = 4969
-  obtain[30] = 1123
-  obtain[31] = 1695
-  cpu[0] = 19504
-  cpu[1] = 20069
-  cpu[2] = 346015
-  cpu[3] = 350953
+  sleep = 8
+  timeout = 2062
+  obtain[0] = 754
+  obtain[1] = 540
+  obtain[2] = 1318
+  obtain[3] = 886
+  obtain[4] = 1396
+  obtain[5] = 1030
+  obtain[6] = 1556
+  obtain[7] = 1126
+  obtain[8] = 1338
+  obtain[9] = 1061
+  obtain[10] = 1173
+  obtain[11] = 1396
+  obtain[12] = 1130
+  obtain[13] = 1189
+  obtain[14] = 867
+  obtain[15] = 1290
+  obtain[16] = 339
+  obtain[17] = 1177
+  obtain[18] = 396
+  obtain[19] = 915
+  obtain[20] = 236
+  obtain[21] = 1084
+  obtain[22] = 146
+  obtain[23] = 699
+  obtain[24] = 185
+  obtain[25] = 562
+  obtain[26] = 120
+  obtain[27] = 423
+  obtain[28] = 153
+  obtain[29] = 347
+  obtain[30] = 28
+  obtain[31] = 250
+  cpu[0] = 911
+  cpu[1] = 1018
+  cpu[2] = 23145
+  cpu[3] = 25154
 worker[6]
   sleep = 1
-  timeout = 15
-  obtain[0] = 26
-  obtain[1] = 22
-  obtain[2] = 45
-  obtain[3] = 32
-  obtain[4] = 45
-  obtain[5] = 76
-  obtain[6] = 49
-  obtain[7] = 64
-  obtain[8] = 99
-  obtain[9] = 70
-  obtain[10] = 55
-  obtain[11] = 48
-  obtain[12] = 39
-  obtain[13] = 28
-  obtain[14] = 60
-  obtain[15] = 48
-  obtain[16] = 17
-  obtain[17] = 74
-  obtain[18] = 38
-  obtain[19] = 60
-  obtain[20] = 63
-  obtain[21] = 66
-  obtain[22] = 23
-  obtain[23] = 48
+  timeout = 3
+  obtain[0] = 3
+  obtain[1] = 0
+  obtain[2] = 3
+  obtain[3] = 0
+  obtain[4] = 0
+  obtain[5] = 6
+  obtain[6] = 0
+  obtain[7] = 8
+  obtain[8] = 3
+  obtain[9] = 0
+  obtain[10] = 0
+  obtain[11] = 0
+  obtain[12] = 13
+  obtain[13] = 0
+  obtain[14] = 0
+  obtain[15] = 0
+  obtain[16] = 2
+  obtain[17] = 0
+  obtain[18] = 0
+  obtain[19] = 0
+  obtain[20] = 0
+  obtain[21] = 0
+  obtain[22] = 0
+  obtain[23] = 0
   obtain[24] = 0
-  obtain[25] = 78
+  obtain[25] = 0
   obtain[26] = 0
-  obtain[27] = 43
+  obtain[27] = 0
   obtain[28] = 0
   obtain[29] = 0
   obtain[30] = 0
-  obtain[31] = 32
-  cpu[0] = 71
-  cpu[1] = 39
-  cpu[2] = 1333
-  cpu[3] = 1254
+  obtain[31] = 0
+  cpu[0] = 0
+  cpu[1] = 11
+  cpu[2] = 42
+  cpu[3] = 24
 worker[7]
-  sleep = 1
+  sleep = 0
   timeout = 0
   obtain[0] = 0
   obtain[1] = 0
@@ -361,10 +369,10 @@ worker[7]
   obtain[31] = 0
   cpu[0] = 0
   cpu[1] = 0
-  cpu[2] = 1
+  cpu[2] = 0
   cpu[3] = 0
-migrations[0] = 437361
-migrations[1] = 437363
-migrations[2] = 441234
-migrations[3] = 433487
+migrations[0] = 20731
+migrations[1] = 20731
+migrations[2] = 20366
+migrations[3] = 21099
 *** END OF TEST SMPMRSP 1 ***
