[rtems commit] score: Fix _Thread_queue_Flush_critical()

Sebastian Huber sebh at rtems.org
Thu Nov 16 13:40:52 UTC 2017


Module:    rtems
Branch:    master
Commit:    9c30c31e24069555a9e3e9ccae0d8af04fba35f5
Changeset: http://git.rtems.org/rtems/commit/?id=9c30c31e24069555a9e3e9ccae0d8af04fba35f5

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Thu Nov 16 14:38:07 2017 +0100

score: Fix _Thread_queue_Flush_critical()

The thread queue extract operations performed by
_Thread_queue_Flush_critical() may result in a priority change of the
thread queue owner. Carry out the scheduler priority update operation
for the owner once all waiters have been extracted. This is especially
important in SMP configurations.

Close #3236.
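
As a rough illustration (not part of the commit), the situation fixed
here can be reproduced with the Classic API along the lines of the new
test_flush_inheritance() test case below. This is a minimal sketch
assuming a uniprocessor configuration; the task names, priorities and
configuration values are made up for the example:

#include <rtems.h>
#include <assert.h>

static rtems_id mtx;

static void blocker(rtems_task_argument arg)
{
  rtems_status_code sc;

  (void) arg;

  /* Block on the inheritance mutex; the flush below extracts this
     waiter with RTEMS_UNSATISFIED. */
  sc = rtems_semaphore_obtain(mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  assert(sc == RTEMS_UNSATISFIED);
  (void) rtems_task_suspend(RTEMS_SELF);
}

rtems_task Init(rtems_task_argument arg)
{
  rtems_status_code   sc;
  rtems_id            task;
  rtems_task_priority prio;

  (void) arg;

  /* Run the owner at priority 3, like task M in the test. */
  sc = rtems_task_set_priority(RTEMS_SELF, 3, &prio);
  assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('M', 'T', 'X', ' '),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,
    &mtx
  );
  assert(sc == RTEMS_SUCCESSFUL);

  /* Become the mutex owner. */
  sc = rtems_semaphore_obtain(mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  assert(sc == RTEMS_SUCCESSFUL);

  /* Start a priority 1 task; it preempts us, blocks on the mutex and
     boosts our priority via the inheritance protocol. */
  sc = rtems_task_create(
    rtems_build_name('B', 'L', 'C', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task
  );
  assert(sc == RTEMS_SUCCESSFUL);
  sc = rtems_task_start(task, blocker, 0);
  assert(sc == RTEMS_SUCCESSFUL);

  /* The current priority now reflects the inherited priority 1. */
  sc = rtems_task_set_priority(RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &prio);
  assert(sc == RTEMS_SUCCESSFUL);
  assert(prio == 1);

  /* Flushing the mutex extracts the waiter; with this fix the owner
     priority drops back from 1 to 3. */
  sc = rtems_semaphore_flush(mtx);
  assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_priority(RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &prio);
  assert(sc == RTEMS_SUCCESSFUL);
  assert(prio == 3);

  (void) rtems_semaphore_release(mtx);
  rtems_shutdown_executive(0);
}

#define CONFIGURE_APPLICATION_DOES_NOT_NEED_CLOCK_DRIVER
#define CONFIGURE_MAXIMUM_TASKS 2
#define CONFIGURE_MAXIMUM_SEMAPHORES 1
#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_INIT
#include <rtems/confdefs.h>

Without the scheduler priority update added in this commit, the owner
could keep running with the inherited priority 1 after the flush, since
the extract operations alone did not update the scheduler priority.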

---

 cpukit/score/src/threadqflush.c       |  18 ++++--
 testsuites/smptests/smpmutex01/init.c | 103 +++++++++++++++++++++++-----------
 2 files changed, 84 insertions(+), 37 deletions(-)

diff --git a/cpukit/score/src/threadqflush.c b/cpukit/score/src/threadqflush.c
index 8135e67..ef4d6b1 100644
--- a/cpukit/score/src/threadqflush.c
+++ b/cpukit/score/src/threadqflush.c
@@ -66,13 +66,15 @@ size_t _Thread_queue_Flush_critical(
   Thread_queue_Context          *queue_context
 )
 {
-  size_t         flushed;
-  Chain_Control  unblock;
-  Chain_Node    *node;
-  Chain_Node    *tail;
+  size_t          flushed;
+  Chain_Control   unblock;
+  Thread_Control *owner;
+  Chain_Node     *node;
+  Chain_Node     *tail;
 
   flushed = 0;
   _Chain_Initialize_empty( &unblock );
+  owner = queue->owner;
 
   while ( true ) {
     Thread_queue_Heads *heads;
@@ -131,6 +133,14 @@ size_t _Thread_queue_Flush_critical(
       node = next;
     } while ( node != tail );
 
+    if ( owner != NULL ) {
+      ISR_lock_Context lock_context;
+
+      _Thread_State_acquire( owner, &lock_context );
+      _Scheduler_Update_priority( owner );
+      _Thread_State_release( owner, &lock_context );
+    }
+
     _Thread_Dispatch_enable( cpu_self );
   } else {
     _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
diff --git a/testsuites/smptests/smpmutex01/init.c b/testsuites/smptests/smpmutex01/init.c
index 7f64415..6586788 100644
--- a/testsuites/smptests/smpmutex01/init.c
+++ b/testsuites/smptests/smpmutex01/init.c
@@ -38,15 +38,16 @@ typedef enum {
   REQ_WAKE_UP_HELPER = RTEMS_EVENT_1,
   REQ_MTX_OBTAIN = RTEMS_EVENT_2,
   REQ_MTX_OBTAIN_TIMEOUT = RTEMS_EVENT_3,
-  REQ_MTX_RELEASE = RTEMS_EVENT_4,
-  REQ_MTX_2_OBTAIN = RTEMS_EVENT_5,
-  REQ_MTX_2_RELEASE = RTEMS_EVENT_6,
-  REQ_SEM_OBTAIN_RELEASE = RTEMS_EVENT_7,
-  REQ_SEM_RELEASE = RTEMS_EVENT_8,
-  REQ_SET_DONE = RTEMS_EVENT_9,
-  REQ_WAIT_FOR_DONE = RTEMS_EVENT_10,
-  REQ_SEND_EVENT_2 = RTEMS_EVENT_11,
-  REQ_SEND_EVENT_3 = RTEMS_EVENT_12
+  REQ_MTX_OBTAIN_UNSATISFIED = RTEMS_EVENT_4,
+  REQ_MTX_RELEASE = RTEMS_EVENT_5,
+  REQ_MTX_2_OBTAIN = RTEMS_EVENT_6,
+  REQ_MTX_2_RELEASE = RTEMS_EVENT_7,
+  REQ_SEM_OBTAIN_RELEASE = RTEMS_EVENT_8,
+  REQ_SEM_RELEASE = RTEMS_EVENT_9,
+  REQ_SET_DONE = RTEMS_EVENT_10,
+  REQ_WAIT_FOR_DONE = RTEMS_EVENT_11,
+  REQ_SEND_EVENT_2 = RTEMS_EVENT_12,
+  REQ_SEND_EVENT_3 = RTEMS_EVENT_13
 } request_id;
 
 typedef enum {
@@ -248,6 +249,14 @@ static void obtain_timeout(test_context *ctx)
   rtems_test_assert(sc == RTEMS_TIMEOUT);
 }
 
+static void obtain_unsatisfied(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+  rtems_test_assert(sc == RTEMS_UNSATISFIED);
+}
+
 static void release(test_context *ctx)
 {
   rtems_status_code sc;
@@ -256,6 +265,14 @@ static void release(test_context *ctx)
   rtems_test_assert(sc == RTEMS_SUCCESSFUL);
 }
 
+static void flush(test_context *ctx)
+{
+  rtems_status_code sc;
+
+  sc = rtems_semaphore_flush(ctx->mtx);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
 static void obtain_2(test_context *ctx)
 {
   rtems_status_code sc;
@@ -416,6 +433,11 @@ static void worker(rtems_task_argument arg)
       ++ctx->generation[id];
     }
 
+    if ((events & REQ_MTX_OBTAIN_UNSATISFIED) != 0) {
+      obtain_unsatisfied(ctx);
+      ++ctx->generation[id];
+    }
+
     if ((events & REQ_MTX_RELEASE) != 0) {
       release(ctx);
       ++ctx->generation[id];
@@ -463,11 +485,14 @@ static void test_init(test_context *ctx)
   start_task(ctx, A_1, worker, 1, SCHED_A);
   start_task(ctx, A_2_0, worker, 2, SCHED_A);
   start_task(ctx, A_2_1, worker, 2, SCHED_A);
-  start_task(ctx, B_4, worker, 4, SCHED_B);
-  start_task(ctx, B_5_0, worker, 5, SCHED_B);
-  start_task(ctx, B_5_1, worker, 5, SCHED_B);
   start_task(ctx, H_A, helper, 3, SCHED_A);
-  start_task(ctx, H_B, helper, 6, SCHED_B);
+
+  if (rtems_get_processor_count() >= PART_COUNT) {
+    start_task(ctx, B_4, worker, 4, SCHED_B);
+    start_task(ctx, B_5_0, worker, 5, SCHED_B);
+    start_task(ctx, B_5_1, worker, 5, SCHED_B);
+    start_task(ctx, H_B, helper, 6, SCHED_B);
+  }
 
   sc = rtems_semaphore_create(
     rtems_build_name('M', 'T', 'X', '1'),
@@ -510,6 +535,19 @@ static void test_simple_inheritance(test_context *ctx)
   check_generations(ctx, A_1, NONE);
 }
 
+static void test_flush_inheritance(test_context *ctx)
+{
+  assert_prio(ctx, M, 3);
+  obtain(ctx);
+  send_event(ctx, A_1, REQ_MTX_OBTAIN_UNSATISFIED);
+  check_generations(ctx, NONE, NONE);
+  assert_prio(ctx, M, 1);
+  flush(ctx);
+  check_generations(ctx, A_1, NONE);
+  assert_prio(ctx, M, 3);
+  release(ctx);
+}
+
 static void test_dequeue_order_one_scheduler_instance(test_context *ctx)
 {
   obtain(ctx);
@@ -909,33 +947,32 @@ static void test_omip_yield(test_context *ctx)
   check_generations(ctx, B_5_0, NONE);
 }
 
-static void test(void)
+static void test(test_context *ctx)
 {
-  test_context *ctx = &test_instance;
-
   test_init(ctx);
-  test_task_get_priority_not_defined(ctx);
-  test_simple_inheritance(ctx);
-  test_dequeue_order_one_scheduler_instance(ctx);
-  test_mixed_queue_two_scheduler_instances(ctx);
-  test_mixed_queue_two_scheduler_instances_sem_only(ctx);
-  test_simple_inheritance_two_scheduler_instances(ctx);
-  test_nested_inheritance_two_scheduler_instances(ctx);
-  test_dequeue_order_two_scheduler_instances(ctx);
-  test_omip_pre_emption(ctx);
-  test_omip_rescue(ctx);
-  test_omip_timeout(ctx);
-  test_omip_yield(ctx);
+
+  if (rtems_get_processor_count() >= PART_COUNT) {
+    test_task_get_priority_not_defined(ctx);
+    test_simple_inheritance(ctx);
+    test_dequeue_order_one_scheduler_instance(ctx);
+    test_mixed_queue_two_scheduler_instances(ctx);
+    test_mixed_queue_two_scheduler_instances_sem_only(ctx);
+    test_simple_inheritance_two_scheduler_instances(ctx);
+    test_nested_inheritance_two_scheduler_instances(ctx);
+    test_dequeue_order_two_scheduler_instances(ctx);
+    test_omip_pre_emption(ctx);
+    test_omip_rescue(ctx);
+    test_omip_timeout(ctx);
+    test_omip_yield(ctx);
+  }
+
+  test_flush_inheritance(ctx);
 }
 
 static void Init(rtems_task_argument arg)
 {
   TEST_BEGIN();
-
-  if (rtems_get_processor_count() >= PART_COUNT) {
-    test();
-  }
-
+  test(&test_instance);
   TEST_END();
   rtems_test_exit(0);
 }