[PATCH] testsupport: Add cascade option to parallel test

Alexander Krutwig alexander.krutwig at embedded-brains.de
Fri Mar 6 15:13:40 UTC 2015


---
 cpukit/libmisc/testsupport/test.h         | 25 ++++++++-
 cpukit/libmisc/testsupport/testparallel.c | 30 +++++++----
 testsuites/smptests/smpatomic01/init.c    | 90 ++++++++++++++++++++-----------
 3 files changed, 104 insertions(+), 41 deletions(-)

diff --git a/cpukit/libmisc/testsupport/test.h b/cpukit/libmisc/testsupport/test.h
index afed462..ae6c17e 100644
--- a/cpukit/libmisc/testsupport/test.h
+++ b/cpukit/libmisc/testsupport/test.h
@@ -141,13 +141,16 @@ typedef struct {
    *
    * @param[in] ctx The parallel context.
    * @param[in] arg The user specified argument.
+   * @param[in] active_workers Count of active workers.  Depends on the cascade
+   *   option.
    *
    * @return The desired job body execution time in clock ticks.  See
    *   rtems_test_parallel_stop_job().
    */
   rtems_interval (*init)(
     rtems_test_parallel_context *ctx,
-    void *arg
+    void *arg,
+    size_t active_workers
   );
 
   /**
@@ -155,12 +158,15 @@ typedef struct {
    *
    * @param[in] ctx The parallel context.
    * @param[in] arg The user specified argument.
+   * @param[in] active_workers Count of active workers.  Depends on the cascade
+   *   option.
    * @param[in] worker_index The worker index.  It ranges from 0 to the
    *   processor count minus one.
    */
   void (*body)(
     rtems_test_parallel_context *ctx,
     void *arg,
+    size_t active_workers,
     size_t worker_index
   );
 
@@ -172,13 +178,28 @@ typedef struct {
    *
    * @param[in] ctx The parallel context.
    * @param[in] arg The user specified argument.
+   * @param[in] active_workers Count of active workers.  Depends on the cascade
+   *   option.
    */
   void (*fini)(
     rtems_test_parallel_context *ctx,
-    void *arg
+    void *arg,
+    size_t active_workers
   );
 
+  /**
+   * @brief Job specific argument.
+   */
   void *arg;
+
+  /**
+   * @brief Job cascading flag.
+   *
+   * This flag indicates whether the job should be executed in a cascaded
+   * manner (the job runs on one processor first, then on two processors, and
+   * so on, step by step, until all processors are used).
+   */
+  bool cascade;
 } rtems_test_parallel_job;
 
 /**
diff --git a/cpukit/libmisc/testsupport/testparallel.c b/cpukit/libmisc/testsupport/testparallel.c
index 681f769..c1bdeda 100644
--- a/cpukit/libmisc/testsupport/testparallel.c
+++ b/cpukit/libmisc/testsupport/testparallel.c
@@ -44,6 +44,7 @@ static void start_worker_stop_timer(
     ctx
   );
   _Assert(sc == RTEMS_SUCCESSFUL);
+  (void) sc;
 }
 
 static void run_tests(
@@ -58,21 +59,31 @@ static void run_tests(
 
   for (i = 0; i < job_count; ++i) {
     const rtems_test_parallel_job *job = &jobs[i];
+    size_t n = rtems_get_processor_count();
+    size_t j = job->cascade ? 0 : n - 1;
 
-    if (rtems_test_parallel_is_master_worker(worker_index)) {
-      rtems_interval duration = (*job->init)(ctx, job->arg);
+    while (j < n) {
+      size_t active_workers = j + 1;
 
-      start_worker_stop_timer(ctx, duration);
-    }
+      if (rtems_test_parallel_is_master_worker(worker_index)) {
+        rtems_interval duration = (*job->init)(ctx, job->arg, active_workers);
+
+        start_worker_stop_timer(ctx, duration);
+      }
+
+      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
 
-    _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
+      if (worker_index <= j) {
+        (*job->body)(ctx, job->arg, active_workers, worker_index);
+      }
 
-    (*job->body)(ctx, job->arg, worker_index);
+      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
 
-    _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
+      if (rtems_test_parallel_is_master_worker(worker_index)) {
+        (*job->fini)(ctx, job->arg, active_workers);
+      }
 
-    if (rtems_test_parallel_is_master_worker(worker_index)) {
-      (*job->fini)(ctx, job->arg);
+      ++j;
     }
   }
 }
@@ -91,6 +102,7 @@ static void worker_task(rtems_task_argument arg)
 
   sc = rtems_event_transient_send(warg.ctx->master_id);
   _Assert(sc == RTEMS_SUCCESSFUL);
+  (void) sc;
 
   run_tests(warg.ctx, warg.jobs, warg.job_count, warg.worker_index);
 
diff --git a/testsuites/smptests/smpatomic01/init.c b/testsuites/smptests/smpatomic01/init.c
index fbd20fa..0241a01 100644
--- a/testsuites/smptests/smpatomic01/init.c
+++ b/testsuites/smptests/smpatomic01/init.c
@@ -98,7 +98,8 @@ static void test_fini(
 
 static rtems_interval test_atomic_add_init(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -111,6 +112,7 @@ static rtems_interval test_atomic_add_init(
 static void test_atomic_add_body(
   rtems_test_parallel_context *base,
   void *arg,
+  size_t active_workers,
   size_t worker_index
 )
 {
@@ -125,7 +127,11 @@ static void test_atomic_add_body(
   ctx->per_worker_value[worker_index] = counter;
 }
 
-static void test_atomic_add_fini(rtems_test_parallel_context *base, void *arg)
+static void test_atomic_add_fini(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers
+)
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
 
@@ -134,7 +140,8 @@ static void test_atomic_add_fini(rtems_test_parallel_context *base, void *arg)
 
 static rtems_interval test_atomic_flag_init(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -148,6 +155,7 @@ static rtems_interval test_atomic_flag_init(
 static void test_atomic_flag_body(
   rtems_test_parallel_context *base,
   void *arg,
+  size_t active_workers,
   size_t worker_index
 )
 {
@@ -168,7 +176,11 @@ static void test_atomic_flag_body(
   ctx->per_worker_value[worker_index] = counter;
 }
 
-static void test_atomic_flag_fini(rtems_test_parallel_context *base, void *arg)
+static void test_atomic_flag_fini(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers
+)
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
 
@@ -177,7 +189,8 @@ static void test_atomic_flag_fini(rtems_test_parallel_context *base, void *arg)
 
 static rtems_interval test_atomic_sub_init(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -190,6 +203,7 @@ static rtems_interval test_atomic_sub_init(
 static void test_atomic_sub_body(
   rtems_test_parallel_context *base,
   void *arg,
+  size_t active_workers,
   size_t worker_index
 )
 {
@@ -204,7 +218,11 @@ static void test_atomic_sub_body(
   ctx->per_worker_value[worker_index] = counter;
 }
 
-static void test_atomic_sub_fini(rtems_test_parallel_context *base, void *arg)
+static void test_atomic_sub_fini(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers
+)
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
 
@@ -213,7 +231,8 @@ static void test_atomic_sub_fini(rtems_test_parallel_context *base, void *arg)
 
 static rtems_interval test_atomic_compare_exchange_init(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -227,6 +246,7 @@ static rtems_interval test_atomic_compare_exchange_init(
 static void test_atomic_compare_exchange_body(
   rtems_test_parallel_context *base,
   void *arg,
+  size_t active_workers,
   size_t worker_index
 )
 {
@@ -259,7 +279,8 @@ static void test_atomic_compare_exchange_body(
 
 static void test_atomic_compare_exchange_fini(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -269,7 +290,8 @@ static void test_atomic_compare_exchange_fini(
 
 static rtems_interval test_atomic_or_and_init(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -282,6 +304,7 @@ static rtems_interval test_atomic_or_and_init(
 static void test_atomic_or_and_body(
   rtems_test_parallel_context *base,
   void *arg,
+  size_t active_workers,
   size_t worker_index
 )
 {
@@ -316,7 +339,8 @@ static void test_atomic_or_and_body(
 
 static void test_atomic_or_and_fini(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -326,7 +350,8 @@ static void test_atomic_or_and_fini(
 
 static rtems_interval test_atomic_fence_init(
   rtems_test_parallel_context *base,
-  void *arg
+  void *arg,
+  size_t active_workers
 )
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
@@ -341,6 +366,7 @@ static rtems_interval test_atomic_fence_init(
 static void test_atomic_fence_body(
   rtems_test_parallel_context *base,
   void *arg,
+  size_t active_workers,
   size_t worker_index
 )
 {
@@ -369,7 +395,11 @@ static void test_atomic_fence_body(
   }
 }
 
-static void test_atomic_fence_fini(rtems_test_parallel_context *base, void *arg)
+static void test_atomic_fence_fini(
+  rtems_test_parallel_context *base,
+  void *arg,
+  size_t active_workers
+)
 {
   smpatomic01_context *ctx = (smpatomic01_context *) base;
 
@@ -383,29 +413,29 @@ static void test_atomic_fence_fini(rtems_test_parallel_context *base, void *arg)
 
 static const rtems_test_parallel_job test_jobs[] = {
   {
-    test_atomic_add_init,
-    test_atomic_add_body,
-    test_atomic_add_fini
+    .init = test_atomic_add_init,
+    .body = test_atomic_add_body,
+    .fini = test_atomic_add_fini
   }, {
-    test_atomic_flag_init,
-    test_atomic_flag_body,
-    test_atomic_flag_fini
+    .init = test_atomic_flag_init,
+    .body = test_atomic_flag_body,
+    .fini = test_atomic_flag_fini
   }, {
-    test_atomic_sub_init,
-    test_atomic_sub_body,
-    test_atomic_sub_fini
+    .init = test_atomic_sub_init,
+    .body = test_atomic_sub_body,
+    .fini = test_atomic_sub_fini
   }, {
-    test_atomic_compare_exchange_init,
-    test_atomic_compare_exchange_body,
-    test_atomic_compare_exchange_fini
+    .init = test_atomic_compare_exchange_init,
+    .body = test_atomic_compare_exchange_body,
+    .fini = test_atomic_compare_exchange_fini
   }, {
-    test_atomic_or_and_init,
-    test_atomic_or_and_body,
-    test_atomic_or_and_fini
+    .init = test_atomic_or_and_init,
+    .body = test_atomic_or_and_body,
+    .fini = test_atomic_or_and_fini
   }, {
-    test_atomic_fence_init,
-    test_atomic_fence_body,
-    test_atomic_fence_fini
+    .init = test_atomic_fence_init,
+    .body = test_atomic_fence_body,
+    .fini = test_atomic_fence_fini
   },
 };
 
-- 
1.8.4.5



More information about the devel mailing list