[PATCH] Test_environment: cascading option implemented

Gedare Bloom gedare at rtems.org
Fri Mar 6 14:09:52 UTC 2015


On Fri, Mar 6, 2015 at 8:49 AM, Alexander Krutwig
<alexander.krutwig at embedded-brains.de> wrote:
> ---
>  cpukit/libmisc/testsupport/test.h         | 23 ++++++++-
>  cpukit/libmisc/testsupport/testparallel.c | 24 +++++----
>  testsuites/smptests/smpatomic01/init.c    | 84 ++++++++++++++++++++-----------
>  3 files changed, 90 insertions(+), 41 deletions(-)
>
> diff --git a/cpukit/libmisc/testsupport/test.h b/cpukit/libmisc/testsupport/test.h
> index afed462..6b46244 100644
> --- a/cpukit/libmisc/testsupport/test.h
> +++ b/cpukit/libmisc/testsupport/test.h
> @@ -141,13 +141,16 @@ typedef struct {
>     *
>     * @param[in] ctx The parallel context.
>     * @param[in] arg The user specified argument.
> +   * @param[in] active_workers Count of active workers.  Depends on the cascade
> +   *   option.
>     *
>     * @return The desired job body execution time in clock ticks.  See
>     *   rtems_test_parallel_stop_job().
>     */
>    rtems_interval (*init)(
>      rtems_test_parallel_context *ctx,
> -    void *arg
> +    void *arg,
> +    size_t active_workers
>    );
>
>    /**
> @@ -172,13 +175,29 @@ typedef struct {
>     *
>     * @param[in] ctx The parallel context.
>     * @param[in] arg The user specified argument.
> +   * @param[in] active_workers Count of active workers.  Depends on the cascade
> +   *   option.
>     */
>    void (*fini)(
>      rtems_test_parallel_context *ctx,
> -    void *arg
> +    void *arg,
> +    size_t active_workers
>    );
>
> +  /**
> +   * @brief Job specific argument.
> +   */
>    void *arg;
> +
> +  /**
> +   * @brief Job cascading flag.
> +   *
> +   * This flag indicates whether the job should be executed in a cascaded
> +   * manner: if n processors are available, the test is executed on one
> +   * processor first, then on two processors, and so on step by step until
> +   * all n processors are used.
> +   */
> +  bool cascade;
>  } rtems_test_parallel_job;
>
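Side note on the new init/fini signature: a fini callback can use
active_workers to look at only the workers that actually ran the body in
the current step, since workers 0 .. active_workers - 1 are the active
ones. Hypothetical sketch (names made up, not part of the patch; assumes
<stdio.h> and this header are included):

  static void my_job_fini(
    rtems_test_parallel_context *ctx,
    void *arg,
    size_t active_workers
  )
  {
    size_t i;

    /* Workers 0 .. active_workers - 1 executed the body in this step */
    for (i = 0; i < active_workers; ++i) {
      printf("worker %zu was active in this step\n", i);
    }
  }
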
>  /**
> diff --git a/cpukit/libmisc/testsupport/testparallel.c b/cpukit/libmisc/testsupport/testparallel.c
> index 681f769..5e572b5 100644
> --- a/cpukit/libmisc/testsupport/testparallel.c
> +++ b/cpukit/libmisc/testsupport/testparallel.c
> @@ -58,21 +58,27 @@ static void run_tests(
>
>    for (i = 0; i < job_count; ++i) {
>      const rtems_test_parallel_job *job = &jobs[i];
> +    size_t n = job->cascade ? rtems_get_processor_count() : 1;
> +    size_t j;
>
> -    if (rtems_test_parallel_is_master_worker(worker_index)) {
> -      rtems_interval duration = (*job->init)(ctx, job->arg);
> +    for (j = 0; j < n; ++j) {
> +      if (rtems_test_parallel_is_master_worker(worker_index)) {
> +        rtems_interval duration = (*job->init)(ctx, job->arg, j + 1);
I don't quite understand the logic here. If cascade is false, shouldn't
the job execute with the maximum number of workers, i.e.
rtems_get_processor_count()? But here it looks like it will execute with
just one worker.
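
One way to get that behavior (just a sketch, untested) would be to start
the step index at the last step when cascade is false, so the job still
runs exactly once but with all workers active:

  size_t n = rtems_get_processor_count();
  size_t j = job->cascade ? 0 : n - 1;

  while (j < n) {
    size_t active_workers = j + 1;

    /* init/body/fini exactly as in the patch, with the body still
       guarded by worker_index <= j */

    ++j;
  }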


>
> -      start_worker_stop_timer(ctx, duration);
> -    }
> +        start_worker_stop_timer(ctx, duration);
> +      }
>
> -    _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
> +      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
>
> -    (*job->body)(ctx, job->arg, worker_index);
> +      if (worker_index <= j) {
> +        (*job->body)(ctx, job->arg, worker_index);
> +      }
>
> -    _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
> +      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
>
> -    if (rtems_test_parallel_is_master_worker(worker_index)) {
> -      (*job->fini)(ctx, job->arg);
> +      if (rtems_test_parallel_is_master_worker(worker_index)) {
> +        (*job->fini)(ctx, job->arg, j + 1);
> +      }
>      }
>    }
>  }
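
One thing that does look right here: every worker, active or not, still
reaches both barriers, because _SMP_barrier_Wait() is called with the full
ctx->worker_count. Annotated excerpt from the patch (comments mine):

  /* All workers arrive here before the timed section of the step starts */
  _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

  /* Only workers 0 .. j execute the job body in this step */
  if (worker_index <= j) {
    (*job->body)(ctx, job->arg, worker_index);
  }

  /* Inactive workers must still rendezvous, or the step would never end */
  _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);
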
> diff --git a/testsuites/smptests/smpatomic01/init.c b/testsuites/smptests/smpatomic01/init.c
> index fbd20fa..53d32c2 100644
> --- a/testsuites/smptests/smpatomic01/init.c
> +++ b/testsuites/smptests/smpatomic01/init.c
> @@ -98,7 +98,8 @@ static void test_fini(
>
>  static rtems_interval test_atomic_add_init(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -125,7 +126,11 @@ static void test_atomic_add_body(
>    ctx->per_worker_value[worker_index] = counter;
>  }
>
> -static void test_atomic_add_fini(rtems_test_parallel_context *base, void *arg)
> +static void test_atomic_add_fini(
> +  rtems_test_parallel_context *base,
> +  void *arg,
> +  size_t active_workers
> +)
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
>
> @@ -134,7 +139,8 @@ static void test_atomic_add_fini(rtems_test_parallel_context *base, void *arg)
>
>  static rtems_interval test_atomic_flag_init(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -168,7 +174,11 @@ static void test_atomic_flag_body(
>    ctx->per_worker_value[worker_index] = counter;
>  }
>
> -static void test_atomic_flag_fini(rtems_test_parallel_context *base, void *arg)
> +static void test_atomic_flag_fini(
> +  rtems_test_parallel_context *base,
> +  void *arg,
> +  size_t active_workers
> +)
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
>
> @@ -177,7 +187,8 @@ static void test_atomic_flag_fini(rtems_test_parallel_context *base, void *arg)
>
>  static rtems_interval test_atomic_sub_init(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -204,7 +215,11 @@ static void test_atomic_sub_body(
>    ctx->per_worker_value[worker_index] = counter;
>  }
>
> -static void test_atomic_sub_fini(rtems_test_parallel_context *base, void *arg)
> +static void test_atomic_sub_fini(
> +  rtems_test_parallel_context *base,
> +  void *arg,
> +  size_t active_workers
> +)
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
>
> @@ -213,7 +228,8 @@ static void test_atomic_sub_fini(rtems_test_parallel_context *base, void *arg)
>
>  static rtems_interval test_atomic_compare_exchange_init(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -259,7 +275,8 @@ static void test_atomic_compare_exchange_body(
>
>  static void test_atomic_compare_exchange_fini(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -269,7 +286,8 @@ static void test_atomic_compare_exchange_fini(
>
>  static rtems_interval test_atomic_or_and_init(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -316,7 +334,8 @@ static void test_atomic_or_and_body(
>
>  static void test_atomic_or_and_fini(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -326,7 +345,8 @@ static void test_atomic_or_and_fini(
>
>  static rtems_interval test_atomic_fence_init(
>    rtems_test_parallel_context *base,
> -  void *arg
> +  void *arg,
> +  size_t active_workers
>  )
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
> @@ -369,7 +389,11 @@ static void test_atomic_fence_body(
>    }
>  }
>
> -static void test_atomic_fence_fini(rtems_test_parallel_context *base, void *arg)
> +static void test_atomic_fence_fini(
> +  rtems_test_parallel_context *base,
> +  void *arg,
> +  size_t active_workers
> +)
>  {
>    smpatomic01_context *ctx = (smpatomic01_context *) base;
>
> @@ -383,29 +407,29 @@ static void test_atomic_fence_fini(rtems_test_parallel_context *base, void *arg)
>
>  static const rtems_test_parallel_job test_jobs[] = {
>    {
> -    test_atomic_add_init,
> -    test_atomic_add_body,
> -    test_atomic_add_fini
> +    .init = test_atomic_add_init,
> +    .body = test_atomic_add_body,
> +    .fini = test_atomic_add_fini
>    }, {
> -    test_atomic_flag_init,
> -    test_atomic_flag_body,
> -    test_atomic_flag_fini
> +    .init = test_atomic_flag_init,
> +    .body = test_atomic_flag_body,
> +    .fini = test_atomic_flag_fini
>    }, {
> -    test_atomic_sub_init,
> -    test_atomic_sub_body,
> -    test_atomic_sub_fini
> +    .init = test_atomic_sub_init,
> +    .body = test_atomic_sub_body,
> +    .fini = test_atomic_sub_fini
>    }, {
> -    test_atomic_compare_exchange_init,
> -    test_atomic_compare_exchange_body,
> -    test_atomic_compare_exchange_fini
> +    .init = test_atomic_compare_exchange_init,
> +    .body = test_atomic_compare_exchange_body,
> +    .fini = test_atomic_compare_exchange_fini
>    }, {
> -    test_atomic_or_and_init,
> -    test_atomic_or_and_body,
> -    test_atomic_or_and_fini
> +    .init = test_atomic_or_and_init,
> +    .body = test_atomic_or_and_body,
> +    .fini = test_atomic_or_and_fini
>    }, {
> -    test_atomic_fence_init,
> -    test_atomic_fence_body,
> -    test_atomic_fence_fini
> +    .init = test_atomic_fence_init,
> +    .body = test_atomic_fence_body,
> +    .fini = test_atomic_fence_fini
>    },
>  };
>
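Since these use designated initializers, the omitted .cascade member is
zero-initialized, i.e. false, so as written none of these jobs will
cascade. Any job that is meant to cascade has to set the flag explicitly,
e.g. (picking a job arbitrarily for illustration):

  {
    .init = test_atomic_add_init,
    .body = test_atomic_add_body,
    .fini = test_atomic_add_fini,
    .cascade = true
  },
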
> --
> 1.8.4.5
>