[rtems commit] smptests/smpopenmp01: New test

Sebastian Huber <sebh@rtems.org>
Fri Feb 2 14:20:17 UTC 2018


Module:    rtems
Branch:    master
Commit:    0c5d22f50931f300180e0479e0a674ede416a5a6
Changeset: http://git.rtems.org/rtems/commit/?id=0c5d22f50931f300180e0479e0a674ede416a5a6

Author:    Sebastian Huber <sebastian.huber@embedded-brains.de>
Date:      Mon Dec 18 10:24:34 2017 +0100

smptests/smpopenmp01: New test

---

 testsuites/smptests/Makefile.am                 |   1 +
 testsuites/smptests/configure.ac                |   1 +
 testsuites/smptests/smpopenmp01/Makefile.am     |  20 ++
 testsuites/smptests/smpopenmp01/init.c          | 354 ++++++++++++++++++++++++
 testsuites/smptests/smpopenmp01/smpopenmp01.doc |  11 +
 testsuites/smptests/smpopenmp01/smpopenmp01.py  |  42 +++
 testsuites/smptests/smpopenmp01/smpopenmp01.scn |  81 ++++++
 7 files changed, 510 insertions(+)

diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
index 08228ed..03b29a1 100644
--- a/testsuites/smptests/Makefile.am
+++ b/testsuites/smptests/Makefile.am
@@ -30,6 +30,7 @@ _SUBDIRS += smpmigration02
 _SUBDIRS += smpmrsp01
 _SUBDIRS += smpmutex01
 _SUBDIRS += smpmutex02
+_SUBDIRS += smpopenmp01
 _SUBDIRS += smpschedaffinity03
 _SUBDIRS += smpschedaffinity04
 _SUBDIRS += smpschedaffinity05
diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
index 78996b2..f8b5fe2 100644
--- a/testsuites/smptests/configure.ac
+++ b/testsuites/smptests/configure.ac
@@ -55,6 +55,7 @@ AC_CHECK_DECLS([pthread_getattr_np],[],[],[[
 
 # Explicitly list all Makefiles here
 AC_CONFIG_FILES([Makefile
+smpopenmp01/Makefile
 smp01/Makefile
 smp02/Makefile
 smp03/Makefile
diff --git a/testsuites/smptests/smpopenmp01/Makefile.am b/testsuites/smptests/smpopenmp01/Makefile.am
new file mode 100644
index 0000000..0850579
--- /dev/null
+++ b/testsuites/smptests/smpopenmp01/Makefile.am
@@ -0,0 +1,20 @@
+rtems_tests_PROGRAMS = smpopenmp01
+smpopenmp01_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpopenmp01.scn smpopenmp01.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+AM_CFLAGS += -fopenmp
+
+LINK_OBJS = $(smpopenmp01_OBJECTS)
+LINK_LIBS = $(smpopenmp01_LDLIBS)
+
+smpopenmp01$(EXEEXT): $(smpopenmp01_OBJECTS) $(smpopenmp01_DEPENDENCIES)
+	@rm -f smpopenmp01$(EXEEXT)
+	$(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpopenmp01/init.c b/testsuites/smptests/smpopenmp01/init.c
new file mode 100644
index 0000000..5d4ab12
--- /dev/null
+++ b/testsuites/smptests/smpopenmp01/init.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2017 embedded brains GmbH.  All rights reserved.
+ *
+ *  embedded brains GmbH
+ *  Dornierstr. 4
+ *  82178 Puchheim
+ *  Germany
+ *  <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*
+ * This OpenMP micro benchmark is based on mailing list posts from Jakub
+ * Jelinek.
+ *
+ * Subject: [gomp3] libgomp performance improvements
+ * https://gcc.gnu.org/ml/gcc-patches/2008-03/msg00930.html
+ *
+ * Subject: [gomp3] Use private futexes if possible
+ * https://gcc.gnu.org/ml/gcc-patches/2008-03/msg01126.html
+ *
+ * This file can be compiled on Linux, etc. using:
+ *
+ * cc -std=c11 -O2 -fopenmp init.c
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <omp.h>
+#include <stdio.h>
+
+#ifdef __rtems__
+#include <pthread.h>
+#include <tmacros.h>
+
+const char rtems_test_name[] = "SMPOPENMP 1";
+
+#define CPU_COUNT_MAX 32
+#endif /* __rtems__ */
+
+static void work(void)
+{
+  __asm__ volatile ("" : : : "memory");
+}
+
+static void barrier_bench(void)
+{
+  #pragma omp parallel
+  for (int i = 0; i < 10000; ++i) {
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+    #pragma omp barrier
+    work();
+  }
+}
+
+static void parallel_bench(void)
+{
+  for (int i = 0; i < 20000; ++i) {
+    #pragma omp parallel
+    work();
+  }
+}
+
+static void static_bench(void)
+{
+  for (int i = 0; i < 1000; ++i) {
+    #pragma omp parallel for schedule (static)
+    for (int j = 0; j < 100; ++j) {
+      work();
+    }
+  }
+}
+
+static void dynamic_bench(void)
+{
+  #pragma omp parallel for schedule (dynamic)
+  for (int i = 0; i < 100000; ++i) {
+    work();
+  }
+}
+
+static void guided_bench(void)
+{
+  #pragma omp parallel for schedule (guided)
+  for (int i = 0; i < 100000; ++i) {
+    work();
+  }
+}
+
+static void runtime_bench(void)
+{
+  #pragma omp parallel for schedule (runtime)
+  for (int i = 0; i < 100000; ++i) {
+    work();
+  }
+}
+
+static void single_bench(void)
+{
+  #pragma omp parallel
+  for (int i = 0; i < 10000; ++i) {
+    #pragma omp single
+    work();
+  }
+}
+
+static void all(void)
+{
+  barrier_bench();
+  parallel_bench();
+  static_bench();
+  dynamic_bench();
+  guided_bench();
+  runtime_bench();
+  single_bench();
+}
+
+static void do_bench(const char *name, void (*bench)(void), int n)
+{
+  double start;
+  double delta;
+
+  (*bench)();
+  start = omp_get_wtime();
+  for (int i = 0; i < n; ++i) {
+    (*bench)();
+  }
+  delta = omp_get_wtime() - start;
+  printf("\t\t<%sBench unit=\"s\">%f</%sBench>\n", name, delta, name);
+}
+
+static void microbench(int num_threads, int n)
+{
+  printf("\t<Microbench numThreads=\"%i\" majorLoopCount=\"%i\">\n", num_threads, n);
+  omp_set_num_threads(num_threads);
+  do_bench("Barrier", barrier_bench, n);
+  do_bench("Parallel", parallel_bench, n);
+  do_bench("Static", static_bench, n);
+  do_bench("Dynamic", dynamic_bench, n);
+  do_bench("Guided", guided_bench, n);
+  do_bench("Runtime", runtime_bench, n);
+  do_bench("Single", single_bench, n);
+  printf("\t</Microbench>\n");
+}
+
+static int estimate_3s_runtime_with_one_proc(void)
+{
+  double start;
+  double delta;
+  int n;
+
+  omp_set_num_threads(1);
+  all();
+  start = omp_get_wtime();
+  all();
+  delta = omp_get_wtime() - start;
+
+  if (delta > 0.0 && delta <= 1.0) {
+    n = (int) (3.0 / delta);
+  } else {
+    n = 1;
+  }
+
+  return n;
+}
+
+static void test(void)
+{
+  int num_procs;
+  int n;
+
+  printf("<SMPOpenMP01>\n");
+
+  n = estimate_3s_runtime_with_one_proc();
+  num_procs = omp_get_num_procs();
+  omp_set_num_threads(num_procs);
+
+  for (int i = 1; i <= num_procs; ++i) {
+    microbench(i, n);
+  }
+
+  printf("</SMPOpenMP01>\n");
+}
+
+#ifdef __rtems__
+
+static void Init(rtems_task_argument arg)
+{
+  rtems_status_code sc;
+  cpu_set_t cpu_set;
+
+  rtems_print_printer_fprintf_putc(&rtems_test_printer);
+  TEST_BEGIN();
+
+  CPU_ZERO(&cpu_set);
+  CPU_SET(0, &cpu_set);
+
+  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpu_set), &cpu_set);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  test();
+  TEST_END();
+  rtems_test_exit(0);
+}
+
+typedef struct {
+  pthread_mutex_t mtx;
+  pthread_cond_t cnd;
+  bool cpus_used[CPU_COUNT_MAX];
+} test_context;
+
+static test_context test_instance;
+
+static uint32_t find_free_cpu(test_context *ctx)
+{
+  uint32_t i;
+  uint32_t n;
+
+  n = rtems_get_processor_count();
+
+  pthread_mutex_lock(&ctx->mtx);
+
+  do {
+    for (i = 1; i < n; ++i) {
+      if (!ctx->cpus_used[i]) {
+        ctx->cpus_used[i] = true;
+        break;
+      }
+    }
+
+    if (i == n) {
+      pthread_cond_wait(&ctx->cnd, &ctx->mtx);
+    }
+  } while (i == n);
+
+  pthread_mutex_unlock(&ctx->mtx);
+
+  return i;
+}
+
+static void begin_extension(Thread_Control *th)
+{
+  rtems_id th_id;
+
+  th_id = th->Object.id;
+
+  if (rtems_object_id_get_api(th_id) == OBJECTS_POSIX_API) {
+    rtems_status_code sc;
+    rtems_id sched_id;
+    uint32_t cpu_index;
+    cpu_set_t cpu_set;
+    rtems_task_priority prio;
+
+    cpu_index = find_free_cpu(&test_instance);
+
+    sc = rtems_scheduler_ident_by_processor(cpu_index, &sched_id);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    sc = rtems_task_set_priority(th_id, RTEMS_CURRENT_PRIORITY, &prio);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    sc = rtems_task_set_scheduler(th_id, sched_id, prio);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    CPU_ZERO(&cpu_set);
+    CPU_SET((int) cpu_index, &cpu_set);
+
+    sc = rtems_task_set_affinity(th_id, sizeof(cpu_set), &cpu_set);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+  }
+}
+
+static void terminate_extension(Thread_Control *th)
+{
+  rtems_id th_id;
+
+  th_id = th->Object.id;
+
+  if (rtems_object_id_get_api(th_id) == OBJECTS_POSIX_API) {
+    rtems_status_code sc;
+    cpu_set_t cpu_set;
+    uint32_t cpu_index;
+    test_context *ctx;
+
+    sc = rtems_task_get_affinity(th_id, sizeof(cpu_set), &cpu_set);
+    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+    cpu_index = CPU_FFS(&cpu_set) - 1;
+
+    ctx = &test_instance;
+
+    pthread_mutex_lock(&ctx->mtx);
+    rtems_test_assert(ctx->cpus_used[cpu_index]);
+    ctx->cpus_used[cpu_index] = false;
+    pthread_cond_broadcast(&ctx->cnd);
+    pthread_mutex_unlock(&ctx->mtx);
+  }
+}
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER
+
+#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT_MAX
+
+#define CONFIGURE_UNLIMITED_OBJECTS
+#define CONFIGURE_UNIFIED_WORK_AREAS
+
+#define CONFIGURE_INITIAL_EXTENSIONS \
+  { \
+    .thread_begin = begin_extension, \
+    .thread_terminate = terminate_extension \
+  }, \
+  RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_FLOATING_POINT
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
+
+#else /* __rtems__ */
+
+int main(void)
+{
+  test();
+  return 0;
+}
+
+#endif /* __rtems__ */
diff --git a/testsuites/smptests/smpopenmp01/smpopenmp01.doc b/testsuites/smptests/smpopenmp01/smpopenmp01.doc
new file mode 100644
index 0000000..688cccd
--- /dev/null
+++ b/testsuites/smptests/smpopenmp01/smpopenmp01.doc
@@ -0,0 +1,11 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpopenmp01
+
+directives:
+
+  - None
+
+concepts:
+
+  - OpenMP micro benchmark.
diff --git a/testsuites/smptests/smpopenmp01/smpopenmp01.py b/testsuites/smptests/smpopenmp01/smpopenmp01.py
new file mode 100644
index 0000000..139aedd
--- /dev/null
+++ b/testsuites/smptests/smpopenmp01/smpopenmp01.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2017 embedded brains GmbH.  All rights reserved.
+#
+# The license and distribution terms for this file may be
+# found in the file LICENSE in this distribution or at
+# http://www.rtems.org/license/LICENSE.
+#
+
+import re
+import libxml2
+from libxml2 import xmlNode
+import matplotlib.pyplot as plt
+data = open('smpopenmp01.scn').read()
+data = re.sub(r'\*\*\*.*', '', data)
+doc = libxml2.parseDoc(data)
+ctx = doc.xpathNewContext()
+
+plt.title('OpenMP Microbench')
+plt.xlabel('Number of Threads')
+plt.ylabel('Relative Duration')
+
+def m(n):
+	return float(n.getContent())
+
+def p(bench):
+	d = list(map(m, ctx.xpathEval('/SMPOpenMP01/Microbench/' + bench)))
+	y = [x / d[0] for x in d]
+	x = range(1, len(y) + 1)
+	plt.xticks(x)
+	plt.plot(x, y, label = bench, marker = 'o')
+
+p('BarrierBench')
+p('ParallelBench')
+p('StaticBench')
+p('DynamicBench')
+p('GuidedBench')
+p('RuntimeBench')
+p('SingleBench')
+plt.legend(loc = 'best')
+plt.show()
diff --git a/testsuites/smptests/smpopenmp01/smpopenmp01.scn b/testsuites/smptests/smpopenmp01/smpopenmp01.scn
new file mode 100644
index 0000000..9a21ff5
--- /dev/null
+++ b/testsuites/smptests/smpopenmp01/smpopenmp01.scn
@@ -0,0 +1,81 @@
+*** BEGIN OF TEST SMPOPENMP 1 ***
+*** TEST VERSION: 5.0.0.4c8cffc19865eaa3b033ce2776bcce9992f24b18
+*** TEST STATE: EXPECTED-PASS
+*** TEST BUILD: RTEMS_POSIX_API RTEMS_SMP
+*** TEST TOOLS: 7.3.0 20180125 (RTEMS 5, RSB 6d9c77c77d271d1fc2dfe8493d6713930b52a6dd, Newlib 3.0.0)
+<SMPOpenMP01>
+        <Microbench numThreads="1" majorLoopCount="20">
+                <BarrierBench unit="s">0.720318</BarrierBench>
+                <ParallelBench unit="s">1.121403</ParallelBench>
+                <StaticBench unit="s">0.059288</StaticBench>
+                <DynamicBench unit="s">0.440113</DynamicBench>
+                <GuidedBench unit="s">0.003230</GuidedBench>
+                <RuntimeBench unit="s">0.440121</RuntimeBench>
+                <SingleBench unit="s">0.116486</SingleBench>
+        </Microbench>
+        <Microbench numThreads="2" majorLoopCount="20">
+                <BarrierBench unit="s">0.416734</BarrierBench>
+                <ParallelBench unit="s">0.259013</ParallelBench>
+                <StaticBench unit="s">0.015311</StaticBench>
+                <DynamicBench unit="s">0.196751</DynamicBench>
+                <GuidedBench unit="s">0.002367</GuidedBench>
+                <RuntimeBench unit="s">0.199640</RuntimeBench>
+                <SingleBench unit="s">0.077629</SingleBench>
+        </Microbench>
+        <Microbench numThreads="3" majorLoopCount="20">
+                <BarrierBench unit="s">0.748332</BarrierBench>
+                <ParallelBench unit="s">0.387318</ParallelBench>
+                <StaticBench unit="s">0.021244</StaticBench>
+                <DynamicBench unit="s">0.141558</DynamicBench>
+                <GuidedBench unit="s">0.001544</GuidedBench>
+                <RuntimeBench unit="s">0.142693</RuntimeBench>
+                <SingleBench unit="s">0.117683</SingleBench>
+        </Microbench>
+        <Microbench numThreads="4" majorLoopCount="20">
+                <BarrierBench unit="s">0.552830</BarrierBench>
+                <ParallelBench unit="s">0.323241</ParallelBench>
+                <StaticBench unit="s">0.017796</StaticBench>
+                <DynamicBench unit="s">0.099475</DynamicBench>
+                <GuidedBench unit="s">0.001259</GuidedBench>
+                <RuntimeBench unit="s">0.100053</RuntimeBench>
+                <SingleBench unit="s">0.091069</SingleBench>
+        </Microbench>
+        <Microbench numThreads="5" majorLoopCount="20">
+                <BarrierBench unit="s">0.882791</BarrierBench>
+                <ParallelBench unit="s">0.452561</ParallelBench>
+                <StaticBench unit="s">0.023620</StaticBench>
+                <DynamicBench unit="s">0.094107</DynamicBench>
+                <GuidedBench unit="s">0.000989</GuidedBench>
+                <RuntimeBench unit="s">0.093911</RuntimeBench>
+                <SingleBench unit="s">0.130070</SingleBench>
+        </Microbench>
+        <Microbench numThreads="6" majorLoopCount="20">
+                <BarrierBench unit="s">0.670385</BarrierBench>
+                <ParallelBench unit="s">0.393587</ParallelBench>
+                <StaticBench unit="s">0.021141</StaticBench>
+                <DynamicBench unit="s">0.072322</DynamicBench>
+                <GuidedBench unit="s">0.000937</GuidedBench>
+                <RuntimeBench unit="s">0.069804</RuntimeBench>
+                <SingleBench unit="s">0.104107</SingleBench>
+        </Microbench>
+        <Microbench numThreads="7" majorLoopCount="20">
+                <BarrierBench unit="s">1.031511</BarrierBench>
+                <ParallelBench unit="s">0.466571</ParallelBench>
+                <StaticBench unit="s">0.024944</StaticBench>
+                <DynamicBench unit="s">0.069194</DynamicBench>
+                <GuidedBench unit="s">0.000814</GuidedBench>
+                <RuntimeBench unit="s">0.069596</RuntimeBench>
+                <SingleBench unit="s">0.133137</SingleBench>
+        </Microbench>
+        <Microbench numThreads="8" majorLoopCount="20">
+                <BarrierBench unit="s">0.761015</BarrierBench>
+                <ParallelBench unit="s">0.452577</ParallelBench>
+                <StaticBench unit="s">0.023979</StaticBench>
+                <DynamicBench unit="s">0.061193</DynamicBench>
+                <GuidedBench unit="s">0.000799</GuidedBench>
+                <RuntimeBench unit="s">0.061519</RuntimeBench>
+                <SingleBench unit="s">0.114285</SingleBench>
+        </Microbench>
+</SMPOpenMP01>
+
+*** END OF TEST SMPOPENMP 1 ***
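
For anyone who wants to post-process a captured run without the libxml2
Python bindings, the relative durations that smpopenmp01.py plots can be
extracted with just the standard library. A minimal sketch follows; using
xml.etree.ElementTree here is my substitution, the committed script uses
libxml2 and matplotlib:

import re
import xml.etree.ElementTree as ET

data = open('smpopenmp01.scn').read()
# Drop the "*** ... ***" banner lines so only the XML document remains.
data = re.sub(r'\*\*\*.*', '', data)
root = ET.fromstring(data)  # the <SMPOpenMP01> root element

benches = ('BarrierBench', 'ParallelBench', 'StaticBench', 'DynamicBench',
           'GuidedBench', 'RuntimeBench', 'SingleBench')
for bench in benches:
    # One duration per <Microbench>, in order of increasing thread count.
    durations = [float(node.text) for node in root.iter(bench)]
    # Normalize to the single-thread run, as the plot script does.
    print(bench, [d / durations[0] for d in durations])

Feeding the printed ratios into any plotting tool reproduces the
"Relative Duration" over "Number of Threads" view that smpopenmp01.py
draws with matplotlib.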



