[PATCH 5/6] add smppflock03 test case

WeiY wei.a.yang at gmail.com
Mon Aug 19 15:39:47 UTC 2013


---
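
The test creates one task for each additional processor and runs ten timed
rounds on all processors.  In each round every processor hammers a
phase-fair read/write lock in a different pattern (global or per-processor
lock, local or shared counter, with or without a busy section) and the
per-processor and global counters are printed afterwards.  A minimal sketch
of the read and write rounds being benchmarked, using the SMP_rwlock API
this test exercises (helper names are illustrative only):

  #include <rtems/score/smprwlock.h>

  static SMP_rwlock_Control lock = SMP_RWLOCK_INITIALIZER;

  static void read_round(void)
  {
    _SMP_rwlock_Acquire_read(&lock);
    /* read-side critical section */
    _SMP_rwlock_Release_read(&lock);
  }

  static void write_round(void)
  {
    _SMP_rwlock_Acquire_write(&lock);
    /* write-side critical section */
    _SMP_rwlock_Release_write(&lock);
  }
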
 testsuites/smptests/smppflock03/Makefile.am     |   19 +
 testsuites/smptests/smppflock03/init.c          |  491 +++++++++++++++++++++++
 testsuites/smptests/smppflock03/smppflock03.doc |   17 +
 testsuites/smptests/smppflock03/smppflock03.scn |   53 +++
 4 files changed, 580 insertions(+)
 create mode 100644 testsuites/smptests/smppflock03/Makefile.am
 create mode 100644 testsuites/smptests/smppflock03/init.c
 create mode 100644 testsuites/smptests/smppflock03/smppflock03.doc
 create mode 100644 testsuites/smptests/smppflock03/smppflock03.scn

diff --git a/testsuites/smptests/smppflock03/Makefile.am b/testsuites/smptests/smppflock03/Makefile.am
new file mode 100644
index 0000000..1b0a04d
--- /dev/null
+++ b/testsuites/smptests/smppflock03/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smppflock03
+smppflock03_SOURCES = init.c ../../support/src/locked_print.c
+
+dist_rtems_tests_DATA = smppflock03.scn smppflock03.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smppflock03_OBJECTS)
+LINK_LIBS = $(smppflock03_LDLIBS)
+
+smppflock03$(EXEEXT): $(smppflock03_OBJECTS) $(smppflock03_DEPENDENCIES)
+	@rm -f smppflock03$(EXEEXT)
+	$(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smppflock03/init.c b/testsuites/smptests/smppflock03/init.c
new file mode 100644
index 0000000..3199bc1
--- /dev/null
+++ b/testsuites/smptests/smppflock03/init.c
@@ -0,0 +1,491 @@
+#ifdef HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/score/smprwlock.h>
+
+#include "tmacros.h"
+
+/* FIXME: Add barrier to Score */
+
+typedef struct {
+	Atomic_Uint value;
+	Atomic_Uint sense;
+} barrier_control;
+
+typedef struct {
+	uint_fast32_t sense;
+} barrier_state;
+
+#define BARRIER_CONTROL_INITIALIZER { ATOMIC_VAR_INITIALIZER(0), ATOMIC_VAR_INITIALIZER(0) }
+
+#define BARRIER_STATE_INITIALIZER { 0 }
+
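+/*
+ * Sense-reversing barrier: each processor flips a local sense, the last
+ * arrival resets the counter and publishes the new sense, and the others
+ * spin until the published sense matches their local one.
+ */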
+static void barrier_wait(
+  barrier_control *control,
+  barrier_state *state,
+  int cpu_count
+)
+{
+  uint_fast32_t sense = ~state->sense;
+  uint_fast32_t previous;
+
+  state->sense = sense;
+
+  previous = _Atomic_Fetch_add_uint(&control->value, 1, ATOMIC_ORDER_ACQUIRE);
+
+  /*
+   * Only the last processor to arrive resets the counter and publishes the
+   * new sense.  Testing the fetch-and-add result instead of a separate load
+   * avoids the race in which two processors observe the full count, both
+   * reset the counter and the later reset loses an increment of the next
+   * round.
+   */
+  if (previous + 1 == (uint_fast32_t) cpu_count) {
+    _Atomic_Store_uint(&control->value, 0, ATOMIC_ORDER_RELEASE);
+    _Atomic_Store_uint(&control->sense, sense, ATOMIC_ORDER_RELEASE);
+  }
+
+  while (_Atomic_Load_uint(&control->sense, ATOMIC_ORDER_ACQUIRE) != sense) {
+    /* Wait */
+  }
+}
+
+#define TASK_PRIORITY 1
+
+#define CPU_COUNT 32
+
+#define TEST_COUNT 10
+
+typedef enum {
+  INITIAL,
+  START_TEST,
+  STOP_TEST
+} states;
+
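+/*
+ * State shared by all processors: the current test phase, the rendezvous
+ * barrier, the stop timer and the per-test global and per-processor
+ * counters.
+ */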
+typedef struct {
+  Atomic_Uint state;
+  barrier_control barrier;
+  rtems_id timer_id;
+  rtems_interval timeout;
+  unsigned long read_counter[TEST_COUNT];
+  unsigned long write_counter[TEST_COUNT];
+  unsigned long test_read_counter[TEST_COUNT][CPU_COUNT];
+  unsigned long test_write_counter[TEST_COUNT][CPU_COUNT];
+  SMP_rwlock_Control lock;
+} global_context;
+
+static global_context context = {
+  .state = ATOMIC_VAR_INITIALIZER(INITIAL),
+  .barrier = BARRIER_CONTROL_INITIALIZER,
+  .lock = SMP_RWLOCK_INITIALIZER
+};
+
+static const char *test_names[TEST_COUNT] = {
+  "aquire global read lock with local counter",
+  "aquire global write lock with local counter",
+  "aquire global read lock with global counter",
+  "aquire global write lock with global counter",
+  "aquire local read lock with local counter",
+  "aquire local write lock with local counter",
+  "aquire local read lock with global counter",
+  "aquire local write lock with global counter",
+  "aquire global read lock with busy section",
+  "aquire global write lock with busy section"
+};
+
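+/* Timer service routine: tell every processor to leave the current test body */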
+static void stop_test_timer(rtems_id timer_id, void *arg)
+{
+  global_context *ctx = arg;
+
+  _Atomic_Store_uint(&ctx->state, STOP_TEST, ATOMIC_ORDER_RELEASE);
+}
+
+static void wait_for_state(global_context *ctx, uint_fast32_t desired_state)
+{
+  while (_Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_ACQUIRE)
+    != desired_state) {
+    /* Wait */
+  }
+}
+
+static bool assert_state(global_context *ctx, uint_fast32_t desired_state)
+{
+  return _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_ACQUIRE) == desired_state;
+}
+
+typedef void (*test_body)(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+);
+
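+/*
+ * Each test body spins until the stop timer switches the state to STOP_TEST
+ * and counts the completed lock/unlock rounds.  The variants differ in
+ * whether the lock and the counter accessed inside the critical section are
+ * global (shared by all processors) or local, and whether a busy section is
+ * executed while the lock is held.
+ */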
+static void test_0_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_read(&ctx->lock);
+    _SMP_rwlock_Release_read(&ctx->lock);
+    ++counter;
+  }
+  ctx->test_read_counter[test][cpu_self] = counter;
+}
+
+static void test_1_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_write(&ctx->lock);
+    _SMP_rwlock_Release_write(&ctx->lock);
+    ++counter;
+  }
+
+  ctx->test_write_counter[test][cpu_self] = counter;
+}
+
+static void test_2_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_read(&ctx->lock);
+    ++ctx->read_counter[test];
+    _SMP_rwlock_Release_read(&ctx->lock);
+    ++counter;
+  }
+
+  ctx->test_read_counter[test][cpu_self] = counter;
+}
+
+static void test_3_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_write(&ctx->lock);
+    ++ctx->write_counter[test];
+    _SMP_rwlock_Release_write(&ctx->lock);
+    ++counter;
+  }
+
+  ctx->test_write_counter[test][cpu_self] = counter;
+}
+
+static void test_4_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+  SMP_rwlock_Control lock = SMP_RWLOCK_INITIALIZER;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_read(&lock);
+    _SMP_rwlock_Release_read(&lock);
+    ++counter;
+  }
+
+  ctx->test_read_counter[test][cpu_self] = counter;
+}
+
+static void test_5_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+  SMP_rwlock_Control lock = SMP_RWLOCK_INITIALIZER;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_write(&lock);
+    _SMP_rwlock_Release_write(&lock);
+    ++counter;
+  }
+
+  ctx->test_write_counter[test][cpu_self] = counter;
+}
+
+static void test_6_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+  SMP_rwlock_Control lock = SMP_RWLOCK_INITIALIZER;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_read(&lock);
+
+    /* The counter value is not interesting, only the access to it */
+    ++ctx->read_counter[test];
+
+    _SMP_rwlock_Release_read(&lock);
+    ++counter;
+  }
+
+  ctx->test_read_counter[test][cpu_self] = counter;
+}
+
+static void test_7_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+  SMP_rwlock_Control lock = SMP_RWLOCK_INITIALIZER;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_write(&lock);
+
+    /* The counter value is not interesting, only the access to it */
+    ++ctx->write_counter[test];
+
+    _SMP_rwlock_Release_write(&lock);
+    ++counter;
+  }
+
+  ctx->test_write_counter[test][cpu_self] = counter;
+}
+
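+/* Simulate some work inside the critical section */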
+static void busy_section(void)
+{
+  int i;
+
+  for (i = 0; i < 101; ++i) {
+    RTEMS_COMPILER_MEMORY_BARRIER();
+  }
+}
+
+static void test_8_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_read(&ctx->lock);
+    busy_section();
+    _SMP_rwlock_Release_read(&ctx->lock);
+    ++counter;
+  }
+
+  ctx->test_read_counter[test][cpu_self] = counter;
+}
+
+static void test_9_body(
+  int test,
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self
+)
+{
+  unsigned long counter = 0;
+
+  while (assert_state(ctx, START_TEST)) {
+    _SMP_rwlock_Acquire_write(&ctx->lock);
+    busy_section();
+    _SMP_rwlock_Release_write(&ctx->lock);
+    ++counter;
+  }
+
+  ctx->test_write_counter[test][cpu_self] = counter;
+}
+
+static const test_body test_bodies[TEST_COUNT] = {
+  test_0_body,
+  test_1_body,
+  test_2_body,
+  test_3_body,
+  test_4_body,
+  test_5_body,
+  test_6_body,
+  test_7_body,
+  test_8_body,
+  test_9_body
+};
+
+static void run_tests(
+  global_context *ctx,
+  barrier_state *bs,
+  int cpu_count,
+  int cpu_self,
+  bool master
+)
+{
+  int test;
+
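+  /*
+   * For each test: rendezvous at the barrier, let the master arm the stop
+   * timer and switch the state to START_TEST, then run the test body on
+   * every processor until the timer switches the state to STOP_TEST.
+   */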
+  for (test = 0; test < TEST_COUNT; ++test) {
+    barrier_wait(&ctx->barrier, bs, cpu_count);
+    if (master) {
+      rtems_status_code sc = rtems_timer_fire_after(
+        ctx->timer_id,
+        ctx->timeout,
+        stop_test_timer,
+        ctx
+      );
+      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+      _Atomic_Store_uint(&ctx->state, START_TEST, ATOMIC_ORDER_RELEASE);
+    }
+
+    wait_for_state(ctx, START_TEST);
+
+    (*test_bodies[test])(test, ctx, bs, cpu_count, cpu_self);
+  }
+
+  barrier_wait(&ctx->barrier, bs, cpu_count);
+}
+
+static void task(rtems_task_argument arg)
+{
+  global_context *ctx = (global_context *) arg;
+  uint32_t cpu_count = rtems_smp_get_processor_count();
+  uint32_t cpu_self = rtems_smp_get_current_processor();
+  rtems_status_code sc;
+  barrier_state bs = BARRIER_STATE_INITIALIZER;
+
+  run_tests(ctx, &bs, cpu_count, cpu_self, false);
+
+  sc = rtems_task_suspend(RTEMS_SELF);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test(void)
+{
+  global_context *ctx = &context;
+  uint32_t cpu_count = rtems_smp_get_processor_count();
+  uint32_t cpu_self = rtems_smp_get_current_processor();
+  uint32_t cpu;
+  int test;
+  rtems_status_code sc;
+  barrier_state bs = BARRIER_STATE_INITIALIZER;
+
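+  /* Start one worker task on each processor except the current one */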
+  for (cpu = 0; cpu < cpu_count; ++cpu) {
+    if (cpu != cpu_self) {
+      rtems_id task_id;
+
+      sc = rtems_task_create(
+        rtems_build_name('T', 'A', 'S', 'K'),
+        TASK_PRIORITY,
+        RTEMS_MINIMUM_STACK_SIZE,
+        RTEMS_DEFAULT_MODES,
+        RTEMS_DEFAULT_ATTRIBUTES,
+        &task_id
+      );
+      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+      sc = rtems_task_start(task_id, task, (rtems_task_argument) ctx);
+      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+    }
+  }
+
+  ctx->timeout = 10 * rtems_clock_get_ticks_per_second();
+
+  sc = rtems_timer_create(rtems_build_name('T', 'I', 'M', 'R'), &ctx->timer_id);
+  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+  run_tests(ctx, &bs, cpu_count, cpu_self, true);
+
+  for (test = 0; test < TEST_COUNT; ++test) {
+    unsigned long sum_read = 0;
+    unsigned long sum_write = 0;
+
+    printf("%s\n", test_names[test]);
+
+    for (cpu = 0; cpu < cpu_count; ++cpu) {
+      unsigned long local_read_counter = ctx->test_read_counter[test][cpu];
+      unsigned long local_write_counter = ctx->test_write_counter[test][cpu];
+
+      sum_read += local_read_counter;
+      sum_write += local_write_counter;
+
+      printf(
+        "\tprocessor %" PRIu32 ", local read counter %lu, local write counter %lu\n",
+        cpu,
+        local_read_counter,
+        local_write_counter
+      );
+    }
+
+    printf(
+      "\tglobal read counter %lu, sum of local read counter %lu\n"
+      "\tglobal write counter %lu, sum of local write counter %lu\n",
+      ctx->read_counter[test],
+      sum_read,
+      ctx->write_counter[test],
+      sum_write
+    );
+  }
+}
+
+static void Init(rtems_task_argument arg)
+{
+  puts("\n\n*** TEST SMP phase_fair lock 3 ***");
+
+  test();
+
+  puts("*** END OF TEST SMP phase_fair lock 3 ***");
+
+  rtems_test_exit(0);
+}
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_SMP_APPLICATION
+
+#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
+
+#define CONFIGURE_MAXIMUM_TASKS CPU_COUNT
+
+#define CONFIGURE_MAXIMUM_SEMAPHORES 1
+
+#define CONFIGURE_MAXIMUM_TIMERS 1
+
+#define CONFIGURE_INIT_TASK_PRIORITY TASK_PRIORITY
+#define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES
+#define CONFIGURE_INIT_TASK_ATTRIBUTES RTEMS_DEFAULT_ATTRIBUTES
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smppflock03/smppflock03.doc b/testsuites/smptests/smppflock03/smppflock03.doc
new file mode 100644
index 0000000..9684860
--- /dev/null
+++ b/testsuites/smptests/smppflock03/smppflock03.doc
@@ -0,0 +1,17 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smppflock03
+
+The screen file was obtained on a PowerPC QorIQ P1020E target running with a
+processor frequency of 800 MHz.
+
+directives:
+
+  - _SMP_rwlock_Acquire_write()
+  - _SMP_rwlock_Release_write()
+  - _SMP_rwlock_Acquire_read()
+  - _SMP_rwlock_Release_read()
+
+concepts:
+
+  - Benchmark the SMP phase-fair read/write lock (SMP_rwlock) implementation
diff --git a/testsuites/smptests/smppflock03/smppflock03.scn b/testsuites/smptests/smppflock03/smppflock03.scn
new file mode 100644
index 0000000..3833f16
--- /dev/null
+++ b/testsuites/smptests/smppflock03/smppflock03.scn
@@ -0,0 +1,53 @@
+*** TEST SMP phase_fair lock 3 ***
+acquire global read lock with local counter
+        processor 0, local read counter 212584, local write counter 0
+        processor 1, local read counter 46, local write counter 0
+        global read counter 0, sum of local read counter 212630
+        global write counter 0, sum of local write counter 0
+acquire global write lock with local counter
+        processor 0, local read counter 0, local write counter 1
+        processor 1, local read counter 0, local write counter 69
+        global read counter 0, sum of local read counter 0
+        global write counter 0, sum of local write counter 70
+acquire global read lock with global counter
+        processor 0, local read counter 198117, local write counter 0
+        processor 1, local read counter 32, local write counter 0
+        global read counter 198149, sum of local read counter 198149
+        global write counter 0, sum of local write counter 0
+acquire global write lock with global counter
+        processor 0, local read counter 0, local write counter 105405
+        processor 1, local read counter 0, local write counter 64
+        global read counter 0, sum of local read counter 0
+        global write counter 105469, sum of local write counter 105469
+acquire local read lock with local counter
+        processor 0, local read counter 215001, local write counter 0
+        processor 1, local read counter 138, local write counter 0
+        global read counter 0, sum of local read counter 215139
+        global write counter 0, sum of local write counter 0
+acquire local write lock with local counter
+        processor 0, local read counter 0, local write counter 110001
+        processor 1, local read counter 0, local write counter 66
+        global read counter 0, sum of local read counter 0
+        global write counter 0, sum of local write counter 110067
+acquire local read lock with global counter
+        processor 0, local read counter 200213, local write counter 0
+        processor 1, local read counter 171, local write counter 0
+        global read counter 200384, sum of local read counter 200384
+        global write counter 0, sum of local write counter 0
+acquire local write lock with global counter
+        processor 0, local read counter 0, local write counter 105996
+        processor 1, local read counter 0, local write counter 74
+        global read counter 0, sum of local read counter 0
+        global write counter 106070, sum of local write counter 106070
+acquire global read lock with busy section
+        processor 0, local read counter 47420, local write counter 0
+        processor 1, local read counter 11, local write counter 0
+        global read counter 0, sum of local read counter 47431
+        global write counter 0, sum of local write counter 0
+acquire global write lock with busy section
+        processor 0, local read counter 0, local write counter 1
+        processor 1, local read counter 0, local write counter 37
+        global read counter 0, sum of local read counter 0
+        global write counter 0, sum of local write counter 38
+*** END OF TEST SMP phase_fair lock 3 ***
+
-- 
1.7.9.5



