[rtems commit] smptests/smpatomic01: Add seqlock test case
Sebastian Huber
sebh at rtems.org
Tue Jun 7 13:30:15 UTC 2016
Module: rtems
Branch: master
Commit: cc8bb9e3376ce1d36ec9da06501e62f45c9b3b3b
Changeset: http://git.rtems.org/rtems/commit/?id=cc8bb9e3376ce1d36ec9da06501e62f45c9b3b3b
Author: Sebastian Huber <sebastian.huber at embedded-brains.de>
Date: Tue Jun 7 15:26:52 2016 +0200
smptests/smpatomic01: Add seqlock test case
---
testsuites/smptests/smpatomic01/init.c | 185 ++++++++++++++++++++++++
testsuites/smptests/smpatomic01/smpatomic01.scn | 50 +++++++
2 files changed, 235 insertions(+)
diff --git a/testsuites/smptests/smpatomic01/init.c b/testsuites/smptests/smpatomic01/init.c
index 673ff28..00a0cb5 100644
--- a/testsuites/smptests/smpatomic01/init.c
+++ b/testsuites/smptests/smpatomic01/init.c
@@ -575,6 +575,183 @@ static void test_atomic_store_load_rmw_fini(
}
}
+/*
+ * See also Hans-J. Boehm, HP Laboratories,
+ * "Can Seqlocks Get Along With Programming Language Memory Models?",
+ * http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+ */
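+
+/*
+ * A sequence lock protects the pair normal_value/second_value.  A writer
+ * makes the sequence value (atomic_value) odd while it updates the data and
+ * even again once it is done.  Readers retry until they observe the same
+ * even sequence value before and after the data reads.
+ */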
+
+static rtems_interval test_seqlock_init(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ smpatomic01_context *ctx = (smpatomic01_context *) base;
+
+ ctx->normal_value = 0;
+ ctx->second_value = 0;
+ _Atomic_Store_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
+
+ return test_duration();
+}
+
+static unsigned long seqlock_read(smpatomic01_context *ctx)
+{
+ unsigned long counter = 0;
+
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
+ unsigned long seq0;
+ unsigned long seq1;
+ unsigned long a;
+ unsigned long b;
+
+ do {
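+      /*
+       * Load the sequence value.  The acquire order ensures that the data
+       * reads below cannot be reordered before this load.
+       */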
+ seq0 = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_ACQUIRE);
+
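+      /* Read the protected data.  This may race with a concurrent writer. */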
+ a = ctx->normal_value;
+ b = ctx->second_value;
+
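+      /*
+       * Read the sequence value again via an atomic read-don't-modify-write
+       * (a fetch-and-add of zero), as proposed in the paper cited above.
+       * The release order ensures that the data reads above cannot be
+       * reordered after this operation.  Retry if a write was in progress
+       * (odd sequence value) or the sequence value changed meanwhile.
+       */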
+ seq1 =
+ _Atomic_Fetch_add_ulong(&ctx->atomic_value, 0, ATOMIC_ORDER_RELEASE);
+ } while (seq0 != seq1 || seq0 % 2 != 0);
+
+ ++counter;
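+    /* A consistent snapshot was observed, so both values must be equal */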
+ rtems_test_assert(a == b);
+ }
+
+ return counter;
+}
+
+static void test_single_writer_seqlock_body(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
+)
+{
+ smpatomic01_context *ctx = (smpatomic01_context *) base;
+ uint32_t cpu_self_index;
+ unsigned long counter;
+
+ /*
+   * Use the physical processor index to observe timing differences introduced
+   * by the system topology.
+ */
+ cpu_self_index = rtems_get_current_processor();
+
+ if (cpu_self_index == 0) {
+ counter = 0;
+
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
+ unsigned long seq;
+
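+      /*
+       * Make the sequence value odd to indicate a write in progress.  A
+       * relaxed load and store suffice since there is only one writer.  The
+       * acquire fence is intended to prevent the data writes below from
+       * being reordered before the sequence update, see the paper cited
+       * above for the writer-side ordering discussion.
+       */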
+ seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
+ _Atomic_Store_ulong(&ctx->atomic_value, seq + 1, ATOMIC_ORDER_RELAXED);
+ _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
+
+ ++counter;
+ ctx->normal_value = counter;
+ ctx->second_value = counter;
+
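+      /*
+       * Make the sequence value even again.  The release store publishes
+       * the data writes to the readers.
+       */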
+ _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
+ }
+ } else {
+ counter = seqlock_read(ctx);
+ }
+
+ ctx->per_worker_value[cpu_self_index] = counter;
+}
+
+static void test_single_writer_seqlock_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ smpatomic01_context *ctx = (smpatomic01_context *) base;
+ size_t i;
+
+ printf("=== single writer seqlock test case ===\n");
+
+ for (i = 0; i < active_workers; ++i) {
+ printf(
+ "processor %zu count %lu\n",
+ i,
+ ctx->per_worker_value[i]
+ );
+ }
+}
+
+static void test_multi_writer_seqlock_body(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers,
+ size_t worker_index
+)
+{
+ smpatomic01_context *ctx = (smpatomic01_context *) base;
+ uint32_t cpu_self_index;
+ unsigned long counter;
+
+ /*
+   * Use the physical processor index to observe timing differences introduced
+   * by the system topology.
+ */
+ cpu_self_index = rtems_get_current_processor();
+
+ if (cpu_self_index % 2 == 0) {
+ counter = 0;
+
+ while (!rtems_test_parallel_stop_job(&ctx->base)) {
+ unsigned long seq;
+
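+      /*
+       * Acquire the seqlock: wait until no write is in progress (even
+       * sequence value) and atomically increment the sequence value to an
+       * odd value.  A failed compare and exchange is simply retried with
+       * relaxed order.
+       */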
+ do {
+ seq = _Atomic_Load_ulong(&ctx->atomic_value, ATOMIC_ORDER_RELAXED);
+ } while (
+ seq % 2 != 0
+ || !_Atomic_Compare_exchange_ulong(
+ &ctx->atomic_value,
+ &seq,
+ seq + 1,
+ ATOMIC_ORDER_ACQ_REL,
+ ATOMIC_ORDER_RELAXED
+ )
+ );
+
+ ++counter;
+ ctx->normal_value = counter;
+ ctx->second_value = counter;
+
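+      /*
+       * Release the seqlock: the release store makes the sequence value
+       * even again and publishes the data writes to the readers.
+       */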
+ _Atomic_Store_ulong(&ctx->atomic_value, seq + 2, ATOMIC_ORDER_RELEASE);
+ }
+ } else {
+ counter = seqlock_read(ctx);
+ }
+
+ ctx->per_worker_value[cpu_self_index] = counter;
+}
+
+static void test_multi_writer_seqlock_fini(
+ rtems_test_parallel_context *base,
+ void *arg,
+ size_t active_workers
+)
+{
+ smpatomic01_context *ctx = (smpatomic01_context *) base;
+ size_t i;
+
+ printf("=== multi writer seqlock test case ===\n");
+
+ for (i = 0; i < active_workers; ++i) {
+ printf(
+ "processor %zu count %lu\n",
+ i,
+ ctx->per_worker_value[i]
+ );
+ }
+}
+
static const rtems_test_parallel_job test_jobs[] = {
{
.init = test_atomic_add_init,
@@ -604,6 +781,14 @@ static const rtems_test_parallel_job test_jobs[] = {
.init = test_atomic_store_load_rmw_init,
.body = test_atomic_store_load_rmw_body,
.fini = test_atomic_store_load_rmw_fini
+ }, {
+ .init = test_seqlock_init,
+ .body = test_single_writer_seqlock_body,
+ .fini = test_single_writer_seqlock_fini
+ }, {
+ .init = test_seqlock_init,
+ .body = test_multi_writer_seqlock_body,
+ .fini = test_multi_writer_seqlock_fini
}
};
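
As an aside for readers unfamiliar with the RTEMS _Atomic_* wrappers: the
following minimal C11 <stdatomic.h> sketch mirrors the reader and the
single-writer protocol exercised above, assuming the wrappers map to the
corresponding C11 operations.  The names seq, data_a, data_b and both
functions are illustrative only and not part of this commit.

#include <stdatomic.h>

/* Illustrative stand-ins for the sequence word and the protected data. */
static atomic_ulong seq;
static unsigned long data_a;
static unsigned long data_b;

/* Reader: retry until the same even sequence value is observed before and
   after the data reads. */
static void seqlock_read_pair(unsigned long *a, unsigned long *b)
{
  unsigned long s0;
  unsigned long s1;

  do {
    s0 = atomic_load_explicit(&seq, memory_order_acquire);
    *a = data_a;
    *b = data_b;
    /* Read-don't-modify-write; release order keeps the data reads above */
    s1 = atomic_fetch_add_explicit(&seq, 0, memory_order_release);
  } while (s0 != s1 || s0 % 2 != 0);
}

/* Single writer: an odd sequence value marks a write in progress. */
static void seqlock_write_pair(unsigned long value)
{
  unsigned long s = atomic_load_explicit(&seq, memory_order_relaxed);

  atomic_store_explicit(&seq, s + 1, memory_order_relaxed);
  atomic_thread_fence(memory_order_acquire);
  data_a = value;
  data_b = value;
  atomic_store_explicit(&seq, s + 2, memory_order_release);
}

With one thread calling seqlock_write_pair() in a loop and others calling
seqlock_read_pair(), every returned snapshot must satisfy *a == *b.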
diff --git a/testsuites/smptests/smpatomic01/smpatomic01.scn b/testsuites/smptests/smpatomic01/smpatomic01.scn
index f3de7c6..01f6ad4 100644
--- a/testsuites/smptests/smpatomic01/smpatomic01.scn
+++ b/testsuites/smptests/smpatomic01/smpatomic01.scn
@@ -181,4 +181,54 @@ processor 20 delta 2934ns, read-modify-write count 0
processor 21 delta 1547ns, read-modify-write count 0
processor 22 delta 1361ns, read-modify-write count 0
processor 23 delta 3200ns, read-modify-write count 0
+=== single writer seqlock test case ===
+processor 0 count 2451021
+processor 1 count 1
+processor 2 count 8
+processor 3 count 31
+processor 4 count 52
+processor 5 count 23
+processor 6 count 23
+processor 7 count 49
+processor 8 count 703
+processor 9 count 750
+processor 10 count 684
+processor 11 count 770
+processor 12 count 710
+processor 13 count 691
+processor 14 count 687
+processor 15 count 695
+processor 16 count 774
+processor 17 count 828
+processor 18 count 732
+processor 19 count 719
+processor 20 count 728
+processor 21 count 761
+processor 22 count 685
+processor 23 count 764
+=== multi writer seqlock test case ===
+processor 0 count 124410
+processor 1 count 7865
+processor 2 count 123950
+processor 3 count 7797
+processor 4 count 124253
+processor 5 count 7773
+processor 6 count 124763
+processor 7 count 7817
+processor 8 count 124593
+processor 9 count 7781
+processor 10 count 124647
+processor 11 count 7753
+processor 12 count 124322
+processor 13 count 7692
+processor 14 count 124906
+processor 15 count 7715
+processor 16 count 124568
+processor 17 count 7605
+processor 18 count 125060
+processor 19 count 7908
+processor 20 count 124499
+processor 21 count 7804
+processor 22 count 124538
+processor 23 count 7874
*** END OF TEST SMPATOMIC 1 ***