[PATCH 2/8] tmcontext01: Improve timing measurements for sparc
Sebastian Huber
sebastian.huber at embedded-brains.de
Tue Jan 9 09:36:51 UTC 2024
Use a SPARC-specific method to flush the register windows. This
improves the timing measurements.
---
testsuites/tmtests/tmcontext01/init.c | 28 ++++++++++++---------------
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/testsuites/tmtests/tmcontext01/init.c b/testsuites/tmtests/tmcontext01/init.c
index 5aea18e5f1..7f4ababfe5 100644
--- a/testsuites/tmtests/tmcontext01/init.c
+++ b/testsuites/tmtests/tmcontext01/init.c
@@ -73,25 +73,21 @@ static int dirty_data_cache(volatile int *data, size_t n, size_t clsz, int j)
static __attribute__((__noipa__)) void call_at_level(
int start,
int fl,
- int s,
- bool dirty
+ int s
)
{
+#if defined(__sparc__)
if (fl == start) {
- /*
- * Some architectures like the SPARC have register windows. A side-effect
- * of this context switch is that we start with a fresh window set. On
- * architectures like ARM or PowerPC this context switch has no effect.
- */
- _Context_Switch(&ctx, &ctx);
+ /* Flush register windows */
+ __asm__ volatile ("ta 3" : : : "memory");
}
+#endif
if (fl > 0) {
call_at_level(
start,
fl - 1,
- s,
- dirty
+ s
);
__asm__ volatile ("" : : : "memory");
} else {
@@ -99,11 +95,6 @@ static __attribute__((__noipa__)) void call_at_level(
rtems_counter_ticks a;
rtems_counter_ticks b;
- if (dirty) {
- dirty_data_cache(main_data, data_size, cache_line_size, fl);
- rtems_cache_invalidate_entire_instruction();
- }
-
a = rtems_counter_read();
/* Ensure that we use an untouched stack area */
@@ -157,7 +148,12 @@ static __attribute__((__noipa__)) void test_by_function_level(int fl, bool dirty
rtems_interrupt_lock_acquire(&lock, &lock_context);
for (s = 0; s < SAMPLES; ++s) {
- call_at_level(fl, fl, s, dirty);
+ if (dirty) {
+ dirty_data_cache(main_data, data_size, cache_line_size, fl);
+ rtems_cache_invalidate_entire_instruction();
+ }
+
+ call_at_level(fl, fl, s);
}
rtems_interrupt_lock_release(&lock, &lock_context);
--
2.35.3
More information about the devel
mailing list