[PATCH 6/6] score: Improve _SMP_Multicast_action()
Sebastian Huber
sebastian.huber at embedded-brains.de
Thu Apr 11 13:45:40 UTC 2019
Let _SMP_Multicast_action() work during system initialization.  Previously,
if the system state was not up, the handler was invoked only on the current
processor and the target processor set was ignored.  Now the jobs are
performed on the requested processors even before multitasking is started.
A processor which is in a state in which it cannot perform jobs causes the
new fatal error SMP_FATAL_INVALID_CPU_STATE_TO_PERFORM_JOBS.
---
 cpukit/include/rtems/score/smpimpl.h      |  3 +-
 cpukit/score/src/percpu.c                 |  1 +
 cpukit/score/src/smpmulticastaction.c     | 20 +++++++----
 testsuites/smptests/smpmulticast01/init.c | 59 ++++++++++---------------------
 4 files changed, 34 insertions(+), 49 deletions(-)
diff --git a/cpukit/include/rtems/score/smpimpl.h b/cpukit/include/rtems/score/smpimpl.h
index a501339176..c0a3ccb610 100644
--- a/cpukit/include/rtems/score/smpimpl.h
+++ b/cpukit/include/rtems/score/smpimpl.h
@@ -78,7 +78,8 @@ typedef enum {
   SMP_FATAL_SHUTDOWN,
   SMP_FATAL_SHUTDOWN_RESPONSE,
   SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED,
-  SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED
+  SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED,
+  SMP_FATAL_INVALID_CPU_STATE_TO_PERFORM_JOBS
 } SMP_Fatal_code;
 
 static inline void _SMP_Fatal( SMP_Fatal_code code )
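
The new code extends the SMP fatal error enumeration, so a user-provided
fatal extension can identify the condition.  A minimal sketch of such an
extension (the printk() text is illustrative and not part of the patch):

#include <rtems.h>
#include <rtems/bspIo.h>
#include <rtems/score/smpimpl.h>

static void report_fatal(
  rtems_fatal_source source,
  bool               always_set_to_false,
  rtems_fatal_code   code
)
{
  (void) always_set_to_false;

  if (
    source == RTEMS_FATAL_SOURCE_SMP
      && code == SMP_FATAL_INVALID_CPU_STATE_TO_PERFORM_JOBS
  ) {
    printk( "job sent to a processor unable to perform it\n" );
  }
}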
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index 0e4c0678e7..e4c3b881e5 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -99,6 +99,7 @@ static void _Per_CPU_State_busy_wait(
           && state != PER_CPU_STATE_SHUTDOWN
       ) {
         _Per_CPU_State_before_multitasking_action( cpu );
+        _Per_CPU_Perform_jobs( cpu );
         _CPU_SMP_Processor_event_receive();
         state = cpu->state;
       }
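
Calling _Per_CPU_Perform_jobs() in this busy-wait loop is what lets a
secondary processor that is parked before multitasking service incoming job
requests.  The handshake visible in this patch pairs a releasing store to
the job's done flag on the performing side with the acquire load in
_SMP_Wait_for_action_jobs() (below).  A self-contained C11 model of that
handshake (plain C with C11 threads, not RTEMS code):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

typedef struct {
  void (*handler)(void *);
  void *arg;
  atomic_uint done;
} job_model;

static int perform_job(void *arg)
{
  job_model *job = arg;

  /* run the handler, then publish completion with release semantics */
  (*job->handler)(job->arg);
  atomic_store_explicit(&job->done, 1, memory_order_release);
  return 0;
}

static void set_value(void *arg)
{
  *(int *) arg = 42;
}

int main(void)
{
  int value = 0;
  job_model job = { set_value, &value, 0 };
  thrd_t performer;

  thrd_create(&performer, perform_job, &job);

  /* the waiting side, as in _SMP_Wait_for_action_jobs(): the acquire
     load makes the handler's side effects visible after the loop */
  while (atomic_load_explicit(&job.done, memory_order_acquire) == 0) {
    /* spin until the job is done */
  }

  printf("%d\n", value); /* prints 42 */
  thrd_join(performer, NULL);
  return 0;
}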
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index 2288dbe939..69bc87d7e0 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -31,7 +31,6 @@
 
 #include <rtems/score/smpimpl.h>
 #include <rtems/score/threaddispatch.h>
-#include <rtems/score/sysstate.h>
 
 typedef struct Per_CPU_Job {
   struct Per_CPU_Job *next;
@@ -138,11 +137,23 @@ static void _SMP_Wait_for_action_jobs(
   for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
     if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
       const Per_CPU_Job *job;
+      Per_CPU_Control   *cpu;
 
       job = &jobs[ cpu_index ];
+      cpu = _Per_CPU_Get_by_index( cpu_index );
 
       while ( _Atomic_Load_uint( &job->done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
-        _Per_CPU_Try_perform_jobs( cpu_self );
+        switch ( cpu->state ) {
+          case PER_CPU_STATE_INITIAL:
+          case PER_CPU_STATE_READY_TO_START_MULTITASKING:
+          case PER_CPU_STATE_REQUEST_START_MULTITASKING:
+          case PER_CPU_STATE_UP:
+            _Per_CPU_Try_perform_jobs( cpu_self );
+            break;
+          default:
+            _SMP_Fatal( SMP_FATAL_INVALID_CPU_STATE_TO_PERFORM_JOBS );
+            break;
+        }
       }
     }
   }
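
The issuing processor keeps calling _Per_CPU_Try_perform_jobs( cpu_self )
while it waits, because two processors may send multicast actions to each
other concurrently; each must drain its own job queue to avoid a deadlock.
The new state check turns a job sent to a processor that can never process
it (for example, one that is shut down) into a fatal error instead of an
endless spin.  A self-contained C11 model of the mutual case (assumptions:
two threads stand in for processors, one pending job per peer):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

typedef struct {
  atomic_bool pending; /* a job was posted to this "processor" */
  atomic_uint done;    /* the job posted BY this "processor" finished */
} peer;

static peer peers[2];

static void try_perform_jobs(int self)
{
  if (atomic_exchange(&peers[self].pending, false)) {
    /* run the (empty) handler, then signal the poster */
    atomic_store_explicit(&peers[1 - self].done, 1, memory_order_release);
  }
}

static int post_and_wait(void *arg)
{
  int self = *(int *) arg;

  /* post a job to the other processor ... */
  atomic_store(&peers[1 - self].pending, true);

  /* ... and drain own jobs while waiting; without the
     try_perform_jobs() call both threads would deadlock */
  while (atomic_load_explicit(&peers[self].done, memory_order_acquire) == 0) {
    try_perform_jobs(self);
  }

  return 0;
}

int main(void)
{
  thrd_t t;
  int a = 0;
  int b = 1;

  thrd_create(&t, post_and_wait, &b);
  post_and_wait(&a);
  thrd_join(t, NULL);
  puts("no deadlock");
  return 0;
}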
@@ -161,11 +172,6 @@ void _SMP_Multicast_action(
   cpu_max = _SMP_Get_processor_maximum();
   _Assert( cpu_max <= CPU_MAXIMUM_PROCESSORS );
 
-  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
-    ( *handler )( arg );
-    return;
-  }
-
   if ( targets == NULL ) {
     targets = _SMP_Get_online_processors();
   }
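
With the system-state early-out gone, the handler is no longer silently run
only on the current processor during initialization; the jobs always reach
the requested processors.  A usage sketch (RTEMS SMP build assumed; the
handler and counter are illustrative):

#include <rtems/score/smpimpl.h>
#include <rtems/score/atomic.h>

static Atomic_Uint invocation_count;

static void count_invocation( void *arg )
{
  (void) arg;
  _Atomic_Fetch_add_uint( &invocation_count, 1, ATOMIC_ORDER_RELAXED );
}

static void broadcast_example( void )
{
  /* NULL selects all online processors; after this patch the call
     also works before multitasking is started */
  _SMP_Multicast_action( NULL, count_invocation, NULL );
}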
diff --git a/testsuites/smptests/smpmulticast01/init.c b/testsuites/smptests/smpmulticast01/init.c
index e599a78bde..c421767b1c 100644
--- a/testsuites/smptests/smpmulticast01/init.c
+++ b/testsuites/smptests/smpmulticast01/init.c
@@ -104,18 +104,15 @@ static void action(void *arg)
 
 static void test_unicast(
   test_context *ctx,
-  void (*multicast_action)(const Processor_mask *, SMP_Action_handler, void *),
-  bool before_multitasking
+  void (*multicast_action)(const Processor_mask *, SMP_Action_handler, void *)
 )
 {
   uint32_t step;
   uint32_t i;
   uint32_t n;
-  uint32_t self;
 
   T_plan(1);
   step = 0;
-  self = rtems_scheduler_get_processor();
   n = rtems_scheduler_get_processor_maximum();
 
   for (i = 0; i < n; ++i) {
@@ -134,18 +131,10 @@ static void test_unicast(
       ++step;
       id = _Atomic_Load_uint(&ctx->id[j], ATOMIC_ORDER_RELAXED);
 
-      if (before_multitasking) {
-        if (j == self) {
-          T_quiet_eq_uint(j + 1, id);
-        } else {
-          T_quiet_eq_uint(0, id);
-        }
+      if (j == i) {
+        T_quiet_eq_uint(j + 1, id);
       } else {
-        if (j == i) {
-          T_quiet_eq_uint(j + 1, id);
-        } else {
-          T_quiet_eq_uint(0, id);
-        }
+        T_quiet_eq_uint(0, id);
       }
     }
   }
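
The checks above compare ctx->id[j] against j + 1, so the test's action
handler, which sits outside the hunks shown, presumably records the
executing processor's index plus one, along these lines:

static void action(void *arg)
{
  test_context *ctx;
  uint32_t self;

  ctx = arg;
  self = rtems_scheduler_get_processor();

  /* mark this processor as visited; 0 means "handler never ran here" */
  _Atomic_Store_uint(&ctx->id[self], self + 1, ATOMIC_ORDER_RELAXED);
}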
@@ -155,18 +144,15 @@ static void test_unicast(
 
 static void test_broadcast(
   test_context *ctx,
-  void (*multicast_action)(const Processor_mask *, SMP_Action_handler, void *),
-  bool before_multitasking
+  void (*multicast_action)(const Processor_mask *, SMP_Action_handler, void *)
 )
 {
   uint32_t step;
   uint32_t i;
   uint32_t n;
-  uint32_t self;
 
   T_plan(1);
   step = 0;
-  self = rtems_scheduler_get_processor();
   n = rtems_scheduler_get_processor_maximum();
 
   for (i = 0; i < n; ++i) {
@@ -181,16 +167,7 @@ static void test_broadcast(
       ++step;
       id = _Atomic_Load_uint(&ctx->id[j], ATOMIC_ORDER_RELAXED);
-
-      if (before_multitasking) {
-        if (j == self) {
-          T_quiet_eq_uint(j + 1, id);
-        } else {
-          T_quiet_eq_uint(0, id);
-        }
-      } else {
-        T_quiet_eq_uint(j + 1, id);
-      }
+      T_quiet_eq_uint(j + 1, id);
     }
   }
@@ -204,27 +181,27 @@ static void test_before_multitasking(void)
   ctx = &test_instance;
 
   T_case_begin("UnicastBeforeMultitasking", NULL);
-  test_unicast(ctx, _SMP_Multicast_action, true);
+  test_unicast(ctx, _SMP_Multicast_action);
   T_case_end();
 
   T_case_begin("UnicastBeforeMultitaskingIRQDisabled", NULL);
-  test_unicast(ctx, multicast_action_irq_disabled, true);
+  test_unicast(ctx, multicast_action_irq_disabled);
   T_case_end();
 
   T_case_begin("UnicastBeforeMultitaskingDispatchDisabled", NULL);
-  test_unicast(ctx, multicast_action_dispatch_disabled, true);
+  test_unicast(ctx, multicast_action_dispatch_disabled);
   T_case_end();
 
   T_case_begin("BroadcastBeforeMultitasking", NULL);
-  test_broadcast(ctx, _SMP_Multicast_action, true);
+  test_broadcast(ctx, _SMP_Multicast_action);
   T_case_end();
 
   T_case_begin("BroadcastBeforeMultitaskingIRQDisabled", NULL);
-  test_broadcast(ctx, multicast_action_irq_disabled, true);
+  test_broadcast(ctx, multicast_action_irq_disabled);
   T_case_end();
 
   T_case_begin("BroadcastBeforeMultitaskingDispatchDisabled", NULL);
-  test_broadcast(ctx, multicast_action_dispatch_disabled, true);
+  test_broadcast(ctx, multicast_action_dispatch_disabled);
   T_case_end();
 }
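
test_before_multitasking() has to run before the system starts
multitasking.  One way such a test is typically wired up is through the
system initialization framework; a sketch (assumption: the actual test may
hook in differently):

#include <rtems/sysinit.h>

static void run_before_multitasking(void)
{
  test_before_multitasking();
}

/* invoked during system initialization, before multitasking starts */
RTEMS_SYSINIT_ITEM(
  run_before_multitasking,
  RTEMS_SYSINIT_DEVICE_DRIVERS,
  RTEMS_SYSINIT_ORDER_MIDDLE
);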
@@ -249,27 +226,27 @@ static void Init(rtems_task_argument arg)
   ctx = &test_instance;
 
   T_case_begin("UnicastDuringMultitasking", NULL);
-  test_unicast(ctx, _SMP_Multicast_action, false);
+  test_unicast(ctx, _SMP_Multicast_action);
   T_case_end();
 
   T_case_begin("UnicastDuringMultitaskingIRQDisabled", NULL);
-  test_unicast(ctx, multicast_action_irq_disabled, false);
+  test_unicast(ctx, multicast_action_irq_disabled);
   T_case_end();
 
   T_case_begin("UnicastDuringMultitaskingDispatchDisabled", NULL);
-  test_unicast(ctx, multicast_action_dispatch_disabled, false);
+  test_unicast(ctx, multicast_action_dispatch_disabled);
   T_case_end();
 
   T_case_begin("BroadcastDuringMultitasking", NULL);
-  test_broadcast(ctx, _SMP_Multicast_action, false);
+  test_broadcast(ctx, _SMP_Multicast_action);
   T_case_end();
 
   T_case_begin("BroadcastDuringMultitaskingIRQDisabled", NULL);
-  test_broadcast(ctx, multicast_action_irq_disabled, false);
+  test_broadcast(ctx, multicast_action_irq_disabled);
   T_case_end();
 
   T_case_begin("BroadcastDuringMultitaskingDispatchDisabled", NULL);
-  test_broadcast(ctx, multicast_action_dispatch_disabled, false);
+  test_broadcast(ctx, multicast_action_dispatch_disabled);
   T_case_end();
 
   ok = T_run_finalize();
--
2.16.4