[PATCH 5/6] score: Rework SMP multicast action
Sebastian Huber
sebastian.huber@embedded-brains.de
Thu Apr 11 13:45:39 UTC 2019
---
cpukit/include/rtems/score/percpu.h | 32 +++++
cpukit/include/rtems/score/smpimpl.h | 13 +-
cpukit/score/src/smpmulticastaction.c | 203 +++++++++++++++++++-----------
testsuites/smptests/smpcache01/init.c | 5 +-
testsuites/smptests/smpmulticast01/init.c | 3 +
5 files changed, 170 insertions(+), 86 deletions(-)
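Note: the rework replaces the single global chain of multicast actions with
a FIFO list of jobs per processor.  The requesting processor enqueues one job
per target processor, sends SMP_MESSAGE_PERFORM_JOBS, and busy-waits on each
job's done flag while still processing jobs directed at itself.  The sketch
below shows only the list discipline; the per-CPU locking and atomics of the
patch are omitted, and the Job type and function names are illustrative:

typedef struct Job {
  struct Job *next;
} Job;

typedef struct {
  Job  *head;
  Job **tail; /* only valid if head is not NULL */
} Job_list;

static void job_list_enqueue( Job_list *list, Job *job )
{
  job->next = NULL;

  if ( list->head == NULL ) {
    list->head = job;
  } else {
    *list->tail = job;
  }

  list->tail = &job->next;
}

static Job *job_list_dequeue( Job_list *list )
{
  Job *job;

  job = list->head;

  if ( job != NULL ) {
    list->head = job->next;
  }

  return job;
}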
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index d7232c632f..a0eb46c045 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -74,6 +74,8 @@ struct _Thread_Control;
struct Scheduler_Context;
+struct Per_CPU_Job;
+
/**
* @defgroup PerCPU RTEMS Per CPU Information
*
@@ -494,6 +496,29 @@ typedef struct Per_CPU_Control {
*/
Atomic_Uintptr before_multitasking_action;
+ /**
+ * @brief FIFO list of jobs to be performed by this processor.
+ *
+ * The members are protected by the Per_CPU_Control::Lock lock.
+ *
+ * @see _SMP_Multicast_action().
+ */
+ struct {
+ /**
+ * @brief Head of the FIFO list of jobs to be performed by this
+ * processor.
+ */
+ struct Per_CPU_Job *head;
+
+ /**
+ * @brief Tail of the FIFO list of jobs to be performed by this
+ * processor.
+ *
+ * This member is only valid if the head is not @c NULL.
+ */
+ struct Per_CPU_Job **tail;
+ } Jobs;
+
/**
* @brief Indicates if the processor has been successfully started via
* _CPU_SMP_Start_processor().
@@ -710,6 +735,13 @@ bool _Per_CPU_State_wait_for_non_initial_state(
uint32_t timeout_in_ns
);
+/**
+ * @brief Performs the jobs of the specified processor.
+ *
+ * @param[in, out] cpu The jobs of this processor will be performed.
+ */
+void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );
+
#endif /* defined( RTEMS_SMP ) */
/*
diff --git a/cpukit/include/rtems/score/smpimpl.h b/cpukit/include/rtems/score/smpimpl.h
index 6b59b9497d..a501339176 100644
--- a/cpukit/include/rtems/score/smpimpl.h
+++ b/cpukit/include/rtems/score/smpimpl.h
@@ -51,11 +51,11 @@ extern "C" {
#define SMP_MESSAGE_TEST 0x2UL
/**
- * @brief SMP message to request a multicast action.
+ * @brief SMP message to perform per-CPU jobs.
*
* @see _SMP_Send_message().
*/
-#define SMP_MESSAGE_MULTICAST_ACTION 0x4UL
+#define SMP_MESSAGE_PERFORM_JOBS 0x4UL
/**
* @brief SMP message to request a clock tick.
@@ -157,11 +157,6 @@ static inline void _SMP_Set_test_message_handler(
_SMP_Test_message_handler = handler;
}
-/**
- * @brief Processes all pending multicast actions.
- */
-void _SMP_Multicast_actions_process( void );
-
/**
* @brief Interrupt handler for inter-processor interrupts.
*
@@ -195,8 +190,8 @@ static inline long unsigned _SMP_Inter_processor_interrupt_handler(
( *_SMP_Test_message_handler )( cpu_self );
}
- if ( ( message & SMP_MESSAGE_MULTICAST_ACTION ) != 0 ) {
- _SMP_Multicast_actions_process();
+ if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) {
+ _Per_CPU_Perform_jobs( cpu_self );
}
}
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index 0b9641c3db..2288dbe939 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -1,91 +1,151 @@
/*
- * Copyright (c) 2014 Aeroflex Gaisler AB. All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * Copyright (C) 2019 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
- #include "config.h"
+#include "config.h"
#endif
#include <rtems/score/smpimpl.h>
-#include <rtems/score/isrlock.h>
-#include <rtems/score/chainimpl.h>
+#include <rtems/score/threaddispatch.h>
#include <rtems/score/sysstate.h>
-typedef struct {
- Chain_Node Node;
+typedef struct Per_CPU_Job {
+ struct Per_CPU_Job *next;
SMP_Action_handler handler;
void *arg;
- Processor_mask targets;
- Atomic_Ulong done;
-} SMP_Multicast_action;
+ Atomic_Uint done;
+} Per_CPU_Job;
+
+void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
+{
+ ISR_lock_Context lock_context;
+ Per_CPU_Job *job;
+
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
-typedef struct {
- ISR_lock_Control Lock;
- Chain_Control Actions;
-} SMP_Multicast_context;
+ while ( ( job = cpu->Jobs.head ) != NULL ) {
+ cpu->Jobs.head = job->next;
+ _Per_CPU_Release( cpu, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
-static SMP_Multicast_context _SMP_Multicast = {
- .Lock = ISR_LOCK_INITIALIZER( "SMP Multicast Action" ),
- .Actions = CHAIN_INITIALIZER_EMPTY( _SMP_Multicast.Actions )
-};
+ ( *job->handler )( job->arg );
+ _Atomic_Store_uint( &job->done, 1, ATOMIC_ORDER_RELEASE );
+
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
+ }
+
+ _Per_CPU_Release( cpu, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+}
-void _SMP_Multicast_actions_process( void )
+static void _Per_CPU_Try_perform_jobs( Per_CPU_Control *cpu_self )
{
- ISR_lock_Context lock_context;
- uint32_t cpu_self_index;
- SMP_Multicast_action *node;
- SMP_Multicast_action *next;
+ unsigned long message;
- _ISR_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
- cpu_self_index = _SMP_Get_current_processor();
- node = (SMP_Multicast_action *) _Chain_First( &_SMP_Multicast.Actions );
+ message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
- while ( !_Chain_Is_tail( &_SMP_Multicast.Actions, &node->Node ) ) {
- next = (SMP_Multicast_action *) _Chain_Next( &node->Node );
+ if ( ( message & SMP_MESSAGE_PERFORM_JOBS ) != 0 ) {
+ bool success;
- if ( _Processor_mask_Is_set( &node->targets, cpu_self_index ) ) {
- _Processor_mask_Clear( &node->targets, cpu_self_index );
+ success = _Atomic_Compare_exchange_ulong(
+ &cpu_self->message, &message,
+ message & ~SMP_MESSAGE_PERFORM_JOBS, ATOMIC_ORDER_RELAXED,
+ ATOMIC_ORDER_RELAXED
+ );
- ( *node->handler )( node->arg );
+ if ( success ) {
+ _Per_CPU_Perform_jobs( cpu_self );
+ }
+ }
+}
- if ( _Processor_mask_Is_zero( &node->targets ) ) {
- _Chain_Extract_unprotected( &node->Node );
- _Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
+static void _SMP_Issue_action_jobs(
+ const Processor_mask *targets,
+ SMP_Action_handler handler,
+ void *arg,
+ Per_CPU_Job *jobs,
+ uint32_t cpu_max
+)
+{
+ uint32_t cpu_index;
+
+ for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
+ if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
+ ISR_lock_Context lock_context;
+ Per_CPU_Job *job;
+ Per_CPU_Control *cpu;
+
+ job = &jobs[ cpu_index ];
+ job->next = NULL;
+ job->handler = handler;
+ job->arg = arg;
+ _Atomic_Store_uint( &job->done, 0, ATOMIC_ORDER_RELAXED );
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ _ISR_lock_ISR_disable( &lock_context );
+ _Per_CPU_Acquire( cpu, &lock_context );
+
+ if ( cpu->Jobs.head == NULL ) {
+ cpu->Jobs.head = job;
+ } else {
+ *cpu->Jobs.tail = job;
}
- }
- node = next;
- }
+ cpu->Jobs.tail = &job->next;
- _ISR_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
+ _Per_CPU_Release( cpu, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+ _SMP_Send_message( cpu_index, SMP_MESSAGE_PERFORM_JOBS );
+ }
+ }
}
-static void
-_SMP_Multicasts_try_process( void )
+static void _SMP_Wait_for_action_jobs(
+ const Processor_mask *targets,
+ const Per_CPU_Job *jobs,
+ uint32_t cpu_max,
+ Per_CPU_Control *cpu_self
+)
{
- unsigned long message;
- Per_CPU_Control *cpu_self;
- ISR_Level isr_level;
+ uint32_t cpu_index;
- _ISR_Local_disable( isr_level );
+ for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
+ if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
+ const Per_CPU_Job *job;
- cpu_self = _Per_CPU_Get();
+ job = &jobs[ cpu_index ];
- message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
-
- if ( message & SMP_MESSAGE_MULTICAST_ACTION ) {
- if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message,
- message & ~SMP_MESSAGE_MULTICAST_ACTION, ATOMIC_ORDER_RELAXED,
- ATOMIC_ORDER_RELAXED ) ) {
- _SMP_Multicast_actions_process();
+ while ( _Atomic_Load_uint( &job->done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
+ _Per_CPU_Try_perform_jobs( cpu_self );
+ }
}
}
-
- _ISR_Local_enable( isr_level );
}
void _SMP_Multicast_action(
@@ -94,33 +154,24 @@ void _SMP_Multicast_action(
void *arg
)
{
- SMP_Multicast_action node;
- ISR_lock_Context lock_context;
- uint32_t i;
+ Per_CPU_Job jobs[ CPU_MAXIMUM_PROCESSORS ];
+ uint32_t cpu_max;
+ Per_CPU_Control *cpu_self;
+
+ cpu_max = _SMP_Get_processor_maximum();
+ _Assert( cpu_max <= CPU_MAXIMUM_PROCESSORS );
if ( ! _System_state_Is_up( _System_state_Get() ) ) {
( *handler )( arg );
return;
}
- if( targets == NULL ) {
+ if ( targets == NULL ) {
targets = _SMP_Get_online_processors();
}
- _Chain_Initialize_node( &node.Node );
- node.handler = handler;
- node.arg = arg;
- _Processor_mask_Assign( &node.targets, targets );
- _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );
-
- _ISR_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
- _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
- _ISR_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );
-
- _SMP_Send_message_multicast( targets, SMP_MESSAGE_MULTICAST_ACTION );
- _SMP_Multicasts_try_process();
-
- while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
- /* Wait */
- };
+ cpu_self = _Thread_Dispatch_disable();
+ _SMP_Issue_action_jobs( targets, handler, arg, jobs, cpu_max );
+ _SMP_Wait_for_action_jobs( targets, jobs, cpu_max, cpu_self );
+ _Thread_Dispatch_enable( cpu_self );
}
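A minimal caller of the reworked _SMP_Multicast_action() could look like the
sketch below; the handler and its argument are made up for illustration, but
the signature and the NULL-targets broadcast semantics match the code above:

#include <rtems/score/smpimpl.h>

static void cache_sync_action( void *arg )
{
  /* Runs on every targeted processor, either from the inter-processor
   * interrupt or from the busy-wait loop of the requesting processor. */
  (void) arg;
}

static void broadcast_cache_sync( void )
{
  /* A NULL target mask addresses all online processors */
  _SMP_Multicast_action( NULL, cache_sync_action, NULL );
}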
diff --git a/testsuites/smptests/smpcache01/init.c b/testsuites/smptests/smpcache01/init.c
index 878a015bf1..e9cee1eec5 100644
--- a/testsuites/smptests/smpcache01/init.c
+++ b/testsuites/smptests/smpcache01/init.c
@@ -126,12 +126,15 @@ static void call_tests_isr_disabled( SMP_barrier_State *bs )
broadcast_test_init();
for (i = 0; i < RTEMS_ARRAY_SIZE( test_cases ); ++i) {
- ISR_Level isr_level;
+ Per_CPU_Control *cpu_self;
+ ISR_Level isr_level;
+ cpu_self = _Thread_Dispatch_disable();
_ISR_Local_disable( isr_level );
barrier( bs );
( *test_cases[ i ] )();
_ISR_Local_enable( isr_level );
+ _Thread_Dispatch_enable( cpu_self );
barrier( bs );
}
diff --git a/testsuites/smptests/smpmulticast01/init.c b/testsuites/smptests/smpmulticast01/init.c
index 2319582ab6..e599a78bde 100644
--- a/testsuites/smptests/smpmulticast01/init.c
+++ b/testsuites/smptests/smpmulticast01/init.c
@@ -59,11 +59,14 @@ static void multicast_action_irq_disabled(
void *arg
)
{
+ Per_CPU_Control *cpu_self;
rtems_interrupt_level level;
+ cpu_self = _Thread_Dispatch_disable();
rtems_interrupt_local_disable(level);
_SMP_Multicast_action(targets, handler, arg);
rtems_interrupt_local_enable(level);
+ _Thread_Dispatch_enable(cpu_self);
}
static void multicast_action_dispatch_disabled(
--
2.16.4