[PATCH 2/2] score: Replace the single use of a sequence lock
Gedare Bloom
gedare at rtems.org
Tue Aug 10 14:47:59 UTC 2021
ok
On Tue, Aug 10, 2021 at 8:34 AM Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
>
> In SMP configurations, on 64-bit architectures use plain atomic
> operations to set/get the priority value of a scheduler node. On 32-bit
> architectures use an ISR lock. Using a sequence lock has no real
> benefit since it uses atomic read-modify-write operations for both the
> read and the write lock. Simply use a ticket lock instead so that only
> one SMP synchronization primitive is used for everything.
> ---
> cpukit/include/rtems/score/schedulernode.h | 14 ++--
> .../include/rtems/score/schedulernodeimpl.h | 64 +++++++++++++------
> .../score/src/schedulerdefaultnodedestroy.c | 4 +-
> 3 files changed, 57 insertions(+), 25 deletions(-)
>
> diff --git a/cpukit/include/rtems/score/schedulernode.h b/cpukit/include/rtems/score/schedulernode.h
> index 1dba200dca..e344479718 100644
> --- a/cpukit/include/rtems/score/schedulernode.h
> +++ b/cpukit/include/rtems/score/schedulernode.h
> @@ -28,7 +28,7 @@
> #include <rtems/score/basedefs.h>
> #include <rtems/score/chain.h>
> #include <rtems/score/priority.h>
> -#include <rtems/score/smplockseq.h>
> +#include <rtems/score/isrlock.h>
>
> /**
> * @addtogroup RTEMSScoreScheduler
> @@ -197,14 +197,20 @@ struct Scheduler_Node {
> * least-significant bit which indicates if the thread should be appended
> * (bit set) or prepended (bit cleared) to its priority group, see
> * SCHEDULER_PRIORITY_APPEND().
> + *
> + * @see _Scheduler_Node_get_priority() and _Scheduler_Node_set_priority().
> */
> +#if defined(RTEMS_SMP) && CPU_SIZEOF_POINTER == 8
> + Atomic_Ulong value;
> +#else
> Priority_Control value;
> +#endif
>
> -#if defined(RTEMS_SMP)
> +#if defined(RTEMS_SMP) && CPU_SIZEOF_POINTER != 8
> /**
> - * @brief Sequence lock to synchronize priority value updates.
> + * @brief The lock protects the priority value.
> */
> - SMP_sequence_lock_Control Lock;
> + ISR_lock_Control Lock;
> #endif
> } Priority;
> };
> diff --git a/cpukit/include/rtems/score/schedulernodeimpl.h b/cpukit/include/rtems/score/schedulernodeimpl.h
> index 3da29bb37e..3f90d4a6f5 100644
> --- a/cpukit/include/rtems/score/schedulernodeimpl.h
> +++ b/cpukit/include/rtems/score/schedulernodeimpl.h
> @@ -100,13 +100,36 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
> node->Wait.Priority.scheduler = scheduler;
> node->user = the_thread;
> node->idle = NULL;
> - _SMP_sequence_lock_Initialize( &node->Priority.Lock );
> +#if CPU_SIZEOF_POINTER != 8
> + _ISR_lock_Initialize( &node->Priority.Lock, "Scheduler Node Priority" );
> +#endif
> #else
> (void) scheduler;
> (void) the_thread;
> #endif
> }
>
> +/**
> + * @brief Destroys a node.
> + *
> + * @param scheduler is the scheduler of the node.
> + *
> + * @param[in, out] node is the node to destroy.
> + */
> +RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_destroy(
> + const struct _Scheduler_Control *scheduler,
> + Scheduler_Node *node
> +)
> +{
> + (void) scheduler;
> +
> +#if defined(RTEMS_SMP) && CPU_SIZEOF_POINTER != 8
> + _ISR_lock_Destroy( &node->Priority.Lock );
> +#else
> + (void) node;
> +#endif
> +}
> +
> /**
> * @brief Gets the scheduler of the node.
> *
> @@ -148,17 +171,18 @@ RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(
> {
> Priority_Control priority;
>
> -#if defined(RTEMS_SMP)
> - unsigned int seq;
> -
> - do {
> - seq = _SMP_sequence_lock_Read_begin( &node->Priority.Lock );
> -#endif
> -
> - priority = node->Priority.value;
> +#if defined(RTEMS_SMP) && CPU_SIZEOF_POINTER == 8
> + priority = _Atomic_Fetch_add_ulong(
> + &node->Priority.value,
> + 0,
> + ATOMIC_ORDER_RELAXED
> + );
> +#else
> + ISR_lock_Context lock_context;
>
> -#if defined(RTEMS_SMP)
> - } while ( _SMP_sequence_lock_Read_retry( &node->Priority.Lock, seq ) );
> + _ISR_lock_Acquire( &node->Priority.Lock, &lock_context );
> + priority = node->Priority.value;
> + _ISR_lock_Release( &node->Priority.Lock, &lock_context );
> #endif
>
> return priority;
> @@ -180,16 +204,18 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(
> Priority_Flags flags
> )
> {
> -#if defined(RTEMS_SMP)
> - unsigned int seq;
> -
> - seq = _SMP_sequence_lock_Write_begin( &node->Priority.Lock );
> -#endif
> +#if defined(RTEMS_SMP) && CPU_SIZEOF_POINTER == 8
> + _Atomic_Store_ulong(
> + &node->Priority.value,
> + new_priority | (Priority_Control) flags,
> + ATOMIC_ORDER_RELAXED
> + );
> +#else
> + ISR_lock_Context lock_context;
>
> + _ISR_lock_Acquire( &node->Priority.Lock, &lock_context );
> node->Priority.value = new_priority | ( (Priority_Control) flags );
> -
> -#if defined(RTEMS_SMP)
> - _SMP_sequence_lock_Write_end( &node->Priority.Lock, seq );
> + _ISR_lock_Release( &node->Priority.Lock, &lock_context );
> #endif
> }
>
> diff --git a/cpukit/score/src/schedulerdefaultnodedestroy.c b/cpukit/score/src/schedulerdefaultnodedestroy.c
> index 796896d854..33cdfd4c69 100644
> --- a/cpukit/score/src/schedulerdefaultnodedestroy.c
> +++ b/cpukit/score/src/schedulerdefaultnodedestroy.c
> @@ -21,12 +21,12 @@
> #endif
>
> #include <rtems/score/scheduler.h>
> +#include <rtems/score/schedulernodeimpl.h>
>
> void _Scheduler_default_Node_destroy(
> const Scheduler_Control *scheduler,
> Scheduler_Node *node
> )
> {
> - (void) scheduler;
> - (void) node;
> + _Scheduler_Node_do_destroy( scheduler, node );
> }
> --
> 2.26.2
>
> _______________________________________________
> devel mailing list
> devel at rtems.org
> http://lists.rtems.org/mailman/listinfo/devel
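
[Editor's sketch, not part of the quoted patch.] The commit message above states that the replaced sequence lock gains nothing here because both its read and its write paths need atomic read-modify-write operations, while on 64-bit targets the priority value can be accessed with a single relaxed atomic operation. The following standalone C11 sketch illustrates that contrast under those stated assumptions; it is not RTEMS code, all names (seq_protected_priority, atomic_priority, and so on) are invented for illustration, and data-race and memory-ordering fine points of real seqlock implementations are glossed over.

/*
 * Standalone C11 sketch, not RTEMS code: every name below is invented
 * for illustration and fine memory-ordering details are glossed over.
 * It contrasts the sequence-lock protocol the patch removes with the
 * plain relaxed atomic access the patch uses on 64-bit targets.
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
  atomic_uint sequence;   /* even: no writer active, odd: write in progress */
  uint64_t    value;      /* the protected priority value */
} seq_protected_priority;

static uint64_t seq_read_priority( seq_protected_priority *p )
{
  unsigned int seq;
  uint64_t     value;

  do {
    /*
     * Read-begin: obtaining the sequence number with a fetch-add of zero
     * is an atomic read-modify-write, which is the read-side cost the
     * commit message refers to.
     */
    seq = atomic_fetch_add_explicit( &p->sequence, 0, memory_order_acquire );
    value = p->value;
    atomic_thread_fence( memory_order_acquire );
    /* Read-retry: repeat if a writer was active or intervened. */
  } while (
    ( seq & 1 ) != 0
      || atomic_load_explicit( &p->sequence, memory_order_relaxed ) != seq
  );

  return value;
}

static void seq_write_priority( seq_protected_priority *p, uint64_t new_value )
{
  /* Both increments are atomic read-modify-write operations. */
  atomic_fetch_add_explicit( &p->sequence, 1, memory_order_acquire );
  p->value = new_value;
  atomic_fetch_add_explicit( &p->sequence, 1, memory_order_release );
}

/*
 * Where 64-bit accesses are lock-free (the CPU_SIZEOF_POINTER == 8 case
 * in the patch), one relaxed load or store suffices and no retry loop or
 * lock is needed.
 */
typedef struct {
  _Atomic uint64_t value;
} atomic_priority;

static uint64_t atomic_read_priority( atomic_priority *p )
{
  return atomic_load_explicit( &p->value, memory_order_relaxed );
}

static void atomic_write_priority( atomic_priority *p, uint64_t new_value )
{
  atomic_store_explicit( &p->value, new_value, memory_order_relaxed );
}

On 32-bit targets the 64-bit load above is generally not lock-free, which is why the patch falls back to an ISR lock around the plain access of the value there; per the commit message, that lock is backed by a ticket lock in SMP configurations, so only one SMP synchronization primitive remains in use.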