[rtems commit] score: Remove processor event broadcast/receive

Sebastian Huber sebh at rtems.org
Wed Jul 28 19:04:50 UTC 2021


Module:    rtems
Branch:    master
Commit:    4adaed7328e39eac4fe1879cba61919e74965cc8
Changeset: http://git.rtems.org/rtems/commit/?id=4adaed7328e39eac4fe1879cba61919e74965cc8

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Jul 27 11:08:54 2021 +0200

score: Remove processor event broadcast/receive

Remove _CPU_SMP_Processor_event_broadcast() and
_CPU_SMP_Processor_event_receive().  These functions are hard to use correctly
since they are subject to the lost wake-up problem.
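
For context, a minimal sketch of the lost wake-up race under the generic
contract of the removed primitives (the receive may wait forever if no event
arrives).  The waiter()/notifier() functions and the flag variable are
hypothetical illustration code, not part of RTEMS:

    /* Shared state, written by the notifier and polled by the waiter. */
    static volatile int flag;

    static void waiter( void )
    {
      while ( flag == 0 ) {
        /*
         * If the notifier runs completely at this point (it sets flag and
         * broadcasts the event), the broadcast happens while nobody is
         * waiting.  The receive below may then block indefinitely even
         * though flag has already been set.
         */
        _CPU_SMP_Processor_event_receive();
      }
    }

    static void notifier( void )
    {
      flag = 1;
      _CPU_SMP_Processor_event_broadcast();
    }

The replacement introduced by this commit keeps the per-CPU state in an
Atomic_Uint: waiters poll it through _Per_CPU_Get_state() (load-acquire) and
writers publish through _Per_CPU_Set_state() (store-release), so there is no
wake-up event that could be lost.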

---

 cpukit/include/rtems/score/percpu.h                | 35 +++++++++++++++++++++-
 cpukit/score/cpu/aarch64/include/rtems/score/cpu.h | 12 --------
 cpukit/score/cpu/arm/include/rtems/score/cpu.h     | 12 --------
 cpukit/score/cpu/i386/include/rtems/score/cpu.h    | 10 -------
 cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h  | 29 ------------------
 cpukit/score/cpu/powerpc/include/rtems/score/cpu.h | 10 -------
 cpukit/score/cpu/riscv/include/rtems/score/cpu.h   | 10 -------
 cpukit/score/cpu/sparc/include/rtems/score/cpu.h   | 10 -------
 cpukit/score/cpu/x86_64/include/rtems/score/cpu.h  | 10 -------
 cpukit/score/src/percpu.c                          | 20 ++++++-------
 cpukit/score/src/percpustatewait.c                 | 14 ++++-----
 cpukit/score/src/smpmulticastaction.c              |  4 +--
 testsuites/smptests/smpfatal01/init.c              |  6 ++--
 testsuites/smptests/smpfatal02/init.c              |  4 +--
 testsuites/smptests/smpmulticast01/init.c          |  2 +-
 testsuites/smptests/smppsxsignal01/init.c          |  5 ++--
 testsuites/smptests/smpsignal01/init.c             |  5 ++--
 17 files changed, 61 insertions(+), 137 deletions(-)

diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 3242383..e79596c 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -553,7 +553,7 @@ typedef struct Per_CPU_Control {
      *
      * @see _Per_CPU_State_change().
      */
-    Per_CPU_State state;
+    Atomic_Uint state;
 
     /**
      * @brief FIFO list of jobs to be performed by this processor.
@@ -775,6 +775,39 @@ RTEMS_INLINE_ROUTINE void _Per_CPU_Release_all(
 
 #if defined( RTEMS_SMP )
 
+/**
+ * @brief Gets the current processor state.
+ *
+ * @param cpu is the processor control.
+ *
+ * @return Returns the current state of the processor.
+ */
+static inline Per_CPU_State _Per_CPU_Get_state( const Per_CPU_Control *cpu )
+{
+  return (Per_CPU_State)
+    _Atomic_Load_uint( &cpu->state, ATOMIC_ORDER_ACQUIRE );
+}
+
+/**
+ * @brief Sets the processor state of the current processor.
+ *
+ * @param cpu_self is the processor control of the processor executing this
+ *   function.
+ *
+ * @param state is the new processor state.
+ */
+static inline void _Per_CPU_Set_state(
+  Per_CPU_Control *cpu_self,
+  Per_CPU_State    state
+)
+{
+  _Atomic_Store_uint(
+    &cpu_self->state,
+    (unsigned int) state,
+    ATOMIC_ORDER_RELEASE
+  );
+}
+
 void _Per_CPU_State_change(
   Per_CPU_Control *cpu,
   Per_CPU_State new_state
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index 316079a..595f6c7 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -360,18 +360,6 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
   {
     __asm__ volatile ( "wfe" : : : "memory" );
   }
-
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    _AARCH64_Data_synchronization_barrier();
-    _AARCH64_Send_event();
-  }
-
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    _AARCH64_Wait_for_event();
-    _AARCH64_Data_memory_barrier();
-  }
 #endif
 
 
diff --git a/cpukit/score/cpu/arm/include/rtems/score/cpu.h b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
index dcda4d5..b554aa2 100644
--- a/cpukit/score/cpu/arm/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/arm/include/rtems/score/cpu.h
@@ -510,18 +510,6 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
   {
     __asm__ volatile ( "wfe" : : : "memory" );
   }
-
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    _ARM_Data_synchronization_barrier();
-    _ARM_Send_event();
-  }
-
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    _ARM_Wait_for_event();
-    _ARM_Data_memory_barrier();
-  }
 #endif
 
 
diff --git a/cpukit/score/cpu/i386/include/rtems/score/cpu.h b/cpukit/score/cpu/i386/include/rtems/score/cpu.h
index 0cb4590..813ed52 100644
--- a/cpukit/score/cpu/i386/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/i386/include/rtems/score/cpu.h
@@ -470,16 +470,6 @@ void _CPU_Context_Initialize(
   uint32_t _CPU_SMP_Get_current_processor( void );
 
   void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
-
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
 #endif
 
 #define _CPU_Context_Initialize_fp( _fp_area ) \
diff --git a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
index 120b51b..c4cb512 100644
--- a/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/no_cpu/include/rtems/score/cpu.h
@@ -1348,35 +1348,6 @@ static inline CPU_Counter_ticks _CPU_Counter_difference(
   void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
 
   /**
-   * @brief Broadcasts a processor event.
-   *
-   * Some architectures provide a low-level synchronization primitive for
-   * processors in a multi-processor environment.  Processors waiting for this
-   * event may go into a low-power state and stop generating system bus
-   * transactions.  This function must ensure that preceding store operations
-   * can be observed by other processors.
-   *
-   * @see _CPU_SMP_Processor_event_receive().
-   */
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
-  /**
-   * @brief Receives a processor event.
-   *
-   * This function will wait for the processor event and may wait forever if no
-   * such event arrives.
-   *
-   * @see _CPU_SMP_Processor_event_broadcast().
-   */
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
-  /**
    * @brief Gets the is executing indicator of the thread context.
    *
    * @param[in] context The context.
diff --git a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
index f22e1cd..ee02bd4 100644
--- a/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/powerpc/include/rtems/score/cpu.h
@@ -973,16 +973,6 @@ void _CPU_Context_restore_fp(
   }
 
   void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
-
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
 #endif
 
 typedef struct {
diff --git a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
index d9056d0..16dde82 100644
--- a/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/riscv/include/rtems/score/cpu.h
@@ -474,16 +474,6 @@ static inline uint32_t _CPU_SMP_Get_current_processor( void )
 
 void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
 
-static inline void _CPU_SMP_Processor_event_broadcast( void )
-{
-  __asm__ volatile ( "" : : : "memory" );
-}
-
-static inline void _CPU_SMP_Processor_event_receive( void )
-{
-  __asm__ volatile ( "" : : : "memory" );
-}
-
 static inline bool _CPU_Context_Get_is_executing(
   const Context_Control *context
 )
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
index 0abc929..910ec83 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
@@ -1004,16 +1004,6 @@ RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
   #endif
 
   void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
-
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
 #endif
 
 #if defined(SPARC_USE_LAZY_FP_SWITCH)
diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
index 1e97250..056c022 100644
--- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu.h
@@ -338,16 +338,6 @@ static inline CPU_Counter_ticks _CPU_Counter_difference(
 
   void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );
 
-  static inline void _CPU_SMP_Processor_event_broadcast( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
-  static inline void _CPU_SMP_Processor_event_receive( void )
-  {
-    __asm__ volatile ( "" : : : "memory" );
-  }
-
   static inline bool _CPU_Context_Get_is_executing(
     const Context_Control *context
   )
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index e254f30..7fbc1c8 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -55,7 +55,9 @@ static void _Per_CPU_State_busy_wait(
   Per_CPU_State new_state
 )
 {
-  Per_CPU_State state = cpu->state;
+  Per_CPU_State state;
+
+  state = _Per_CPU_Get_state( cpu );
 
   switch ( new_state ) {
     case PER_CPU_STATE_REQUEST_START_MULTITASKING:
@@ -64,8 +66,7 @@ static void _Per_CPU_State_busy_wait(
           && state != PER_CPU_STATE_SHUTDOWN
       ) {
         _Per_CPU_Perform_jobs( cpu );
-        _CPU_SMP_Processor_event_receive();
-        state = cpu->state;
+        state = _Per_CPU_Get_state( cpu );
       }
       break;
     case PER_CPU_STATE_UP:
@@ -74,8 +75,7 @@ static void _Per_CPU_State_busy_wait(
           && state != PER_CPU_STATE_SHUTDOWN
       ) {
         _Per_CPU_Perform_jobs( cpu );
-        _CPU_SMP_Processor_event_receive();
-        state = cpu->state;
+        state = _Per_CPU_Get_state( cpu );
       }
       break;
     default:
@@ -143,8 +143,8 @@ void _Per_CPU_State_change(
 
   _Per_CPU_State_acquire( &lock_context );
 
-  next_state = _Per_CPU_State_get_next( cpu->state, new_state );
-  cpu->state = next_state;
+  next_state = _Per_CPU_State_get_next( _Per_CPU_Get_state( cpu ), new_state );
+  _Per_CPU_Set_state( cpu, next_state );
 
   if ( next_state == PER_CPU_STATE_SHUTDOWN ) {
     uint32_t cpu_max = rtems_configuration_get_maximum_processors();
@@ -154,7 +154,7 @@ void _Per_CPU_State_change(
       Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index( cpu_index );
 
       if ( cpu_other != cpu ) {
-        switch ( cpu_other->state ) {
+        switch ( _Per_CPU_Get_state( cpu_other ) ) {
           case PER_CPU_STATE_UP:
             _SMP_Send_message( cpu_index, SMP_MESSAGE_SHUTDOWN );
             break;
@@ -163,13 +163,11 @@ void _Per_CPU_State_change(
             break;
         }
 
-        cpu_other->state = PER_CPU_STATE_SHUTDOWN;
+        _Per_CPU_Set_state( cpu_other, PER_CPU_STATE_SHUTDOWN );
       }
     }
   }
 
-  _CPU_SMP_Processor_event_broadcast();
-
   _Per_CPU_State_release( &lock_context );
 
   if (
diff --git a/cpukit/score/src/percpustatewait.c b/cpukit/score/src/percpustatewait.c
index 9d9f590..7186626 100644
--- a/cpukit/score/src/percpustatewait.c
+++ b/cpukit/score/src/percpustatewait.c
@@ -33,8 +33,11 @@ bool _Per_CPU_State_wait_for_non_initial_state(
   uint32_t timeout_in_ns
 )
 {
-  const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
-  Per_CPU_State state = cpu->state;
+  const Per_CPU_Control *cpu;
+  Per_CPU_State          state;
+
+  cpu = _Per_CPU_Get_by_index( cpu_index );
+  state = _Per_CPU_Get_state( cpu );
 
   if ( timeout_in_ns > 0 ) {
     rtems_counter_ticks ticks =
@@ -45,19 +48,16 @@ bool _Per_CPU_State_wait_for_non_initial_state(
     while ( ticks > delta && state == PER_CPU_STATE_INITIAL ) {
       rtems_counter_ticks b;
 
-      _CPU_SMP_Processor_event_receive();
-      state = cpu->state;
+      state = _Per_CPU_Get_state( cpu );
 
       ticks -= delta;
-
       b = rtems_counter_read();
       delta = rtems_counter_difference( b, a );
       a = b;
     }
   } else {
     while ( state == PER_CPU_STATE_INITIAL ) {
-      _CPU_SMP_Processor_event_receive();
-      state = cpu->state;
+      state = _Per_CPU_Get_state( cpu );
     }
   }
 
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index 55c495a..5d65ef1 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -122,12 +122,10 @@ void _Per_CPU_Wait_for_job(
     _Atomic_Load_ulong( &job->done, ATOMIC_ORDER_ACQUIRE )
       != PER_CPU_JOB_DONE
   ) {
-    switch ( cpu->state ) {
+    switch ( _Per_CPU_Get_state( cpu ) ) {
       case PER_CPU_STATE_INITIAL:
       case PER_CPU_STATE_READY_TO_START_MULTITASKING:
       case PER_CPU_STATE_REQUEST_START_MULTITASKING:
-        _CPU_SMP_Processor_event_broadcast();
-        /* Fall through */
       case PER_CPU_STATE_UP:
         /*
          * Calling this function with the current processor is intentional.
diff --git a/testsuites/smptests/smpfatal01/init.c b/testsuites/smptests/smpfatal01/init.c
index 3f276a4..0fd5f3f 100644
--- a/testsuites/smptests/smpfatal01/init.c
+++ b/testsuites/smptests/smpfatal01/init.c
@@ -58,7 +58,7 @@ static void fatal_extension(
 
       for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
         const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
-        Per_CPU_State state = per_cpu->state;
+        Per_CPU_State state = _Per_CPU_Get_state(per_cpu);
 
         assert(state == PER_CPU_STATE_SHUTDOWN);
       }
@@ -92,7 +92,7 @@ static rtems_status_code test_driver_init(
 
   for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
     const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
-    Per_CPU_State state = per_cpu->state;
+    Per_CPU_State state = _Per_CPU_Get_state(per_cpu);
 
     if (cpu == self) {
       assert(state == PER_CPU_STATE_INITIAL);
@@ -110,7 +110,7 @@ static rtems_status_code test_driver_init(
     uint32_t other = (self + 1) % cpu_count;
     Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( other );
 
-    per_cpu->state = PER_CPU_STATE_SHUTDOWN;
+    _Per_CPU_Set_state(per_cpu, PER_CPU_STATE_SHUTDOWN);
   } else {
     TEST_END();
     exit(0);
diff --git a/testsuites/smptests/smpfatal02/init.c b/testsuites/smptests/smpfatal02/init.c
index 6aa48d6..25321ca 100644
--- a/testsuites/smptests/smpfatal02/init.c
+++ b/testsuites/smptests/smpfatal02/init.c
@@ -60,7 +60,7 @@ static void fatal_extension(
 
     for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
       const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
-      Per_CPU_State state = per_cpu->state;
+      Per_CPU_State state = _Per_CPU_Get_state(per_cpu);
 
       assert(state == PER_CPU_STATE_SHUTDOWN);
     }
@@ -96,7 +96,7 @@ static rtems_status_code test_driver_init(
 
   for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
     const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
-    Per_CPU_State state = per_cpu->state;
+    Per_CPU_State state = _Per_CPU_Get_state(per_cpu);
 
     if (cpu == self) {
       assert(state == PER_CPU_STATE_INITIAL);
diff --git a/testsuites/smptests/smpmulticast01/init.c b/testsuites/smptests/smpmulticast01/init.c
index 96462e3..fbe6fb6 100644
--- a/testsuites/smptests/smpmulticast01/init.c
+++ b/testsuites/smptests/smpmulticast01/init.c
@@ -390,7 +390,7 @@ static void set_wrong_cpu_state(void *arg)
 
   cpu_self = arg;
   T_step_eq_ptr(0, cpu_self, _Per_CPU_Get());
-  cpu_self->state = 123;
+  _Per_CPU_Set_state(cpu_self, 123);
 
   while (true) {
     /* Do nothing */
diff --git a/testsuites/smptests/smppsxsignal01/init.c b/testsuites/smptests/smppsxsignal01/init.c
index 1882715..d23a664 100644
--- a/testsuites/smptests/smppsxsignal01/init.c
+++ b/testsuites/smptests/smppsxsignal01/init.c
@@ -32,7 +32,7 @@ typedef enum {
 } test_state;
 
 typedef struct {
-  test_state state;
+  volatile test_state state;
   pthread_t consumer;
   pthread_t producer;
   uint32_t consumer_processor;
@@ -42,13 +42,12 @@ typedef struct {
 static void change_state(test_context *ctx, test_state new_state)
 {
   ctx->state = new_state;
-  _CPU_SMP_Processor_event_broadcast();
 }
 
 static void wait_for_state(const test_context *ctx, test_state desired_state)
 {
   while ( ctx->state != desired_state ) {
-    _CPU_SMP_Processor_event_receive();
+    /* Wait */
   }
 }
 
diff --git a/testsuites/smptests/smpsignal01/init.c b/testsuites/smptests/smpsignal01/init.c
index 025e84c..471c058 100644
--- a/testsuites/smptests/smpsignal01/init.c
+++ b/testsuites/smptests/smpsignal01/init.c
@@ -33,7 +33,7 @@ typedef enum {
 } test_state;
 
 typedef struct {
-  test_state state;
+  volatile test_state state;
   rtems_id consumer;
   rtems_id producer;
   uint32_t consumer_processor;
@@ -45,13 +45,12 @@ typedef struct {
 static void change_state(test_context *ctx, test_state new_state)
 {
   ctx->state = new_state;
-  _CPU_SMP_Processor_event_broadcast();
 }
 
 static void wait_for_state(const test_context *ctx, test_state desired_state)
 {
   while ( ctx->state != desired_state ) {
-    _CPU_SMP_Processor_event_receive();
+    /* Wait */
   }
 }
 
