[PATCH 09/18] score: Add _Per_CPU_Add_job()

Sebastian Huber sebastian.huber at embedded-brains.de
Mon May 20 07:33:35 UTC 2019
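
Add _Per_CPU_Add_job() to append a job to the tail of the processing list
of a processor and use it in _SMP_Issue_action_jobs().  Add a test case
which checks that jobs added to a processor are performed in FIFO order.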


---
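Usage note: _Per_CPU_Add_job() only appends the job to the processor's FIFO
list; it does not notify the target processor, so the caller has to send the
SMP_MESSAGE_PERFORM_JOBS message itself.  A minimal usage sketch follows;
the my_handler(), my_context, my_job, and add_and_kick() names are
illustrative and not part of this patch:

  static void my_handler( void *arg )
  {
    /* Runs on the target processor, in FIFO order relative to other jobs */
    (void) arg;
  }

  static const Per_CPU_Job_context my_context = {
    .handler = my_handler
  };

  static Per_CPU_Job my_job;

  static void add_and_kick( Per_CPU_Control *cpu )
  {
    /* Per_CPU_Job::context must be initialized by the caller, see the
       new doxygen comment in <rtems/score/percpu.h> */
    my_job.context = &my_context;

    /* Per_CPU_Job::next of the statically allocated job is NULL, as
       required by the _Assert() in _Per_CPU_Add_job() */
    _Per_CPU_Add_job( cpu, &my_job );

    /* _Per_CPU_Add_job() does not send the message, do it here */
    _SMP_Send_message( _Per_CPU_Get_index( cpu ), SMP_MESSAGE_PERFORM_JOBS );
  }
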
 cpukit/include/rtems/score/percpu.h       | 15 ++++++++++++++-
 cpukit/score/src/smpmulticastaction.c     | 39 +++++++++++++++++++++----------------
 testsuites/smptests/smpmulticast01/init.c | 43 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+), 17 deletions(-)

diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index eff71c4ec1..85e10fbbaa 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -792,12 +792,25 @@ bool _Per_CPU_State_wait_for_non_initial_state(
 );
 
 /**
- * @brief Performs the jobs of the specified processor.
+ * @brief Performs the jobs of the specified processor in FIFO order.
  *
  * @param[in, out] cpu The jobs of this processor will be performed.
  */
 void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );
 
+/**
+ * @brief Adds the job to the tail of the processing list of the specified
+ * processor.
+ *
+ * This function does not send the SMP_MESSAGE_PERFORM_JOBS message to the
+ * specified processor.
+ *
+ * @param[in, out] cpu The processor to which the job is added.
+ * @param[in, out] job The job.  The Per_CPU_Job::context member must be
+ *   initialized by the caller.
+ */
+void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
+
 #endif /* defined( RTEMS_SMP ) */
 
 /*
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
index ad3b1531ec..1207000e6a 100644
--- a/cpukit/score/src/smpmulticastaction.c
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -57,6 +57,26 @@ void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu )
 
     _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
   }
+
+  _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
+}
+
+void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job )
+{
+  ISR_lock_Context lock_context;
+
+  _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
+  _Assert( job->next == NULL );
+
+  _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
+
+  if ( cpu->Jobs.head == NULL ) {
+    cpu->Jobs.head = job;
+  } else {
+    *cpu->Jobs.tail = job;
+  }
+
+  cpu->Jobs.tail = &job->next;
 
   _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
 }
@@ -97,27 +117,14 @@ static void _SMP_Issue_action_jobs(
 
   for ( cpu_index = 0; cpu_index < cpu_max; ++cpu_index ) {
     if ( _Processor_mask_Is_set( targets, cpu_index ) ) {
-      ISR_lock_Context  lock_context;
-      Per_CPU_Job      *job;
-      Per_CPU_Control  *cpu;
+      Per_CPU_Job     *job;
+      Per_CPU_Control *cpu;
 
       job = &jobs->Jobs[ cpu_index ];
-      _Atomic_Store_ulong( &job->done, 0, ATOMIC_ORDER_RELAXED );
-      _Assert( job->next == NULL );
       job->context = &jobs->Context;
-
       cpu = _Per_CPU_Get_by_index( cpu_index );
-      _Per_CPU_Jobs_ISR_disable_and_acquire( cpu, &lock_context );
-
-      if ( cpu->Jobs.head == NULL ) {
-        cpu->Jobs.head = job;
-      } else {
-        *cpu->Jobs.tail = job;
-      }
-
-      cpu->Jobs.tail = &job->next;
 
-      _Per_CPU_Jobs_release_and_ISR_enable( cpu, &lock_context );
+      _Per_CPU_Add_job( cpu, job );
       _SMP_Send_message( cpu_index, SMP_MESSAGE_PERFORM_JOBS );
     }
   }
diff --git a/testsuites/smptests/smpmulticast01/init.c b/testsuites/smptests/smpmulticast01/init.c
index 5f10400ce6..9e5d15f795 100644
--- a/testsuites/smptests/smpmulticast01/init.c
+++ b/testsuites/smptests/smpmulticast01/init.c
@@ -351,6 +351,49 @@ static void test_wrong_cpu_state_to_perform_jobs(void)
   rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0);
 }
 
+#define TEST_JOB_ORDER_JOBS 3
+
+static Per_CPU_Job job_order_jobs[TEST_JOB_ORDER_JOBS];
+
+static void job_order_handler_0(void *arg)
+{
+  T_step(1, "invalid job order");
+}
+
+static void job_order_handler_1(void *arg)
+{
+  T_step(2, "invalid job order");
+}
+
+static void job_order_handler_2(void *arg)
+{
+  T_step(3, "invalid job order");
+}
+
+static const Per_CPU_Job_context job_order_contexts[TEST_JOB_ORDER_JOBS] = {
+  { .handler = job_order_handler_0 },
+  { .handler = job_order_handler_1 },
+  { .handler = job_order_handler_2 }
+};
+
+T_TEST_CASE(JobOrder)
+{
+  Per_CPU_Control *cpu_self;
+  size_t i;
+
+  T_plan(4);
+  cpu_self = _Thread_Dispatch_disable();
+
+  for (i = 0; i < TEST_JOB_ORDER_JOBS; ++i) {
+    job_order_jobs[i].context = &job_order_contexts[i];
+    _Per_CPU_Add_job(cpu_self, &job_order_jobs[i]);
+  }
+
+  T_step(0, "wrong job processing time");
+  _SMP_Send_message(_Per_CPU_Get_index(cpu_self), SMP_MESSAGE_PERFORM_JOBS);
+  _Thread_Dispatch_enable(cpu_self);
+}
+
 T_TEST_CASE(UnicastDuringMultitaskingIRQDisabled)
 {
   test_unicast(&test_instance, multicast_action_irq_disabled);
-- 
2.16.4