[rtems commit] score: Fix multiprocessing thread proxies

Sebastian Huber sebh at rtems.org
Tue Mar 29 11:43:14 UTC 2016


Module:    rtems
Branch:    master
Commit:    16832b0d9e1f6448a57b6f5f2909cff7ad360706
Changeset: http://git.rtems.org/rtems/commit/?id=16832b0d9e1f6448a57b6f5f2909cff7ad360706

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue Mar 29 12:06:55 2016 +0200

score: Fix multiprocessing thread proxies

We must provide thread queue heads for the thread wait information of
each thread proxy (thread queue heads were introduced by commit
d7665823b208daefb6855591d808e1f3075cedcb).  The thread proxy must be
allocated before the enqueue operation, so that the enqueue operates on
the proxy and its thread queue heads rather than on the MPCI receive
server thread.
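
Background on the spare-heads scheme from the referenced commit: every
thread donates a pre-allocated Thread_queue_Heads structure, reachable
through its Wait.spare_heads pointer, to whatever thread queue it
blocks on, so the enqueue path never has to allocate.  A proxy stands
in for a thread on a remote node and can block on local thread queues,
so it needs the same donation; before this fix, proxies carried no
heads storage at all.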

---

 cpukit/sapi/include/confdefs.h                |  4 ++-
 cpukit/score/include/rtems/score/thread.h     |  9 +++++++
 cpukit/score/include/rtems/score/threadimpl.h | 10 +++++++
 cpukit/score/src/threadinitialize.c           |  4 +--
 cpukit/score/src/threadmp.c                   | 39 ++++++++++++++++++++++-----
 cpukit/score/src/threadqenqueue.c             | 11 ++++----
 6 files changed, 62 insertions(+), 15 deletions(-)

diff --git a/cpukit/sapi/include/confdefs.h b/cpukit/sapi/include/confdefs.h
index 89beb23..228a9dc 100644
--- a/cpukit/sapi/include/confdefs.h
+++ b/cpukit/sapi/include/confdefs.h
@@ -1899,7 +1899,9 @@ extern rtems_initialization_tasks_table Initialization_tasks[];
         #define CONFIGURE_MP_MAXIMUM_PROXIES            32
       #endif
       #define CONFIGURE_MEMORY_FOR_PROXIES(_proxies) \
-        _Configure_Object_RAM((_proxies) + 1, sizeof(Thread_Proxy_control) )
+        _Configure_From_workspace((_proxies) \
+          * (sizeof(Thread_Proxy_control) \
+            + THREAD_QUEUE_HEADS_SIZE(CONFIGURE_SCHEDULER_COUNT)))
 
       #ifndef CONFIGURE_MP_MPCI_TABLE_POINTER
         #include <mpci.h>
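
The new formula charges the workspace for each proxy's control block
plus the thread queue heads for all configured schedulers, instead of
sizing an object table entry per proxy.  A minimal sketch of the
arithmetic (the function name and parameters are illustrative, not the
RTEMS API):

    #include <stddef.h>

    /*
     * Hypothetical mirror of CONFIGURE_MEMORY_FOR_PROXIES(): each proxy
     * needs its control block plus queue heads sized by the scheduler
     * count.  All names here are made up for the example.
     */
    static size_t memory_for_proxies(
      size_t proxies,
      size_t proxy_control_size, /* sizeof( Thread_Proxy_control ) */
      size_t heads_size          /* THREAD_QUEUE_HEADS_SIZE( n ) */
    )
    {
      return proxies * ( proxy_control_size + heads_size );
    }

Note also that the old formula reserved one extra object slot
("(_proxies) + 1"); the new one allocates exactly the configured number
of records straight from the workspace.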
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 4e0d8cf..ffca164 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -417,6 +417,15 @@ typedef struct {
      /****************** end of common block ********************/
   /** This field is used to manage the set of proxies in the system. */
   Chain_Node               Active;
+
+  /**
+   * @brief Provide thread queue heads for this thread proxy.
+   *
+   * The actual size of the thread queue heads depends on the application
+   * configuration.  Since thread proxies are never destroyed we can use the
+   * same storage place for the thread queue heads.
+   */
+  Thread_queue_Heads       Thread_queue_heads[ RTEMS_ZERO_LENGTH_ARRAY ];
 }   Thread_Proxy_control;
 
 /**
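
The zero-length trailing array is the usual C idiom for variable-size
trailing storage: it contributes nothing to
sizeof(Thread_Proxy_control), and the actual heads live in the same
allocation directly behind the control block.  A standalone sketch of
the idiom, using a C99 flexible array member and generic stand-in types
(not the real score types):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for Thread_queue_Heads; the size is illustrative. */
    typedef struct { long payload[ 4 ]; } heads_t;

    typedef struct {
      int     id;
      /*
       * Flexible array member: adds nothing to sizeof(proxy_t); the
       * storage follows the struct in the same allocation.
       */
      heads_t heads[];
    } proxy_t;

    int main( void )
    {
      /* One allocation covers the control block plus one heads set. */
      proxy_t *p = calloc( 1, sizeof( *p ) + sizeof( heads_t ) );

      if ( p == NULL )
        return EXIT_FAILURE;

      p->heads[ 0 ].payload[ 0 ] = 42;
      printf( "heads storage begins at offset %zu\n", sizeof( *p ) );
      free( p );
      return EXIT_SUCCESS;
    }

Because proxies are never destroyed, this storage can be donated once
at initialization and reused for the proxy's entire lifetime, exactly
as the new comment says.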
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 55fdb22..516441e 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -1455,6 +1455,16 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
  */
 void _Thread_Timeout( Watchdog_Control *watchdog );
 
+RTEMS_INLINE_ROUTINE void _Thread_Timer_initialize(
+  Thread_Timer_information *timer,
+  Per_CPU_Control          *cpu
+)
+{
+  _ISR_lock_Initialize( &timer->Lock, "Thread Timer" );
+  timer->header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
+  _Watchdog_Preinitialize( &timer->Watchdog, cpu );
+}
+
 RTEMS_INLINE_ROUTINE void _Thread_Timer_insert_relative(
   Thread_Control                 *the_thread,
   Per_CPU_Control                *cpu,
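
The new _Thread_Timer_initialize() inline helper simply factors out the
three timer setup steps that _Thread_Initialize() used to open-code
(see the next hunk), so the proxy initialization loop in threadmp.c can
reuse them.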
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index bcf03bf..18d29f8 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -160,9 +160,7 @@ bool _Thread_Initialize(
   the_thread->Start.budget_algorithm = budget_algorithm;
   the_thread->Start.budget_callout   = budget_callout;
 
-  _ISR_lock_Initialize( &the_thread->Timer.Lock, "Thread Timer" );
-  the_thread->Timer.header = &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_RELATIVE ];
-  _Watchdog_Preinitialize( &the_thread->Timer.Watchdog, cpu );
+  _Thread_Timer_initialize( &the_thread->Timer, cpu );
 
   switch ( budget_algorithm ) {
     case THREAD_CPU_BUDGET_ALGORITHM_NONE:
diff --git a/cpukit/score/src/threadmp.c b/cpukit/score/src/threadmp.c
index a084624..2d7e924 100644
--- a/cpukit/score/src/threadmp.c
+++ b/cpukit/score/src/threadmp.c
@@ -22,6 +22,8 @@
 #include <rtems/score/isrlevel.h>
 #include <rtems/score/wkspace.h>
 
+#include <string.h>
+
 Chain_Control _Thread_MP_Active_proxies;
 
 Chain_Control _Thread_MP_Inactive_proxies;
@@ -30,6 +32,10 @@ void _Thread_MP_Handler_initialization (
   uint32_t    maximum_proxies
 )
 {
+  size_t    proxy_size;
+  size_t    alloc_size;
+  char     *proxies;
+  uint32_t  i;
 
   _Chain_Initialize_empty( &_Thread_MP_Active_proxies );
 
@@ -38,16 +44,29 @@ void _Thread_MP_Handler_initialization (
     return;
   }
 
+  proxy_size = sizeof( Thread_Proxy_control )
+    + THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count );
+  alloc_size = maximum_proxies * proxy_size;
+  proxies = _Workspace_Allocate_or_fatal_error( alloc_size );
+  memset( proxies, 0, alloc_size );
 
   _Chain_Initialize(
     &_Thread_MP_Inactive_proxies,
-    _Workspace_Allocate_or_fatal_error(
-      maximum_proxies * sizeof( Thread_Proxy_control )
-    ),
+    proxies,
     maximum_proxies,
-    sizeof( Thread_Proxy_control )
+    proxy_size
   );
 
+  for ( i = 0 ; i < maximum_proxies ; ++i ) {
+    Thread_Proxy_control *proxy;
+
+    proxy = (Thread_Proxy_control *) ( proxies + i * proxy_size );
+
+    _Thread_Timer_initialize( &proxy->Timer, _Per_CPU_Get_by_index( 0 ) );
+
+    proxy->Wait.spare_heads = &proxy->Thread_queue_heads[ 0 ];
+    _Thread_queue_Heads_initialize( proxy->Wait.spare_heads );
+  }
 }
 
 Thread_Control *_Thread_MP_Allocate_proxy (
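
Because the record size passed to _Chain_Initialize() is now the
variable proxy_size rather than sizeof(Thread_Proxy_control), proxy i
starts at byte offset i * proxy_size inside the single workspace block,
which is why the loop above uses byte arithmetic.  A tiny sketch of
that layout (names illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Layout of the workspace block (illustrative):
     *
     *   block -> [ proxy 0 | heads 0 ][ proxy 1 | heads 1 ] ...
     *
     * Indexing must therefore use the full per-record stride.
     */
    static void *proxy_at( char *block, size_t proxy_size, uint32_t i )
    {
      return block + (size_t) i * proxy_size;
    }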
@@ -60,10 +79,12 @@ Thread_Control *_Thread_MP_Allocate_proxy (
   the_thread = (Thread_Control *)_Chain_Get( &_Thread_MP_Inactive_proxies );
 
   if ( !_Thread_Is_null( the_thread ) ) {
+    Thread_Control *executing;
 
+    executing = _Thread_Executing;
     the_proxy = (Thread_Proxy_control *) the_thread;
 
-    _Thread_Executing->Wait.return_code = THREAD_STATUS_PROXY_BLOCKING;
+    executing->Wait.return_code = THREAD_STATUS_PROXY_BLOCKING;
 
     the_proxy->receive_packet = _MPCI_Receive_server_tcb->receive_packet;
 
@@ -74,7 +95,13 @@ Thread_Control *_Thread_MP_Allocate_proxy (
 
     the_proxy->current_state = _States_Set( STATES_DORMANT, the_state );
 
-    the_proxy->Wait = _Thread_Executing->Wait;
+    the_proxy->Wait.id                      = executing->Wait.id;
+    the_proxy->Wait.count                   = executing->Wait.count;
+    the_proxy->Wait.return_argument         = executing->Wait.return_argument;
+    the_proxy->Wait.return_argument_second  = executing->Wait.return_argument_second;
+    the_proxy->Wait.option                  = executing->Wait.option;
+    the_proxy->Wait.return_code             = executing->Wait.return_code;
+    the_proxy->Wait.timeout_code            = executing->Wait.timeout_code;
 
     _Chain_Append( &_Thread_MP_Active_proxies, &the_proxy->Active );
 
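The switch from the whole-struct assignment "the_proxy->Wait =
_Thread_Executing->Wait" to copying individual members matters: a full
copy would also overwrite the proxy's Wait.spare_heads pointer, which
the initialization loop above pointed at the proxy's own heads storage
and which must survive for the proxy's lifetime.  A sketch of the
hazard with simplified, made-up types:

    /* Simplified model: only the per-request members may be copied. */
    typedef struct {
      int   id;          /* request-specific: safe to copy     */
      void *spare_heads; /* proxy-owned: must not be clobbered */
    } wait_like_t;

    static void copy_request_fields(
      wait_like_t       *to,
      const wait_like_t *from
    )
    {
      to->id = from->id;
      /*
       * to->spare_heads is intentionally left untouched; copying it
       * would lose the proxy's pre-initialized heads and donate the
       * executing thread's heads instead.
       */
    }
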
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index e775135..803b556 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -58,6 +58,12 @@ void _Thread_queue_Enqueue_critical(
   Per_CPU_Control *cpu_self;
   bool             success;
 
+#if defined(RTEMS_MULTIPROCESSING)
+  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
+    the_thread = _Thread_MP_Allocate_proxy( state );
+  }
+#endif
+
   _Thread_Lock_set( the_thread, &queue->Lock );
 
   _Thread_Wait_set_queue( the_thread, queue );
@@ -69,11 +75,6 @@ void _Thread_queue_Enqueue_critical(
   cpu_self = _Thread_Dispatch_disable_critical( lock_context );
   _Thread_queue_Queue_release( queue, lock_context );
 
-#if defined(RTEMS_MULTIPROCESSING)
-  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
-    the_thread = _Thread_MP_Allocate_proxy( state );
-  else
-#endif
   /*
    *  Set the blocking state for this thread queue in the thread.
    */
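
Moving the proxy substitution to the top of
_Thread_queue_Enqueue_critical() is the "allocated before the enqueue
operation" part of the fix: previously the proxy was swapped in only
after the thread's wait information had been wired to the queue and the
queue released, so the queue never saw the proxy or its queue heads.  A
compilable sketch of the corrected ordering (all types and helper names
are stand-ins, not the score API):

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins; names and fields are illustrative only. */
    typedef struct { void *receive_packet; } thread_t;
    typedef struct { int dummy; } queue_t;

    static bool is_mp_receive_server( const thread_t *t )
    {
      (void) t;
      return false; /* placeholder for _Thread_MP_Is_receive() */
    }

    static thread_t *allocate_proxy_sketch( void )
    {
      static thread_t proxy; /* placeholder for the proxy allocator */
      return &proxy;
    }

    static void set_wait_queue( thread_t *t, queue_t *q )
    {
      (void) t;
      (void) q;
    }

    static void enqueue( queue_t *q, thread_t *t )
    {
      (void) q;
      (void) t;
    }

    /*
     * Substitute the proxy before any wait-information setup, so every
     * later step, including the enqueue itself, operates on the proxy.
     */
    static void enqueue_critical_sketch( thread_t *the_thread, queue_t *queue )
    {
      if ( is_mp_receive_server( the_thread ) &&
           the_thread->receive_packet != NULL ) {
        the_thread = allocate_proxy_sketch();
      }

      set_wait_queue( the_thread, queue );
      enqueue( queue, the_thread );
      /* ... disable thread dispatching, release the queue, block ... */
    }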
