[PATCH 32/45] score: Fine-grained locking for MrsP

Sebastian Huber sebastian.huber at embedded-brains.de
Fri May 15 11:41:32 UTC 2015


Update #2273.
---
 cpukit/rtems/src/semobtain.c                |   7 +-
 cpukit/rtems/src/semrelease.c               |  10 +--
 cpukit/score/include/rtems/score/mrsp.h     |  27 ++++--
 cpukit/score/include/rtems/score/mrspimpl.h | 135 ++++++++++++++++++++--------
 cpukit/score/src/schedulerchangeroot.c      |   5 --
 5 files changed, 123 insertions(+), 61 deletions(-)
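
Editorial note (not part of the patch): the change replaces the Giant-lock-plus-disabled-interrupts protection of the MrsP data structures with a dedicated ISR lock per MRSP_Control, while the resource dependency tree temporarily stays under the scheduler lock (see the FIXME in mrspimpl.h below). A condensed sketch of the resulting fast-path protocol, paraphrased from the _MRSP_Claim_ownership() hunk further down; the function name _Example_Claim is illustrative only:

    #include <rtems/score/mrspimpl.h>

    /* Illustrative only: condensed from _MRSP_Claim_ownership() in this patch. */
    static void _Example_Claim(
      MRSP_Control     *mrsp,
      Thread_Control   *new_owner,
      Priority_Control  ceiling_priority,
      ISR_lock_Context *lock_context
    )
    {
      Per_CPU_Control *cpu_self;

      /* Interrupts were disabled during object lookup; take the resource lock. */
      _ISR_lock_Acquire( &mrsp->Lock, lock_context );

      /* ... update the owner and the resource dependency tree here ... */

      /* Defer thread dispatching, then drop the lock and re-enable interrupts. */
      cpu_self = _Thread_Dispatch_disable_critical();
      _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );

      /* The priority change may involve the scheduler; dispatch afterwards. */
      _Thread_Raise_priority( new_owner, ceiling_priority );
      _Thread_Dispatch_enable( cpu_self );
    }
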

diff --git a/cpukit/rtems/src/semobtain.c b/cpukit/rtems/src/semobtain.c
index 1ebc98b..bda39fa 100644
--- a/cpukit/rtems/src/semobtain.c
+++ b/cpukit/rtems/src/semobtain.c
@@ -59,16 +59,13 @@ rtems_status_code rtems_semaphore_obtain(
       if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
         MRSP_Status mrsp_status;
 
-        _Thread_Disable_dispatch();
-        _ISR_lock_ISR_enable( &lock_context );
         mrsp_status = _MRSP_Obtain(
           &the_semaphore->Core_control.mrsp,
           executing,
           wait,
-          timeout
+          timeout,
+          &lock_context
         );
-        _Thread_Enable_dispatch();
-        _Objects_Put_for_get_isr_disable( &the_semaphore->Object );
         return _Semaphore_Translate_MRSP_status_code( mrsp_status );
       } else
 #endif
diff --git a/cpukit/rtems/src/semrelease.c b/cpukit/rtems/src/semrelease.c
index 7e4b98e..5d41b6c 100644
--- a/cpukit/rtems/src/semrelease.c
+++ b/cpukit/rtems/src/semrelease.c
@@ -75,13 +75,13 @@ rtems_status_code rtems_semaphore_release(
       attribute_set = the_semaphore->attribute_set;
 #if defined(RTEMS_SMP)
       if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
-        _Thread_Disable_dispatch();
-        _ISR_lock_ISR_enable( &lock_context );
-        MRSP_Status mrsp_status = _MRSP_Release(
+        MRSP_Status mrsp_status;
+
+        mrsp_status = _MRSP_Release(
           &the_semaphore->Core_control.mrsp,
-          _Thread_Get_executing()
+          _Thread_Executing,
+          &lock_context
         );
-        _Thread_Enable_dispatch();
         return _Semaphore_Translate_MRSP_status_code( mrsp_status );
       } else
 #endif
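
Editorial note (not part of the patch): both directive changes above follow the same contract.  The ISR lock context filled during object lookup is handed to the MrsP operation, which is now responsible for re-enabling interrupts on every return path, so the directives no longer bracket the call with _Thread_Disable_dispatch()/_Thread_Enable_dispatch().  Caller shape, using the names from the hunks above:

        mrsp_status = _MRSP_Obtain(
          &the_semaphore->Core_control.mrsp,
          executing,
          wait,
          timeout,
          &lock_context
        );
        /* Interrupts are enabled again here, whichever branch was taken. */
        return _Semaphore_Translate_MRSP_status_code( mrsp_status );
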
diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
index 9eb2887..08f96ac 100644
--- a/cpukit/score/include/rtems/score/mrsp.h
+++ b/cpukit/score/include/rtems/score/mrsp.h
@@ -20,6 +20,7 @@
 #if defined(RTEMS_SMP)
 
 #include <rtems/score/chain.h>
+#include <rtems/score/isrlock.h>
 #include <rtems/score/scheduler.h>
 #include <rtems/score/thread.h>
 
@@ -75,24 +76,30 @@ typedef enum {
   MRSP_WAIT_FOR_OWNERSHIP = 255
 } MRSP_Status;
 
+typedef struct MRSP_Control MRSP_Control;
+
 /**
  * @brief MrsP rival.
  *
  * The rivals are used by threads waiting for resource ownership.  They are
- * registered in the MRSP control block.
+ * registered in the MrsP control block.
  */
 typedef struct {
   /**
-   * @brief The node for registration in the MRSP rival chain.
+   * @brief The node for registration in the MrsP rival chain.
    *
-   * The chain operations are protected by the Giant lock and disabled
-   * interrupts.
+   * The chain operations are protected by the MrsP control lock.
    *
    * @see MRSP_Control::Rivals.
    */
   Chain_Node Node;
 
   /**
+   * @brief The corresponding MrsP control block.
+   */
+  MRSP_Control *resource;
+
+  /**
    * @brief Identification of the rival thread.
    */
   Thread_Control *thread;
@@ -118,8 +125,7 @@ typedef struct {
    *
    * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP.  The rival will
    * busy wait until a status change happens.  This can be MRSP_SUCCESSFUL or
-   * MRSP_TIMEOUT.  State changes are protected by the Giant lock and disabled
-   * interrupts.
+   * MRSP_TIMEOUT.  State changes are protected by the MrsP control lock.
    */
   volatile MRSP_Status status;
 } MRSP_Rival;
@@ -127,7 +133,7 @@ typedef struct {
 /**
  * @brief MrsP control block.
  */
-typedef struct {
+struct MRSP_Control {
   /**
    * @brief Basic resource control.
    */
@@ -141,6 +147,11 @@ typedef struct {
   Chain_Control Rivals;
 
   /**
+   * @brief Lock to protect the resource dependency tree.
+   */
+  ISR_LOCK_MEMBER( Lock )
+
+  /**
    * @brief The initial priority of the owner before it was elevated to the
    * ceiling priority.
    */
@@ -150,7 +161,7 @@ typedef struct {
    * @brief One ceiling priority per scheduler instance.
    */
   Priority_Control *ceiling_priorities;
-} MRSP_Control;
+};
 
 /** @} */
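
Editorial note (not part of the patch): two structural changes carry the new locking.  MRSP_Control gains an ISR lock member that guards the rival chain, and MRSP_Rival gains a back pointer to its MRSP_Control (hence the forward declaration) so the watchdog timeout handler can reach that lock from its single argument.  Usage fragments, condensed from the mrspimpl.h hunks below and assuming mrsp, rival and lock_context are in scope:

    /* Created and destroyed together with the semaphore. */
    _ISR_lock_Initialize( &mrsp->Lock, "MrsP" );
    _ISR_lock_Destroy( &mrsp->Lock );

    /* The back pointer lets _MRSP_Timeout() find the lock from its argument. */
    MRSP_Control *locked = rival->resource;
    _ISR_lock_ISR_disable_and_acquire( &locked->Lock, &lock_context );
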
 
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
index 05aee42..2cf11a5 100644
--- a/cpukit/score/include/rtems/score/mrspimpl.h
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -36,6 +36,23 @@ extern "C" {
  * @{
  */
 
+/*
+ * FIXME: Operations with the resource dependency tree are protected by the
+ * global scheduler lock.  Since the scheduler lock should be scheduler
+ * instance specific in the future this will only work temporarily.  A more
+ * sophisticated locking strategy is necessary.
+ */
+
+RTEMS_INLINE_ROUTINE void _MRSP_Giant_acquire( ISR_lock_Context *lock_context )
+{
+  _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Giant_release( ISR_lock_Context *lock_context )
+{
+  _ISR_lock_Release( &_Scheduler_Lock, lock_context );
+}
+
 RTEMS_INLINE_ROUTINE bool _MRSP_Restore_priority_filter(
   Thread_Control   *thread,
   Priority_Control *new_priority,
@@ -74,14 +91,23 @@ RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
   MRSP_Control     *mrsp,
   Thread_Control   *new_owner,
   Priority_Control  initial_priority,
-  Priority_Control  ceiling_priority
+  Priority_Control  ceiling_priority,
+  ISR_lock_Context *lock_context
 )
 {
+  Per_CPU_Control *cpu_self;
+
   _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
   _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
   mrsp->initial_priority_of_owner = initial_priority;
-  _Thread_Raise_priority( new_owner, ceiling_priority );
   _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
+
+  cpu_self = _Thread_Dispatch_disable_critical();
+  _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );
+
+  _Thread_Raise_priority( new_owner, ceiling_priority );
+
+  _Thread_Dispatch_enable( cpu_self );
 }
 
 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize(
@@ -111,6 +137,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize(
 
   _Resource_Initialize( &mrsp->Resource );
   _Chain_Initialize_empty( &mrsp->Rivals );
+  _ISR_lock_Initialize( &mrsp->Lock, "MrsP" );
 
   return MRSP_SUCCESSFUL;
 }
@@ -138,27 +165,32 @@ RTEMS_INLINE_ROUTINE void _MRSP_Timeout(
 )
 {
   MRSP_Rival *rival = arg;
+  MRSP_Control *mrsp = rival->resource;
   Thread_Control *thread = rival->thread;
-  ISR_Level level;
+  ISR_lock_Context lock_context;
 
   (void) id;
 
-  _ISR_Disable( level );
+  _ISR_lock_ISR_disable_and_acquire( &mrsp->Lock, &lock_context );
 
   if ( rival->status == MRSP_WAIT_FOR_OWNERSHIP ) {
-    rival->status = MRSP_TIMEOUT;
+    ISR_lock_Context giant_lock_context;
+
+    _MRSP_Giant_acquire( &giant_lock_context );
 
     _Chain_Extract_unprotected( &rival->Node );
     _Resource_Node_extract( &thread->Resource_node );
     _Resource_Node_set_dependency( &thread->Resource_node, NULL );
-
-    _ISR_Enable( level );
-
     _Scheduler_Thread_change_help_state( thread, rival->initial_help_state );
     _Scheduler_Thread_change_resource_root( thread, thread );
-    _MRSP_Restore_priority( thread, rival->initial_priority );
+
+    _MRSP_Giant_release( &giant_lock_context );
+
+    rival->status = MRSP_TIMEOUT;
+
+    _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &lock_context );
   } else {
-    _ISR_Enable( level );
+    _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, &lock_context );
   }
 }
 
@@ -168,35 +200,41 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
   Thread_Control    *executing,
   Priority_Control   initial_priority,
   Priority_Control   ceiling_priority,
-  Watchdog_Interval  timeout
+  Watchdog_Interval  timeout,
+  ISR_lock_Context  *lock_context
 )
 {
   MRSP_Status status;
   MRSP_Rival rival;
   bool initial_life_protection;
-  ISR_Level level;
+  Per_CPU_Control *cpu_self;
+  ISR_lock_Context giant_lock_context;
 
   rival.thread = executing;
+  rival.resource = mrsp;
   rival.initial_priority = initial_priority;
+
+  _MRSP_Giant_acquire( &giant_lock_context );
+
   rival.initial_help_state =
     _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_ACTIVE_RIVAL );
   rival.status = MRSP_WAIT_FOR_OWNERSHIP;
 
-  _Thread_Raise_priority( executing, ceiling_priority );
-
-  _ISR_Disable( level );
-
   _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
   _Resource_Add_rival( &mrsp->Resource, &executing->Resource_node );
   _Resource_Node_set_dependency( &executing->Resource_node, &mrsp->Resource );
-
-  _ISR_Enable( level );
-
   _Scheduler_Thread_change_resource_root(
     executing,
     THREAD_RESOURCE_NODE_TO_THREAD( _Resource_Node_get_root( owner ) )
   );
 
+  _MRSP_Giant_release( &giant_lock_context );
+
+  cpu_self = _Thread_Dispatch_disable_critical();
+  _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );
+
+  _Thread_Raise_priority( executing, ceiling_priority );
+
   if ( timeout > 0 ) {
     _Watchdog_Initialize(
       &executing->Timer,
@@ -208,7 +246,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
   }
 
   initial_life_protection = _Thread_Set_life_protection( true );
-  _Thread_Enable_dispatch();
+  _Thread_Dispatch_enable( cpu_self );
 
   _Assert( _Debug_Is_thread_dispatching_allowed() );
 
@@ -217,11 +255,14 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
     status = rival.status;
   } while ( status == MRSP_WAIT_FOR_OWNERSHIP );
 
-  _Thread_Disable_dispatch();
   _Thread_Set_life_protection( initial_life_protection );
 
   if ( timeout > 0 ) {
     _Watchdog_Remove_ticks( &executing->Timer );
+
+    if ( status == MRSP_TIMEOUT ) {
+      _MRSP_Restore_priority( executing, initial_priority );
+    }
   }
 
   return status;
@@ -231,7 +272,8 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain(
   MRSP_Control      *mrsp,
   Thread_Control    *executing,
   bool               wait,
-  Watchdog_Interval  timeout
+  Watchdog_Interval  timeout,
+  ISR_lock_Context  *lock_context
 )
 {
   MRSP_Status status;
@@ -247,31 +289,37 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain(
   Resource_Node *owner;
 
   if ( !priority_ok) {
+    _ISR_lock_ISR_enable( lock_context );
     return MRSP_INVALID_PRIORITY;
   }
 
+  _ISR_lock_Acquire( &mrsp->Lock, lock_context );
   owner = _Resource_Get_owner( &mrsp->Resource );
   if ( owner == NULL ) {
     _MRSP_Claim_ownership(
       mrsp,
       executing,
       initial_priority,
-      ceiling_priority
+      ceiling_priority,
+      lock_context
     );
     status = MRSP_SUCCESSFUL;
-  } else if ( _Resource_Node_get_root( owner ) == &executing->Resource_node ) {
-    /* Nested access or deadlock */
-    status = MRSP_UNSATISFIED;
-  } else if ( wait ) {
+  } else if (
+    wait
+      && _Resource_Node_get_root( owner ) != &executing->Resource_node
+  ) {
     status = _MRSP_Wait_for_ownership(
       mrsp,
       owner,
       executing,
       initial_priority,
       ceiling_priority,
-      timeout
+      timeout,
+      lock_context
     );
   } else {
+    _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );
+    /* Not available, nested access or deadlock */
     status = MRSP_UNSATISFIED;
   }
 
@@ -279,13 +327,17 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain(
 }
 
 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
-  MRSP_Control   *mrsp,
-  Thread_Control *executing
+  MRSP_Control     *mrsp,
+  Thread_Control   *executing,
+  ISR_lock_Context *lock_context
 )
 {
-  ISR_Level level;
+  Priority_Control initial_priority;
+  Per_CPU_Control *cpu_self;
+  ISR_lock_Context giant_lock_context;
 
   if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
+    _ISR_lock_ISR_enable( lock_context );
     return MRSP_NOT_OWNER_OF_RESOURCE;
   }
 
@@ -295,18 +347,20 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
       &executing->Resource_node
     )
   ) {
+    _ISR_lock_ISR_enable( lock_context );
     return MRSP_INCORRECT_STATE;
   }
 
-  _MRSP_Restore_priority( executing, mrsp->initial_priority_of_owner );
+  initial_priority = mrsp->initial_priority_of_owner;
+
+  _ISR_lock_Acquire( &mrsp->Lock, lock_context );
+  cpu_self = _Thread_Dispatch_disable_critical();
 
-  _ISR_Disable( level );
+  _MRSP_Giant_acquire( &giant_lock_context );
 
   _Resource_Extract( &mrsp->Resource );
 
   if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
-    _ISR_Enable( level );
-
     _Resource_Set_owner( &mrsp->Resource, NULL );
   } else {
     MRSP_Rival *rival = (MRSP_Rival *)
@@ -325,9 +379,6 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
     _Resource_Node_set_dependency( &new_owner->Resource_node, NULL );
     _Resource_Node_add_resource( &new_owner->Resource_node, &mrsp->Resource );
     _Resource_Set_owner( &mrsp->Resource, &new_owner->Resource_node );
-
-    _ISR_Enable( level );
-
     _Scheduler_Thread_change_help_state( new_owner, SCHEDULER_HELP_ACTIVE_OWNER );
     _Scheduler_Thread_change_resource_root( new_owner, new_owner );
   }
@@ -336,6 +387,13 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
     _Scheduler_Thread_change_help_state( executing, SCHEDULER_HELP_YOURSELF );
   }
 
+  _MRSP_Giant_release( &giant_lock_context );
+
+  _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );
+
+  _MRSP_Restore_priority( executing, initial_priority );
+  _Thread_Dispatch_enable( cpu_self );
+
   return MRSP_SUCCESSFUL;
 }
 
@@ -345,6 +403,7 @@ RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Destroy( MRSP_Control *mrsp )
     return MRSP_RESOUCE_IN_USE;
   }
 
+  _ISR_lock_Destroy( &mrsp->Lock );
   _Workspace_Free( mrsp->ceiling_priorities );
 
   return MRSP_SUCCESSFUL;
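
Editorial note (not part of the patch): the release path above establishes the lock order used throughout the file: the per-resource ISR lock is taken first, then thread dispatching is disabled, then the scheduler ("Giant") lock is acquired for the dependency-tree updates.  The priority restore runs last, outside all locks, because it may involve the scheduler and trigger a dispatch.  Ordering sketch, condensed from the _MRSP_Release() tail and assuming its local variables are in scope:

    _ISR_lock_Acquire( &mrsp->Lock, lock_context );          /* 1. resource lock  */
    cpu_self = _Thread_Dispatch_disable_critical();          /* 2. no dispatching */
    _MRSP_Giant_acquire( &giant_lock_context );              /* 3. scheduler lock */
    /* ... ownership hand-over and dependency tree updates ... */
    _MRSP_Giant_release( &giant_lock_context );
    _ISR_lock_Release_and_ISR_enable( &mrsp->Lock, lock_context );
    _MRSP_Restore_priority( executing, initial_priority );   /* outside all locks */
    _Thread_Dispatch_enable( cpu_self );                     /* may dispatch now  */
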
diff --git a/cpukit/score/src/schedulerchangeroot.c b/cpukit/score/src/schedulerchangeroot.c
index f731117..d036fd8 100644
--- a/cpukit/score/src/schedulerchangeroot.c
+++ b/cpukit/score/src/schedulerchangeroot.c
@@ -61,9 +61,6 @@ void _Scheduler_Thread_change_resource_root(
   Thread_Control *offers_help = top;
   Scheduler_Node *offers_help_node;
   Thread_Control *offers_help_too;
-  ISR_Level level;
-
-  _ISR_Disable( level );
 
   offers_help_node = _Scheduler_Thread_get_node( offers_help );
   offers_help_too = _Scheduler_Node_get_owner( offers_help_node );
@@ -80,6 +77,4 @@ void _Scheduler_Thread_change_resource_root(
   if ( ctx.needs_help != NULL ) {
     _Scheduler_Ask_for_help( ctx.needs_help );
   }
-
-  _ISR_Enable( level );
 }
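
Editorial note (not part of the patch): the interrupt disable/enable pair is dropped here because the callers shown in this patch already invoke _Scheduler_Thread_change_resource_root() with interrupts disabled and the scheduler lock held via _MRSP_Giant_acquire().  Caller context as established in mrspimpl.h, with illustrative parameter names:

    _MRSP_Giant_acquire( &giant_lock_context );
    _Scheduler_Thread_change_resource_root( thread, thread );
    _MRSP_Giant_release( &giant_lock_context );
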
-- 
1.8.4.5



