[rtems commit] score: Fix thread lock on SMP configurations

Sebastian Huber sebh at rtems.org
Thu Jun 30 05:58:51 UTC 2016


Module:    rtems
Branch:    master
Commit:    b6b25a9e68f2c34fe9cb5b69fc802b4b9f25d53b
Changeset: http://git.rtems.org/rtems/commit/?id=b6b25a9e68f2c34fe9cb5b69fc802b4b9f25d53b

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed Jun 29 15:32:51 2016 +0200

score: Fix thread lock on SMP configurations

---

 cpukit/score/include/rtems/score/thread.h     | 16 +++++++++++--
 cpukit/score/include/rtems/score/threadimpl.h | 34 ++++++++++++++++++++-------
 cpukit/score/src/threadinitialize.c           |  6 ++++-
 cpukit/score/src/threadqenqueue.c             |  2 +-
 4 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index 4d498e5..46c222f 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -666,7 +666,19 @@ typedef struct {
    * of the actual RTEMS build configuration, e.g. profiling enabled or
    * disabled.
    */
-  SMP_ticket_lock_Control *current;
+  union {
+    /**
+     * @brief The current thread lock as an atomic unsigned integer pointer value.
+     */
+    Atomic_Uintptr atomic;
+
+    /**
+     * @brief The current thread lock as a normal pointer.
+     *
+     * Only provided for debugging purposes.
+     */
+    SMP_ticket_lock_Control *normal;
+  } current;
 
   /**
    * @brief The default thread lock in case the thread is not blocked on a
@@ -680,7 +692,7 @@ typedef struct {
    *
    * These statistics are used by the executing thread in case it acquires a
    * thread lock.  Thus the statistics are an aggregation of acquire and
-   * release operations of diffent locks.
+   * release operations of different locks.
    */
   SMP_lock_Stats Stats;
 #endif
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index c878266..a4e7469 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -1118,33 +1118,47 @@ RTEMS_INLINE_ROUTINE void *_Thread_Lock_acquire(
 )
 {
 #if defined(RTEMS_SMP)
-  SMP_ticket_lock_Control *lock;
+  SMP_ticket_lock_Control *lock_0;
 
   while ( true ) {
+    SMP_ticket_lock_Control *lock_1;
+
     _ISR_lock_ISR_disable( lock_context );
 
     /*
+     * We must use a load acquire here paired with the store release in
+     * _Thread_Lock_set_unprotected() to observe corresponding thread wait
+     * queue and thread wait operations.
+     *
      * We assume that a normal load of pointer is identical to a relaxed atomic
      * load.  Here, we may read an out-of-date lock.  However, only the owner
      * of this out-of-date lock is allowed to set a new one.  Thus, we read at
      * least this new lock ...
      */
-    lock = the_thread->Lock.current;
+    lock_0 = (SMP_ticket_lock_Control *) _Atomic_Load_uintptr(
+      &the_thread->Lock.current.atomic,
+      ATOMIC_ORDER_ACQUIRE
+    );
 
     _SMP_ticket_lock_Acquire(
-      lock,
+      lock_0,
       &_Thread_Executing->Lock.Stats,
       &lock_context->Lock_context.Stats_context
     );
 
+    lock_1 = (SMP_ticket_lock_Control *) _Atomic_Load_uintptr(
+      &the_thread->Lock.current.atomic,
+      ATOMIC_ORDER_RELAXED
+    );
+
     /*
      * ... here, and so on.
      */
-    if ( lock == the_thread->Lock.current ) {
-      return lock;
+    if ( lock_0 == lock_1 ) {
+      return lock_0;
     }
 
-    _Thread_Lock_release( lock, lock_context );
+    _Thread_Lock_release( lock_0, lock_context );
   }
 #else
   _ISR_Local_disable( lock_context->isr_level );
@@ -1163,7 +1177,11 @@ RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
   SMP_ticket_lock_Control *new_lock
 )
 {
-  the_thread->Lock.current = new_lock;
+  _Atomic_Store_uintptr(
+    &the_thread->Lock.current.atomic,
+    (uintptr_t) new_lock,
+    ATOMIC_ORDER_RELEASE
+  );
 }
 #endif
 
@@ -1185,7 +1203,7 @@ RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
   ISR_lock_Context lock_context;
 
   _Thread_Lock_acquire_default_critical( the_thread, &lock_context );
-  _Assert( the_thread->Lock.current == &the_thread->Lock.Default );
+  _Assert( the_thread->Lock.current.normal == &the_thread->Lock.Default );
   _Thread_Lock_set_unprotected( the_thread, new_lock );
   _Thread_Lock_release_default_critical( the_thread, &lock_context );
 }
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index 10dbf02..7237cfe 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -179,7 +179,11 @@ bool _Thread_Initialize(
   the_thread->Scheduler.control = scheduler;
   the_thread->Scheduler.own_node = the_thread->Scheduler.node;
   _Resource_Node_initialize( &the_thread->Resource_node );
-  the_thread->Lock.current = &the_thread->Lock.Default;
+  _Atomic_Store_uintptr(
+    &the_thread->Lock.current.atomic,
+    (uintptr_t) &the_thread->Lock.Default,
+    ATOMIC_ORDER_RELAXED
+  );
   _SMP_ticket_lock_Initialize( &the_thread->Lock.Default );
   _SMP_lock_Stats_initialize( &the_thread->Lock.Stats, "Thread Lock" );
   _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 8bd1905..f014fc5 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -257,7 +257,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
   the_thread = _Thread_queue_First_locked( the_thread_queue, operations );
 
   if ( the_thread != NULL ) {
-    _SMP_Assert( the_thread->Lock.current == &the_thread_queue->Queue.Lock );
+    _SMP_Assert( the_thread->Lock.current.normal == &the_thread_queue->Queue.Lock );
 
     _Thread_queue_Extract_critical(
       &the_thread_queue->Queue,



