[rtems commit] score: Adjust thread queue layout

Sebastian Huber sebh at rtems.org
Mon May 30 14:17:58 UTC 2016


Module:    rtems
Branch:    master
Commit:    dfcc8bb2a6a0c9983a22268a933ef1f21a07eaaa
Changeset: http://git.rtems.org/rtems/commit/?id=dfcc8bb2a6a0c9983a22268a933ef1f21a07eaaa

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Tue May  3 07:43:54 2016 +0200

score: Adjust thread queue layout

Adjust the thread queue layout to match the structures defined by Newlib's
<sys/lock.h>.  This makes it possible to use the same implementation for
<sys/lock.h> objects and CORE mutexes in the future.

---

 cpukit/score/include/rtems/score/threadq.h     | 26 +++++++++++------
 cpukit/score/include/rtems/score/threadqimpl.h |  7 +++--
 cpukit/score/src/mutex.c                       | 39 +++++++++++---------------
 cpukit/score/src/threadq.c                     | 18 ++++++++----
 4 files changed, 49 insertions(+), 41 deletions(-)
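
As the commit message notes, the goal of the reordering is that the
RTEMS-side Thread_queue_Queue ends up with the same memory layout as the
structure Newlib's <sys/lock.h> declares for RTEMS.  The member names
_heads and _owner appear in the static asserts added to threadq.c below;
the exact shape of the lock member is an assumption, so the following is
only a sketch of the target layout:

    /* Sketch of the Newlib <sys/lock.h> structure this commit aligns to;
     * not copied verbatim from Newlib.
     */
    struct _Ticket_lock_Control {
      unsigned int _next_ticket;
      unsigned int _now_serving;
    };

    struct _Thread_queue_Queue {
      struct _Ticket_lock_Control _Lock;   /* lock storage comes first   */
      struct _Thread_queue_Heads *_heads;  /* corresponds to Queue.heads */
      struct _Thread_Control     *_owner;  /* corresponds to Queue.owner */
    };

Newlib cannot know whether RTEMS is built with SMP support, so its
structure always reserves the lock storage up front; the RTEMS side
therefore needs either a real lock or equally sized padding in the same
place, which is what the changes below arrange.
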

diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index 27af89f..467462d 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -155,26 +155,34 @@ typedef struct _Thread_queue_Heads {
 
 typedef struct {
   /**
-   * @brief The thread queue heads.
-   *
-   * This pointer is NULL, if and only if no threads are enqueued.  The first
-   * thread to enqueue will give its spare thread queue heads to this thread
-   * queue.
-   */
-  Thread_queue_Heads *heads;
-
-  /**
    * @brief Lock to protect this thread queue.
    *
    * It may be used to protect additional state of the object embedding this
    * thread queue.
    *
+   * Must be the first component of this structure to be able to re-use
+   * implementation parts for structures defined by Newlib <sys/lock.h>.
+   *
    * @see _Thread_queue_Acquire(), _Thread_queue_Acquire_critical() and
    * _Thread_queue_Release().
    */
 #if defined(RTEMS_SMP)
   SMP_ticket_lock_Control Lock;
 #endif
+
+  /**
+   * @brief The thread queue heads.
+   *
+   * This pointer is NULL, if and only if no threads are enqueued.  The first
+   * thread to enqueue will give its spare thread queue heads to this thread
+   * queue.
+   */
+  Thread_queue_Heads *heads;
+
+  /**
+   * @brief The thread queue owner.
+   */
+  Thread_Control *owner;
 } Thread_queue_Queue;
 
 /**
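
The new requirement that the lock be the first component of
Thread_queue_Queue relies on a standard C guarantee: a pointer to a
structure, suitably converted, points to its first member, so lock code
written against the Newlib structure can operate on the lock embedded in
the RTEMS structure.  A self-contained illustration of that idiom (all
names are invented for the example):

    #include <assert.h>
    #include <stddef.h>

    typedef struct {
      unsigned int next_ticket;
      unsigned int now_serving;
    } example_lock;

    typedef struct {
      example_lock Lock;   /* first member, as the new comment requires */
      void        *heads;
      void        *owner;
    } example_queue;

    int main( void )
    {
      example_queue q;

      /* Because Lock is the first member, the queue and its lock share an
       * address, so generic lock code can be handed either pointer.
       */
      assert( offsetof( example_queue, Lock ) == 0 );
      assert( (void *) &q == (void *) &q.Lock );
      return 0;
    }
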
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 7489d54..a8f404f 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -40,8 +40,6 @@ extern "C" {
  * defined in Newlib <sys/lock.h>.
  */
 typedef struct {
-  Thread_queue_Queue Queue;
-
 #if !defined(RTEMS_SMP)
   /*
    * The struct _Thread_queue_Queue definition is independent of the RTEMS
@@ -51,6 +49,8 @@ typedef struct {
    */
   unsigned int reserved[2];
 #endif
+
+  Thread_queue_Queue Queue;
 } Thread_queue_Syslock_queue;
 
 RTEMS_INLINE_ROUTINE void _Thread_queue_Do_context_initialize(
@@ -115,10 +115,11 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_initialize(
   Thread_queue_Queue *queue
 )
 {
-  queue->heads = NULL;
 #if defined(RTEMS_SMP)
   _SMP_ticket_lock_Initialize( &queue->Lock );
 #endif
+  queue->heads = NULL;
+  queue->owner = NULL;
 }
 
 RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_do_acquire_critical(
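
In a non-SMP build Thread_queue_Queue has no Lock member, which is why the
two reserved words now have to precede the queue: they stand in for the
lock storage at the beginning of the Newlib structure, so heads and owner
still land at the same offsets.  A minimal compile-time check in the
spirit of the RTEMS_STATIC_ASSERTs in threadq.c, built from stand-in types
whose sizes are assumptions chosen for illustration (compile as C11):

    #include <stddef.h>

    typedef struct {
      void *heads;
      void *owner;
    } sketch_queue;                  /* plays the non-SMP Thread_queue_Queue */

    typedef struct {
      unsigned int reserved[ 2 ];    /* takes the place of the lock storage */
      sketch_queue Queue;
    } sketch_syslock_queue;          /* plays Thread_queue_Syslock_queue */

    typedef struct {
      unsigned int lock[ 2 ];        /* assumed size of the Newlib lock */
      void *heads;
      void *owner;
    } sketch_newlib_queue;           /* plays struct _Thread_queue_Queue */

    _Static_assert(
      offsetof( sketch_syslock_queue, Queue.heads )
        == offsetof( sketch_newlib_queue, heads ),
      "heads must be at the same offset in both views"
    );

    _Static_assert(
      sizeof( sketch_syslock_queue ) == sizeof( sketch_newlib_queue ),
      "both views must have the same size"
    );
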
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 28936d6..eea15a0 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -30,7 +30,6 @@
 
 typedef struct {
   Thread_queue_Syslock_queue Queue;
-  Thread_Control *owner;
 } Mutex_Control;
 
 RTEMS_STATIC_ASSERT(
@@ -40,12 +39,6 @@ RTEMS_STATIC_ASSERT(
 );
 
 RTEMS_STATIC_ASSERT(
-  offsetof( Mutex_Control, owner )
-    == offsetof( struct _Mutex_Control, _owner ),
-  MUTEX_CONTROL_OWNER
-);
-
-RTEMS_STATIC_ASSERT(
   sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
   MUTEX_CONTROL_SIZE
 );
@@ -139,7 +132,7 @@ static void _Mutex_Release_slow(
     operations = MUTEX_TQ_OPERATIONS;
     first = ( *operations->first )( heads );
 
-    mutex->owner = first;
+    mutex->Queue.Queue.owner = first;
     ++first->resource_count;
     unblock = _Thread_queue_Extract_locked(
       &mutex->Queue.Queue,
@@ -176,7 +169,7 @@ static void _Mutex_Release_critical(
   Thread_queue_Heads *heads;
   bool keep_priority;
 
-  mutex->owner = NULL;
+  mutex->Queue.Queue.owner = NULL;
 
   --executing->resource_count;
 
@@ -214,10 +207,10 @@ void _Mutex_Acquire( struct _Mutex_Control *_mutex )
   mutex = _Mutex_Get( _mutex );
   executing = _Mutex_Queue_acquire( mutex, &lock_context );
 
-  owner = mutex->owner;
+  owner = mutex->Queue.Queue.owner;
 
   if ( __predict_true( owner == NULL ) ) {
-    mutex->owner = executing;
+    mutex->Queue.Queue.owner = executing;
     ++executing->resource_count;
     _Mutex_Queue_release( mutex, &lock_context );
   } else {
@@ -238,10 +231,10 @@ int _Mutex_Acquire_timed(
   mutex = _Mutex_Get( _mutex );
   executing = _Mutex_Queue_acquire( mutex, &lock_context );
 
-  owner = mutex->owner;
+  owner = mutex->Queue.Queue.owner;
 
   if ( __predict_true( owner == NULL ) ) {
-    mutex->owner = executing;
+    mutex->Queue.Queue.owner = executing;
     ++executing->resource_count;
     _Mutex_Queue_release( mutex, &lock_context );
 
@@ -278,10 +271,10 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
   mutex = _Mutex_Get( _mutex );
   executing = _Mutex_Queue_acquire( mutex, &lock_context );
 
-  owner = mutex->owner;
+  owner = mutex->Queue.Queue.owner;
 
   if ( __predict_true( owner == NULL ) ) {
-    mutex->owner = executing;
+    mutex->Queue.Queue.owner = executing;
     ++executing->resource_count;
     eno = 0;
   } else {
@@ -303,7 +296,7 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
   _Thread_queue_Context_initialize( &queue_context, NULL );
   executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );
 
-  _Assert( mutex->owner == executing );
+  _Assert( mutex->Queue.Queue.owner == executing );
 
   _Mutex_Release_critical( mutex, executing, &queue_context );
 }
@@ -325,10 +318,10 @@ void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
   mutex = _Mutex_recursive_Get( _mutex );
   executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
 
-  owner = mutex->Mutex.owner;
+  owner = mutex->Mutex.Queue.Queue.owner;
 
   if ( __predict_true( owner == NULL ) ) {
-    mutex->Mutex.owner = executing;
+    mutex->Mutex.Queue.Queue.owner = executing;
     ++executing->resource_count;
     _Mutex_Queue_release( &mutex->Mutex, &lock_context );
   } else if ( owner == executing ) {
@@ -352,10 +345,10 @@ int _Mutex_recursive_Acquire_timed(
   mutex = _Mutex_recursive_Get( _mutex );
   executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
 
-  owner = mutex->Mutex.owner;
+  owner = mutex->Mutex.Queue.Queue.owner;
 
   if ( __predict_true( owner == NULL ) ) {
-    mutex->Mutex.owner = executing;
+    mutex->Mutex.Queue.Queue.owner = executing;
     ++executing->resource_count;
     _Mutex_Queue_release( &mutex->Mutex, &lock_context );
 
@@ -403,10 +396,10 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
   mutex = _Mutex_recursive_Get( _mutex );
   executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
 
-  owner = mutex->Mutex.owner;
+  owner = mutex->Mutex.Queue.Queue.owner;
 
   if ( __predict_true( owner == NULL ) ) {
-    mutex->Mutex.owner = executing;
+    mutex->Mutex.Queue.Queue.owner = executing;
     ++executing->resource_count;
     eno = 0;
   } else if ( owner == executing ) {
@@ -435,7 +428,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
     &queue_context.Lock_context
   );
 
-  _Assert( mutex->Mutex.owner == executing );
+  _Assert( mutex->Mutex.Queue.Queue.owner == executing );
 
   nest_level = mutex->nest_level;
 
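
With the owner moved from Mutex_Control into the shared Thread_queue_Queue,
every acquire path in mutex.c follows the same pattern through
mutex->Queue.Queue.owner.  A condensed sketch of that fast path (not the
literal RTEMS code; blocking, timeouts and priority handling are omitted,
and the queue lock is assumed to be held, as after _Mutex_Queue_acquire()):

    #include <errno.h>
    #include <rtems/score/thread.h>
    #include <rtems/score/threadq.h>

    static int example_try_acquire(
      Thread_queue_Queue *queue,
      Thread_Control     *executing
    )
    {
      Thread_Control *owner = queue->owner;

      if ( owner == NULL ) {
        /* Uncontended case: the executing thread becomes the owner. */
        queue->owner = executing;
        ++executing->resource_count;
        return 0;
      }

      /* Contended case: a try-acquire caller sees the mutex as busy, while
       * the blocking paths enqueue the executing thread instead.
       */
      return EBUSY;
    }
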
diff --git a/cpukit/score/src/threadq.c b/cpukit/score/src/threadq.c
index 00d9cb1..ca2b900 100644
--- a/cpukit/score/src/threadq.c
+++ b/cpukit/score/src/threadq.c
@@ -25,12 +25,6 @@
 #if HAVE_STRUCT__THREAD_QUEUE_QUEUE
 
 RTEMS_STATIC_ASSERT(
-  offsetof( Thread_queue_Syslock_queue, Queue.heads )
-    == offsetof( struct _Thread_queue_Queue, _heads ),
-  THREAD_QUEUE_SYSLOCK_QUEUE_HEADS
-);
-
-RTEMS_STATIC_ASSERT(
 #if defined(RTEMS_SMP)
   offsetof( Thread_queue_Syslock_queue, Queue.Lock.next_ticket )
 #else
@@ -51,6 +45,18 @@ RTEMS_STATIC_ASSERT(
 );
 
 RTEMS_STATIC_ASSERT(
+  offsetof( Thread_queue_Syslock_queue, Queue.heads )
+    == offsetof( struct _Thread_queue_Queue, _heads ),
+  THREAD_QUEUE_SYSLOCK_QUEUE_HEADS
+);
+
+RTEMS_STATIC_ASSERT(
+  offsetof( Thread_queue_Syslock_queue, Queue.owner )
+    == offsetof( struct _Thread_queue_Queue, _owner ),
+  THREAD_QUEUE_SYSLOCK_QUEUE_OWNER
+);
+
+RTEMS_STATIC_ASSERT(
   sizeof( Thread_queue_Syslock_queue )
     == sizeof( struct _Thread_queue_Queue ),
   THREAD_QUEUE_SYSLOCK_QUEUE_SIZE
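
The reordered asserts now pin down the full correspondence between the two
views: lock storage first, then the heads pointer, then the new owner,
plus an overall size check, all verified at compile time.  The owner
assert is what lets mutex.c above drop its separate owner field.  As a
sketch of the payoff on the <sys/lock.h> side, assuming a Newlib that
provides struct _Thread_queue_Queue (see the HAVE_STRUCT__THREAD_QUEUE_QUEUE
guard above) and treating the type behind _owner as opaque:

    #include <sys/lock.h>

    /* An owner stored through the RTEMS view (mutex->Queue.Queue.owner) is
     * readable through the Newlib view at the same offset.
     */
    const void *example_syslock_owner(
      const struct _Thread_queue_Queue *queue
    )
    {
      return queue->_owner;
    }
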


