[PATCH 4/4] score: Adjust thread queue layout

Gedare Bloom gedare at rtems.org
Thu May 12 00:48:46 UTC 2016


ok

On Wed, May 11, 2016 at 9:19 AM, Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
> Adjust thread queue layout according to Newlib.  This makes it possible
> to use the same implementation for <sys/lock.h> and CORE mutexes in the
> future.
> ---
>  cpukit/score/include/rtems/score/threadq.h     | 37 +++++++++++++++---------
>  cpukit/score/include/rtems/score/threadqimpl.h |  4 +--
>  cpukit/score/src/mutex.c                       | 39 +++++++++++---------------
>  cpukit/score/src/threadq.c                     | 18 ++++++++----
>  4 files changed, 54 insertions(+), 44 deletions(-)
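
A note for readers following the member reordering: the point of putting the
lock first and of adding the owner pointer to the queue itself is layout
compatibility with the corresponding Newlib <sys/lock.h> structures, so that
one implementation can eventually serve both.  Below is a minimal sketch of
the general technique with hypothetical names (public_queue, score_queue);
it is not the actual Newlib or RTEMS definition, just an illustration of how
offsetof/sizeof static asserts pin a shared layout.

  #include <stddef.h>

  /* Hypothetical opaque layout as a public header might publish it
   * (stand-in for a Newlib <sys/lock.h> style definition). */
  struct public_queue {
    unsigned int _lock[ 2 ];
    void *_heads;
    void *_owner;
  };

  /* Hypothetical internal definition with the real types
   * (stand-in for Thread_queue_Queue). */
  typedef struct thread_control Thread_Control;

  typedef struct {
    unsigned int    Lock[ 2 ]; /* must stay the first member  */
    void           *heads;     /* NULL iff nothing enqueued   */
    Thread_Control *owner;     /* owning thread or NULL       */
  } score_queue;

  /* The two definitions may only be used interchangeably if every
   * member sits at the same offset and the sizes agree. */
  _Static_assert( offsetof( score_queue, Lock )
    == offsetof( struct public_queue, _lock ), "lock offset" );
  _Static_assert( offsetof( score_queue, heads )
    == offsetof( struct public_queue, _heads ), "heads offset" );
  _Static_assert( offsetof( score_queue, owner )
    == offsetof( struct public_queue, _owner ), "owner offset" );
  _Static_assert( sizeof( score_queue )
    == sizeof( struct public_queue ), "queue size" );

This mirrors the RTEMS_STATIC_ASSERT checks adjusted in threadq.c further
down in the patch.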
>
> diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
> index fb20148..4fddb25 100644
> --- a/cpukit/score/include/rtems/score/threadq.h
> +++ b/cpukit/score/include/rtems/score/threadq.h
> @@ -143,26 +143,31 @@ typedef struct _Thread_queue_Heads {
>
>  typedef struct {
>    /**
> -   * @brief The thread queue heads.
> -   *
> -   * This pointer is NULL, if and only if no threads are enqueued.  The first
> -   * thread to enqueue will give its spare thread queue heads to this thread
> -   * queue.
> -   */
> -  Thread_queue_Heads *heads;
> -
> -  /**
>     * @brief Lock to protect this thread queue.
>     *
>     * It may be used to protect additional state of the object embedding this
>     * thread queue.
>     *
> +   * Must be the first component of this structure to be able to re-use
> +   * implementation parts for structures defined by Newlib <sys/lock.h>.
> +   *
>     * @see _Thread_queue_Acquire(), _Thread_queue_Acquire_critical() and
>     * _Thread_queue_Release().
>     */
>  #if defined(RTEMS_SMP)
>    SMP_ticket_lock_Control Lock;
>  #endif
> +
> +  /**
> +   * @brief The thread queue heads.
> +   *
> +   * This pointer is NULL, if and only if no threads are enqueued.  The first
> +   * thread to enqueue will give its spare thread queue heads to this thread
> +   * queue.
> +   */
> +  Thread_queue_Heads *heads;
> +
> +  Thread_Control *owner;
>  } Thread_queue_Queue;
>
>  /**
> @@ -264,14 +269,20 @@ typedef struct {
>   *  waiting to acquire a resource.
>   */
>  typedef struct {
> +#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
>    /**
> -   * @brief The actual thread queue.
> +   * @brief SMP lock statistics in case SMP and profiling are enabled.
> +   *
> +   * Must be the first component of this structure to be able to re-use
> +   * implementation parts for structures defined by Newlib <sys/lock.h>.
>     */
> -  Thread_queue_Queue Queue;
> -
> -#if defined(RTEMS_SMP) && defined(RTEMS_PROFILING)
>    SMP_lock_Stats Lock_stats;
>  #endif
> +
> +  /**
> +   * @brief The actual thread queue.
> +   */
> +  Thread_queue_Queue Queue;
>  } Thread_queue_Control;
>
>  /**@}*/
> diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
> index a31b4c3..0247842 100644
> --- a/cpukit/score/include/rtems/score/threadqimpl.h
> +++ b/cpukit/score/include/rtems/score/threadqimpl.h
> @@ -39,8 +39,6 @@ extern "C" {
>   * defined in Newlib <sys/lock.h>.
>   */
>  typedef struct {
> -  Thread_queue_Queue Queue;
> -
>  #if !defined(RTEMS_SMP)
>    /*
>     * The struct _Thread_queue_Queue definition is independent of the RTEMS
> @@ -50,6 +48,8 @@ typedef struct {
>     */
>    unsigned int reserved[2];
>  #endif
> +
> +  Thread_queue_Queue Queue;
>  } Thread_queue_Syslock_queue;
>
>  RTEMS_INLINE_ROUTINE void _Thread_queue_Heads_initialize(
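
The reserved words also move in front of the queue here.  On a non-SMP build
Thread_queue_Queue has no Lock member, so the padding presumably stands in
for the lock storage and keeps heads/owner at the offsets the Newlib
structure expects.  A minimal sketch of that padding trick, continuing the
hypothetical names from the previous example (USE_SMP stands in for
RTEMS_SMP and is not a real configuration option):

  #include <stddef.h>

  struct public_queue {          /* fixed, configuration independent */
    unsigned int _lock[ 2 ];
    void *_heads;
    void *_owner;
  };

  typedef struct {               /* internal queue, lock only on SMP */
  #if defined(USE_SMP)
    unsigned int Lock[ 2 ];
  #endif
    void *heads;
    void *owner;
  } internal_queue;

  typedef struct {               /* wrapper exposed to <sys/lock.h>  */
  #if !defined(USE_SMP)
    unsigned int reserved[ 2 ];  /* placeholder for the absent lock  */
  #endif
    internal_queue Queue;
  } syslock_queue;

  _Static_assert( offsetof( syslock_queue, Queue.heads )
    == offsetof( struct public_queue, _heads ), "heads offset" );
  _Static_assert( sizeof( syslock_queue )
    == sizeof( struct public_queue ), "syslock queue size" );

With the padding in front, heads and owner land on the same offsets in both
configurations, which is exactly what the asserts in threadq.c verify.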
> diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
> index 5588926..28b20b7 100644
> --- a/cpukit/score/src/mutex.c
> +++ b/cpukit/score/src/mutex.c
> @@ -30,7 +30,6 @@
>
>  typedef struct {
>    Thread_queue_Syslock_queue Queue;
> -  Thread_Control *owner;
>  } Mutex_Control;
>
>  RTEMS_STATIC_ASSERT(
> @@ -40,12 +39,6 @@ RTEMS_STATIC_ASSERT(
>  );
>
>  RTEMS_STATIC_ASSERT(
> -  offsetof( Mutex_Control, owner )
> -    == offsetof( struct _Mutex_Control, _owner ),
> -  MUTEX_CONTROL_OWNER
> -);
> -
> -RTEMS_STATIC_ASSERT(
>    sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
>    MUTEX_CONTROL_SIZE
>  );
> @@ -140,7 +133,7 @@ static void _Mutex_Release_slow(
>      operations = MUTEX_TQ_OPERATIONS;
>      first = ( *operations->first )( heads );
>
> -    mutex->owner = first;
> +    mutex->Queue.Queue.owner = first;
>      ++first->resource_count;
>      unblock = _Thread_queue_Extract_locked(
>        &mutex->Queue.Queue,
> @@ -180,7 +173,7 @@ static void _Mutex_Release_critical(
>    Thread_queue_Heads *heads;
>    bool keep_priority;
>
> -  mutex->owner = NULL;
> +  mutex->Queue.Queue.owner = NULL;
>
>    --executing->resource_count;
>
> @@ -218,10 +211,10 @@ void _Mutex_Acquire( struct _Mutex_Control *_mutex )
>    mutex = _Mutex_Get( _mutex );
>    executing = _Mutex_Queue_acquire( mutex, &lock_context );
>
> -  owner = mutex->owner;
> +  owner = mutex->Queue.Queue.owner;
>
>    if ( __predict_true( owner == NULL ) ) {
> -    mutex->owner = executing;
> +    mutex->Queue.Queue.owner = executing;
>      ++executing->resource_count;
>      _Mutex_Queue_release( mutex, &lock_context );
>    } else {
> @@ -242,10 +235,10 @@ int _Mutex_Acquire_timed(
>    mutex = _Mutex_Get( _mutex );
>    executing = _Mutex_Queue_acquire( mutex, &lock_context );
>
> -  owner = mutex->owner;
> +  owner = mutex->Queue.Queue.owner;
>
>    if ( __predict_true( owner == NULL ) ) {
> -    mutex->owner = executing;
> +    mutex->Queue.Queue.owner = executing;
>      ++executing->resource_count;
>      _Mutex_Queue_release( mutex, &lock_context );
>
> @@ -283,10 +276,10 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
>    mutex = _Mutex_Get( _mutex );
>    executing = _Mutex_Queue_acquire( mutex, &lock_context );
>
> -  owner = mutex->owner;
> +  owner = mutex->Queue.Queue.owner;
>
>    if ( __predict_true( owner == NULL ) ) {
> -    mutex->owner = executing;
> +    mutex->Queue.Queue.owner = executing;
>      ++executing->resource_count;
>      eno = 0;
>    } else {
> @@ -307,7 +300,7 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
>    mutex = _Mutex_Get( _mutex );
>    executing = _Mutex_Queue_acquire( mutex, &lock_context );
>
> -  _Assert( mutex->owner == executing );
> +  _Assert( mutex->Queue.Queue.owner == executing );
>
>    _Mutex_Release_critical( mutex, executing, &lock_context );
>  }
> @@ -329,10 +322,10 @@ void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
>    mutex = _Mutex_recursive_Get( _mutex );
>    executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
>
> -  owner = mutex->Mutex.owner;
> +  owner = mutex->Mutex.Queue.Queue.owner;
>
>    if ( __predict_true( owner == NULL ) ) {
> -    mutex->Mutex.owner = executing;
> +    mutex->Mutex.Queue.Queue.owner = executing;
>      ++executing->resource_count;
>      _Mutex_Queue_release( &mutex->Mutex, &lock_context );
>    } else if ( owner == executing ) {
> @@ -356,10 +349,10 @@ int _Mutex_recursive_Acquire_timed(
>    mutex = _Mutex_recursive_Get( _mutex );
>    executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
>
> -  owner = mutex->Mutex.owner;
> +  owner = mutex->Mutex.Queue.Queue.owner;
>
>    if ( __predict_true( owner == NULL ) ) {
> -    mutex->Mutex.owner = executing;
> +    mutex->Mutex.Queue.Queue.owner = executing;
>      ++executing->resource_count;
>      _Mutex_Queue_release( &mutex->Mutex, &lock_context );
>
> @@ -408,10 +401,10 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
>    mutex = _Mutex_recursive_Get( _mutex );
>    executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
>
> -  owner = mutex->Mutex.owner;
> +  owner = mutex->Mutex.Queue.Queue.owner;
>
>    if ( __predict_true( owner == NULL ) ) {
> -    mutex->Mutex.owner = executing;
> +    mutex->Mutex.Queue.Queue.owner = executing;
>      ++executing->resource_count;
>      eno = 0;
>    } else if ( owner == executing ) {
> @@ -436,7 +429,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
>    mutex = _Mutex_recursive_Get( _mutex );
>    executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
>
> -  _Assert( mutex->Mutex.owner == executing );
> +  _Assert( mutex->Mutex.Queue.Queue.owner == executing );
>
>    nest_level = mutex->nest_level;
>
> diff --git a/cpukit/score/src/threadq.c b/cpukit/score/src/threadq.c
> index b3ccbd6..fdb2bee 100644
> --- a/cpukit/score/src/threadq.c
> +++ b/cpukit/score/src/threadq.c
> @@ -25,12 +25,6 @@
>  #if HAVE_STRUCT__THREAD_QUEUE_QUEUE
>
>  RTEMS_STATIC_ASSERT(
> -  offsetof( Thread_queue_Syslock_queue, Queue.heads )
> -    == offsetof( struct _Thread_queue_Queue, _heads ),
> -  THREAD_QUEUE_SYSLOCK_QUEUE_HEADS
> -);
> -
> -RTEMS_STATIC_ASSERT(
>  #if defined(RTEMS_SMP)
>    offsetof( Thread_queue_Syslock_queue, Queue.Lock.next_ticket )
>  #else
> @@ -51,6 +45,18 @@ RTEMS_STATIC_ASSERT(
>  );
>
>  RTEMS_STATIC_ASSERT(
> +  offsetof( Thread_queue_Syslock_queue, Queue.heads )
> +    == offsetof( struct _Thread_queue_Queue, _heads ),
> +  THREAD_QUEUE_SYSLOCK_QUEUE_HEADS
> +);
> +
> +RTEMS_STATIC_ASSERT(
> +  offsetof( Thread_queue_Syslock_queue, Queue.owner )
> +    == offsetof( struct _Thread_queue_Queue, _owner ),
> +  THREAD_QUEUE_SYSLOCK_QUEUE_OWNER
> +);
> +
> +RTEMS_STATIC_ASSERT(
>    sizeof( Thread_queue_Syslock_queue )
>      == sizeof( struct _Thread_queue_Queue ),
>    THREAD_QUEUE_SYSLOCK_QUEUE_SIZE
> --
> 1.8.4.5
>
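One more observation on the threadq.c hunk: the asserts are only reordered to
follow the new member order, plus the new owner check.  Together with the
size assert they are what justify treating the Newlib struct and the score
struct as the same object, for example via a _Mutex_Get() style conversion.
A hedged sketch of that pattern; get_queue() is a hypothetical accessor, and
the struct shapes are the ones from the first sketch above:

  struct public_queue;   /* opaque Newlib-style layout, see first sketch */
  struct score_queue;    /* internal layout, see first sketch            */

  /* Only valid because the offset and size asserts hold; modeled on the
   * way _Mutex_Get() maps struct _Mutex_Control onto Mutex_Control. */
  static inline struct score_queue *get_queue( struct public_queue *pub )
  {
    return (struct score_queue *) pub;
  }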
> _______________________________________________
> devel mailing list
> devel at rtems.org
> http://lists.rtems.org/mailman/listinfo/devel

