[PATCH 14/14] score: Avoid Giant lock for scheduler set/get

Gedare Bloom gedare at rtems.org
Thu May 12 01:05:45 UTC 2016


Patch set looks good to me.
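
For anyone following the user-visible side of this change: below is a minimal
usage sketch (not part of the patch) of rtems_task_set_scheduler() with the
two-argument signature used here, showing that a scheduler change can now be
refused with RTEMS_INCORRECT_STATE since _Scheduler_Set() reports failure for
resource-owning threads. The scheduler name 'WRK ' and the presence of a
second scheduler instance in the application configuration are assumptions
for illustration only.

#include <rtems.h>

/* Move a task to a second scheduler instance.  With this patch the call can
 * fail with RTEMS_INCORRECT_STATE if the task currently owns resources,
 * instead of migrating it unconditionally. */
static rtems_status_code move_task_to_worker_scheduler( rtems_id task_id )
{
  rtems_id          scheduler_id;
  rtems_status_code sc;

  /* Look up the second scheduler instance by its configuration name
   * (hypothetical name 'WRK '). */
  sc = rtems_scheduler_ident(
    rtems_build_name( 'W', 'R', 'K', ' ' ),
    &scheduler_id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return sc;
  }

  /* Re-assign the task to the new scheduler instance; check the returned
   * status, since RTEMS_INCORRECT_STATE is now a possible outcome. */
  return rtems_task_set_scheduler( task_id, scheduler_id );
}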

On Wed, May 11, 2016 at 9:23 AM, Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
> Update #2555.
> ---
>  cpukit/rtems/src/taskgetscheduler.c              | 52 +++++++++++-------------
>  cpukit/rtems/src/tasksetscheduler.c              | 50 +++++++++++++----------
>  cpukit/score/include/rtems/score/schedulerimpl.h | 42 ++++++++++++++-----
>  cpukit/score/include/rtems/score/threadimpl.h    |  8 ++++
>  cpukit/score/include/rtems/score/threadqimpl.h   | 26 ++++++++++--
>  5 files changed, 113 insertions(+), 65 deletions(-)
>
> diff --git a/cpukit/rtems/src/taskgetscheduler.c b/cpukit/rtems/src/taskgetscheduler.c
> index 47895a4..9d8bbb6 100644
> --- a/cpukit/rtems/src/taskgetscheduler.c
> +++ b/cpukit/rtems/src/taskgetscheduler.c
> @@ -24,37 +24,31 @@ rtems_status_code rtems_task_get_scheduler(
>    rtems_id *scheduler_id
>  )
>  {
> -  rtems_status_code sc;
> -
> -  if ( scheduler_id != NULL ) {
> -    Thread_Control          *the_thread;
> -    Objects_Locations        location;
> -    const Scheduler_Control *scheduler;
> -
> -    the_thread = _Thread_Get( task_id, &location );
> -
> -    switch ( location ) {
> -      case OBJECTS_LOCAL:
> -        scheduler = _Scheduler_Get( the_thread );
> -        *scheduler_id = _Scheduler_Build_id(
> -          _Scheduler_Get_index( scheduler )
> -        );
> -        _Objects_Put( &the_thread->Object );
> -        sc = RTEMS_SUCCESSFUL;
> -        break;
> +  Thread_Control          *the_thread;
> +  ISR_lock_Context         lock_context;
> +  const Scheduler_Control *scheduler;
> +
> +  if ( scheduler_id == NULL ) {
> +    return RTEMS_INVALID_ADDRESS;
> +  }
> +
> +  the_thread = _Thread_Get_interrupt_disable( task_id, &lock_context );
> +
> +  if ( the_thread == NULL ) {
>  #if defined(RTEMS_MULTIPROCESSING)
> -      case OBJECTS_REMOTE:
> -        _Thread_Dispatch();
> -        sc = RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
> -        break;
> -#endif
> -      default:
> -        sc = RTEMS_INVALID_ID;
> -        break;
> +    if ( _Thread_MP_Is_remote( task_id ) ) {
> +      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
>      }
> -  } else {
> -    sc = RTEMS_INVALID_ADDRESS;
> +#endif
> +
> +    return RTEMS_INVALID_ID;
>    }
>
> -  return sc;
> +  _Thread_State_acquire_critical( the_thread, &lock_context );
> +
> +  scheduler = _Scheduler_Get( the_thread );
> +  *scheduler_id = _Scheduler_Build_id( _Scheduler_Get_index( scheduler ) );
> +
> +  _Thread_State_release( the_thread, &lock_context );
> +  return RTEMS_SUCCESSFUL;
>  }
> diff --git a/cpukit/rtems/src/tasksetscheduler.c b/cpukit/rtems/src/tasksetscheduler.c
> index e055530..428ef3d 100644
> --- a/cpukit/rtems/src/tasksetscheduler.c
> +++ b/cpukit/rtems/src/tasksetscheduler.c
> @@ -24,34 +24,40 @@ rtems_status_code rtems_task_set_scheduler(
>    rtems_id scheduler_id
>  )
>  {
> -  rtems_status_code        sc;
>    const Scheduler_Control *scheduler;
> +  Thread_Control          *the_thread;
> +  ISR_lock_Context         lock_context;
> +  ISR_lock_Context         state_lock_context;
> +  Per_CPU_Control         *cpu_self;
> +  void                    *lock;
> +  bool                     ok;
>
> -  if ( _Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
> -    Thread_Control    *the_thread;
> -    Objects_Locations  location;
> +  if ( !_Scheduler_Get_by_id( scheduler_id, &scheduler ) ) {
> +    return RTEMS_INVALID_ID;
> +  }
>
> -    the_thread = _Thread_Get( task_id, &location );
> +  the_thread = _Thread_Get_interrupt_disable( task_id, &lock_context );
>
> -    switch ( location ) {
> -      case OBJECTS_LOCAL:
> -        _Scheduler_Set( scheduler, the_thread );
> -        _Objects_Put( &the_thread->Object );
> -        sc = RTEMS_SUCCESSFUL;
> -        break;
> +  if ( the_thread == NULL ) {
>  #if defined(RTEMS_MULTIPROCESSING)
> -      case OBJECTS_REMOTE:
> -        _Thread_Dispatch();
> -        sc = RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
> -        break;
> -#endif
> -      default:
> -        sc = RTEMS_INVALID_ID;
> -        break;
> +    if ( _Thread_MP_Is_remote( task_id ) ) {
> +      return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
>      }
> -  } else {
> -    sc = RTEMS_INVALID_ID;
> +#endif
> +
> +    return RTEMS_INVALID_ID;
>    }
>
> -  return sc;
> +  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
> +  _ISR_lock_ISR_enable( &lock_context );
> +
> +  lock = _Thread_Lock_acquire( the_thread, &lock_context );
> +  _Thread_State_acquire_critical( the_thread, &state_lock_context );
> +
> +  ok = _Scheduler_Set( scheduler, the_thread );
> +
> +  _Thread_State_release_critical( the_thread, &state_lock_context );
> +  _Thread_Lock_release( lock, &lock_context );
> +  _Thread_Dispatch_enable( cpu_self );
> +  return ok ? RTEMS_SUCCESSFUL : RTEMS_INCORRECT_STATE;
>  }
> diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
> index c149a9e..2e5b025 100644
> --- a/cpukit/score/include/rtems/score/schedulerimpl.h
> +++ b/cpukit/score/include/rtems/score/schedulerimpl.h
> @@ -599,25 +599,45 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
>  #endif
>  }
>
> -RTEMS_INLINE_ROUTINE void _Scheduler_Set(
> +RTEMS_INLINE_ROUTINE bool _Scheduler_Set(
>    const Scheduler_Control *scheduler,
>    Thread_Control          *the_thread
>  )
>  {
>  #if defined(RTEMS_SMP)
> -  const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread );
> -
> -  if ( current_scheduler != scheduler ) {
> -    _Thread_Set_state( the_thread, STATES_MIGRATING );
> -    _Scheduler_Node_destroy( current_scheduler, the_thread );
> -    the_thread->Scheduler.own_control = scheduler;
> -    the_thread->Scheduler.control = scheduler;
> -    _Scheduler_Node_initialize( scheduler, the_thread );
> -    _Scheduler_Update_priority( the_thread, the_thread->current_priority );
> -    _Thread_Clear_state( the_thread, STATES_MIGRATING );
> +  const Scheduler_Control *current_scheduler;
> +  States_Control           current_state;
> +
> +  current_scheduler = _Scheduler_Get( the_thread );
> +
> +  if ( current_scheduler == scheduler ) {
> +    return true;
> +  }
> +
> +  if ( _Thread_Owns_resources( the_thread ) ) {
> +    return false;
> +  }
> +
> +  current_state = the_thread->current_state;
> +
> +  if ( _States_Is_ready( current_state ) ) {
> +    _Scheduler_Block( the_thread );
>    }
> +
> +  _Scheduler_Node_destroy( current_scheduler, the_thread );
> +  the_thread->Scheduler.own_control = scheduler;
> +  the_thread->Scheduler.control = scheduler;
> +  _Scheduler_Node_initialize( scheduler, the_thread );
> +  _Scheduler_Update_priority( the_thread, the_thread->current_priority );
> +
> +  if ( _States_Is_ready( current_state ) ) {
> +    _Scheduler_Unblock( the_thread );
> +  }
> +
> +  return true;
>  #else
>    (void) scheduler;
> +  return true;
>  #endif
>  }
>
> diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
> index 931eec0..1ea49dd 100644
> --- a/cpukit/score/include/rtems/score/threadimpl.h
> +++ b/cpukit/score/include/rtems/score/threadimpl.h
> @@ -359,6 +359,14 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Thread_State_acquire_for_executing(
>    return executing;
>  }
>
> +RTEMS_INLINE_ROUTINE void _Thread_State_release_critical(
> +  Thread_Control   *the_thread,
> +  ISR_lock_Context *lock_context
> +)
> +{
> +  _Thread_queue_Release_critical( &the_thread->Join_queue, lock_context );
> +}
> +
>  RTEMS_INLINE_ROUTINE void _Thread_State_release(
>    Thread_Control   *the_thread,
>    ISR_lock_Context *lock_context
> diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
> index e6cd401..ec222bd 100644
> --- a/cpukit/score/include/rtems/score/threadqimpl.h
> +++ b/cpukit/score/include/rtems/score/threadqimpl.h
> @@ -108,7 +108,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_do_acquire_critical(
>      _Thread_queue_Queue_do_acquire_critical( queue, lock_context )
>  #endif
>
> -RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release(
> +RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release_critical(
>    Thread_queue_Queue *queue,
>    ISR_lock_Context   *lock_context
>  )
> @@ -118,7 +118,18 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release(
>      &queue->Lock,
>      &lock_context->Lock_context.Stats_context
>    );
> +#else
> +  (void) queue;
> +  (void) lock_context;
>  #endif
> +}
> +
> +RTEMS_INLINE_ROUTINE void _Thread_queue_Queue_release(
> +  Thread_queue_Queue *queue,
> +  ISR_lock_Context   *lock_context
> +)
> +{
> +  _Thread_queue_Queue_release_critical( queue, lock_context );
>    _ISR_lock_ISR_enable( lock_context );
>  }
>
> @@ -159,7 +170,7 @@ RTEMS_INLINE_ROUTINE bool _Thread_queue_Is_lock_owner(
>  }
>  #endif
>
> -RTEMS_INLINE_ROUTINE void _Thread_queue_Release(
> +RTEMS_INLINE_ROUTINE void _Thread_queue_Release_critical(
>    Thread_queue_Control *the_thread_queue,
>    ISR_lock_Context     *lock_context
>  )
> @@ -170,12 +181,21 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Release(
>    the_thread_queue->owner = SMP_LOCK_NO_OWNER;
>  #endif
>  #endif
> -  _Thread_queue_Queue_release(
> +  _Thread_queue_Queue_release_critical(
>      &the_thread_queue->Queue,
>      lock_context
>    );
>  }
>
> +RTEMS_INLINE_ROUTINE void _Thread_queue_Release(
> +  Thread_queue_Control *the_thread_queue,
> +  ISR_lock_Context     *lock_context
> +)
> +{
> +  _Thread_queue_Release_critical( the_thread_queue, lock_context );
> +  _ISR_lock_ISR_enable( lock_context );
> +}
> +
>  Thread_Control *_Thread_queue_Do_dequeue(
>    Thread_queue_Control          *the_thread_queue,
>    const Thread_queue_Operations *operations
> --
> 1.8.4.5
>


