[PATCH 10/12] smp: Optimize Simple SMP scheduler

Gedare Bloom gedare at rtems.org
Wed Aug 14 15:41:49 UTC 2013


On Tue, Aug 13, 2013 at 9:42 AM, Sebastian Huber
<sebastian.huber at embedded-brains.de> wrote:
> Add Thread_Control::is_in_the_air field if configured for SMP.  This
> helps to simplify the extract operation and avoids superfluous
> inter-processor interrupts.  Move the processor allocation step into the
> enqueue operation.
>
> Add and use _Scheduler_simple_smp_Get_highest_ready().  Add and use
> _Scheduler_SMP_Get_lowest_scheduled().
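If I read the new enqueue path correctly, the main win is that a priority
change on a thread that stays the highest priority no longer bounces its
processor to the highest ready thread and back, which is where the
superfluous inter-processor interrupts came from.  A minimal stand-alone
model of the decision as I understand it (plain C, hypothetical names,
lower number means higher priority, mirroring the strict FIFO order check):

  #include <stdbool.h>
  #include <stdio.h>

  typedef struct { int priority; } model_thread;

  /* An in-the-air thread keeps its processor unless the highest ready
   * thread wins the (strict) order check against it. */
  static bool keeps_processor(
    const model_thread *thread,
    const model_thread *highest_ready
  )
  {
    return highest_ready == NULL
      || thread->priority < highest_ready->priority;
  }

  int main( void )
  {
    model_thread thread = { 5 };
    model_thread ready = { 7 };

    /* Still the highest priority: stays scheduled, no processor
     * reallocation and thus no inter-processor interrupt. */
    printf( "keeps processor: %d\n", keeps_processor( &thread, &ready ) );
    return 0;
  }

With the previous code the extract operation would already have handed the
processor to the highest ready thread before the following enqueue took it
back, if I read the old path correctly.
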
> ---
>  .../score/include/rtems/score/schedulersimplesmp.h |    4 +-
>  .../score/include/rtems/score/schedulersmpimpl.h   |   14 ++
>  cpukit/score/include/rtems/score/thread.h          |   10 ++
>  cpukit/score/src/schedulersimplesmp.c              |  135 +++++++++++++++-----
>  cpukit/score/src/threadinitialize.c                |    1 +
>  .../smptests/smpmigration01/smpmigration01.scn     |   24 ++--
>  6 files changed, 143 insertions(+), 45 deletions(-)
>
> diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h
> index f62068a..7d29dda 100644
> --- a/cpukit/score/include/rtems/score/schedulersimplesmp.h
> +++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h
> @@ -57,7 +57,7 @@ extern "C" {
>      _Scheduler_simple_smp_Initialize, \
>      _Scheduler_simple_smp_Schedule, \
>      _Scheduler_simple_smp_Yield, \
> -    _Scheduler_simple_smp_Extract, \
> +    _Scheduler_simple_smp_Block, \
>      _Scheduler_simple_smp_Enqueue_priority_fifo, \
>      _Scheduler_default_Allocate, \
>      _Scheduler_default_Free, \
> @@ -73,6 +73,8 @@ extern "C" {
>
>  void _Scheduler_simple_smp_Initialize( void );
>
> +void _Scheduler_simple_smp_Block( Thread_Control *thread );
> +
>  void _Scheduler_simple_smp_Enqueue_priority_fifo( Thread_Control *thread );
>
>  void _Scheduler_simple_smp_Enqueue_priority_lifo( Thread_Control *thread );
> diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
> index d23b0c0..40e94cb 100644
> --- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
> +++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
> @@ -83,6 +83,20 @@ static inline void _Scheduler_SMP_Allocate_processor(
>    }
>  }
>
> +static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
> +  Scheduler_SMP_Control *self
> +)
> +{
> +  Thread_Control *lowest_ready = NULL;
> +  Chain_Control *scheduled = &self->scheduled;
> +
> +  if ( !_Chain_Is_empty( scheduled ) ) {
> +    lowest_ready = (Thread_Control *) _Chain_Last( scheduled );
> +  }
> +
> +  return lowest_ready;
> +}
> +
>  /** @} */
>
>  #ifdef __cplusplus
> diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
> index d346ead..984bc0e 100644
> --- a/cpukit/score/include/rtems/score/thread.h
> +++ b/cpukit/score/include/rtems/score/thread.h
> @@ -378,6 +378,16 @@ struct Thread_Control_struct {
>    bool                                  is_scheduled;
>
>    /**
> +   * @brief This field is true if the thread is in the air.
> +   *
> +   * A thread is in the air if it is in a transient state.  The extract
> +   * operation on a scheduled thread will produce threads in the air.  Such
> +   * threads are no longer on the scheduled chain, but are still allocated to a
> +   * processor.  The next enqueue or schedule operation will decide what to do.
> +   */
> +  bool                                  is_in_the_air;
> +
At first I did not like the term "in the air", but I cannot think of a
better one right now.
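
Whatever we end up calling it, the sequence I picture for a priority change
on a scheduled thread is roughly the following (reviewer sketch only; the
caller and its ordering are my assumption, the two operations are the ones
touched by this patch):

  _Scheduler_simple_smp_Extract( thread );               /* now "in the air" */
  /* ... the thread's priority is updated here ... */
  _Scheduler_simple_smp_Enqueue_priority_fifo( thread ); /* decides: keep the
                                                            processor or move
                                                            to the ready chain */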

> +  /**
>     * @brief This field is true if the thread is executing.
>     *
>     * A thread is executing if it executes on a processor.  An executing thread
> diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
> index fa128f8..f9fc2cc 100644
> --- a/cpukit/score/src/schedulersimplesmp.c
> +++ b/cpukit/score/src/schedulersimplesmp.c
> @@ -34,22 +34,42 @@ void _Scheduler_simple_smp_Initialize( void )
>    _Scheduler.information = self;
>  }
>
> +static Thread_Control *_Scheduler_simple_smp_Get_highest_ready(
> +  Scheduler_SMP_Control *self
> +)
> +{
> +  Thread_Control *highest_ready = NULL;
> +  Chain_Control *ready = &self->ready[ 0 ];
> +
> +  if ( !_Chain_Is_empty( ready ) ) {
> +    highest_ready = (Thread_Control *) _Chain_First( ready );
> +  }
> +
> +  return highest_ready;
> +}
> +
>  static void _Scheduler_simple_smp_Move_from_scheduled_to_ready(
> -  Chain_Control *ready_chain,
> +  Scheduler_SMP_Control *self,
>    Thread_Control *scheduled_to_ready
>  )
>  {
>    _Chain_Extract_unprotected( &scheduled_to_ready->Object.Node );
> -  _Scheduler_simple_Insert_priority_lifo( ready_chain, scheduled_to_ready );
> +  _Scheduler_simple_Insert_priority_lifo(
> +    &self->ready[ 0 ],
> +    scheduled_to_ready
> +  );
>  }
>
>  static void _Scheduler_simple_smp_Move_from_ready_to_scheduled(
> -  Chain_Control *scheduled_chain,
> +  Scheduler_SMP_Control *self,
>    Thread_Control *ready_to_scheduled
>  )
>  {
>    _Chain_Extract_unprotected( &ready_to_scheduled->Object.Node );
> -  _Scheduler_simple_Insert_priority_fifo( scheduled_chain, ready_to_scheduled );
> +  _Scheduler_simple_Insert_priority_fifo(
> +    &self->scheduled,
> +    ready_to_scheduled
> +  );
>  }
>
>  static void _Scheduler_simple_smp_Insert(
> @@ -61,6 +81,19 @@ static void _Scheduler_simple_smp_Insert(
>    _Chain_Insert_ordered_unprotected( chain, &thread->Object.Node, order );
>  }
>
> +static void _Scheduler_simple_smp_Schedule_highest_ready(
> +  Scheduler_SMP_Control *self,
> +  Thread_Control *victim
> +)
> +{
> +  Thread_Control *highest_ready =
> +    (Thread_Control *) _Chain_First( &self->ready[ 0 ] );
> +
> +  _Scheduler_SMP_Allocate_processor( highest_ready, victim );
> +
> +  _Scheduler_simple_smp_Move_from_ready_to_scheduled( self, highest_ready );
> +}
> +
>  static void _Scheduler_simple_smp_Enqueue_ordered(
>    Thread_Control *thread,
>    Chain_Node_order order
> @@ -68,24 +101,68 @@ static void _Scheduler_simple_smp_Enqueue_ordered(
>  {
>    Scheduler_SMP_Control *self = _Scheduler_SMP_Instance();
>
> -  /*
> -   * The scheduled chain has exactly processor count nodes after
> -   * initialization, thus the lowest priority scheduled thread exists.
> -   */
> -  Thread_Control *lowest_scheduled =
> -    (Thread_Control *) _Chain_Last( &self->scheduled );
> +  if ( thread->is_in_the_air ) {
> +    Thread_Control *highest_ready =
> +      _Scheduler_simple_smp_Get_highest_ready( self );
> +
> +    thread->is_in_the_air = false;
> +
> +    /*
> +     * The thread has been extracted from the scheduled chain.  We have to
> +     * place it now on the scheduled or ready chain.
> +     *
> +     * XXX: Do not exchange parameters to do the negation of the order check.
> +     */
What is the XXX marker meant to indicate here?
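
Independent of the marker, the comment itself seems worth keeping: for a
strict order check, negating it is not the same as exchanging the
parameters once two nodes can have equal priority.  A tiny stand-alone
illustration (hypothetical order function standing in for the FIFO check,
lower number means higher priority):

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in for the strict FIFO order check. */
  static bool order( int left_priority, int right_priority )
  {
    return left_priority < right_priority;
  }

  int main( void )
  {
    int thread = 5, highest_ready = 5;  /* equal priorities */

    /* Negating the check: true, the in-the-air thread yields its processor. */
    printf( "!order(thread, ready) = %d\n", !order( thread, highest_ready ) );

    /* Exchanging the parameters instead: false, the decision flips. */
    printf( " order(ready, thread) = %d\n", order( highest_ready, thread ) );
    return 0;
  }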

> +    if (
> +      highest_ready != NULL
> +        && !( *order )( &thread->Object.Node, &highest_ready->Object.Node )
> +    ) {
> +      _Scheduler_SMP_Allocate_processor( highest_ready, thread );
>
> -  if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
> -    _Scheduler_SMP_Allocate_processor( thread, lowest_scheduled );
> +      _Scheduler_simple_smp_Insert( &self->ready[ 0 ], thread, order );
>
> -    _Scheduler_simple_smp_Insert( &self->scheduled, thread, order );
> +      _Scheduler_simple_smp_Move_from_ready_to_scheduled(
> +        self,
> +        highest_ready
> +      );
> +    } else {
> +      thread->is_scheduled = true;
>
> -    _Scheduler_simple_smp_Move_from_scheduled_to_ready(
> -      &self->ready[ 0 ],
> -      lowest_scheduled
> -    );
> +      _Scheduler_simple_smp_Insert( &self->scheduled, thread, order );
> +    }
>    } else {
> -    _Scheduler_simple_smp_Insert( &self->ready[ 0 ], thread, order );
> +    Thread_Control *lowest_scheduled = _Scheduler_SMP_Get_lowest_scheduled( self );
> +
> +    /*
> +     * The scheduled chain is empty if nested interrupts change the priority of
> +     * all scheduled threads.  These threads are in the air.
> +     */
> +    if (
> +      lowest_scheduled != NULL
> +        && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node )
> +    ) {
> +      _Scheduler_SMP_Allocate_processor( thread, lowest_scheduled );
> +
> +      _Scheduler_simple_smp_Insert( &self->scheduled, thread, order );
> +
> +      _Scheduler_simple_smp_Move_from_scheduled_to_ready(
> +        self,
> +        lowest_scheduled
> +      );
> +    } else {
> +      _Scheduler_simple_smp_Insert( &self->ready[ 0 ], thread, order );
> +    }
> +  }
> +}
> +
> +void _Scheduler_simple_smp_Block( Thread_Control *thread )
> +{
> +  _Chain_Extract_unprotected( &thread->Object.Node );
> +
> +  if ( thread->is_scheduled ) {
> +    Scheduler_SMP_Control *self = _Scheduler_SMP_Instance();
> +
> +    _Scheduler_simple_smp_Schedule_highest_ready( self, thread );
>    }
>  }
>
> @@ -107,21 +184,9 @@ void _Scheduler_simple_smp_Enqueue_priority_fifo( Thread_Control *thread )
>
>  void _Scheduler_simple_smp_Extract( Thread_Control *thread )
>  {
> -  Scheduler_SMP_Control *self = _Scheduler_SMP_Instance();
> +  thread->is_in_the_air = true;
>
>    _Chain_Extract_unprotected( &thread->Object.Node );
> -
> -  if ( thread->is_scheduled ) {
> -    Thread_Control *highest_ready =
> -      (Thread_Control *) _Chain_First( &self->ready[ 0 ] );
> -
> -    _Scheduler_SMP_Allocate_processor( highest_ready, thread );
> -
> -    _Scheduler_simple_smp_Move_from_ready_to_scheduled(
> -      &self->scheduled,
> -      highest_ready
> -    );
> -  }
>  }
>
>  void _Scheduler_simple_smp_Yield( Thread_Control *thread )
> @@ -138,5 +203,11 @@ void _Scheduler_simple_smp_Yield( Thread_Control *thread )
>
>  void _Scheduler_simple_smp_Schedule( Thread_Control *thread )
>  {
> -  ( void ) thread;
> +  if ( thread->is_in_the_air ) {
> +    Scheduler_SMP_Control *self = _Scheduler_SMP_Instance();
> +
> +    thread->is_in_the_air = false;
> +
> +    _Scheduler_simple_smp_Schedule_highest_ready( self, thread );
> +  }
>  }
> diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
> index a876147..34198ca 100644
> --- a/cpukit/score/src/threadinitialize.c
> +++ b/cpukit/score/src/threadinitialize.c
> @@ -180,6 +180,7 @@ bool _Thread_Initialize(
>
>  #if defined(RTEMS_SMP)
>    the_thread->is_scheduled            = false;
> +  the_thread->is_in_the_air           = false;
>    the_thread->is_executing            = false;
>
>    /* Initialize the cpu field for the non-SMP schedulers */
> diff --git a/testsuites/smptests/smpmigration01/smpmigration01.scn b/testsuites/smptests/smpmigration01/smpmigration01.scn
> index dc3bae2..c5a2106 100644
> --- a/testsuites/smptests/smpmigration01/smpmigration01.scn
> +++ b/testsuites/smptests/smpmigration01/smpmigration01.scn
> @@ -1,17 +1,17 @@
>  *** TEST SMPMIGRATION 1 ***
>  runner 0
> -        cpu 0 tokens 411501
> -        cpu 0 cycles 9464534
> -        cpu 1 tokens 411501
> -        cpu 1 cycles 9464802
> +        cpu 0 tokens 530399
> +        cpu 0 cycles 10077490
> +        cpu 1 tokens 530399
> +        cpu 1 cycles 10071429
>  runner 1
> -        cpu 0 tokens 411500
> -        cpu 0 cycles 41936630
> -        cpu 1 tokens 411501
> -        cpu 1 cycles 42009945
> +        cpu 0 tokens 530399
> +        cpu 0 cycles 5978212
> +        cpu 1 tokens 530399
> +        cpu 1 cycles 7951897
>  runner 2
> -        cpu 0 tokens 411501
> -        cpu 0 cycles 6583983
> -        cpu 1 tokens 411500
> -        cpu 1 cycles 6583701
> +        cpu 0 tokens 530399
> +        cpu 0 cycles 10070929
> +        cpu 1 tokens 530398
> +        cpu 1 cycles 10106437
>  *** END OF TEST SMPMIGRATION 1 ***
> --
> 1.7.7
>
> _______________________________________________
> rtems-devel mailing list
> rtems-devel at rtems.org
> http://www.rtems.org/mailman/listinfo/rtems-devel


