[PATCH 10/10] rtems: Add scheduler processor add/remove
Gedare Bloom
gedare@rtems.org
Wed Nov 9 20:45:30 UTC 2016
No complaints from me on this set.
On Tue, Nov 8, 2016 at 3:59 AM, Sebastian Huber
<sebastian.huber@embedded-brains.de> wrote:
> Update #2797.
> ---
> cpukit/rtems/Makefile.am | 2 +
> cpukit/rtems/include/rtems/rtems/tasks.h | 45 ++++
> cpukit/rtems/src/scheduleraddprocessor.c | 125 +++++++++
> cpukit/rtems/src/schedulerremoveprocessor.c | 145 ++++++++++
> cpukit/score/include/rtems/score/percpu.h | 6 +
> cpukit/score/include/rtems/score/scheduler.h | 28 +-
> .../rtems/score/schedulerpriorityaffinitysmp.h | 12 +
> .../include/rtems/score/schedulerprioritysmp.h | 12 +
> .../include/rtems/score/schedulerprioritysmpimpl.h | 8 +
> .../score/include/rtems/score/schedulersimplesmp.h | 12 +
> .../score/include/rtems/score/schedulersmpimpl.h | 92 +++++++
> .../score/include/rtems/score/schedulerstrongapa.h | 12 +
> cpukit/score/src/schedulerpriorityaffinitysmp.c | 30 +++
> cpukit/score/src/schedulerprioritysmp.c | 30 +++
> cpukit/score/src/schedulersimplesmp.c | 38 +++
> cpukit/score/src/schedulersmpstartidle.c | 2 +-
> cpukit/score/src/schedulerstrongapa.c | 38 +++
> testsuites/smptests/Makefile.am | 1 +
> testsuites/smptests/configure.ac | 1 +
> testsuites/smptests/smpscheduler02/init.c | 130 +++++++++
> testsuites/smptests/smpscheduler04/Makefile.am | 19 ++
> testsuites/smptests/smpscheduler04/init.c | 298 +++++++++++++++++++++
> .../smptests/smpscheduler04/smpscheduler04.doc | 14 +
> .../smptests/smpscheduler04/smpscheduler04.scn | 4 +
> testsuites/sptests/spscheduler01/init.c | 28 ++
> 25 files changed, 1130 insertions(+), 2 deletions(-)
> create mode 100644 cpukit/rtems/src/scheduleraddprocessor.c
> create mode 100644 cpukit/rtems/src/schedulerremoveprocessor.c
> create mode 100644 testsuites/smptests/smpscheduler04/Makefile.am
> create mode 100644 testsuites/smptests/smpscheduler04/init.c
> create mode 100644 testsuites/smptests/smpscheduler04/smpscheduler04.doc
> create mode 100644 testsuites/smptests/smpscheduler04/smpscheduler04.scn
>
> diff --git a/cpukit/rtems/Makefile.am b/cpukit/rtems/Makefile.am
> index 6ecff9e..ada1f83 100644
> --- a/cpukit/rtems/Makefile.am
> +++ b/cpukit/rtems/Makefile.am
> @@ -105,8 +105,10 @@ librtems_a_SOURCES += src/taskstart.c
> librtems_a_SOURCES += src/tasksuspend.c
> librtems_a_SOURCES += src/taskwakeafter.c
> librtems_a_SOURCES += src/taskwakewhen.c
> +librtems_a_SOURCES += src/scheduleraddprocessor.c
> librtems_a_SOURCES += src/schedulergetprocessorset.c
> librtems_a_SOURCES += src/schedulerident.c
> +librtems_a_SOURCES += src/schedulerremoveprocessor.c
>
> ## RATEMON_C_FILES
> librtems_a_SOURCES += src/ratemon.c
> diff --git a/cpukit/rtems/include/rtems/rtems/tasks.h b/cpukit/rtems/include/rtems/rtems/tasks.h
> index 3a94e34..fcbf5ed 100644
> --- a/cpukit/rtems/include/rtems/rtems/tasks.h
> +++ b/cpukit/rtems/include/rtems/rtems/tasks.h
> @@ -585,6 +585,51 @@ rtems_status_code rtems_scheduler_get_processor_set(
> );
> #endif
>
> +/**
> + * @brief Adds a processor to the set of processors owned by the scheduler.
> + *
> + * Must be called from task context. This operation obtains and releases the
> + * objects allocator lock.
> + *
> + * @param[in] scheduler_id Identifier of the scheduler.
> + * @param[in] cpu_index Index of the processor to add.
> + *
> + * @retval RTEMS_SUCCESSFUL Successful operation.
> + * @retval RTEMS_INVALID_ID Invalid scheduler identifier.
> + * @retval RTEMS_NOT_CONFIGURED The processor is not configured to be used by
> + * the application.
> + * @retval RTEMS_INCORRECT_STATE The processor is configured to be used by
> + * the application, however, it is not available.
> + * @retval RTEMS_RESOURCE_IN_USE The processor is already assigned to a
> + * scheduler instance.
> + */
> +rtems_status_code rtems_scheduler_add_processor(
> + rtems_id scheduler_id,
> + uint32_t cpu_index
> +);
> +
> +/**
> + * @brief Removes a processor from the set of processors owned by the scheduler.
> + *
> + * Must be called from task context. This operation obtains and releases the
> + * objects allocator lock. Removing a processor from a scheduler is a complex
> + * operation that involves all tasks in the system.
> + *
> + * @param[in] scheduler_id Identifier of the scheduler.
> + * @param[in] cpu_index Index of the processor to remove.
> + *
> + * @retval RTEMS_SUCCESSFUL Successful operation.
> + * @retval RTEMS_INVALID_ID Invalid scheduler identifier.
> + * @retval RTEMS_INVALID_NUMBER The processor is not owned by the scheduler.
> + * @retval RTEMS_RESOURCE_IN_USE The set of processors owned by the scheduler
> + * would be empty after the processor removal and there exists a non-idle
> + * task that uses this scheduler as its home scheduler.
> + */
> +rtems_status_code rtems_scheduler_remove_processor(
> + rtems_id scheduler_id,
> + uint32_t cpu_index
> +);
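
A minimal usage sketch of the two new directives for anyone trying them out
(the scheduler name BLUE mirrors the spscheduler01 test below; the processor
index is an illustrative assumption):

  #include <rtems.h>
  #include <assert.h>

  rtems_status_code sc;
  rtems_id scheduler_id;

  /* Look up a scheduler instance configured by the application */
  sc = rtems_scheduler_ident(rtems_build_name('B', 'L', 'U', 'E'), &scheduler_id);
  assert(sc == RTEMS_SUCCESSFUL);

  /* Hand processor 1 over to this scheduler instance */
  sc = rtems_scheduler_add_processor(scheduler_id, 1);
  assert(sc == RTEMS_SUCCESSFUL);

  /* Take processor 1 away again; it stays unused until a scheduler adds it */
  sc = rtems_scheduler_remove_processor(scheduler_id, 1);
  assert(sc == RTEMS_SUCCESSFUL);
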
> +
> /**@}*/
>
> /**
> diff --git a/cpukit/rtems/src/scheduleraddprocessor.c b/cpukit/rtems/src/scheduleraddprocessor.c
> new file mode 100644
> index 0000000..c9f7a18
> --- /dev/null
> +++ b/cpukit/rtems/src/scheduleraddprocessor.c
> @@ -0,0 +1,125 @@
> +/*
> + * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
> + *
> + * embedded brains GmbH
> + * Dornierstr. 4
> + * 82178 Puchheim
> + * Germany
> + * <rtems@embedded-brains.de>
> + *
> + * The license and distribution terms for this file may be
> + * found in the file LICENSE in this distribution or at
> + * http://www.rtems.org/license/LICENSE.
> + */
> +
> +#if HAVE_CONFIG_H
> +#include "config.h"
> +#endif
> +
> +#include <rtems/rtems/tasks.h>
> +#include <rtems/score/assert.h>
> +#include <rtems/score/schedulerimpl.h>
> +#include <rtems/config.h>
> +
> +rtems_status_code rtems_scheduler_add_processor(
> + rtems_id scheduler_id,
> + uint32_t cpu_index
> +)
> +{
> + uint32_t scheduler_index;
> +#if defined(RTEMS_SMP)
> + Per_CPU_Control *cpu;
> + rtems_status_code status;
> +#endif
> +
> + scheduler_index = _Scheduler_Get_index_by_id( scheduler_id );
> +
> + if ( scheduler_index >= _Scheduler_Count ) {
> + return RTEMS_INVALID_ID;
> + }
> +
> + if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
> + return RTEMS_NOT_CONFIGURED;
> + }
> +
> +#if defined(RTEMS_SMP)
> + cpu = _Per_CPU_Get_by_index( cpu_index );
> +
> + if ( _Scheduler_Initial_assignments[ cpu_index ].scheduler == NULL ) {
> + return RTEMS_NOT_CONFIGURED;
> + }
> +
> + if ( !_Per_CPU_Is_processor_online( cpu ) ) {
> + return RTEMS_INCORRECT_STATE;
> + }
> +
> + _Objects_Allocator_lock();
> +
> + if ( cpu->Scheduler.control == NULL ) {
> + const Scheduler_Control *scheduler;
> + Scheduler_Context *scheduler_context;
> + Priority_Control idle_priority;
> + Thread_Control *idle;
> + Scheduler_Node *scheduler_node;
> + ISR_lock_Context lock_context;
> + Thread_queue_Context queue_context;
> + Per_CPU_Control *cpu_self;
> +
> + scheduler = &_Scheduler_Table[ scheduler_index ];
> + scheduler_context = _Scheduler_Get_context( scheduler );
> + idle_priority =
> + _Scheduler_Map_priority( scheduler, scheduler->maximum_priority );
> +
> + idle = cpu->Scheduler.idle_if_online_and_unused;
> + _Assert( idle != NULL );
> + cpu->Scheduler.idle_if_online_and_unused = NULL;
> +
> + idle->Scheduler.home = scheduler;
> + idle->Start.initial_priority = idle_priority;
> + scheduler_node =
> + _Thread_Scheduler_get_node_by_index( idle, scheduler_index );
> + _Scheduler_Node_initialize(
> + scheduler,
> + scheduler_node,
> + idle,
> + idle_priority
> + );
> + _Priority_Node_set_priority( &idle->Real_priority, idle_priority );
> + _Priority_Initialize_one(
> + &scheduler_node->Wait.Priority,
> + &idle->Real_priority
> + );
> + _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
> + _Chain_Initialize_one(
> + &idle->Scheduler.Wait_nodes,
> + &scheduler_node->Thread.Wait_node
> + );
> + _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
> + _Chain_Initialize_one(
> + &idle->Scheduler.Scheduler_nodes,
> + &scheduler_node->Thread.Scheduler_node.Chain
> + );
> +
> + _ISR_lock_ISR_disable( &lock_context );
> + _Scheduler_Acquire_critical( scheduler, &lock_context );
> + ++scheduler_context->processor_count;
> + cpu->Scheduler.control = scheduler;
> + cpu->Scheduler.context = scheduler_context;
> + ( *scheduler->Operations.add_processor )( scheduler, idle );
> + cpu_self = _Thread_Dispatch_disable_critical(
> + &queue_context.Lock_context.Lock_context
> + );
> + _Scheduler_Release_critical( scheduler, &lock_context );
> + _ISR_lock_ISR_enable( &lock_context );
> + _Thread_Dispatch_enable( cpu_self );
> + status = RTEMS_SUCCESSFUL;
> + } else {
> + status = RTEMS_RESOURCE_IN_USE;
> + }
> +
> + _Objects_Allocator_unlock();
> + return status;
> +#else
> + return RTEMS_RESOURCE_IN_USE;
> +#endif
> +}
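
Every documented status code is reachable at runtime, so code doing dynamic
processor management probably wants to handle each case explicitly. A hedged
sketch (scheduler_id and cpu_index are assumed to be set up by the caller):

  sc = rtems_scheduler_add_processor(scheduler_id, cpu_index);
  switch (sc) {
    case RTEMS_SUCCESSFUL:
      /* The scheduler instance now owns cpu_index */
      break;
    case RTEMS_NOT_CONFIGURED:
      /* cpu_index has no scheduler assignment in the configuration */
      break;
    case RTEMS_INCORRECT_STATE:
      /* cpu_index is configured, but did not come online */
      break;
    case RTEMS_RESOURCE_IN_USE:
      /* cpu_index is already owned by a scheduler instance */
      break;
    default:
      /* RTEMS_INVALID_ID: invalid scheduler identifier */
      break;
  }
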
> diff --git a/cpukit/rtems/src/schedulerremoveprocessor.c b/cpukit/rtems/src/schedulerremoveprocessor.c
> new file mode 100644
> index 0000000..82e27b5
> --- /dev/null
> +++ b/cpukit/rtems/src/schedulerremoveprocessor.c
> @@ -0,0 +1,145 @@
> +/*
> + * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
> + *
> + * embedded brains GmbH
> + * Dornierstr. 4
> + * 82178 Puchheim
> + * Germany
> + * <rtems@embedded-brains.de>
> + *
> + * The license and distribution terms for this file may be
> + * found in the file LICENSE in this distribution or at
> + * http://www.rtems.org/license/LICENSE.
> + */
> +
> +#if HAVE_CONFIG_H
> +#include "config.h"
> +#endif
> +
> +#include <rtems/rtems/tasks.h>
> +#include <rtems/score/schedulerimpl.h>
> +#include <rtems/config.h>
> +
> +#if defined(RTEMS_SMP)
> +typedef struct {
> + const Scheduler_Control *scheduler;
> + rtems_status_code status;
> +} Scheduler_Processor_removal_context;
> +
> +static bool _Scheduler_Check_processor_removal(
> + Thread_Control *the_thread,
> + void *arg
> +)
> +{
> + Scheduler_Processor_removal_context *iter_context;
> + Thread_queue_Context queue_context;
> + ISR_lock_Context state_context;
> +
> + if ( the_thread->is_idle ) {
> + return false;
> + }
> +
> + iter_context = arg;
> +
> + _Thread_Wait_acquire( the_thread, &queue_context );
> + _Thread_State_acquire_critical( the_thread, &state_context );
> +
> + if ( _Thread_Scheduler_get_home( the_thread ) == iter_context->scheduler ) {
> + iter_context->status = RTEMS_RESOURCE_IN_USE;
> + }
> +
> + _Thread_State_release_critical( the_thread, &state_context );
> + _Thread_Wait_release( the_thread, &queue_context );
> + return iter_context->status != RTEMS_SUCCESSFUL;
> +}
> +#endif
> +
> +rtems_status_code rtems_scheduler_remove_processor(
> + rtems_id scheduler_id,
> + uint32_t cpu_index
> +)
> +{
> + const Scheduler_Control *scheduler;
> +#if defined(RTEMS_SMP)
> + uint32_t processor_count;
> + Scheduler_Processor_removal_context iter_context;
> + ISR_lock_Context lock_context;
> + Scheduler_Context *scheduler_context;
> + Per_CPU_Control *cpu;
> + Per_CPU_Control *cpu_self;
> +#endif
> +
> + scheduler = _Scheduler_Get_by_id( scheduler_id );
> + if ( scheduler == NULL ) {
> + return RTEMS_INVALID_ID;
> + }
> +
> + if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
> + return RTEMS_INVALID_NUMBER;
> + }
> +
> +#if defined(RTEMS_SMP)
> + iter_context.scheduler = scheduler;
> + iter_context.status = RTEMS_SUCCESSFUL;
> + scheduler_context = _Scheduler_Get_context( scheduler );
> + cpu = _Per_CPU_Get_by_index( cpu_index );
> +
> + _Objects_Allocator_lock();
> +
> + if ( cpu->Scheduler.control != scheduler ) {
> + _Objects_Allocator_unlock();
> + return RTEMS_INVALID_NUMBER;
> + }
> +
> + /*
> + * This prevents the selection of this scheduler instance by new threads in
> + * case the processor count changes to zero.
> + */
> + _ISR_lock_ISR_disable( &lock_context );
> + _Scheduler_Acquire_critical( scheduler, &lock_context );
> + processor_count = scheduler_context->processor_count - 1;
> + scheduler_context->processor_count = processor_count;
> + _Scheduler_Release_critical( scheduler, &lock_context );
> + _ISR_lock_ISR_enable( &lock_context );
> +
> + if ( processor_count == 0 ) {
> + _Thread_Iterate( _Scheduler_Check_processor_removal, &iter_context );
> + }
> +
> + _ISR_lock_ISR_disable( &lock_context );
> + _Scheduler_Acquire_critical( scheduler, &lock_context );
> +
> + if ( iter_context.status == RTEMS_SUCCESSFUL ) {
> + Thread_Control *idle;
> + Scheduler_Node *scheduler_node;
> +
> + cpu->Scheduler.control = NULL;
> + cpu->Scheduler.context = NULL;
> + idle = ( *scheduler->Operations.remove_processor )( scheduler, cpu );
> + cpu->Scheduler.idle_if_online_and_unused = idle;
> +
> + scheduler_node = _Thread_Scheduler_get_home_node( idle );
> + _Priority_Plain_extract(
> + &scheduler_node->Wait.Priority,
> + &idle->Real_priority
> + );
> + _Assert( _Priority_Is_empty( &scheduler_node->Wait.Priority ) );
> + _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
> + _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
> + _Chain_Extract_unprotected( &scheduler_node->Thread.Scheduler_node.Chain );
> + _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
> + _Scheduler_Node_destroy( scheduler, scheduler_node );
> + } else {
> + ++scheduler_context->processor_count;
> + }
> +
> + cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
> + _Scheduler_Release_critical( scheduler, &lock_context );
> + _ISR_lock_ISR_enable( &lock_context );
> + _Thread_Dispatch_enable( cpu_self );
> + _Objects_Allocator_unlock();
> + return iter_context.status;
> +#else
> + return RTEMS_RESOURCE_IN_USE;
> +#endif
> +}
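
Since RTEMS_RESOURCE_IN_USE is returned as long as some non-idle task still
has this scheduler instance as its home scheduler, removing the last
processor of an instance may have to be retried; the smpscheduler04 test
below uses exactly this pattern:

  rtems_status_code sc;

  /* Retry until no task uses the scheduler instance as its home scheduler */
  do {
    sc = rtems_scheduler_remove_processor(scheduler_id, cpu_index);
  } while (sc == RTEMS_RESOURCE_IN_USE);
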
> diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
> index 94aef1d..ae789b8 100644
> --- a/cpukit/score/include/rtems/score/percpu.h
> +++ b/cpukit/score/include/rtems/score/percpu.h
> @@ -426,6 +426,12 @@ typedef struct Per_CPU_Control {
> * scheduler instance.
> */
> const struct Scheduler_Context *context;
> +
> + /**
> + * @brief The idle thread for this processor in case it is online and
> + * currently not used by a scheduler instance.
> + */
> + struct _Thread_Control *idle_if_online_and_unused;
> } Scheduler;
>
> /**
> diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
> index 2e2f5f4..7d461f8 100644
> --- a/cpukit/score/include/rtems/score/scheduler.h
> +++ b/cpukit/score/include/rtems/score/scheduler.h
> @@ -148,6 +148,30 @@ typedef struct {
> Scheduler_Node *node,
> Thread_Scheduler_state next_state
> );
> +
> + /**
> + * @brief Add processor operation.
> + *
> + * @param[in] scheduler The scheduler instance to add the processor to.
> + * @param[in] idle The idle thread of the processor to add.
> + */
> + void ( *add_processor )(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> + );
> +
> + /**
> + * @brief Remove processor operation.
> + *
> + * @param[in] scheduler The scheduler instance to remove the processor from.
> + * @param[in] cpu The processor to remove.
> + *
> + * @return The idle thread of the removed processor.
> + */
> + Thread_Control *( *remove_processor )(
> + const Scheduler_Control *scheduler,
> + struct Per_CPU_Control *cpu
> + );
> #endif
>
> /** @see _Scheduler_Node_initialize() */
> @@ -392,7 +416,9 @@ Priority_Control _Scheduler_default_Map_priority(
> #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP \
> _Scheduler_default_Ask_for_help, \
> _Scheduler_default_Reconsider_help_request, \
> - _Scheduler_default_Withdraw_node,
> + _Scheduler_default_Withdraw_node, \
> + NULL, \
> + NULL,
> #else
> #define SCHEDULER_OPERATION_DEFAULT_ASK_FOR_HELP
> #endif
> diff --git a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
> index 4c5b8bb..d1275bc 100644
> --- a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
> +++ b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
> @@ -60,6 +60,8 @@ extern "C" {
> _Scheduler_priority_affinity_SMP_Ask_for_help, \
> _Scheduler_priority_affinity_SMP_Reconsider_help_request, \
> _Scheduler_priority_affinity_SMP_Withdraw_node, \
> + _Scheduler_priority_affinity_SMP_Add_processor, \
> + _Scheduler_priority_affinity_SMP_Remove_processor, \
> _Scheduler_priority_affinity_SMP_Node_initialize, \
> _Scheduler_default_Node_destroy, \
> _Scheduler_default_Release_job, \
> @@ -143,6 +145,16 @@ void _Scheduler_priority_affinity_SMP_Withdraw_node(
> Thread_Scheduler_state next_state
> );
>
> +void _Scheduler_priority_affinity_SMP_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +);
> +
> +Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
> + const Scheduler_Control *scheduler,
> + struct Per_CPU_Control *cpu
> +);
> +
> /**
> * @brief Set affinity for the priority affinity SMP scheduler.
> *
> diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmp.h b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
> index b5fdec4..75cc9b6 100644
> --- a/cpukit/score/include/rtems/score/schedulerprioritysmp.h
> +++ b/cpukit/score/include/rtems/score/schedulerprioritysmp.h
> @@ -89,6 +89,8 @@ typedef struct {
> _Scheduler_priority_SMP_Ask_for_help, \
> _Scheduler_priority_SMP_Reconsider_help_request, \
> _Scheduler_priority_SMP_Withdraw_node, \
> + _Scheduler_priority_SMP_Add_processor, \
> + _Scheduler_priority_SMP_Remove_processor, \
> _Scheduler_priority_SMP_Node_initialize, \
> _Scheduler_default_Node_destroy, \
> _Scheduler_default_Release_job, \
> @@ -144,6 +146,16 @@ void _Scheduler_priority_SMP_Withdraw_node(
> Thread_Scheduler_state next_state
> );
>
> +void _Scheduler_priority_SMP_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +);
> +
> +Thread_Control *_Scheduler_priority_SMP_Remove_processor(
> + const Scheduler_Control *scheduler,
> + struct Per_CPU_Control *cpu
> +);
> +
> bool _Scheduler_priority_SMP_Yield(
> const Scheduler_Control *scheduler,
> Thread_Control *thread,
> diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
> index 4fe4d29..5136565 100644
> --- a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
> +++ b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
> @@ -57,6 +57,14 @@ _Scheduler_priority_SMP_Node_downcast( Scheduler_Node *node )
> return (Scheduler_priority_SMP_Node *) node;
> }
>
> +static inline bool _Scheduler_priority_SMP_Has_ready( Scheduler_Context *context )
> +{
> + Scheduler_priority_SMP_Context *self =
> + _Scheduler_priority_SMP_Get_self( context );
> +
> + return !_Priority_bit_map_Is_empty( &self->Bit_map );
> +}
> +
> static inline void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
> Scheduler_Context *context,
> Scheduler_Node *scheduled_to_ready
> diff --git a/cpukit/score/include/rtems/score/schedulersimplesmp.h b/cpukit/score/include/rtems/score/schedulersimplesmp.h
> index a242325..0cf3877 100644
> --- a/cpukit/score/include/rtems/score/schedulersimplesmp.h
> +++ b/cpukit/score/include/rtems/score/schedulersimplesmp.h
> @@ -72,6 +72,8 @@ typedef struct {
> _Scheduler_simple_SMP_Ask_for_help, \
> _Scheduler_simple_SMP_Reconsider_help_request, \
> _Scheduler_simple_SMP_Withdraw_node, \
> + _Scheduler_simple_SMP_Add_processor, \
> + _Scheduler_simple_SMP_Remove_processor, \
> _Scheduler_simple_SMP_Node_initialize, \
> _Scheduler_default_Node_destroy, \
> _Scheduler_default_Release_job, \
> @@ -127,6 +129,16 @@ void _Scheduler_simple_SMP_Withdraw_node(
> Thread_Scheduler_state next_state
> );
>
> +void _Scheduler_simple_SMP_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +);
> +
> +Thread_Control *_Scheduler_simple_SMP_Remove_processor(
> + const Scheduler_Control *scheduler,
> + struct Per_CPU_Control *cpu
> +);
> +
> bool _Scheduler_simple_SMP_Yield(
> const Scheduler_Control *scheduler,
> Thread_Control *thread,
> diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
> index ece075a..95a9eae 100644
> --- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
> +++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
> @@ -275,6 +275,10 @@ extern "C" {
> * @{
> */
>
> +typedef bool ( *Scheduler_SMP_Has_ready )(
> + Scheduler_Context *context
> +);
> +
> typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
> Scheduler_Context *context,
> Scheduler_Node *node
> @@ -469,6 +473,13 @@ static inline void _Scheduler_SMP_Release_idle_thread(
> _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
> }
>
> +static inline void _Scheduler_SMP_Extract_idle_thread(
> + Thread_Control *idle
> +)
> +{
> + _Chain_Extract_unprotected( &idle->Object.Node );
> +}
> +
> static inline void _Scheduler_SMP_Allocate_processor_lazy(
> Scheduler_Context *context,
> Thread_Control *scheduled_thread,
> @@ -1271,6 +1282,87 @@ static inline void _Scheduler_SMP_Withdraw_node(
> }
> }
>
> +static inline void _Scheduler_SMP_Add_processor(
> + Scheduler_Context *context,
> + Thread_Control *idle,
> + Scheduler_SMP_Has_ready has_ready,
> + Scheduler_SMP_Enqueue enqueue_scheduled_fifo
> +)
> +{
> + Scheduler_SMP_Context *self;
> + Scheduler_Node *node;
> +
> + self = _Scheduler_SMP_Get_self( context );
> + idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
> + _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
> + node = _Thread_Scheduler_get_home_node( idle );
> + _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
> +
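> + /*
> + * If the scheduler has ready threads, enqueue the idle node as a
> + * scheduled node so that the highest priority ready thread can take
> + * over the new processor; otherwise the idle thread keeps it.
> + */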
> + if ( ( *has_ready )( &self->Base ) ) {
> + ( *enqueue_scheduled_fifo )( &self->Base, node );
> + } else {
> + _Chain_Append_unprotected( &self->Scheduled, &node->Node );
> + }
> +}
> +
> +static inline Thread_Control *_Scheduler_SMP_Remove_processor(
> + Scheduler_Context *context,
> + Per_CPU_Control *cpu,
> + Scheduler_SMP_Extract extract_from_ready,
> + Scheduler_SMP_Enqueue enqueue_fifo
> +)
> +{
> + Scheduler_SMP_Context *self;
> + Chain_Node *chain_node;
> + Scheduler_Node *victim_node;
> + Thread_Control *victim_user;
> + Thread_Control *victim_owner;
> + Thread_Control *idle;
> +
> + self = _Scheduler_SMP_Get_self( context );
> + chain_node = _Chain_First( &self->Scheduled );
> +
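> + /*
> + * Find the scheduled node whose user thread currently executes on the
> + * processor to be removed; this node is the victim.
> + */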
> + do {
> + _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
> + victim_node = (Scheduler_Node *) chain_node;
> + victim_user = _Scheduler_Node_get_user( victim_node );
> + chain_node = _Chain_Next( chain_node );
> + } while ( _Thread_Get_CPU( victim_user ) != cpu );
> +
> + _Scheduler_SMP_Extract_from_scheduled( victim_node );
> + victim_owner = _Scheduler_Node_get_owner( victim_node );
> +
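> + /*
> + * If the victim is a real thread, let an idle thread of this scheduler
> + * instance take over the removed processor and re-enqueue the victim so
> + * that it can be scheduled elsewhere; otherwise the victim already is
> + * the idle thread of the removed processor.
> + */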
> + if ( !victim_owner->is_idle ) {
> + Scheduler_Node *idle_node;
> +
> + _Scheduler_Release_idle_thread(
> + &self->Base,
> + victim_node,
> + _Scheduler_SMP_Release_idle_thread
> + );
> + idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
> + idle_node = _Thread_Scheduler_get_home_node( idle );
> + ( *extract_from_ready )( &self->Base, idle_node );
> + _Scheduler_SMP_Preempt(
> + &self->Base,
> + idle_node,
> + victim_node,
> + _Scheduler_SMP_Allocate_processor_exact
> + );
> +
> + if ( !_Chain_Is_empty( &self->Scheduled ) ) {
> + ( *enqueue_fifo )( context, victim_node );
> + }
> + } else {
> + _Assert( victim_owner == victim_user );
> + _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
> + idle = victim_owner;
> + _Scheduler_SMP_Extract_idle_thread( idle );
> + }
> +
> + return idle;
> +}
> +
> /** @} */
>
> #ifdef __cplusplus
> diff --git a/cpukit/score/include/rtems/score/schedulerstrongapa.h b/cpukit/score/include/rtems/score/schedulerstrongapa.h
> index 99013f2..29dee66 100644
> --- a/cpukit/score/include/rtems/score/schedulerstrongapa.h
> +++ b/cpukit/score/include/rtems/score/schedulerstrongapa.h
> @@ -89,6 +89,8 @@ typedef struct {
> _Scheduler_strong_APA_Ask_for_help, \
> _Scheduler_strong_APA_Reconsider_help_request, \
> _Scheduler_strong_APA_Withdraw_node, \
> + _Scheduler_strong_APA_Add_processor, \
> + _Scheduler_strong_APA_Remove_processor, \
> _Scheduler_strong_APA_Node_initialize, \
> _Scheduler_default_Node_destroy, \
> _Scheduler_default_Release_job, \
> @@ -144,6 +146,16 @@ void _Scheduler_strong_APA_Withdraw_node(
> Thread_Scheduler_state next_state
> );
>
> +void _Scheduler_strong_APA_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +);
> +
> +Thread_Control *_Scheduler_strong_APA_Remove_processor(
> + const Scheduler_Control *scheduler,
> + struct Per_CPU_Control *cpu
> +);
> +
> bool _Scheduler_strong_APA_Yield(
> const Scheduler_Control *scheduler,
> Thread_Control *the_thread,
> diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
> index 3ca3b73..7689469 100644
> --- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
> +++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
> @@ -578,6 +578,36 @@ void _Scheduler_priority_affinity_SMP_Withdraw_node(
> );
> }
>
> +void _Scheduler_priority_affinity_SMP_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + _Scheduler_SMP_Add_processor(
> + context,
> + idle,
> + _Scheduler_priority_SMP_Has_ready,
> + _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo
> + );
> +}
> +
> +Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
> + const Scheduler_Control *scheduler,
> + Per_CPU_Control *cpu
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + return _Scheduler_SMP_Remove_processor(
> + context,
> + cpu,
> + _Scheduler_priority_SMP_Extract_from_ready,
> + _Scheduler_priority_affinity_SMP_Enqueue_fifo
> + );
> +}
> +
> /*
> * This is the public scheduler specific Change Priority operation.
> */
> diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
> index 79b3d59..b4786ea 100644
> --- a/cpukit/score/src/schedulerprioritysmp.c
> +++ b/cpukit/score/src/schedulerprioritysmp.c
> @@ -312,6 +312,36 @@ void _Scheduler_priority_SMP_Withdraw_node(
> );
> }
>
> +void _Scheduler_priority_SMP_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + _Scheduler_SMP_Add_processor(
> + context,
> + idle,
> + _Scheduler_priority_SMP_Has_ready,
> + _Scheduler_priority_SMP_Enqueue_scheduled_fifo
> + );
> +}
> +
> +Thread_Control *_Scheduler_priority_SMP_Remove_processor(
> + const Scheduler_Control *scheduler,
> + Per_CPU_Control *cpu
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + return _Scheduler_SMP_Remove_processor(
> + context,
> + cpu,
> + _Scheduler_priority_SMP_Extract_from_ready,
> + _Scheduler_priority_SMP_Enqueue_fifo
> + );
> +}
> +
> bool _Scheduler_priority_SMP_Yield(
> const Scheduler_Control *scheduler,
> Thread_Control *thread,
> diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
> index 392d4ff..221fcc3 100644
> --- a/cpukit/score/src/schedulersimplesmp.c
> +++ b/cpukit/score/src/schedulersimplesmp.c
> @@ -69,6 +69,14 @@ static void _Scheduler_simple_SMP_Do_update(
> _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
> }
>
> +static bool _Scheduler_simple_SMP_Has_ready( Scheduler_Context *context )
> +{
> + Scheduler_simple_SMP_Context *self =
> + _Scheduler_simple_SMP_Get_self( context );
> +
> + return !_Chain_Is_empty( &self->Ready );
> +}
> +
> static Scheduler_Node *_Scheduler_simple_SMP_Get_highest_ready(
> Scheduler_Context *context,
> Scheduler_Node *node
> @@ -379,6 +387,36 @@ void _Scheduler_simple_SMP_Withdraw_node(
> );
> }
>
> +void _Scheduler_simple_SMP_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + _Scheduler_SMP_Add_processor(
> + context,
> + idle,
> + _Scheduler_simple_SMP_Has_ready,
> + _Scheduler_simple_SMP_Enqueue_scheduled_fifo
> + );
> +}
> +
> +Thread_Control *_Scheduler_simple_SMP_Remove_processor(
> + const Scheduler_Control *scheduler,
> + Per_CPU_Control *cpu
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + return _Scheduler_SMP_Remove_processor(
> + context,
> + cpu,
> + _Scheduler_simple_SMP_Extract_from_ready,
> + _Scheduler_simple_SMP_Enqueue_fifo
> + );
> +}
> +
> bool _Scheduler_simple_SMP_Yield(
> const Scheduler_Control *scheduler,
> Thread_Control *thread,
> diff --git a/cpukit/score/src/schedulersmpstartidle.c b/cpukit/score/src/schedulersmpstartidle.c
> index c28a4c3..d34ba12 100644
> --- a/cpukit/score/src/schedulersmpstartidle.c
> +++ b/cpukit/score/src/schedulersmpstartidle.c
> @@ -26,7 +26,7 @@ void _Scheduler_SMP_Start_idle(
> self = _Scheduler_SMP_Get_self( context );
> node = _Scheduler_SMP_Thread_get_node( idle );
>
> - _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
> + _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
> node->state = SCHEDULER_SMP_NODE_SCHEDULED;
>
> _Thread_Set_CPU( idle, cpu );
> diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
> index eaa352e..07d27e9 100644
> --- a/cpukit/score/src/schedulerstrongapa.c
> +++ b/cpukit/score/src/schedulerstrongapa.c
> @@ -200,6 +200,14 @@ void _Scheduler_strong_APA_Node_initialize(
> );
> }
>
> +static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
> +{
> + Scheduler_strong_APA_Context *self =
> + _Scheduler_strong_APA_Get_self( context );
> +
> + return !_Priority_bit_map_Is_empty( &self->Bit_map );
> +}
> +
> static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
> Scheduler_Context *context,
> Scheduler_Node *node
> @@ -438,6 +446,36 @@ void _Scheduler_strong_APA_Withdraw_node(
> );
> }
>
> +void _Scheduler_strong_APA_Add_processor(
> + const Scheduler_Control *scheduler,
> + Thread_Control *idle
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + _Scheduler_SMP_Add_processor(
> + context,
> + idle,
> + _Scheduler_strong_APA_Has_ready,
> + _Scheduler_strong_APA_Enqueue_scheduled_fifo
> + );
> +}
> +
> +Thread_Control *_Scheduler_strong_APA_Remove_processor(
> + const Scheduler_Control *scheduler,
> + Per_CPU_Control *cpu
> +)
> +{
> + Scheduler_Context *context = _Scheduler_Get_context( scheduler );
> +
> + return _Scheduler_SMP_Remove_processor(
> + context,
> + cpu,
> + _Scheduler_strong_APA_Extract_from_ready,
> + _Scheduler_strong_APA_Enqueue_fifo
> + );
> +}
> +
> bool _Scheduler_strong_APA_Yield(
> const Scheduler_Control *scheduler,
> Thread_Control *the_thread,
> diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
> index 63398e3..86b9fed 100644
> --- a/testsuites/smptests/Makefile.am
> +++ b/testsuites/smptests/Makefile.am
> @@ -37,6 +37,7 @@ SUBDIRS += smpschedaffinity05
> SUBDIRS += smpscheduler01
> SUBDIRS += smpscheduler02
> SUBDIRS += smpscheduler03
> +SUBDIRS += smpscheduler04
> SUBDIRS += smpschedsem01
> SUBDIRS += smpsignal01
> SUBDIRS += smpstrongapa01
> diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
> index 6c632ce..75fef51 100644
> --- a/testsuites/smptests/configure.ac
> +++ b/testsuites/smptests/configure.ac
> @@ -97,6 +97,7 @@ smpschedaffinity05/Makefile
> smpscheduler01/Makefile
> smpscheduler02/Makefile
> smpscheduler03/Makefile
> +smpscheduler04/Makefile
> smpschedsem01/Makefile
> smpsignal01/Makefile
> smpswitchextension01/Makefile
> diff --git a/testsuites/smptests/smpscheduler02/init.c b/testsuites/smptests/smpscheduler02/init.c
> index 1492d4c..082bd21 100644
> --- a/testsuites/smptests/smpscheduler02/init.c
> +++ b/testsuites/smptests/smpscheduler02/init.c
> @@ -37,6 +37,8 @@ static rtems_id cmtx_id;
>
> static rtems_id imtx_id;
>
> +static volatile bool ready;
> +
> static void task(rtems_task_argument arg)
> {
> rtems_status_code sc;
> @@ -67,6 +69,131 @@ static void task(rtems_task_argument arg)
> }
> }
>
> +static void sticky_task(rtems_task_argument arg)
> +{
> + rtems_status_code sc;
> + rtems_id mtx_id;
> +
> + (void) arg;
> +
> + rtems_test_assert(rtems_get_current_processor() == 0);
> +
> + sc = rtems_semaphore_create(
> + rtems_build_name(' ', 'M', 'T', 'X'),
> + 1,
> + RTEMS_BINARY_SEMAPHORE | RTEMS_MULTIPROCESSOR_RESOURCE_SHARING,
> + 2,
> + &mtx_id
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_obtain(mtx_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + ready = true;
> +
> + sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_release(mtx_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_semaphore_delete(mtx_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_event_transient_send(main_task_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + while (1) {
> + /* Do nothing */
> + }
> +}
> +
> +static void test_scheduler_add_remove_processors(void)
> +{
> + rtems_status_code sc;
> + rtems_id scheduler_a_id;
> + rtems_id scheduler_c_id;
> +
> + sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_scheduler_add_processor(scheduler_c_id, 62);
> + rtems_test_assert(sc == RTEMS_NOT_CONFIGURED);
> +
> + sc = rtems_scheduler_add_processor(scheduler_c_id, 63);
> + rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
> +
> + sc = rtems_scheduler_remove_processor(scheduler_c_id, 62);
> + rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
> +
> + sc = rtems_scheduler_remove_processor(scheduler_a_id, 0);
> + rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
> +
> + if (rtems_get_processor_count() > 1) {
> + rtems_id scheduler_b_id;
> + rtems_id task_id;
> +
> + sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_scheduler_remove_processor(scheduler_b_id, 1);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_scheduler_add_processor(scheduler_a_id, 1);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + rtems_test_assert(rtems_get_current_processor() == 0);
> +
> + sc = rtems_scheduler_remove_processor(scheduler_a_id, 0);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + rtems_test_assert(rtems_get_current_processor() == 1);
> +
> + sc = rtems_scheduler_add_processor(scheduler_a_id, 0);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + rtems_test_assert(rtems_get_current_processor() == 1);
> +
> + sc = rtems_task_create(
> + rtems_build_name('T', 'A', 'S', 'K'),
> + 2,
> + RTEMS_MINIMUM_STACK_SIZE,
> + RTEMS_DEFAULT_MODES,
> + RTEMS_DEFAULT_ATTRIBUTES,
> + &task_id
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_start(task_id, sticky_task, 0);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + while (!ready) {
> + /* Wait */
> + }
> +
> + sc = rtems_scheduler_remove_processor(scheduler_a_id, 1);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + rtems_test_assert(rtems_get_current_processor() == 0);
> +
> + sc = rtems_event_transient_send(task_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_delete(task_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_scheduler_add_processor(scheduler_b_id, 1);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> + }
> +}
> +
> static void test(void)
> {
> rtems_status_code sc;
> @@ -248,6 +375,8 @@ static void test(void)
>
> sc = rtems_semaphore_delete(imtx_id);
> rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + test_scheduler_add_remove_processors();
> }
>
> static void Init(rtems_task_argument arg)
> @@ -271,6 +400,7 @@ static void Init(rtems_task_argument arg)
>
> #define CONFIGURE_MAXIMUM_TASKS 2
> #define CONFIGURE_MAXIMUM_SEMAPHORES 2
> +#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES 1
>
> #define CONFIGURE_SMP_APPLICATION
>
> diff --git a/testsuites/smptests/smpscheduler04/Makefile.am b/testsuites/smptests/smpscheduler04/Makefile.am
> new file mode 100644
> index 0000000..bcfea13
> --- /dev/null
> +++ b/testsuites/smptests/smpscheduler04/Makefile.am
> @@ -0,0 +1,19 @@
> +rtems_tests_PROGRAMS = smpscheduler04
> +smpscheduler04_SOURCES = init.c
> +
> +dist_rtems_tests_DATA = smpscheduler04.scn smpscheduler04.doc
> +
> +include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
> +include $(top_srcdir)/../automake/compile.am
> +include $(top_srcdir)/../automake/leaf.am
> +
> +AM_CPPFLAGS += -I$(top_srcdir)/../support/include
> +
> +LINK_OBJS = $(smpscheduler04_OBJECTS)
> +LINK_LIBS = $(smpscheduler04_LDLIBS)
> +
> +smpscheduler04$(EXEEXT): $(smpscheduler04_OBJECTS) $(smpscheduler04_DEPENDENCIES)
> + @rm -f smpscheduler04$(EXEEXT)
> + $(make-exe)
> +
> +include $(top_srcdir)/../automake/local.am
> diff --git a/testsuites/smptests/smpscheduler04/init.c b/testsuites/smptests/smpscheduler04/init.c
> new file mode 100644
> index 0000000..2e5b213
> --- /dev/null
> +++ b/testsuites/smptests/smpscheduler04/init.c
> @@ -0,0 +1,298 @@
> +/*
> + * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
> + *
> + * embedded brains GmbH
> + * Dornierstr. 4
> + * 82178 Puchheim
> + * Germany
> + * <rtems@embedded-brains.de>
> + *
> + * The license and distribution terms for this file may be
> + * found in the file LICENSE in this distribution or at
> + * http://www.rtems.org/license/LICENSE.
> + */
> +
> +#ifdef HAVE_CONFIG_H
> + #include "config.h"
> +#endif
> +
> +#include <inttypes.h>
> +#include <string.h>
> +#include <stdio.h>
> +
> +#include <rtems.h>
> +#include <rtems/libcsupport.h>
> +
> +#include "tmacros.h"
> +
> +const char rtems_test_name[] = "SMPSCHEDULER 4";
> +
> +#define CPU_COUNT 32
> +
> +#define PRIO_MIGRATION 2
> +
> +#define PRIO_SCHEDULER 3
> +
> +typedef struct {
> + rtems_id migration_task;
> + rtems_id scheduler_task;
> + rtems_id scheduler_ids[CPU_COUNT];
> + uint32_t migration_counter RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
> + uint32_t scheduler_counter RTEMS_ALIGNED(CPU_CACHE_LINE_BYTES);
> +} test_context;
> +
> +static test_context test_instance;
> +
> +static void migration_task(rtems_task_argument arg)
> +{
> + test_context *ctx = (test_context *) arg;
> + uint32_t cpu_count = rtems_get_processor_count();
> + uint32_t cpu_index = rtems_get_current_processor();
> +
> + while (true) {
> + rtems_status_code sc;
> +
> + cpu_index = (cpu_index + 1) % cpu_count;
> +
> + sc = rtems_task_set_scheduler(
> + RTEMS_SELF,
> + ctx->scheduler_ids[cpu_index],
> + PRIO_MIGRATION
> + );
> +
> + if (sc == RTEMS_UNSATISFIED) {
> + continue;
> + }
> +
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> + rtems_test_assert(cpu_index == rtems_get_current_processor());
> + ++ctx->migration_counter;
> + }
> +}
> +
> +static void scheduler_task(rtems_task_argument arg)
> +{
> + test_context *ctx = (test_context *) arg;
> + uint32_t cpu_count = rtems_get_processor_count();
> + uint32_t cpu_index = rtems_get_current_processor();
> +
> + while (true) {
> + rtems_status_code sc;
> +
> + cpu_index = (cpu_index - 1) % cpu_count;
> +
> + if (cpu_index == 0) {
> + cpu_index = 1;
> + }
> +
> + do {
> + sc = rtems_scheduler_remove_processor(
> + ctx->scheduler_ids[cpu_index],
> + cpu_index
> + );
> + } while (sc == RTEMS_RESOURCE_IN_USE);
> +
> + sc = rtems_scheduler_add_processor(
> + ctx->scheduler_ids[cpu_index],
> + cpu_index
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + ++ctx->scheduler_counter;
> + }
> +}
> +
> +static void test(test_context *ctx)
> +{
> + rtems_status_code sc;
> + uint32_t i;
> +
> + for (i = 0; i < rtems_get_processor_count(); ++i) {
> + sc = rtems_scheduler_ident(i, &ctx->scheduler_ids[i]);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> + }
> +
> + sc = rtems_task_create(
> + rtems_build_name('M', 'I', 'G', 'R'),
> + PRIO_MIGRATION,
> + RTEMS_MINIMUM_STACK_SIZE,
> + RTEMS_DEFAULT_MODES,
> + RTEMS_DEFAULT_ATTRIBUTES,
> + &ctx->migration_task
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_start(
> + ctx->migration_task,
> + migration_task,
> + (rtems_task_argument) ctx
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_create(
> + rtems_build_name('S', 'C', 'H', 'D'),
> + PRIO_SCHEDULER,
> + RTEMS_MINIMUM_STACK_SIZE,
> + RTEMS_DEFAULT_MODES,
> + RTEMS_DEFAULT_ATTRIBUTES,
> + &ctx->scheduler_task
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_start(
> + ctx->scheduler_task,
> + scheduler_task,
> + (rtems_task_argument) ctx
> + );
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_wake_after(10 * rtems_clock_get_ticks_per_second());
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_delete(ctx->migration_task);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_task_delete(ctx->scheduler_task);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + printf(
> + "migration counter = %" PRIu32 "\n"
> + "scheduler counter = %" PRIu32 "\n",
> + ctx->migration_counter,
> + ctx->scheduler_counter
> + );
> +}
> +
> +static void Init(rtems_task_argument arg)
> +{
> + rtems_resource_snapshot snapshot;
> +
> + TEST_BEGIN();
> + rtems_resource_snapshot_take(&snapshot);
> + test(&test_instance);
> + rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
> + TEST_END();
> + rtems_test_exit(0);
> +}
> +
> +#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
> +#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
> +
> +#define CONFIGURE_MAXIMUM_TASKS 3
> +
> +#define CONFIGURE_SMP_APPLICATION
> +
> +#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
> +
> +#define CONFIGURE_SCHEDULER_SIMPLE_SMP
> +
> +#include <rtems/scheduler.h>
> +
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(17);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(18);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(19);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(20);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(21);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(22);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(23);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(24);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(25);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(26);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(27);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(28);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(29);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(30);
> +RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(31);
> +
> +#define CONFIGURE_SCHEDULER_CONTROLS \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(17, 17), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(18, 18), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(19, 19), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(20, 20), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(21, 21), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(22, 22), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(23, 23), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(24, 24), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(25, 25), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(26, 26), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(27, 27), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(28, 28), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(29, 29), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(30, 30), \
> + RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(31, 31)
> +
> +#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
> + RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
> + RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(17, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(18, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(19, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(20, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(21, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(22, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(23, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(24, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(25, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(26, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(27, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(28, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(29, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(30, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
> + RTEMS_SCHEDULER_ASSIGN(31, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
> +
> +#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
> +
> +#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
> +
> +#define CONFIGURE_INIT
> +
> +#include <rtems/confdefs.h>
> diff --git a/testsuites/smptests/smpscheduler04/smpscheduler04.doc b/testsuites/smptests/smpscheduler04/smpscheduler04.doc
> new file mode 100644
> index 0000000..91bf76b
> --- /dev/null
> +++ b/testsuites/smptests/smpscheduler04/smpscheduler04.doc
> @@ -0,0 +1,14 @@
> +This file describes the directives and concepts tested by this test set.
> +
> +test set name: smpscheduler04
> +
> +directives:
> +
> + - rtems_task_set_scheduler()
> + - rtems_scheduler_add_processor()
> + - rtems_scheduler_remove_processor()
> +
> +concepts:
> +
> + - Ensure that adding/removing a processor works with concurrent task
> + scheduler changes.
> diff --git a/testsuites/smptests/smpscheduler04/smpscheduler04.scn b/testsuites/smptests/smpscheduler04/smpscheduler04.scn
> new file mode 100644
> index 0000000..8f9be83
> --- /dev/null
> +++ b/testsuites/smptests/smpscheduler04/smpscheduler04.scn
> @@ -0,0 +1,4 @@
> +*** BEGIN OF TEST SMPSCHEDULER 4 ***
> +migration counter = 935005
> +scheduler counter = 903791
> +*** END OF TEST SMPSCHEDULER 4 ***
> diff --git a/testsuites/sptests/spscheduler01/init.c b/testsuites/sptests/spscheduler01/init.c
> index 162d71a..be9fed2 100644
> --- a/testsuites/sptests/spscheduler01/init.c
> +++ b/testsuites/sptests/spscheduler01/init.c
> @@ -411,6 +411,33 @@ static void test_scheduler_get_processors(void)
> #endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
> }
>
> +static void test_scheduler_add_remove_processors(void)
> +{
> + rtems_status_code sc;
> + rtems_id scheduler_id;
> +
> + sc = rtems_scheduler_ident(BLUE, &scheduler_id);
> + rtems_test_assert(sc == RTEMS_SUCCESSFUL);
> +
> + sc = rtems_scheduler_add_processor(invalid_id, 0);
> + rtems_test_assert(sc == RTEMS_INVALID_ID);
> +
> + sc = rtems_scheduler_remove_processor(invalid_id, 0);
> + rtems_test_assert(sc == RTEMS_INVALID_ID);
> +
> + sc = rtems_scheduler_add_processor(scheduler_id, 1);
> + rtems_test_assert(sc == RTEMS_NOT_CONFIGURED);
> +
> + sc = rtems_scheduler_remove_processor(scheduler_id, 1);
> + rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
> +
> + sc = rtems_scheduler_add_processor(scheduler_id, 0);
> + rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
> +
> + sc = rtems_scheduler_remove_processor(scheduler_id, 0);
> + rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
> +}
> +
> static void test_task_get_priority(void)
> {
> rtems_status_code sc;
> @@ -463,6 +490,7 @@ static void Init(rtems_task_argument arg)
> test_task_get_set_scheduler();
> test_scheduler_ident();
> test_scheduler_get_processors();
> + test_scheduler_add_remove_processors();
> test_task_get_priority();
>
> rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
> --
> 1.8.4.5
>
>