[PATCH 10/10] score: Add simple affinity support to EDF SMP
Sebastian Huber
sebastian.huber@embedded-brains.de
Thu Jul 6 13:36:49 UTC 2017
Update #3059.
---
cpukit/sapi/include/confdefs.h | 3 +-
cpukit/sapi/include/rtems/scheduler.h | 7 +-
cpukit/score/include/rtems/score/scheduleredfsmp.h | 23 +-
.../score/include/rtems/score/schedulersmpimpl.h | 92 +++++-
cpukit/score/src/scheduleredfsmp.c | 345 ++++++++++++++++---
testsuites/smptests/Makefile.am | 2 +
testsuites/smptests/configure.ac | 2 +
testsuites/smptests/smpschededf01/init.c | 4 +-
testsuites/smptests/smpschededf02/Makefile.am | 19 ++
testsuites/smptests/smpschededf02/init.c | 367 +++++++++++++++++++++
.../smptests/smpschededf02/smpschededf02.doc | 11 +
.../smptests/smpschededf02/smpschededf02.scn | 0
testsuites/smptests/smpschededf03/Makefile.am | 19 ++
testsuites/smptests/smpschededf03/init.c | 160 +++++++++
.../smptests/smpschededf03/smpschededf03.doc | 12 +
.../smptests/smpschededf03/smpschededf03.scn | 2 +
testsuites/smptests/smpscheduler07/init.c | 2 +-
17 files changed, 1022 insertions(+), 48 deletions(-)
create mode 100644 testsuites/smptests/smpschededf02/Makefile.am
create mode 100644 testsuites/smptests/smpschededf02/init.c
create mode 100644 testsuites/smptests/smpschededf02/smpschededf02.doc
create mode 100644 testsuites/smptests/smpschededf02/smpschededf02.scn
create mode 100644 testsuites/smptests/smpschededf03/Makefile.am
create mode 100644 testsuites/smptests/smpschededf03/init.c
create mode 100644 testsuites/smptests/smpschededf03/smpschededf03.doc
create mode 100644 testsuites/smptests/smpschededf03/smpschededf03.scn
diff --git a/cpukit/sapi/include/confdefs.h b/cpukit/sapi/include/confdefs.h
index 77b80d1cce..d01c927014 100755
--- a/cpukit/sapi/include/confdefs.h
+++ b/cpukit/sapi/include/confdefs.h
@@ -992,7 +992,8 @@ extern rtems_initialization_tasks_table Initialization_tasks[];
#if !defined(CONFIGURE_SCHEDULER_CONTROLS)
/** Configure the context needed by the scheduler instance */
- #define CONFIGURE_SCHEDULER_CONTEXT RTEMS_SCHEDULER_CONTEXT_EDF_SMP(dflt)
+ #define CONFIGURE_SCHEDULER_CONTEXT \
+ RTEMS_SCHEDULER_CONTEXT_EDF_SMP(dflt, CONFIGURE_MAXIMUM_PROCESSORS)
/** Configure the controls for this scheduler instance */
#define CONFIGURE_SCHEDULER_CONTROLS \
diff --git a/cpukit/sapi/include/rtems/scheduler.h b/cpukit/sapi/include/rtems/scheduler.h
index fae0db4913..49ad06d36f 100644
--- a/cpukit/sapi/include/rtems/scheduler.h
+++ b/cpukit/sapi/include/rtems/scheduler.h
@@ -99,8 +99,11 @@
#define RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name ) \
RTEMS_SCHEDULER_CONTEXT_NAME( EDF_SMP_ ## name )
- #define RTEMS_SCHEDULER_CONTEXT_EDF_SMP( name ) \
- static Scheduler_EDF_SMP_Context RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name )
+ #define RTEMS_SCHEDULER_CONTEXT_EDF_SMP( name, max_cpu_count ) \
+ static struct { \
+ Scheduler_EDF_SMP_Context Base; \
+ RBTree_Control Ready[ ( max_cpu_count ) + 1 ]; \
+ } RTEMS_SCHEDULER_CONTEXT_EDF_SMP_NAME( name )
#define RTEMS_SCHEDULER_CONTROL_EDF_SMP( name, obj_name ) \
{ \
diff --git a/cpukit/score/include/rtems/score/scheduleredfsmp.h b/cpukit/score/include/rtems/score/scheduleredfsmp.h
index 8f6e85777a..f781e5f241 100644
--- a/cpukit/score/include/rtems/score/scheduleredfsmp.h
+++ b/cpukit/score/include/rtems/score/scheduleredfsmp.h
@@ -35,11 +35,15 @@ extern "C" {
typedef struct {
Scheduler_SMP_Context Base;
- RBTree_Control Ready;
+ uint64_t generation;
+ RBTree_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
} Scheduler_EDF_SMP_Context;
typedef struct {
Scheduler_SMP_Node Base;
+ uint64_t generation;
+ RBTree_Control *ready_queue_of_cpu;
+ uint32_t ready_queue_index;
} Scheduler_EDF_SMP_Node;
#define SCHEDULER_EDF_SMP_ENTRY_POINTS \
@@ -62,8 +66,8 @@ typedef struct {
_Scheduler_EDF_Release_job, \
_Scheduler_EDF_Cancel_job, \
_Scheduler_default_Tick, \
- _Scheduler_SMP_Start_idle \
- SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \
+ _Scheduler_EDF_SMP_Start_idle, \
+ _Scheduler_EDF_SMP_Set_affinity \
}
void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler );
@@ -128,6 +132,19 @@ void _Scheduler_EDF_SMP_Yield(
Scheduler_Node *node
);
+void _Scheduler_EDF_SMP_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ struct Per_CPU_Control *cpu
+);
+
+bool _Scheduler_EDF_SMP_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+);
+
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index b90c359d4c..2dca037051 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -317,6 +317,12 @@ typedef void ( *Scheduler_SMP_Update )(
Priority_Control new_priority
);
+typedef void ( *Scheduler_SMP_Set_affinity )(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ void *arg
+);
+
typedef bool ( *Scheduler_SMP_Enqueue )(
Scheduler_Context *context,
Scheduler_Node *node_to_enqueue
@@ -525,6 +531,7 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
Per_CPU_Control *cpu_self = _Per_CPU_Get();
(void) context;
+ (void) victim_thread;
_Thread_Set_CPU( scheduled_thread, victim_cpu );
_Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
@@ -907,6 +914,50 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
} while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}
+static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *victim,
+ Per_CPU_Control *victim_cpu,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_Try_to_schedule_action action;
+
+ do {
+ Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
+
+ action = _Scheduler_Try_to_schedule_node(
+ context,
+ highest_ready,
+ NULL,
+ _Scheduler_SMP_Get_idle_thread
+ );
+
+ if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+ _Scheduler_SMP_Preempt(
+ context,
+ highest_ready,
+ victim,
+ allocate_processor
+ );
+
+ ( *move_from_ready_to_scheduled )( context, highest_ready );
+ } else {
+ _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+
+ _Scheduler_SMP_Node_change_state(
+ highest_ready,
+ SCHEDULER_SMP_NODE_BLOCKED
+ );
+
+ ( *extract_from_ready )( context, highest_ready );
+ }
+ } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
+}
+
/**
* @brief Blocks a thread.
*
@@ -1359,6 +1410,45 @@ static inline Thread_Control *_Scheduler_SMP_Remove_processor(
return idle;
}
+static inline void _Scheduler_SMP_Set_affinity(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ void *arg,
+ Scheduler_SMP_Set_affinity set_affinity,
+ Scheduler_SMP_Extract extract_from_ready,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Enqueue enqueue_fifo,
+ Scheduler_SMP_Allocate_processor allocate_processor
+)
+{
+ Scheduler_SMP_Node_state node_state;
+
+ node_state = _Scheduler_SMP_Node_state( node );
+
+ if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ _Scheduler_SMP_Extract_from_scheduled( node );
+ _Scheduler_SMP_Preempt_and_schedule_highest_ready(
+ context,
+ node,
+ _Thread_Get_CPU( thread ),
+ extract_from_ready,
+ get_highest_ready,
+ move_from_ready_to_scheduled,
+ allocate_processor
+ );
+ ( *set_affinity )( context, node, arg );
+ ( *enqueue_fifo )( context, node );
+ } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
+ ( *extract_from_ready )( context, node );
+ ( *set_affinity )( context, node, arg );
+ ( *enqueue_fifo )( context, node );
+ } else {
+ ( *set_affinity )( context, node, arg );
+ }
+}
+
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 644bf2f347..1f7c9786e6 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -21,13 +21,13 @@
#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>
-static Scheduler_EDF_SMP_Context *
+static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
{
return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
}
-static Scheduler_EDF_SMP_Context *
+static inline Scheduler_EDF_SMP_Context *
_Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
{
return (Scheduler_EDF_SMP_Context *) context;
@@ -39,6 +39,14 @@ _Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
return (Scheduler_EDF_SMP_Node *) node;
}
+static inline Scheduler_EDF_SMP_Node *
+_Scheduler_EDF_SMP_Get_node( const Thread_Control *thread )
+{
+ return _Scheduler_EDF_SMP_Node_downcast(
+ _Thread_Scheduler_get_home_node( thread )
+ );
+}
+
static inline bool _Scheduler_EDF_SMP_Less(
const void *left,
const RBTree_Node *right
@@ -83,7 +91,7 @@ void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
_Scheduler_EDF_SMP_Get_context( scheduler );
_Scheduler_SMP_Initialize( &self->Base );
- _RBTree_Initialize_empty( &self->Ready );
+ /* The ready queues are zero initialized and thus empty */
}
void _Scheduler_EDF_SMP_Node_initialize(
@@ -99,7 +107,7 @@ void _Scheduler_EDF_SMP_Node_initialize(
_Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
}
-static void _Scheduler_EDF_SMP_Do_update(
+static inline void _Scheduler_EDF_SMP_Do_update(
Scheduler_Context *context,
Scheduler_Node *node,
Priority_Control new_priority
@@ -113,29 +121,137 @@ static void _Scheduler_EDF_SMP_Do_update(
_Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}
-static bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
+static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
{
Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
- return !_RBTree_Is_empty( &self->Ready );
+ return !_RBTree_Is_empty( &self->Ready[ 0 ] );
+}
+
+static inline Scheduler_Node *_Scheduler_EDF_SMP_First(
+ Scheduler_EDF_SMP_Context *self,
+ uint32_t ready_queue_index
+)
+{
+ return (Scheduler_Node *)
+ _RBTree_Minimum( &self->Ready[ ready_queue_index ] );
+}
+
+static inline bool _Scheduler_EDF_SMP_Overall_less(
+ const Scheduler_EDF_SMP_Node *left,
+ const Scheduler_EDF_SMP_Node *right
+)
+{
+ Priority_Control lp;
+ Priority_Control rp;
+
+ lp = left->Base.priority;
+ rp = right->Base.priority;
+
+ return lp < rp || (lp == rp && left->generation < right->generation );
}
-static Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *filter
)
{
- Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
- Scheduler_Node *first = (Scheduler_Node *) _RBTree_Minimum( &self->Ready );
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *highest_ready;
+ Scheduler_EDF_SMP_Node *scheduled;
+ uint32_t rqi;
+ const Chain_Node *tail;
+ Chain_Node *next;
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ highest_ready = (Scheduler_EDF_SMP_Node *)
+ _RBTree_Minimum( &self->Ready[ 0 ] );
+ _Assert( highest_ready != NULL );
+
+ /*
+ * The filter node is a scheduled node which is no longer on the scheduled
+ * chain. On this processor we have to check the corresponding ready queue
+ * unconditionally, so set the ready queue index to zero.
+ */
+ scheduled = (Scheduler_EDF_SMP_Node *) filter;
+ rqi = 0;
+
+ tail = _Chain_Immutable_tail( &self->Base.Scheduled );
+ next = _Chain_Head( &self->Base.Scheduled );
+
+ do {
+ if ( rqi == 0 && !_RBTree_Is_empty( scheduled->ready_queue_of_cpu ) ) {
+ Scheduler_EDF_SMP_Node *other;
+
+ other = (Scheduler_EDF_SMP_Node *)
+ _RBTree_Minimum( scheduled->ready_queue_of_cpu );
+
+ if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
+ highest_ready = other;
+ }
+ }
+
+ next = _Chain_Next( next );
+ scheduled = (Scheduler_EDF_SMP_Node *) next;
+ rqi = scheduled->ready_queue_index;
+ } while ( next != tail );
+
+ return &highest_ready->Base.Base;
+}
- (void) node;
+static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled_node(
+ uint32_t ready_queue_index
+)
+{
+ const Per_CPU_Control *cpu;
+ uint32_t scheduler_index;
+
+ _Assert( ready_queue_index > 0 );
+ cpu = _Per_CPU_Get_by_index( ready_queue_index - 1 );
+ scheduler_index = _Scheduler_Get_index( cpu->Scheduler.control );
+ return (Scheduler_EDF_SMP_Node *)
+ &cpu->heir->Scheduler.nodes[ scheduler_index ];
+}
- _Assert( &first->Node != NULL );
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *filter_base,
+ Chain_Node_order order
+)
+{
+ Scheduler_EDF_SMP_Node *filter;
+ uint32_t rqi;
+
+ filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
+ rqi = filter->ready_queue_index;
+
+ if ( rqi != 0 ) {
+ Scheduler_EDF_SMP_Node *node;
+
+ node = _Scheduler_EDF_SMP_Get_scheduled_node( rqi );
+
+ if ( node->ready_queue_index > 0 ) {
+ _Assert( node->ready_queue_index == rqi );
+ return &node->Base.Base;
+ }
+ }
+
+ return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base, order );
+}
+
+static inline void _Scheduler_EDF_SMP_Update_generation(
+ Scheduler_EDF_SMP_Context *self,
+ Scheduler_EDF_SMP_Node *node
+)
+{
+ uint64_t generation;
- return first;
+ generation = self->generation;
+ node->generation = generation;
+ self->generation = generation + 1;
}
-static void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
+static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
Scheduler_Context *context,
Scheduler_Node *scheduled_to_ready
)
@@ -145,16 +261,17 @@ static void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
_Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
_Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
+ _Scheduler_EDF_SMP_Update_generation( self, node );
_RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
_RBTree_Insert_inline(
- &self->Ready,
+ &self->Ready[ node->ready_queue_index ],
&node->Base.Base.Node.RBTree,
&node->Base.priority,
_Scheduler_EDF_SMP_Less
);
}
-static void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
+static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
Scheduler_Context *context,
Scheduler_Node *ready_to_scheduled
)
@@ -163,7 +280,10 @@ static void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
Scheduler_EDF_SMP_Node *node =
_Scheduler_EDF_SMP_Node_downcast( ready_to_scheduled );
- _RBTree_Extract( &self->Ready, &node->Base.Base.Node.RBTree );
+ _RBTree_Extract(
+ &self->Ready[ node->ready_queue_index ],
+ &node->Base.Base.Node.RBTree
+ );
_Chain_Initialize_node( &node->Base.Base.Node.Chain );
_Chain_Insert_ordered_unprotected(
&self->Base.Scheduled,
@@ -172,7 +292,7 @@ static void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
);
}
-static void _Scheduler_EDF_SMP_Insert_ready_lifo(
+static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
Scheduler_Context *context,
Scheduler_Node *node_to_insert
)
@@ -181,16 +301,17 @@ static void _Scheduler_EDF_SMP_Insert_ready_lifo(
Scheduler_EDF_SMP_Node *node =
_Scheduler_EDF_SMP_Node_downcast( node_to_insert );
+ _Scheduler_EDF_SMP_Update_generation( self, node );
_RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
_RBTree_Insert_inline(
- &self->Ready,
+ &self->Ready[ node->ready_queue_index ],
&node->Base.Base.Node.RBTree,
&node->Base.priority,
_Scheduler_EDF_SMP_Less_or_equal
);
}
-static void _Scheduler_EDF_SMP_Insert_ready_fifo(
+static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
Scheduler_Context *context,
Scheduler_Node *node_to_insert
)
@@ -199,16 +320,17 @@ static void _Scheduler_EDF_SMP_Insert_ready_fifo(
Scheduler_EDF_SMP_Node *node =
_Scheduler_EDF_SMP_Node_downcast( node_to_insert );
+ _Scheduler_EDF_SMP_Update_generation( self, node );
_RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
_RBTree_Insert_inline(
- &self->Ready,
+ &self->Ready[ node->ready_queue_index ],
&node->Base.Base.Node.RBTree,
&node->Base.priority,
_Scheduler_EDF_SMP_Less
);
}
-static void _Scheduler_EDF_SMP_Extract_from_ready(
+static inline void _Scheduler_EDF_SMP_Extract_from_ready(
Scheduler_Context *context,
Scheduler_Node *node_to_extract
)
@@ -217,10 +339,73 @@ static void _Scheduler_EDF_SMP_Extract_from_ready(
Scheduler_EDF_SMP_Node *node =
_Scheduler_EDF_SMP_Node_downcast( node_to_extract );
- _RBTree_Extract( &self->Ready, &node->Base.Base.Node.RBTree );
+ _RBTree_Extract(
+ &self->Ready[ node->ready_queue_index ],
+ &node->Base.Base.Node.RBTree
+ );
_Chain_Initialize_node( &node->Base.Base.Node.Chain );
}
+static inline void _Scheduler_EDF_SMP_Set_ready_queue_of_CPU(
+ Scheduler_EDF_SMP_Context *self,
+ Scheduler_EDF_SMP_Node *node,
+ Per_CPU_Control *cpu
+)
+{
+ node->ready_queue_of_cpu = &self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ];
+}
+
+static inline void _Scheduler_EDF_SMP_Allocate_processor(
+ Scheduler_Context *context,
+ Thread_Control *scheduled_thread,
+ Thread_Control *victim_thread,
+ Per_CPU_Control *victim_cpu
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ uint32_t rqi;
+
+ (void) victim_thread;
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Get_node( scheduled_thread );
+ rqi = node->ready_queue_index;
+
+ if ( rqi != 0 ) {
+ Per_CPU_Control *desired_cpu;
+
+ desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );
+
+ if ( victim_cpu != desired_cpu ) {
+ Thread_Control *other;
+ Scheduler_EDF_SMP_Node *other_node;
+
+ other = desired_cpu->heir;
+ other_node = _Scheduler_EDF_SMP_Get_node( other );
+ _Scheduler_EDF_SMP_Set_ready_queue_of_CPU(
+ self,
+ other_node,
+ victim_cpu
+ );
+ _Scheduler_SMP_Allocate_processor_exact(
+ context,
+ other,
+ NULL,
+ victim_cpu
+ );
+ victim_cpu = desired_cpu;
+ }
+ }
+
+ _Scheduler_EDF_SMP_Set_ready_queue_of_CPU( self, node, victim_cpu );
+ _Scheduler_SMP_Allocate_processor_exact(
+ context,
+ scheduled_thread,
+ NULL,
+ victim_cpu
+ );
+}
+
void _Scheduler_EDF_SMP_Block(
const Scheduler_Control *scheduler,
Thread_Control *thread,
@@ -236,11 +421,11 @@ void _Scheduler_EDF_SMP_Block(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_EDF_SMP_Allocate_processor
);
}
-static bool _Scheduler_EDF_SMP_Enqueue_ordered(
+static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
Scheduler_Context *context,
Scheduler_Node *node,
Chain_Node_order order,
@@ -255,12 +440,12 @@ static bool _Scheduler_EDF_SMP_Enqueue_ordered(
insert_ready,
insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
- _Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_EDF_SMP_Get_lowest_scheduled,
+ _Scheduler_EDF_SMP_Allocate_processor
);
}
-static bool _Scheduler_EDF_SMP_Enqueue_lifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
Scheduler_Context *context,
Scheduler_Node *node
)
@@ -274,7 +459,7 @@ static bool _Scheduler_EDF_SMP_Enqueue_lifo(
);
}
-static bool _Scheduler_EDF_SMP_Enqueue_fifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
Scheduler_Context *context,
Scheduler_Node *node
)
@@ -288,7 +473,7 @@ static bool _Scheduler_EDF_SMP_Enqueue_fifo(
);
}
-static bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
Scheduler_Context *context,
Scheduler_Node *node,
Chain_Node_order order,
@@ -305,11 +490,11 @@ static bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
insert_ready,
insert_scheduled,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_EDF_SMP_Allocate_processor
);
}
-static bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
Scheduler_Context *context,
Scheduler_Node *node
)
@@ -323,7 +508,7 @@ static bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
);
}
-static bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
Scheduler_Context *context,
Scheduler_Node *node
)
@@ -354,7 +539,7 @@ void _Scheduler_EDF_SMP_Unblock(
);
}
-static bool _Scheduler_EDF_SMP_Do_ask_for_help(
+static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
Scheduler_Context *context,
Thread_Control *the_thread,
Scheduler_Node *node
@@ -368,8 +553,8 @@ static bool _Scheduler_EDF_SMP_Do_ask_for_help(
_Scheduler_EDF_SMP_Insert_ready_lifo,
_Scheduler_SMP_Insert_scheduled_lifo,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
- _Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_EDF_SMP_Get_lowest_scheduled,
+ _Scheduler_EDF_SMP_Allocate_processor
);
}
@@ -439,7 +624,7 @@ void _Scheduler_EDF_SMP_Withdraw_node(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_EDF_SMP_Allocate_processor
);
}
@@ -448,10 +633,19 @@ void _Scheduler_EDF_SMP_Add_processor(
Thread_Control *idle
)
{
- Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+
+ self = _Scheduler_EDF_SMP_Get_context( scheduler );
+ node = _Scheduler_EDF_SMP_Get_node( idle );
+ _Scheduler_EDF_SMP_Set_ready_queue_of_CPU(
+ self,
+ node,
+ _Thread_Get_CPU( idle )
+ );
_Scheduler_SMP_Add_processor(
- context,
+ &self->Base.Base,
idle,
_Scheduler_EDF_SMP_Has_ready,
_Scheduler_EDF_SMP_Enqueue_scheduled_fifo
@@ -490,3 +684,76 @@ void _Scheduler_EDF_SMP_Yield(
_Scheduler_EDF_SMP_Enqueue_scheduled_fifo
);
}
+
+void _Scheduler_EDF_SMP_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ Per_CPU_Control *cpu
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+
+ self = _Scheduler_EDF_SMP_Get_context( scheduler );
+ node = _Scheduler_EDF_SMP_Get_node( idle );
+
+ _Scheduler_EDF_SMP_Set_ready_queue_of_CPU( self, node, cpu );
+ _Scheduler_SMP_Start_idle( scheduler, idle, cpu );
+}
+
+static inline void _Scheduler_EDF_SMP_Do_set_affinity(
+ Scheduler_Context *context,
+ Scheduler_Node *node_base,
+ void *arg
+)
+{
+ Scheduler_EDF_SMP_Node *node;
+ const uint32_t *rqi;
+
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ rqi = arg;
+ node->ready_queue_index = *rqi;
+}
+
+bool _Scheduler_EDF_SMP_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+)
+{
+ Scheduler_Context *context;
+ Processor_mask a;
+ Processor_mask b;
+ uint32_t rqi;
+
+ context = _Scheduler_Get_context( scheduler );
+ _Processor_mask_And( &a, &context->Processors, affinity );
+
+ if ( _Processor_mask_Count( &a ) == 0 ) {
+ return false;
+ }
+
+ _Processor_mask_And( &b, &_SMP_Online_processors, affinity );
+
+ if ( _Processor_mask_Count( &b ) == _SMP_Processor_count ) {
+ rqi = 0;
+ } else {
+ rqi = _Processor_mask_Find_last_set( &a );
+ }
+
+ _Scheduler_SMP_Set_affinity(
+ context,
+ thread,
+ node,
+ &rqi,
+ _Scheduler_EDF_SMP_Do_set_affinity,
+ _Scheduler_EDF_SMP_Extract_from_ready,
+ _Scheduler_EDF_SMP_Get_highest_ready,
+ _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_EDF_SMP_Enqueue_fifo,
+ _Scheduler_EDF_SMP_Allocate_processor
+ );
+
+ return true;
+}
diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
index 01dc52e524..40f0b0fd7a 100644
--- a/testsuites/smptests/Makefile.am
+++ b/testsuites/smptests/Makefile.am
@@ -36,6 +36,8 @@ _SUBDIRS += smpschedaffinity03
_SUBDIRS += smpschedaffinity04
_SUBDIRS += smpschedaffinity05
_SUBDIRS += smpschededf01
+_SUBDIRS += smpschededf02
+_SUBDIRS += smpschededf03
_SUBDIRS += smpschedsem01
_SUBDIRS += smpscheduler01
_SUBDIRS += smpscheduler02
diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
index f3a840b593..d72f6e072d 100644
--- a/testsuites/smptests/configure.ac
+++ b/testsuites/smptests/configure.ac
@@ -91,6 +91,8 @@ smpschedaffinity03/Makefile
smpschedaffinity04/Makefile
smpschedaffinity05/Makefile
smpschededf01/Makefile
+smpschededf02/Makefile
+smpschededf03/Makefile
smpschedsem01/Makefile
smpscheduler01/Makefile
smpscheduler02/Makefile
diff --git a/testsuites/smptests/smpschededf01/init.c b/testsuites/smptests/smpschededf01/init.c
index c1c995e69b..6b250cf699 100644
--- a/testsuites/smptests/smpschededf01/init.c
+++ b/testsuites/smptests/smpschededf01/init.c
@@ -140,11 +140,13 @@ static void Init(rtems_task_argument arg)
#define CONFIGURE_MAXIMUM_TASKS 3
#define CONFIGURE_MAXIMUM_PERIODS 2
+#define CONFIGURE_MAXIMUM_PROCESSORS 1
+
#define CONFIGURE_SCHEDULER_EDF_SMP
#include <rtems/scheduler.h>
-RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a);
+RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a, CONFIGURE_MAXIMUM_PROCESSORS);
#define CONFIGURE_SCHEDULER_CONTROLS \
RTEMS_SCHEDULER_CONTROL_EDF_SMP(a, rtems_build_name('E', 'D', 'F', ' '))
diff --git a/testsuites/smptests/smpschededf02/Makefile.am b/testsuites/smptests/smpschededf02/Makefile.am
new file mode 100644
index 0000000000..6e9e01cb60
--- /dev/null
+++ b/testsuites/smptests/smpschededf02/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpschededf02
+smpschededf02_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpschededf02.scn smpschededf02.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpschededf02_OBJECTS)
+LINK_LIBS = $(smpschededf02_LDLIBS)
+
+smpschededf02$(EXEEXT): $(smpschededf02_OBJECTS) $(smpschededf02_DEPENDENCIES)
+ @rm -f smpschededf02$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpschededf02/init.c b/testsuites/smptests/smpschededf02/init.c
new file mode 100644
index 0000000000..b2d5b886a5
--- /dev/null
+++ b/testsuites/smptests/smpschededf02/init.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2016, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ *  <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include "tmacros.h"
+
+#include <rtems.h>
+
+const char rtems_test_name[] = "SMPSCHEDEDF 2";
+
+#define CPU_COUNT 2
+
+#define TASK_COUNT 5
+
+#define P(i) (UINT32_C(2) + i)
+
+#define A(cpu0, cpu1) ((cpu1 << 1) | cpu0)
+
+#define IDLE UINT8_C(255)
+
+#define NAME rtems_build_name('E', 'D', 'F', ' ')
+
+typedef struct {
+ enum {
+ KIND_RESET,
+ KIND_SET_PRIORITY,
+ KIND_SET_AFFINITY,
+ KIND_BLOCK,
+ KIND_UNBLOCK
+ } kind;
+
+ size_t index;
+
+ struct {
+ rtems_task_priority priority;
+ uint32_t cpu_set;
+ } data;
+
+ uint8_t expected_cpu_allocations[CPU_COUNT];
+} test_action;
+
+typedef struct {
+ rtems_id timer_id;
+ rtems_id master_id;
+ rtems_id task_ids[TASK_COUNT];
+ size_t action_index;
+} test_context;
+
+#define RESET \
+ { \
+ KIND_RESET, \
+ 0, \
+ { 0 }, \
+ { IDLE, IDLE } \
+ }
+
+#define SET_PRIORITY(index, prio, cpu0, cpu1) \
+ { \
+ KIND_SET_PRIORITY, \
+ index, \
+ { .priority = prio }, \
+ { cpu0, cpu1 } \
+ }
+
+#define SET_AFFINITY(index, aff, cpu0, cpu1) \
+ { \
+ KIND_SET_AFFINITY, \
+ index, \
+ { .cpu_set = aff }, \
+ { cpu0, cpu1 } \
+ }
+
+#define BLOCK(index, cpu0, cpu1) \
+ { \
+ KIND_BLOCK, \
+ index, \
+ { 0 }, \
+ { cpu0, cpu1 } \
+ }
+
+#define UNBLOCK(index, cpu0, cpu1) \
+ { \
+ KIND_UNBLOCK, \
+ index, \
+ { 0 }, \
+ { cpu0, cpu1 } \
+ }
+
+static const test_action test_actions[] = {
+ RESET,
+ UNBLOCK( 0, 0, IDLE),
+ UNBLOCK( 1, 0, 1),
+ UNBLOCK( 3, 0, 1),
+ SET_PRIORITY( 1, P(2), 0, 1),
+ SET_PRIORITY( 3, P(1), 0, 3),
+ BLOCK( 3, 0, 1),
+ SET_AFFINITY( 1, A(1, 1), 0, 1),
+ SET_AFFINITY( 1, A(1, 0), 1, 0),
+ SET_AFFINITY( 1, A(1, 1), 1, 0),
+ SET_AFFINITY( 1, A(1, 0), 1, 0),
+ SET_AFFINITY( 1, A(0, 1), 0, 1),
+ BLOCK( 0, IDLE, 1),
+ UNBLOCK( 0, 0, 1),
+ BLOCK( 1, 0, IDLE),
+ UNBLOCK( 1, 0, 1),
+ RESET,
+ /*
+ * Show that FIFO order is honoured across all threads of the same priority.
+ */
+ SET_PRIORITY( 1, P(0), IDLE, IDLE),
+ SET_PRIORITY( 2, P(1), IDLE, IDLE),
+ SET_PRIORITY( 3, P(1), IDLE, IDLE),
+ SET_AFFINITY( 3, A(1, 0), IDLE, IDLE),
+ SET_PRIORITY( 4, P(1), IDLE, IDLE),
+ SET_AFFINITY( 4, A(1, 0), IDLE, IDLE),
+ UNBLOCK( 0, 0, IDLE),
+ UNBLOCK( 1, 0, 1),
+ UNBLOCK( 2, 0, 1),
+ UNBLOCK( 3, 0, 1),
+ UNBLOCK( 4, 0, 1),
+ BLOCK( 1, 0, 2),
+ BLOCK( 2, 3, 0),
+ BLOCK( 3, 4, 0),
+ RESET
+};
+
+static test_context test_instance;
+
+static void set_priority(rtems_id id, rtems_task_priority prio)
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_set_priority(id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void set_affinity(rtems_id id, uint32_t cpu_set_32)
+{
+ rtems_status_code sc;
+ cpu_set_t cpu_set;
+ size_t i;
+
+ CPU_ZERO(&cpu_set);
+
+ for (i = 0; i < CPU_COUNT; ++i) {
+ if ((cpu_set_32 & (UINT32_C(1) << i)) != 0) {
+ CPU_SET(i, &cpu_set);
+ }
+ }
+
+ sc = rtems_task_set_affinity(id, sizeof(cpu_set), &cpu_set);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void reset(test_context *ctx)
+{
+ rtems_status_code sc;
+ size_t i;
+
+ for (i = 0; i < TASK_COUNT; ++i) {
+ set_priority(ctx->task_ids[i], P(i));
+ set_affinity(ctx->task_ids[i], A(1, 1));
+ }
+
+ for (i = CPU_COUNT; i < TASK_COUNT; ++i) {
+ sc = rtems_task_suspend(ctx->task_ids[i]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED);
+ }
+
+ for (i = 0; i < CPU_COUNT; ++i) {
+ sc = rtems_task_resume(ctx->task_ids[i]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INCORRECT_STATE);
+ }
+
+ /* Order the idle threads explicitly */
+ for (i = 0; i < CPU_COUNT; ++i) {
+ const Per_CPU_Control *c;
+ const Thread_Control *h;
+
+ c = _Per_CPU_Get_by_index(CPU_COUNT - 1 - i);
+ h = c->heir;
+
+ sc = rtems_task_suspend(h->Object.id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+}
+
+static void check_cpu_allocations(test_context *ctx, const test_action *action)
+{
+ size_t i;
+
+ for (i = 0; i < CPU_COUNT; ++i) {
+ size_t e;
+ const Per_CPU_Control *c;
+ const Thread_Control *h;
+
+ e = action->expected_cpu_allocations[i];
+ c = _Per_CPU_Get_by_index(i);
+ h = c->heir;
+
+ if (e != IDLE) {
+ rtems_test_assert(h->Object.id == ctx->task_ids[e]);
+ } else {
+ rtems_test_assert(h->is_idle);
+ }
+ }
+}
+
+/*
+ * Use a timer to execute the actions, since it runs with thread dispatching
+ * disabled. This is necessary to check the expected processor allocations.
+ */
+static void timer(rtems_id id, void *arg)
+{
+ test_context *ctx;
+ rtems_status_code sc;
+ size_t i;
+
+ ctx = arg;
+ i = ctx->action_index;
+
+ if (i == 0) {
+ sc = rtems_task_suspend(ctx->master_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ if (i < RTEMS_ARRAY_SIZE(test_actions)) {
+ const test_action *action = &test_actions[i];
+ rtems_id task;
+
+ ctx->action_index = i + 1;
+
+ task = ctx->task_ids[action->index];
+
+ switch (action->kind) {
+ case KIND_SET_PRIORITY:
+ set_priority(task, action->data.priority);
+ break;
+ case KIND_SET_AFFINITY:
+ set_affinity(task, action->data.cpu_set);
+ break;
+ case KIND_BLOCK:
+ sc = rtems_task_suspend(task);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ break;
+ case KIND_UNBLOCK:
+ sc = rtems_task_resume(task);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ break;
+ default:
+ rtems_test_assert(action->kind == KIND_RESET);
+ reset(ctx);
+ break;
+ }
+
+ check_cpu_allocations(ctx, action);
+
+ sc = rtems_timer_reset(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ } else {
+ sc = rtems_task_resume(ctx->master_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_event_transient_send(ctx->master_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+}
+
+static void do_nothing_task(rtems_task_argument arg)
+{
+ (void) arg;
+
+ while (true) {
+ /* Do nothing */
+ }
+}
+
+static void test(void)
+{
+ test_context *ctx;
+ rtems_status_code sc;
+ size_t i;
+
+ ctx = &test_instance;
+
+ ctx->master_id = rtems_task_self();
+
+ for (i = 0; i < TASK_COUNT; ++i) {
+ sc = rtems_task_create(
+ NAME,
+ P(i),
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->task_ids[i]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(ctx->task_ids[i], do_nothing_task, 0);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ sc = rtems_timer_create(NAME, &ctx->timer_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_timer_fire_after(ctx->timer_id, 1, timer, ctx);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (i = 0; i < TASK_COUNT; ++i) {
+ sc = rtems_task_delete(ctx->task_ids[i]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ sc = rtems_timer_delete(ctx->timer_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void Init(rtems_task_argument arg)
+{
+ TEST_BEGIN();
+
+ if (rtems_get_processor_count() == CPU_COUNT) {
+ test();
+ } else {
+ puts("warning: wrong processor count to run the test");
+ }
+
+ TEST_END();
+ rtems_test_exit(0);
+}
+
+#define CONFIGURE_MICROSECONDS_PER_TICK 1000
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
+#define CONFIGURE_MAXIMUM_TIMERS 1
+
+#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT
+
+#define CONFIGURE_SCHEDULER_EDF_SMP
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpschededf02/smpschededf02.doc b/testsuites/smptests/smpschededf02/smpschededf02.doc
new file mode 100644
index 0000000000..ece0e1a20e
--- /dev/null
+++ b/testsuites/smptests/smpschededf02/smpschededf02.doc
@@ -0,0 +1,11 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpschededf02
+
+directives:
+
+ TBD
+
+concepts:
+
+ TBD
diff --git a/testsuites/smptests/smpschededf02/smpschededf02.scn b/testsuites/smptests/smpschededf02/smpschededf02.scn
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/testsuites/smptests/smpschededf03/Makefile.am b/testsuites/smptests/smpschededf03/Makefile.am
new file mode 100644
index 0000000000..e3496f50aa
--- /dev/null
+++ b/testsuites/smptests/smpschededf03/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpschededf03
+smpschededf03_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpschededf03.scn smpschededf03.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpschededf03_OBJECTS)
+LINK_LIBS = $(smpschededf03_LDLIBS)
+
+smpschededf03$(EXEEXT): $(smpschededf03_OBJECTS) $(smpschededf03_DEPENDENCIES)
+ @rm -f smpschededf03$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpschededf03/init.c b/testsuites/smptests/smpschededf03/init.c
new file mode 100644
index 0000000000..33029532c6
--- /dev/null
+++ b/testsuites/smptests/smpschededf03/init.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "tmacros.h"
+
+#include <rtems.h>
+
+/* Test name printed by TEST_BEGIN()/TEST_END() */
+const char rtems_test_name[] = "SMPSCHEDEDF 3";
+
+/* Maximum processors configured; tasks query the actual count at run time */
+#define CPU_COUNT 32
+
+/* One wait task and two affinity tasks per processor slot */
+#define TASK_COUNT (3 * CPU_COUNT)
+
+typedef struct {
+ rtems_id task_ids[TASK_COUNT]; /* ids of all created worker tasks */
+} test_context;
+
+static test_context test_instance;
+
+/*
+ * Worker that sleeps for one clock tick in an endless loop, so it
+ * repeatedly blocks and unblocks for the duration of the test.
+ */
+static void wait_task(rtems_task_argument arg)
+{
+ (void) arg;
+
+ while (true) {
+ rtems_status_code sc;
+
+ sc = rtems_task_wake_after(1);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+}
+
+/*
+ * Advances a 32-bit linear congruential pseudo-random sequence:
+ * v' = 1664525 * v + 1013904223 (mod 2^32); constants as in
+ * Numerical Recipes.  Deterministic for a given seed.
+ */
+static uint32_t simple_random(uint32_t v)
+{
+ v *= 1664525;
+ v += 1013904223;
+ return v;
+}
+
+/*
+ * Worker that endlessly re-pins itself to a pseudo-randomly chosen single
+ * processor.  The task argument seeds the random sequence, so each task
+ * walks a different processor sequence.
+ */
+static void affinity_task(rtems_task_argument arg)
+{
+ uint32_t v;
+ uint32_t n;
+
+ v = (uint32_t) arg;
+ n = rtems_get_processor_count();
+
+ while (true) {
+ rtems_status_code sc;
+ cpu_set_t set;
+
+ CPU_ZERO(&set);
+ /* Use bits 13.. of the state, presumably because the low-order
+ bits of a power-of-two-modulus LCG have short periods */
+ CPU_SET((v >> 13) % n, &set);
+ v = simple_random(v);
+
+ sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(set), &set);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+}
+
+/*
+ * Creates and starts one worker task running entry.  The task id is stored
+ * in slot j * CPU_COUNT + i, and that slot index is also passed as the
+ * task argument (seeding affinity_task workers).  The priority depends on
+ * the column index i only.
+ */
+static void create_and_start_task(
+ test_context *ctx,
+ rtems_task_entry entry,
+ size_t i,
+ size_t j
+)
+{
+ rtems_status_code sc;
+
+ /* Flatten (i, j) into the task_ids[] slot index */
+ j = j * CPU_COUNT + i;
+
+ sc = rtems_task_create(
+ rtems_build_name('E', 'D', 'F', ' '),
+ i + 2,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->task_ids[j]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(ctx->task_ids[j], entry, j);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+/*
+ * Deletes the worker task in slot j * CPU_COUNT + i (same slot mapping as
+ * create_and_start_task()).
+ */
+static void delete_task(
+ test_context *ctx,
+ size_t i,
+ size_t j
+)
+{
+ rtems_status_code sc;
+
+ j = j * CPU_COUNT + i;
+
+ sc = rtems_task_delete(ctx->task_ids[j]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+/*
+ * Stress test: per processor slot, starts one wait task and two affinity
+ * tasks, lets them churn for ten seconds, then deletes them all.  Any
+ * scheduler failure shows up as a failed assertion in the workers.
+ */
+static void test(test_context *ctx)
+{
+ rtems_status_code sc;
+ size_t i;
+
+ for (i = 0; i < CPU_COUNT; ++i) {
+ create_and_start_task(ctx, wait_task, i, 0);
+ create_and_start_task(ctx, affinity_task, i, 1);
+ create_and_start_task(ctx, affinity_task, i, 2);
+ }
+
+ /* Let the workers run for ten seconds */
+ sc = rtems_task_wake_after(10 * rtems_clock_get_ticks_per_second());
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (i = 0; i < CPU_COUNT; ++i) {
+ delete_task(ctx, i, 0);
+ delete_task(ctx, i, 1);
+ delete_task(ctx, i, 2);
+ }
+}
+
+/*
+ * Test entry task: runs the randomized affinity stress test and ends the
+ * test run.
+ */
+static void Init(rtems_task_argument arg)
+{
+ (void) arg; /* unused; silences unused-parameter warnings, as in wait_task() */
+
+ TEST_BEGIN();
+ test(&test_instance);
+ TEST_END();
+ rtems_test_exit(0);
+}
+
+/* 1 ms clock tick */
+#define CONFIGURE_MICROSECONDS_PER_TICK 1000
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+/* One slot for the Init task plus the worker tasks */
+#define CONFIGURE_MAXIMUM_TASKS (1 + TASK_COUNT)
+
+#define CONFIGURE_MAXIMUM_PROCESSORS CPU_COUNT
+
+/* Select the EDF SMP scheduler under test */
+#define CONFIGURE_SCHEDULER_EDF_SMP
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpschededf03/smpschededf03.doc b/testsuites/smptests/smpschededf03/smpschededf03.doc
new file mode 100644
index 0000000000..1d11c42b21
--- /dev/null
+++ b/testsuites/smptests/smpschededf03/smpschededf03.doc
@@ -0,0 +1,12 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpschededf03
+
+directives:
+
+ - EDF SMP scheduler operations.
+
+concepts:
+
+ - Randomized test case to show some stability of simple thread processor
+ affinity support of the EDF SMP scheduler.
diff --git a/testsuites/smptests/smpschededf03/smpschededf03.scn b/testsuites/smptests/smpschededf03/smpschededf03.scn
new file mode 100644
index 0000000000..1435f03920
--- /dev/null
+++ b/testsuites/smptests/smpschededf03/smpschededf03.scn
@@ -0,0 +1,2 @@
+*** BEGIN OF TEST SMPSCHEDEDF 3 ***
+*** END OF TEST SMPSCHEDEDF 3 ***
diff --git a/testsuites/smptests/smpscheduler07/init.c b/testsuites/smptests/smpscheduler07/init.c
index cbffe89012..bb065b3844 100644
--- a/testsuites/smptests/smpscheduler07/init.c
+++ b/testsuites/smptests/smpscheduler07/init.c
@@ -32,7 +32,7 @@ const char rtems_test_name[] = "SMPSCHEDULER 7";
#include <rtems/scheduler.h>
-RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a);
+RTEMS_SCHEDULER_CONTEXT_EDF_SMP(a, CONFIGURE_MAXIMUM_PROCESSORS);
#define CONFIGURE_SCHEDULER_CONTROLS \
RTEMS_SCHEDULER_CONTROL_EDF_SMP( a, rtems_build_name('T', 'E', 'S', 'T'))
--
2.12.3
More information about the devel
mailing list