[PATCH 4/6] score: Decouple thread and scheduler nodes on SMP
Sebastian Huber
sebastian.huber at embedded-brains.de
Fri Jun 13 16:36:14 UTC 2014
On 06/13/2014 05:50 PM, Gedare Bloom wrote:
> On Fri, Jun 13, 2014 at 10:37 AM, Sebastian Huber
> <sebastian.huber at embedded-brains.de> wrote:
>> Add a chain node to the scheduler node to decouple the thread and
>> scheduler nodes. It is now possible to enqueue a thread in a thread
>> wait queue and use its scheduler node at the same time for other threads,
>> e.g. a resource owner.
>> ---
>> cpukit/score/include/rtems/score/scheduler.h | 19 ++-
>> cpukit/score/include/rtems/score/schedulerimpl.h | 22 ++
>> .../include/rtems/score/schedulerprioritysmpimpl.h | 48 ++--
>> .../score/include/rtems/score/schedulersimplesmp.h | 8 +-
>> cpukit/score/include/rtems/score/schedulersmp.h | 5 +
>> .../score/include/rtems/score/schedulersmpimpl.h | 251 +++++++++++---------
>> cpukit/score/src/schedulerpriorityaffinitysmp.c | 175 +++++++-------
>> cpukit/score/src/schedulerprioritysmp.c | 43 ++--
>> cpukit/score/src/schedulersimplesmp.c | 98 +++++----
>> cpukit/score/src/schedulersmpstartidle.c | 2 +-
>> 10 files changed, 392 insertions(+), 279 deletions(-)
>>
>> diff --git a/cpukit/score/include/rtems/score/scheduler.h b/cpukit/score/include/rtems/score/scheduler.h
>> index 542e4ae..918c6df 100644
>> --- a/cpukit/score/include/rtems/score/scheduler.h
>> +++ b/cpukit/score/include/rtems/score/scheduler.h
>> @@ -165,7 +165,24 @@ struct Scheduler_Control {
>> * @brief Scheduler node for per-thread data.
>> */
>> struct Scheduler_Node {
>> - /* No fields yet */
>> +#if defined(RTEMS_SMP)
>> + /**
>> + * @brief Chain node for usage in various scheduler data structures.
>> + *
>> + * Strictly this is the wrong place for this field since the data structures
>> + * to manage scheduler nodes belong to the particular scheduler
>> + * implementation. Currently all SMP scheduler implementations use chains.
>> + * The node is here to simplify things, just like the object node in the
>> + * thread control block. It may be replaced with a union to add a red-black
>> + * tree node in the future.
>> + */
>> + Chain_Node Node;
>> +
>> + /**
>> + * @brief The thread owning this node.
>> + */
>> + Thread_Control *owner;
>> +#endif
>> };
>>
>> /**
>> diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
>> index 364c658..391a8d7 100644
>> --- a/cpukit/score/include/rtems/score/schedulerimpl.h
>> +++ b/cpukit/score/include/rtems/score/schedulerimpl.h
>> @@ -652,6 +652,28 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Node_get(
>> return the_thread->Scheduler.node;
>> }
>>
>> +RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
> I'd just use "Node_initialize".
The plain name is already used for the corresponding scheduler operation. The
"do" infix is already present in similar cases.
>
>> diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
>> index d3e2106..8671035 100644
>> --- a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
>> +++ b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
>> @@ -26,6 +26,7 @@
>> #include <rtems/score/schedulerprioritysmp.h>
>> #include <rtems/score/schedulerpriorityimpl.h>
>> #include <rtems/score/schedulersimpleimpl.h>
>> +#include <rtems/score/schedulersmpimpl.h>
>>
>> #ifdef __cplusplus
>> extern "C" {
>> @@ -50,26 +51,25 @@ static inline Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
>> return (Scheduler_priority_SMP_Node *) _Scheduler_Node_get( thread );
>> }
>>
>> -static Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_downcast(
>> - Scheduler_Node *node
>> -)
>> +static inline Scheduler_priority_SMP_Node *
>> +_Scheduler_priority_SMP_Node_downcast( Scheduler_Node *node )
>> {
>> return (Scheduler_priority_SMP_Node *) node;
>> }
>>
> Is there a reason not to just cast it where it is used?
I think this makes it easier to grep for the use cases.
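As a purely hypothetical call site (the name _Example_extract is made up),
this is what the grep target looks like compared to a bare cast:

static inline void _Example_extract( Scheduler_Node *node_base )
{
  Scheduler_priority_SMP_Node *node =
    _Scheduler_priority_SMP_Node_downcast( node_base );

  /* Every narrowing from the generic Scheduler_Node now shows up in
   * "grep -r _Scheduler_priority_SMP_Node_downcast", whereas a plain
   * (Scheduler_priority_SMP_Node *) cast would be much harder to find. */
  (void) node;
}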
>
> [...]
>
>> static inline void _Scheduler_priority_SMP_Insert_ready_lifo(
>> Scheduler_Context *context,
>> - Thread_Control *thread
>> + Scheduler_Node *thread
>> )
> It's no longer a thread, I'd change the parameter name. Repeated a few times.
Oh, yes.
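Something like this, then (a sketch of the renamed prototype only, with a
made-up parameter name; the body from the patch stays as it is):

static inline void _Scheduler_priority_SMP_Insert_ready_lifo(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert
);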
>> diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
>> index f5ab8cf..f1dcacd 100644
>> --- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
>> +++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
> [...]
>> @@ -134,28 +137,34 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
>> * the highest ready thread must have affinity such that it can
>> * be executed on the victim's processor.
>> */
>> -static Thread_Control *_Scheduler_priority_affinity_SMP_Get_highest_ready(
>> +static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
>> Scheduler_Context *context,
>> - Thread_Control *victim
>> + Scheduler_Node *victim
>> )
>> {
>> - Scheduler_priority_SMP_Context *self =
>> + Scheduler_priority_SMP_Context *self =
>> _Scheduler_priority_SMP_Get_self( context );
>> - Priority_Control index;
>> - Thread_Control *highest = NULL;
>> - int victim_cpu;
>> + Priority_Control index;
>> + Scheduler_Node *highest = NULL;
>> + Thread_Control *victim_thread;
>> + uint32_t victim_cpu_index;
>> + Scheduler_priority_affinity_SMP_Node *node;
>>
>> /*
>> * This is done when we need to check if reevaluations are needed.
>> */
>> if ( victim == NULL ) {
>> - return _Scheduler_priority_Ready_queue_first(
>> + node = (Scheduler_priority_affinity_SMP_Node *)
>> + _Scheduler_priority_Ready_queue_first(
>> &self->Bit_map,
>> &self->Ready[ 0 ]
>> );
>> +
>> + return &node->Base.Base.Base;
> This Base.Base.Base is quite awkward and not at all clear what it
> means. Shouldn't this be using a downcast() function? Repeated a few
> times.
>
> -Gedare
No, this Base.Base.Base is basically an upcast. In C++ this stuff would be
hidden by the compiler for you, but we use C, so object-oriented stuff is
awkward.
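To spell that out, here is a condensed, self-contained sketch of the struct
nesting (only the Base members mirror the real headers, the other fields are
stand-ins):

typedef struct Chain_Node { struct Chain_Node *next, *previous; } Chain_Node;
typedef struct Thread_Control Thread_Control;

typedef struct {
  Chain_Node      Node;   /* added by this patch */
  Thread_Control *owner;  /* added by this patch */
} Scheduler_Node;

typedef struct {
  Scheduler_Node Base;    /* first member, so the addresses coincide */
  int            smp_state;     /* stand-in for the SMP specific state */
} Scheduler_SMP_Node;

typedef struct {
  Scheduler_SMP_Node Base;
  int                priority;  /* stand-in */
} Scheduler_priority_SMP_Node;

typedef struct {
  Scheduler_priority_SMP_Node Base;
  int                         affinity;  /* stand-in */
} Scheduler_priority_affinity_SMP_Node;

/* The upcast: walking the Base members yields the Scheduler_Node sub-object
 * in a type-checked way.  Since each Base is the first member, the result
 * is the same address as the node itself, which is what a C++ compiler
 * would give you implicitly. */
static inline Scheduler_Node *_Example_upcast(
  Scheduler_priority_affinity_SMP_Node *node
)
{
  return &node->Base.Base.Base;
}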
--
Sebastian Huber, embedded brains GmbH
Address : Dornierstr. 4, D-82178 Puchheim, Germany
Phone : +49 89 189 47 41-16
Fax : +49 89 189 47 41-09
E-Mail : sebastian.huber at embedded-brains.de
PGP : Public key available on request.
This message is not a business communication within the meaning of the EHUG.