[PATCH v2] Pre Release: Strong APA

Richi Dubey richidubey at gmail.com
Wed Aug 26 19:37:34 UTC 2020


>
> > @@ -257,16 +257,14 @@
>> >    #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \
>> >      static struct { \
>> >        Scheduler_strong_APA_Context Base; \
>> > -      Chain_Control                Ready[ ( prio_count ) ]; \
>> > +      Scheduler_strong_APA_Struct Struct[ CONFIGURE_MAXIMUM_PROCESSORS
>> ]; \
>>
>> I don't like this name at all either the type or the variable
>> "Struct". Just like I wouldn't call a variable
>>    int Int;
>
> Done. Changed to Scheduler_strong_APA_CPU CPU, since the structure stores
all the important information corresponding to a CPU.

> + *
>> > + * The license and distribution terms for this file may be
>> > + * found in the file LICENSE in this distribution or at
>> >   * http://www.rtems.org/license/LICENSE.
>> relicense 2-bsd -- EB allows it, and your new code should be put under it
>>
> Done.

> > +#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
>> > +  RTEMS_CONTAINER_OF( next, Scheduler_strong_APA_Node, Chain )
>> somehow this is not using 'node' parameter?
>>
> Silly mistake. Changed.

> - * This is an implementation of the global fixed priority scheduler
>> (G-FP).  It
>> > - * uses one ready chain per priority to ensure constant time insert
>> operations.
>> > - * The scheduled chain uses linear insert operations and has at most
>> processor
>> > - * count entries.  Since the processor and priority count are
>> constants all
>> > - * scheduler operations complete in a bounded execution time.
>> > + * This is an implementation of the Strong APA scheduler defined by
>> > + * Cerqueira et al. in Linux's Processor Affinity API, Refined:
>> > + * Shifting Real-Time Tasks Towards Higher Schedulability.
>> >   *
>> > - * The the_thread preempt mode will be ignored.
>> > + * This is an implementation of the Strong APA scheduler defined by
>> > + * Cerqueira et al. in Linux's Processor Affinity API, Refined:
>> > + * Shifting Real-Time Tasks Towards Higher Schedulability.
>> repeating text?
>>
>> You should add a bit more comment about the high-level design here. Of
>> course anyone wanting more details can go to the paper.
>>
> Got it. Added a little description in the new patch.


> >   *
>> >   * @{
>> >   */
>> >
>> >  /**
>> > - * @brief Scheduler context specialization for Strong APA
>> > - * schedulers.
>> > - */
>> > -typedef struct {
>> > -  Scheduler_SMP_Context    Base;
>> > -  Priority_bit_map_Control Bit_map;
>> > -  Chain_Control            Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
>> > -} Scheduler_strong_APA_Context;
>> > -
>> > -/**
>> > - * @brief Scheduler node specialization for Strong APA
>> > - * schedulers.
>> > + * @brief Scheduler node specialization for Strong APA schedulers.
>> >   */
>> >  typedef struct {
>> >    /**
>> >     * @brief SMP scheduler node.
>> >     */
>> >    Scheduler_SMP_Node Base;
>> > +
>> > + /**
>> > +   * @brief Chain node for Scheduler_strong_APA_Context::All_nodes
>> > +   */
>> > +  Chain_Node Chain;
>>
>> Don't call this Chain. We refer to a Chain_Control object as the
>> chain. Some other ideas:
>> * APA_Node
>> * All_nodes_Node
>> * All_nodes_Link
>> * Link
>> * Node (that could be confusing)
>> * Link_Node
>>
> Renamed to Ready_node. This sounds so much better. Thanks




> I always felt like Joel missed an opportunity to call them Chain Links
>> instead of Chain Nodes. Then compiler memory fences for synchronizing
>> them could be Chain Link Fences? (A bit of dad humor.)
>>
>> :p


> > +
>> > +  /**
>> > +   * @brief CPU that this node would preempt in the backtracking part
>> of
>> > +   * _Scheduler_strong_APA_Get_highest_ready and
>> > +   * _Scheduler_strong_APA_Do_Enqueue.
>> > +   */
>> > +  Per_CPU_Control *invoker;
>>
>> I don't like this name either. What is the invoker invoking? Since it
>> is used to go backwards, maybe think of a word that conveys that use.
>> 'lowest_scheduled'? 'next_to_preempt'?
>>
>> Changed to cpu_to_preempt, since a node has to preempt the CPU that it is
responsible for inserting into the queue while backtracking.

> >
>> >    /**
>> > -   * @brief The associated ready queue of this node.
>> > +   * @brief The associated affinity set of this node.
>> >     */
>> > -  Scheduler_priority_Ready_queue Ready_queue;
>> > +  Processor_mask Affinity;
>> >  } Scheduler_strong_APA_Node;
>> >
>> > +
>> > +/**
>> > + * @brief Struct for each index of the variable size arrays
>> clarify this comment, no one knows what this means.
>> Add a period (full stop) after brief sentence.
>>
>> Periods added for all subsequent brief doxygen comments. Thanks for
pointing it out.


> > +   * its affinity set.
>> > +   */
>> > +  Scheduler_Node *caller;
>> Not sure what it called. Think about what this name means a bit more.
>>
>
Changed to:
  /**
   * @brief The node that would preempt this CPU.
   */
  Scheduler_Node *preempting_node;


> > +
>> > +    /**
>> > +   * @brief Cpu at the index of Scheduler_strong_APA_Context::Struct
>> Isn't this self-referencing? Just say "CPU in a queue"?
>>
>> Got it,
Changed to :
   /**
   * @brief CPU in a queue.
   */
  Per_CPU_Control *cpu;


> > +   * in Queue implementation.
>> > +   */
>> > +  Per_CPU_Control *cpu;
>> > +
>> > +    /**
>> > +   * @brief Indicates if the CPU at the index of
>> > +   * Scheduler_strong_APA_Context::Struct is already
>> > +   * added to the Queue or not.
>> Whether or not this cpu has been added to the queue (visited in the BFS)
>>
>> Done. Thanks.

> > +   */
>> > +  bool visited;
>> > +} Scheduler_strong_APA_Struct;
>>
>> Maybe, Scheduler_strong_APA_BFS_node;
>>
>> I changed it to Scheduler_strong_APA_CPU for the reasons I
mentioned above.

> -  Scheduler_strong_APA_Context *self =
>> > -    _Scheduler_strong_APA_Get_self( context );
>> > -  Scheduler_strong_APA_Node *node =
>> > -    _Scheduler_strong_APA_Node_downcast( scheduled_to_ready );
>> > -
>> > -  _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
>> > -  _Scheduler_priority_Ready_queue_enqueue_first(
>> > -    &node->Base.Base.Node.Chain,
>> > -    &node->Ready_queue,
>> > -    &self->Bit_map
>> > +  Scheduler_SMP_Node *smp_node;
>> > +  (void) context;
>> > +
>> > +  smp_node = _Scheduler_SMP_Node_downcast( node );
>> > +  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
>> > +}
>> > +
>>
>> Although we don't need doxygen for static (private) methods, a little
>> bit of comment can be helpful to understand what the helper function
>> does.
>>
> Added comment.


> > +static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context
>> *context )
>> > +{
>> > +  Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self(
>> context );
>> > +
>> > +  bool                       ret;
>> > +  const Chain_Node          *tail;
>> > +  Chain_Node                *next;
>> > +  Scheduler_strong_APA_Node *node;
>> > +
>> > +  tail = _Chain_Immutable_tail( &self->All_nodes );
>> > +  next = _Chain_First( &self->All_nodes );
>> > +
>> > +  ret = false;
>> > +
>> > +  while ( next != tail ) {
>> > +    node = (Scheduler_strong_APA_Node *)
>> STRONG_SCHEDULER_NODE_OF_CHAIN( next );
>> I see, this only works by chance. fix your macro so it works on purpose.
>>
> > +
>> > +    if (
>> > +    _Scheduler_SMP_Node_state( &node->Base.Base ) ==
>> SCHEDULER_SMP_NODE_READY
>> not enough indent levels
>> To break this you should use
>>   if (
>>       _Scheduler_SMP_Node_state( &node->Base.Base ) ==
>>           SCHEDULER_SMP_NODE_READY
>>   ) {
>> I know it is kind of ugly.
>>
>> > +    ) {
>> > +      ret = true;
>> > +      break;
>> this is fine, but it could also be 'return true;' and then ...
>> > +    }
>> > +
>> > +    next = _Chain_Next( next );
>> > +  }
>> > +
>> > +  return ret;
>> return false;
>>
>> Minor nit, but it does simplify your code
>>
>> Yes, indeed it does. Changed.

> > +  scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
>> > +
>> > +  _Scheduler_SMP_Allocate_processor_exact(
>> > +    context,
>> > +    &(scheduled->Base.Base),
>> > +    NULL,
>> > +    victim_cpu
>> >    );
>> >  }
>> >
>> > -static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
>> > +static inline Scheduler_Node *
>> _Scheduler_strong_APA_Find_highest_ready(
>> > +  Scheduler_strong_APA_Context *self,
>> > +  uint32_t                      front,
>> > +  uint32_t                      rear
>> > +)
>> > +{
>> > +  Scheduler_Node              *highest_ready;
>> > +  Scheduler_strong_APA_Struct *Struct;
>> > +  const Chain_Node            *tail;
>> > +  Chain_Node                  *next;
>> > +  uint32_t                     index_assigned_cpu;
>> > +  Scheduler_strong_APA_Node   *node;
>> > +  Priority_Control             min_priority_num;
>> > +  Priority_Control             curr_priority;
>> > +  Per_CPU_Control             *assigned_cpu;
>> > +  Scheduler_SMP_Node_state     curr_state;
>> > +  Per_CPU_Control             *curr_CPU;
>> > +  bool                         first_task;
>> > +
>> > +  Struct = self->Struct;
>> > +   //When the first task accessed has nothing to compare its priority
>> against
>> > +  // So, it is the task with the highest priority witnessed so far!
>> Use /* */
>>
>> > +  first_task = true;
>> > +
>> > +  while( front <= rear ) {
>> > +    curr_CPU = Struct[ front ].cpu;
>>
>> Who ensures that rear < sizeof(Struct)?
>>
>> I added an assert. But it should always be the case since we can only
insert an element (CPU) if it is not visited before, and there can't be
more than CONFIGURE_MAXIMUM_PROCESSORS unvisited CPUs.


> > +    front = front + 1;
>> > +
>> > +    tail = _Chain_Immutable_tail( &self->All_nodes );
>> > +    next = _Chain_First( &self->All_nodes );
>> > +
>> > +    while ( next != tail ) {
>> > +      node = (Scheduler_strong_APA_Node*)
>> STRONG_SCHEDULER_NODE_OF_CHAIN( next );
>> > +      //Check if the curr_CPU is in the affinity set of the node
>> > +      if (
>> > +        _Processor_mask_Is_set(&node->Affinity,
>> _Per_CPU_Get_index(curr_CPU))
>> > +      ) {
>> extra ws at end of line
>> you can search a regex like "\s\s*$" for that kind of problem
>>
>> Thanks for telling me this. It is very handy.


> > +        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
>> > +
>> > +        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
>> > +          assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
>> > +          index_assigned_cpu =  _Per_CPU_Get_index( assigned_cpu );
>> > +
>> > +          if ( Struct[ index_assigned_cpu ].visited == false ) {
>> > +            rear = rear + 1;
>> > +            Struct[ rear ].cpu = assigned_cpu;
>> > +            Struct[ index_assigned_cpu ].visited = true;
>> OK this is a little bit confusing. You are using the same 'Struct[]'
>> entry to store metadata for two different CPUs. Somehow you need to
>> either clarify this, or make it consistent.
>>
> Yes, I noticed it now :p.

So, the Struct (now _Scheduler_strong_APA_CPU) stores two things: first, the
important variables related to the CPU at that index (like visited and
preempting_node), but it also stores a Per_CPU_Control cpu to act as a
queue. The two parts are independent of each other because each uses the
indices in its own way. This means that the front and rear indexes are used
for accessing queue elements,

So if front = 2,
CPU[2].cpu = cpu at the front of the queue. This could be a cpu with index 5
(say, in a 32-processor system),

but CPU[2].visited and CPU[2].preempting_node would indicate whether the cpu
with index 2 has been visited or not and point to its preempting node,
respectively.

Is it fine if we keep it like this?

>
>> > +            // The curr CPU of the queue invoked this node to add its
>> CPU
>> > +            // that it is executing on to the queue. So this node
>> might get
>> > +            // preempted because of the invoker curr_CPU and this
>> curr_CPU
>> > +            // is the CPU that node should preempt in case this node
>> > +            // gets preempted.
>> > +            node->invoker = curr_CPU;
>> > +          }
>> > +        }
>> > +        else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
>> > +          curr_priority = _Scheduler_Node_get_priority(
>> &node->Base.Base );
>> > +          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
>> > +
>> > +          if ( first_task == true || curr_priority < min_priority_num
>> ) {
>>
>> you can also initialize min_priority_num to  the scheduler's maximum
>> priority value? Then get rid of this first_task var?
>>
>>
While debugging with gdb, I noticed multiple times that the lowest-priority
ready nodes (default idle nodes populated at start) have a priority of 512
(proof attached as a gdb trace at the end). I do not know why this is
happening. Since I used STRONG_APA_MAXIMUM_PRIORITY in scheduler.h while
defining the scheduler table, shouldn't this be taken care of?


> > +            min_priority_num = curr_priority;
>> > +           highest_ready = &node->Base.Base;
>> > +           first_task = false;
>> > +           //In case this task is directly reachable from thread_CPU
>> I don't know what this comment means.
>>
> Changed. It is hard to explain this, as I came across the need to do this
( node->invoker = curr_CPU;) myself while debugging.

I've added the new comment as:
----------------------------------------------------------------------------------------------------
 /*
      * In case curr_CPU is filter_CPU, we need to store the
      * cpu_to_preempt value so that we go back to SMP_*
      * function, rather than preempting the node ourselves.
      */
     node->cpu_to_preempt = curr_CPU;
----------------------------------------------------------------------------------------------------

this is referring to the following line in calling function
_Get_highest_ready:
----------------------------------------------------------------------------------------------------
    /*
     * Highest ready is not just directly reachable from the victim cpu
     * So there is need of task shifting .
     */
    while( node->cpu_to_preempt !=  filter_cpu ){
----------------------------------------------------------------------------------------------------

where we check if we need to backtrack or not.
My comment was trying to explain that in the case where the highest ready
task is directly accessible from filter_cpu, i.e. the highest ready task has
filter_cpu in its affinity set, we need to mark its cpu_to_preempt right
then and there so that we don't get a data_exception_error.

So, there are two cases of preemption that can happen:

1) a scheduled node preempts another cpu to make space for another node (in
the backtracking process):  in which case, the same line
( node->cpu_to_preempt = curr_CPU;) is written in the state==...SCHEDULED
case above,

2) the ready node is directly accessible, so the cpu_to_preempt has to be
marked right there.

Is it clear now? Please let me know.


> +  cpu_max = _SMP_Get_processor_maximum();
>>
>> I think there is a valid scheduler at cpu_max?
>>
>> When I was debugging, cpu_max was equal to 3 (corresponding to the test
case I submitted as patch). So this has to be correct.

> > +
>> > +  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
>> So this should be <= not <
>> Maybe?
>>
>
While checking with scope, I found this:
------------------------------------------------------------
 /*
   * Discover and initialize the secondary cores in an SMP system.
   */

  cpu_max = _CPU_SMP_Initialize();
  cpu_max = cpu_max < cpu_config_max ? cpu_max : cpu_config_max;
  _SMP_Processor_maximum = cpu_max;
------------------------------------------------------------

So, cpu_max stores the actual number of CPUs, and since we use 0-based
indexing for the per-CPU structures, we use < and not <=.


> > +    Struct[ cpu_index ].visited = false;
>> > +  }
>> > +
>> > +  rear = rear + 1;
>> why not just init rear to 0?
>>
> Okay.


> > +  Struct[ rear ].cpu = filter_cpu;
>> > +  Struct[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
>> > +
>> > +  highest_ready = _Scheduler_strong_APA_Find_highest_ready(
>> > +                    self,
>> > +                    front,
>> > +                    rear
>> > +                  );
>> wrong indents
>>
>
It looks fine in my code editor (on github
<https://github.com/richidubey/rtems/blob/4bbf5d9abe1e16bd942b9806e0be7efcd0cbfe7f/cpukit/score/src/schedulerstrongapa.c#L279>too),
but it looks like this on patch and when I try pasting it here too. Why
does this happen?


> > +        _Scheduler_SMP_Preempt(
>> > +          context,
>> > +          curr_node,
>> > +          _Thread_Scheduler_get_home_node( node->invoker->heir ),
>> > +          _Scheduler_strong_APA_Allocate_processor
>> > +        );
>> > +
>> > +        node = _Scheduler_strong_APA_Node_downcast( next_node );
>> > +      }
>> This is repetitive code, can you merge the first part of the 'if'
>> block into the while loop?
>>
> Changed. Thanks


> > +        if ( curr_priority > max_priority_num ) {
>> > +          lowest_scheduled = curr_node;
>> > +          max_priority_num = curr_priority;
>> > +        }
>> > +      }
>> > +    }
>> > +  }
>> > +
>> > +  return lowest_scheduled;
>>
>> Is it possible this is NULL? what happens if it is?
>>
>> I've now added an assert to check. But the earlier assert
<https://github.com/richidubey/rtems/blob/4bbf5d9abe1e16bd942b9806e0be7efcd0cbfe7f/cpukit/score/src/schedulerstrongapa.c#L347>
should
make sure it should not be NULL since if the affinity is not 0, the lowest
scheduled would be the node with the minimum priority among all the
processors in its affinity.

>
>> > +  _Assert( !_Chain_Is_empty(self->All_nodes) );
>> > +  _Assert( !_Chain_Is_node_off_chain( &node->Chain ) );
>> > +
>> > +   _Chain_Extract_unprotected( &node->Chain ); //Removed from All_nodes
>>
>> All_nodes name is now confusing me. I thought  maybe it meant blocked
>> nodes too, but I understand now.
>>
>> You can call the All_nodes chain 'Ready'. It should be OK to leave
>> Executing tasks on a Ready structure. That is how the uniprocessor
>> schedulers work as I recall.
>>
>> Ready sounds great. Changed. Thanks


> > +
>> > +    if ( !curr_thread->is_idle ) {
>> > +      for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
>> <= ?
>>
>> I believe it should be <, because the test passes and I found no problems
while debugging as well.


> > +        if ( _Processor_mask_Is_set( &curr_strong_node->Affinity,
>> cpu_index ) ) {
>> > +          //Checks if the thread_CPU is in the affinity set of the node
>> > +          Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
>> > +          if ( _Per_CPU_Is_processor_online( cpu ) && Struct[
>> cpu_index ].visited == false ) {
>> > +            rear = rear + 1;
>> > +            Struct[ rear ].cpu = cpu;
>> > +            Struct[ cpu_index ].visited = true;
>> > +            Struct[ cpu_index ].caller = curr_node;
>>
>> maybe instead of caller it should be 'ancestor' or something similar
>> to denote the ordering.
>>
>> Yes, now it is preempting_node. Thanks.


> > +  node = _Scheduler_strong_APA_Node_downcast( node_base );
>> > +  affinity = arg;
>> > +  node->Affinity = *affinity;
>> can simplify to:
>>    node->Affinity = (const Processor_mask *)arg;
>>
>> Changed.

> >      _Scheduler_strong_APA_Enqueue_scheduled,
>> > -    _Scheduler_SMP_Do_nothing_register_idle
>> > +    _Scheduler_strong_APA_Register_idle
>> why not use the Do_nothing?
>>
>> Changed.

Once again, Thanks a lot for your reviews. A lot of code got improved which
wouldn't have been possible without your review.

-----------------------------------------End of my
comments--------------------------------------------------

gdb trace:

--------------------------------------------------------------
richi at YouAreAmazing:~/quick-start/rtems/5/bin$ ./arm-rtems5-gdb
--command=arm.gdb
~/quick-start/build/b3-realview/arm-rtems5/c/realview_pbx_a9_qemu/testsuites/smptests/smpstrongapa01.exe
...
Loading section .rtemsroset, size 0x74 lma 0x121384
Loading section .data, size 0x530 lma 0x200000
Start address 0x00100040, load size 137476
Transfer rate: 12204 KB/sec, 1808 bytes/write.
(gdb) continue
Continuing.

Thread 1 hit Breakpoint 6, Init (arg=2113828) at
/home/richi/quick-start/src/rtems/c/src/../../testsuites/smptests/smpstrongapa01/init.c:328
328  TEST_BEGIN();
(gdb)
Continuing.

Thread 1 hit Breakpoint 5, _Terminate (the_source=RTEMS_FATAL_SOURCE_EXIT,
the_error=0) at
/home/richi/quick-start/src/rtems/c/src/../../cpukit/score/src/interr.c:36
36  _User_extensions_Fatal( the_source, the_error );
(gdb)
Continuing.

Thread 1 hit Breakpoint 4, bsp_reset () at
/home/richi/quick-start/src/rtems/c/src/lib/libbsp/arm/realview-pbx-a9/../../../../../../bsps/arm/realview-pbx-a9/start/bspreset.c:19
19  volatile uint32_t *sys_lock = (volatile uint32_t *) 0x10000020;
(gdb)
Continuing.

Thread 1 hit Breakpoint 6, Init (arg=2113828) at
/home/richi/quick-start/src/rtems/c/src/../../testsuites/smptests/smpstrongapa01/init.c:328
328  TEST_BEGIN();
(gdb) b test
Breakpoint 7 at 0x10109c: file
/home/richi/quick-start/src/rtems/c/src/../../testsuites/smptests/smpstrongapa01/init.c,
line 286.
(gdb) continue
Continuing.

Thread 1 hit Breakpoint 7, test () at
/home/richi/quick-start/src/rtems/c/src/../../testsuites/smptests/smpstrongapa01/init.c:286
286  ctx = &test_instance;
(gdb) b _Scheduler_strong_APA_Find_highest_ready
Breakpoint 8 at 0x118922: file
/home/richi/quick-start/src/rtems/c/src/../../cpukit/score/src/schedulerstrongapa.c,
line 153.
(gdb) continue
Continuing.

Thread 1 hit Breakpoint 8, _Scheduler_strong_APA_Find_highest_ready
(self=0x200620 <_Configuration_Scheduler_strong_APA_dflt>, front=0, rear=0)
at
/home/richi/quick-start/src/rtems/c/src/../../cpukit/score/src/schedulerstrongapa.c:153
153  CPU = self->CPU;
(gdb) ni
0x00118924 153  CPU = self->CPU;
(gdb)
0x00118926 153  CPU = self->CPU;
(gdb)
158  first_task = true;
(gdb)
0x0011892a 158  first_task = true;
(gdb)
163  while( front <= rear ) {
(gdb)
163  while( front <= rear ) {
(gdb)
0x00118a6e 163  while( front <= rear ) {
(gdb)
0x00118a70 163  while( front <= rear ) {
(gdb)
0x00118a72 163  while( front <= rear ) {
(gdb)
164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x00118932 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x00118934 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x00118936 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x00118938 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x0011893a 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x0011893c 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x0011893e 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x00118940 164    curr_CPU = CPU[ front ].cpu;
(gdb)
0x00118942 164    curr_CPU = CPU[ front ].cpu;
(gdb)
165    front = front + 1;
(gdb)
0x00118946 165    front = front + 1;
(gdb)
0x00118948 165    front = front + 1;
(gdb)
167    tail = _Chain_Immutable_tail( &self->Ready );
(gdb)
0x0011894c 167    tail = _Chain_Immutable_tail( &self->Ready );
(gdb)
0x0011894e 167    tail = _Chain_Immutable_tail( &self->Ready );
(gdb)
0x00118950 167    tail = _Chain_Immutable_tail( &self->Ready );
(gdb)
0x00118954 167    tail = _Chain_Immutable_tail( &self->Ready );
(gdb)
168    next = _Chain_First( &self->Ready );
(gdb)
0x00118958 168    next = _Chain_First( &self->Ready );
(gdb)
0x0011895a 168    next = _Chain_First( &self->Ready );
(gdb)
0x0011895c 168    next = _Chain_First( &self->Ready );
(gdb)
0x00118960 168    next = _Chain_First( &self->Ready );
(gdb)
170    while ( next != tail ) {
(gdb)
170    while ( next != tail ) {
(gdb)
0x00118a64 170    while ( next != tail ) {
(gdb)
0x00118a66 170    while ( next != tail ) {
(gdb)
0x00118a68 170    while ( next != tail ) {
(gdb)
171      node = (Scheduler_strong_APA_Node*)
STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb)
0x00118966 171      node = (Scheduler_strong_APA_Node*)
STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb)
0x00118968 171      node = (Scheduler_strong_APA_Node*)
STRONG_SCHEDULER_NODE_OF_CHAIN( next );
(gdb)
174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x0011896c 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x00118970 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x00118972 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x00118976 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x00118978 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x0011897a 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x0011897c 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
0x00118980 174        _Processor_mask_Is_set(&node->Affinity,
_Per_CPU_Get_index(curr_CPU))
(gdb)
173      if (
(gdb)
0x00118984 173      if (
(gdb)
176        curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
(gdb)
0x00118988 176        curr_state = _Scheduler_SMP_Node_state(
&node->Base.Base );
(gdb)
0x0011898a 176        curr_state = _Scheduler_SMP_Node_state(
&node->Base.Base );
(gdb)
0x0011898e 176        curr_state = _Scheduler_SMP_Node_state(
&node->Base.Base );
(gdb)
178        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
(gdb)
0x00118992 178        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
(gdb)
0x00118994 178        if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
(gdb)
196        else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
(gdb)
0x00118a00 196        else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
(gdb)
0x00118a02 196        else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
(gdb)
197          curr_priority = _Scheduler_Node_get_priority( &node->Base.Base
);
(gdb)
0x00118a06 197          curr_priority = _Scheduler_Node_get_priority(
&node->Base.Base );
(gdb)
0x00118a08 197          curr_priority = _Scheduler_Node_get_priority(
&node->Base.Base );
(gdb)
0x00118a0c 197          curr_priority = _Scheduler_Node_get_priority(
&node->Base.Base );
(gdb)
198          curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
(gdb)
0x00118a14 198          curr_priority = SCHEDULER_PRIORITY_PURIFY(
curr_priority );
(gdb)
0x00118a18 198          curr_priority = SCHEDULER_PRIORITY_PURIFY(
curr_priority );
(gdb)
0x00118a1c 198          curr_priority = SCHEDULER_PRIORITY_PURIFY(
curr_priority );
(gdb)
0x00118a20 198          curr_priority = SCHEDULER_PRIORITY_PURIFY(
curr_priority );
(gdb)
0x00118a24 198          curr_priority = SCHEDULER_PRIORITY_PURIFY(
curr_priority );
(gdb)
200          if ( first_task == true || curr_priority < min_priority_num ) {
(gdb) p curr_priority
$1 = 510
(gdb) p first_task
$2 = true
(gdb)
----------------------------------------------------------------------------------------------------------------------------
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.rtems.org/pipermail/devel/attachments/20200827/a609bcf6/attachment-0001.html>


More information about the devel mailing list