[PATCH 1/4] smp: Add and use _Per_CPU_Get()
Sebastian Huber
sebastian.huber at embedded-brains.de
Fri Jul 26 12:48:41 UTC 2013
Add and use _Per_CPU_Get(), _Per_CPU_Get_by_index() and _Per_CPU_Get_index().
These accessors avoid direct access to the _Per_CPU_Information table.
---
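In short, the patch replaces open-coded indexing of the _Per_CPU_Information
array with the inline accessors defined in percpu.h below. A minimal
before/after sketch of the pattern (the cpu_index variable is only
illustrative):

    /* before: open-coded access to the per-CPU table */
    Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu_index ];
    uint32_t index = per_cpu - &_Per_CPU_Information[ 0 ];

    /* after: use the new accessors */
    Per_CPU_Control *self_cpu = _Per_CPU_Get();                 /* current processor */
    Per_CPU_Control *other = _Per_CPU_Get_by_index( cpu_index );
    uint32_t index_again = _Per_CPU_Get_index( other );         /* pointer arithmetic inside */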
c/src/lib/libbsp/i386/shared/smp/smp-imps.c | 2 +-
c/src/lib/libbsp/powerpc/qoriq/startup/smp.c | 7 +++-
c/src/lib/libbsp/sparc/leon3/smp/smp_leon3.c | 10 +++---
cpukit/libmisc/cpuuse/cpuusagereset.c | 2 +-
cpukit/score/include/rtems/score/percpu.h | 33 +++++++++++++----
cpukit/score/src/percpu.c | 8 +++--
cpukit/score/src/schedulerdefaulttick.c | 6 ++--
cpukit/score/src/schedulersimplesmp.c | 7 +---
cpukit/score/src/smp.c | 51 ++++++++++++++------------
cpukit/score/src/threadcreateidle.c | 2 +-
testsuites/smptests/smpschedule01/init.c | 4 +-
11 files changed, 78 insertions(+), 54 deletions(-)
diff --git a/c/src/lib/libbsp/i386/shared/smp/smp-imps.c b/c/src/lib/libbsp/i386/shared/smp/smp-imps.c
index 806cfb9..a9afa11 100644
--- a/c/src/lib/libbsp/i386/shared/smp/smp-imps.c
+++ b/c/src/lib/libbsp/i386/shared/smp/smp-imps.c
@@ -268,7 +268,7 @@ boot_cpu(imps_processor *proc)
);
reset[1] = (uint32_t)secondary_cpu_initialize;
- reset[2] = (uint32_t)_Per_CPU_Information[apicid].interrupt_stack_high;
+ reset[2] = (uint32_t)_Per_CPU_Get_by_index(apicid)->interrupt_stack_high;
/*
* Generic CPU startup sequence starts here.
diff --git a/c/src/lib/libbsp/powerpc/qoriq/startup/smp.c b/c/src/lib/libbsp/powerpc/qoriq/startup/smp.c
index 30bcb7b..9caaa99 100644
--- a/c/src/lib/libbsp/powerpc/qoriq/startup/smp.c
+++ b/c/src/lib/libbsp/powerpc/qoriq/startup/smp.c
@@ -87,6 +87,7 @@ static void mmu_config_undo(void)
static void release_core_1(void)
{
+ const Per_CPU_Control *second_cpu = _Per_CPU_Get_by_index(1);
uboot_spin_table *spin_table = (uboot_spin_table *) SPIN_TABLE;
qoriq_mmu_context mmu_context;
@@ -96,7 +97,7 @@ static void release_core_1(void)
qoriq_mmu_write_to_tlb1(&mmu_context, TLB_BEGIN);
spin_table->pir = 1;
- spin_table->r3_lower = (uint32_t) _Per_CPU_Information[1].interrupt_stack_high;
+ spin_table->r3_lower = (uint32_t) second_cpu->interrupt_stack_high;
spin_table->addr_upper = 0;
rtems_cache_flush_multiple_data_lines(spin_table, sizeof(*spin_table));
ppc_synchronize_data();
@@ -108,13 +109,15 @@ static void release_core_1(void)
void qoriq_secondary_cpu_initialize(void)
{
+ const Per_CPU_Control *second_cpu = _Per_CPU_Get_by_index(1);
+
/* Disable decrementer */
PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(BOOKE_TCR, BOOKE_TCR_DIE);
/* Initialize exception handler */
ppc_exc_initialize_with_vector_base(
PPC_INTERRUPT_DISABLE_MASK_DEFAULT,
- (uintptr_t) _Per_CPU_Information[1].interrupt_stack_low,
+ (uintptr_t) second_cpu->interrupt_stack_low,
rtems_configuration_get_interrupt_stack_size(),
bsp_exc_vector_base
);
diff --git a/c/src/lib/libbsp/sparc/leon3/smp/smp_leon3.c b/c/src/lib/libbsp/sparc/leon3/smp/smp_leon3.c
index 8862ca5..f7b70f3 100644
--- a/c/src/lib/libbsp/sparc/leon3/smp/smp_leon3.c
+++ b/c/src/lib/libbsp/sparc/leon3/smp/smp_leon3.c
@@ -86,12 +86,13 @@ uint32_t bsp_smp_initialize( uint32_t configured_cpu_count )
return 1;
for ( cpu=1 ; cpu < found_cpus ; cpu++ ) {
-
+ const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
+
#if defined(RTEMS_DEBUG)
printk( "Waking CPU %d\n", cpu );
#endif
- bsp_ap_stack = _Per_CPU_Information[cpu].interrupt_stack_high -
+ bsp_ap_stack = per_cpu->interrupt_stack_high -
CPU_MINIMUM_STACK_FRAME_SIZE;
bsp_ap_entry = leon3_secondary_cpu_initialize;
@@ -101,9 +102,8 @@ uint32_t bsp_smp_initialize( uint32_t configured_cpu_count )
printk(
"CPU %d is %s\n",
cpu,
- _Per_CPU_Information[cpu].state
- == PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING ?
- "online" : "offline"
+ per_cpu->state == PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING ?
+ "online" : "offline"
);
#endif
}
diff --git a/cpukit/libmisc/cpuuse/cpuusagereset.c b/cpukit/libmisc/cpuuse/cpuusagereset.c
index 9f682f3..b86350e 100644
--- a/cpukit/libmisc/cpuuse/cpuusagereset.c
+++ b/cpukit/libmisc/cpuuse/cpuusagereset.c
@@ -47,7 +47,7 @@ void rtems_cpu_usage_reset( void )
processor_count = rtems_smp_get_processor_count();
for ( processor = 0 ; processor < processor_count ; ++processor ) {
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ processor ];
+ Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
per_cpu->time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
}
diff --git a/cpukit/score/include/rtems/score/percpu.h b/cpukit/score/include/rtems/score/percpu.h
index d44c7a1..cf35657 100644
--- a/cpukit/score/include/rtems/score/percpu.h
+++ b/cpukit/score/include/rtems/score/percpu.h
@@ -233,6 +233,25 @@ typedef struct {
*/
extern Per_CPU_Control _Per_CPU_Information[] CPU_STRUCTURE_ALIGNMENT;
+#if defined( RTEMS_SMP )
+static inline Per_CPU_Control *_Per_CPU_Get( void )
+{
+ return &_Per_CPU_Information[ _SMP_Get_current_processor() ];
+}
+#else
+#define _Per_CPU_Get() ( &_Per_CPU_Information[ 0 ] )
+#endif
+
+static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
+{
+ return &_Per_CPU_Information[ index ];
+}
+
+static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *per_cpu )
+{
+ return ( uint32_t ) ( per_cpu - &_Per_CPU_Information[ 0 ] );
+}
+
#if defined(RTEMS_SMP)
/**
* @brief Set of Pointers to Per CPU Core Information
@@ -280,19 +299,19 @@ void _Per_CPU_Wait_for_state(
* Thus when built for non-SMP, there should be no performance penalty.
*/
#define _Thread_Heir \
- _Per_CPU_Information[_SMP_Get_current_processor()].heir
+ _Per_CPU_Get()->heir
#define _Thread_Executing \
- _Per_CPU_Information[_SMP_Get_current_processor()].executing
+ _Per_CPU_Get()->executing
#define _ISR_Nest_level \
- _Per_CPU_Information[_SMP_Get_current_processor()].isr_nest_level
+ _Per_CPU_Get()->isr_nest_level
#define _CPU_Interrupt_stack_low \
- _Per_CPU_Information[_SMP_Get_current_processor()].interrupt_stack_low
+ _Per_CPU_Get()->interrupt_stack_low
#define _CPU_Interrupt_stack_high \
- _Per_CPU_Information[_SMP_Get_current_processor()].interrupt_stack_high
+ _Per_CPU_Get()->interrupt_stack_high
#define _Thread_Dispatch_necessary \
- _Per_CPU_Information[_SMP_Get_current_processor()].dispatch_necessary
+ _Per_CPU_Get()->dispatch_necessary
#define _Thread_Time_of_last_context_switch \
- _Per_CPU_Information[_SMP_Get_current_processor()].time_of_last_context_switch
+ _Per_CPU_Get()->time_of_last_context_switch
#endif /* ASM */
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index 74056a7..5e1a917 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -39,10 +39,10 @@
/*
* Initialize per cpu pointer table
*/
- _Per_CPU_Information_p[0] = &_Per_CPU_Information[0];
+ _Per_CPU_Information_p[0] = _Per_CPU_Get_by_index( 0 );
for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
- Per_CPU_Control *p = &_Per_CPU_Information[cpu];
+ Per_CPU_Control *p = _Per_CPU_Get_by_index( cpu );
_Per_CPU_Information_p[cpu] = p;
@@ -68,8 +68,10 @@
_SMP_Processor_count = max_cpus;
for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
+ const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
+
_Per_CPU_Wait_for_state(
- &_Per_CPU_Information[ cpu ],
+ per_cpu,
PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING
);
}
diff --git a/cpukit/score/src/schedulerdefaulttick.c b/cpukit/score/src/schedulerdefaulttick.c
index a8a9f32..b088289 100644
--- a/cpukit/score/src/schedulerdefaulttick.c
+++ b/cpukit/score/src/schedulerdefaulttick.c
@@ -85,8 +85,8 @@ void _Scheduler_default_Tick( void )
uint32_t processor;
for ( processor = 0 ; processor < processor_count ; ++processor ) {
- _Scheduler_default_Tick_for_executing(
- _Per_CPU_Information[ processor ].executing
- );
+ const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
+
+ _Scheduler_default_Tick_for_executing( per_cpu->executing );
}
}
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 0cc74c3..20965ff 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -58,17 +58,14 @@ static void _Scheduler_simple_smp_Allocate_processor(
}
if ( heir != victim ) {
- Per_CPU_Control *cpu_of_executing =
- &_Per_CPU_Information[ _SMP_Get_current_processor() ];
+ Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
heir->cpu = cpu_of_victim;
cpu_of_victim->heir = heir;
cpu_of_victim->dispatch_necessary = true;
if ( cpu_of_victim != cpu_of_executing ) {
- _CPU_SMP_Send_interrupt(
- cpu_of_victim - &_Per_CPU_Information[ 0 ]
- );
+ _CPU_SMP_Send_interrupt( _Per_CPU_Get_index( cpu_of_victim ) );
}
}
}
diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c
index 42be949..6c580a4 100644
--- a/cpukit/score/src/smp.c
+++ b/cpukit/score/src/smp.c
@@ -30,19 +30,18 @@
void rtems_smp_secondary_cpu_initialize( void )
{
- uint32_t self = _SMP_Get_current_processor();
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ self ];
+ Per_CPU_Control *self_cpu = _Per_CPU_Get();
Thread_Control *heir;
#if defined(RTEMS_DEBUG)
- printk( "Made it to %d -- ", self );
+ printk( "Made it to %d -- ", _Per_CPU_Get_index( self_cpu ) );
#endif
- _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING );
+ _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING );
- _Per_CPU_Wait_for_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
+ _Per_CPU_Wait_for_state( self_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
- _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_UP );
+ _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_UP );
/*
* The Scheduler will have selected the heir thread for each CPU core.
@@ -50,11 +49,11 @@ void rtems_smp_secondary_cpu_initialize( void )
* force a switch to the designated heir and make it executing on
* THIS core.
*/
- heir = per_cpu->heir;
+ heir = self_cpu->heir;
heir->is_executing = true;
- per_cpu->executing->is_executing = false;
- per_cpu->executing = heir;
- per_cpu->dispatch_necessary = false;
+ self_cpu->executing->is_executing = false;
+ self_cpu->executing = heir;
+ self_cpu->dispatch_necessary = false;
/*
* Threads begin execution in the _Thread_Handler() function. This function
@@ -67,24 +66,28 @@ void rtems_smp_secondary_cpu_initialize( void )
void rtems_smp_process_interrupt( void )
{
- uint32_t self = _SMP_Get_current_processor();
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ self ];
+ Per_CPU_Control *self_cpu = _Per_CPU_Get();
- if ( per_cpu->message != 0 ) {
+ if ( self_cpu->message != 0 ) {
uint32_t message;
ISR_Level level;
- _Per_CPU_Lock_acquire( per_cpu, level );
- message = per_cpu->message;
- per_cpu->message = 0;
- _Per_CPU_Lock_release( per_cpu, level );
+ _Per_CPU_Lock_acquire( self_cpu, level );
+ message = self_cpu->message;
+ self_cpu->message = 0;
+ _Per_CPU_Lock_release( self_cpu, level );
#if defined(RTEMS_DEBUG)
{
void *sp = __builtin_frame_address(0);
if ( !(message & RTEMS_BSP_SMP_SHUTDOWN) ) {
- printk( "ISR on CPU %d -- (0x%02x) (0x%p)\n", self, message, sp );
+ printk(
+ "ISR on CPU %d -- (0x%02x) (0x%p)\n",
+ _Per_CPU_Get_index( self_cpu ),
+ message,
+ sp
+ );
if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
printk( "signal to self\n" );
if ( message & RTEMS_BSP_SMP_SHUTDOWN )
@@ -99,9 +102,9 @@ void rtems_smp_process_interrupt( void )
_Thread_Dispatch_set_disable_level( 0 );
- _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_SHUTDOWN );
+ _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_SHUTDOWN );
- _CPU_Fatal_halt( self );
+ _CPU_Fatal_halt( _Per_CPU_Get_index( self_cpu ) );
/* does not continue past here */
}
}
@@ -109,7 +112,7 @@ void rtems_smp_process_interrupt( void )
void _SMP_Send_message( uint32_t cpu, uint32_t message )
{
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu ];
+ Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
ISR_Level level;
#if defined(RTEMS_DEBUG)
@@ -132,7 +135,7 @@ void _SMP_Broadcast_message( uint32_t message )
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
if ( cpu != self ) {
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu ];
+ Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
ISR_Level level;
_Per_CPU_Lock_acquire( per_cpu, level );
@@ -151,7 +154,7 @@ void _SMP_Request_other_cores_to_perform_first_context_switch( void )
uint32_t cpu;
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ cpu ];
+ Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
if ( cpu != self ) {
_Per_CPU_Change_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING );
@@ -173,7 +176,7 @@ void _SMP_Request_other_cores_to_shutdown( void )
for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
if ( cpu != self ) {
_Per_CPU_Wait_for_state(
- &_Per_CPU_Information[ cpu ],
+ _Per_CPU_Get_by_index( cpu ),
PER_CPU_STATE_SHUTDOWN
);
}
diff --git a/cpukit/score/src/threadcreateidle.c b/cpukit/score/src/threadcreateidle.c
index 181978b..900dab7 100644
--- a/cpukit/score/src/threadcreateidle.c
+++ b/cpukit/score/src/threadcreateidle.c
@@ -73,7 +73,7 @@ void _Thread_Create_idle( void )
uint32_t processor;
for ( processor = 0 ; processor < processor_count ; ++processor ) {
- Per_CPU_Control *per_cpu = &_Per_CPU_Information[ processor ];
+ Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );
_Thread_Create_idle_for_cpu( per_cpu );
}
diff --git a/testsuites/smptests/smpschedule01/init.c b/testsuites/smptests/smpschedule01/init.c
index 58ff3eb..d1c171b 100644
--- a/testsuites/smptests/smpschedule01/init.c
+++ b/testsuites/smptests/smpschedule01/init.c
@@ -71,12 +71,12 @@ static bool is_per_cpu_state_ok(void)
uint32_t i;
for (i = 0; i < n; ++i) {
- const Thread_Control *thread = _Per_CPU_Information[i].executing;
+ const Thread_Control *thread = _Per_CPU_Get_by_index(i)->executing;
uint32_t count = 0;
uint32_t j;
for (j = 0; j < n; ++j) {
- const Per_CPU_Control *cpu = &_Per_CPU_Information[j];
+ const Per_CPU_Control *cpu = _Per_CPU_Get_by_index(j);
const Thread_Control *executing = cpu->executing;
const Thread_Control *heir = cpu->heir;
--
1.7.7