[PATCH v2] score: Refactor SMP cache manager support

Sebastian Huber <sebastian.huber@embedded-brains.de>
Fri Apr 17 11:10:41 UTC 2015


Move the SMP cache manager message support into the score as a general purpose
_SMP_Multicast_action() and use it for the SMP cache maintenance operations.

v2: Update the missing instruction cache routines.  The smpcache01 test passes
on the NGMP.

---
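A minimal caller-side sketch of the new _SMP_Multicast_action() API, mirroring
what the cache manager code below now does.  Passing 0 and NULL for the
processor set targets all processors, and the call returns only after every
recipient has executed the handler.  The example_* names are illustrative and
not part of the patch.

  #include <rtems/score/smpimpl.h>
  #include <rtems.h>

  typedef struct {
    const void *addr;
    size_t size;
  } example_area;

  static void example_flush( void *arg )
  {
    example_area *area = arg;

    /* Executed on each processor in the target set */
    rtems_cache_flush_multiple_data_lines( area->addr, area->size );
  }

  static void example_flush_on_all_processors( const void *addr, size_t size )
  {
    example_area area = { addr, size };

    /* 0 and NULL select all processors */
    _SMP_Multicast_action( 0, NULL, example_flush, &area );
  }
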
 c/src/lib/libcpu/shared/src/cache_manager.c | 224 +++++++---------------------
 cpukit/score/Makefile.am                    |   1 +
 cpukit/score/include/rtems/score/smpimpl.h  |  33 +++-
 cpukit/score/src/smpmulticastaction.c       | 141 +++++++++++++++++
 testsuites/smptests/smpcache01/init.c       |  23 +--
 5 files changed, 225 insertions(+), 197 deletions(-)
 create mode 100644 cpukit/score/src/smpmulticastaction.c
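
A second sketch for the processor-set form, following the pattern used by the
updated smpcache01 test; the counter array and example_* names are
illustrative:

  #include <rtems/score/smpimpl.h>
  #include <rtems.h>

  static uint32_t invocations[ 32 ]; /* illustrative upper bound on processors */

  static void example_count( void *arg )
  {
    uint32_t *counters = arg;

    /* Each targeted processor increments its own slot */
    counters[ rtems_get_current_processor() ]++;
  }

  static void example_run_on_processor( uint32_t cpu_index )
  {
    cpu_set_t cpus;

    CPU_ZERO( &cpus );
    CPU_SET( cpu_index, &cpus );

    _SMP_Multicast_action( sizeof( cpus ), &cpus, example_count, invocations );
  }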

diff --git a/c/src/lib/libcpu/shared/src/cache_manager.c b/c/src/lib/libcpu/shared/src/cache_manager.c
index 89ec88f..ab7d5b7 100644
--- a/c/src/lib/libcpu/shared/src/cache_manager.c
+++ b/c/src/lib/libcpu/shared/src/cache_manager.c
@@ -37,161 +37,43 @@
 
 #include <rtems.h>
 #include "cache_.h"
-#include <rtems/score/smpimpl.h>
-#include <rtems/score/smplock.h>
-#include <rtems/score/chainimpl.h>
-#include <rtems/score/sysstate.h>
 
-#if defined( RTEMS_SMP )
+#if defined(RTEMS_SMP)
 
-typedef void (*Cache_manager_Function_ptr)(const void *d_addr, size_t n_bytes);
+#include <rtems/score/smpimpl.h>
 
 typedef struct {
-  Chain_Node Node;
-  Cache_manager_Function_ptr func;
   const void *addr;
   size_t size;
-  cpu_set_t *recipients;
-  size_t setsize;
-  Atomic_Ulong done;
-} Cache_manager_SMP_node;
-
-typedef struct {
-  SMP_lock_Control Lock;
-  Chain_Control List;
-} Cache_manager_SMP_control;
+} smp_cache_area;
 
-static Cache_manager_SMP_control _Cache_manager_SMP_control = {
-  .Lock = SMP_LOCK_INITIALIZER("cachemgr"),
-  .List = CHAIN_INITIALIZER_EMPTY(_Cache_manager_SMP_control.List)
-};
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
 
-void
-_SMP_Cache_manager_message_handler(void)
+static void smp_cache_data_flush(void *arg)
 {
-  SMP_lock_Context lock_context;
-  Cache_manager_SMP_node *node;
-  Cache_manager_SMP_node *next;
-  uint32_t cpu_self_idx;
-
-  _SMP_lock_ISR_disable_and_acquire( &_Cache_manager_SMP_control.Lock,
-      &lock_context );
-  cpu_self_idx = _SMP_Get_current_processor();
-
-  node = (Cache_manager_SMP_node*)_Chain_First(
-      &_Cache_manager_SMP_control.List );
-  while ( !_Chain_Is_tail( &_Cache_manager_SMP_control.List, &node->Node ) ) {
-    next = (Cache_manager_SMP_node*)_Chain_Next( &node->Node );
-    if ( CPU_ISSET_S ( cpu_self_idx, node->setsize, node->recipients ) ) {
-      CPU_CLR_S ( cpu_self_idx, node->setsize, node->recipients );
-
-      node->func( node->addr, node->size );
-
-      if ( CPU_COUNT_S( node->setsize, node->recipients ) == 0 ) {
-        _Chain_Extract_unprotected( &node->Node );
-        _Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
-      }
-    }
-    node = next;
-  }
+  smp_cache_area *area = arg;
 
-  _SMP_lock_Release_and_ISR_enable( &_Cache_manager_SMP_control.Lock,
-      &lock_context );
+  rtems_cache_flush_multiple_data_lines(area->addr, area->size);
 }
 
-#if defined(CPU_DATA_CACHE_ALIGNMENT) || \
-    (defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) && \
-    defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING))
-
-static void
-_Cache_manager_Process_cache_messages( void )
+static void smp_cache_data_inv(void *arg)
 {
-  unsigned long message;
-  Per_CPU_Control *cpu_self;
-  ISR_Level isr_level;
-
-  _ISR_Disable_without_giant( isr_level );
-
-  cpu_self = _Per_CPU_Get();
-
-  message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
+  smp_cache_area *area = arg;
 
-  if ( message & SMP_MESSAGE_CACHE_MANAGER ) {
-    if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message,
-        message & ~SMP_MESSAGE_CACHE_MANAGER, ATOMIC_ORDER_RELAXED,
-        ATOMIC_ORDER_RELAXED ) ) {
-      _SMP_Cache_manager_message_handler();
-    }
-  }
-
-  _ISR_Enable_without_giant( isr_level );
+  rtems_cache_invalidate_multiple_data_lines(area->addr, area->size);
 }
 
-/*
- * We can not make this function static as we need to access it
- * from the test program.
- */
-void
-_Cache_manager_Send_smp_msg(
-    const size_t setsize,
-    const cpu_set_t *set,
-    Cache_manager_Function_ptr func,
-    const void * addr,
-    size_t size
-  );
-
-void
-_Cache_manager_Send_smp_msg(
-    const size_t setsize,
-    const cpu_set_t *set,
-    Cache_manager_Function_ptr func,
-    const void * addr,
-    size_t size
-  )
+static void smp_cache_data_flush_all(void *arg)
 {
-  uint32_t i;
-  Cache_manager_SMP_node node;
-  size_t set_size = CPU_ALLOC_SIZE( _SMP_Get_processor_count() );
-  char cpu_set_copy[set_size];
-  SMP_lock_Context lock_context;
-
-  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
-    func( addr, size );
-    return;
-  }
-
-  memset( cpu_set_copy, 0, set_size );
-  if( set == NULL ) {
-    for( i=0; i<_SMP_Get_processor_count(); ++i )
-      CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
-  } else {
-    for( i=0; i<_SMP_Get_processor_count(); ++i )
-      if( CPU_ISSET_S( i, set_size, set ) )
-        CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
-  }
-
-  node.func = func;
-  node.addr = addr;
-  node.size = size;
-  node.setsize = set_size;
-  node.recipients = (cpu_set_t *)cpu_set_copy;
-  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );
-
-
-  _SMP_lock_ISR_disable_and_acquire( &_Cache_manager_SMP_control.Lock,
-      &lock_context );
-  _Chain_Prepend_unprotected( &_Cache_manager_SMP_control.List, &node.Node );
-  _SMP_lock_Release_and_ISR_enable( &_Cache_manager_SMP_control.Lock,
-      &lock_context );
-
-  _SMP_Send_message_multicast( set_size, node.recipients,
-      SMP_MESSAGE_CACHE_MANAGER );
-
-  _Cache_manager_Process_cache_messages();
+  rtems_cache_flush_entire_data();
+}
 
-  while ( !_Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) );
+static void smp_cache_data_inv_all(void *arg)
+{
+  rtems_cache_invalidate_entire_data();
 }
-#endif
+
+#endif /* defined(CPU_DATA_CACHE_ALIGNMENT) */
 
 void
 rtems_cache_flush_multiple_data_lines_processor_set(
@@ -202,8 +84,9 @@ rtems_cache_flush_multiple_data_lines_processor_set(
 )
 {
 #if defined(CPU_DATA_CACHE_ALIGNMENT)
-  _Cache_manager_Send_smp_msg( setsize, set,
-      rtems_cache_flush_multiple_data_lines, addr, size );
+  smp_cache_area area = { addr, size };
+
+  _SMP_Multicast_action( setsize, set, smp_cache_data_flush, &area );
 #endif
 }
 
@@ -216,8 +99,9 @@ rtems_cache_invalidate_multiple_data_lines_processor_set(
 )
 {
 #if defined(CPU_DATA_CACHE_ALIGNMENT)
-  _Cache_manager_Send_smp_msg( setsize, set,
-      rtems_cache_invalidate_multiple_data_lines, addr, size );
+  smp_cache_area area = { addr, size };
+
+  _SMP_Multicast_action( setsize, set, smp_cache_data_inv, &area );
 #endif
 }
 
@@ -228,8 +112,7 @@ rtems_cache_flush_entire_data_processor_set(
 )
 {
 #if defined(CPU_DATA_CACHE_ALIGNMENT)
-  _Cache_manager_Send_smp_msg( setsize, set,
-      (Cache_manager_Function_ptr)rtems_cache_flush_entire_data, 0, 0 );
+  _SMP_Multicast_action( setsize, set, smp_cache_data_flush_all, NULL );
 #endif
 }
 
@@ -240,11 +123,11 @@ rtems_cache_invalidate_entire_data_processor_set(
 )
 {
 #if defined(CPU_DATA_CACHE_ALIGNMENT)
-  _Cache_manager_Send_smp_msg( setsize, set,
-      (Cache_manager_Function_ptr)rtems_cache_invalidate_entire_data, 0, 0 );
+  _SMP_Multicast_action( setsize, set, smp_cache_data_inv_all, NULL );
 #endif
 }
-#endif
+
+#endif /* defined(RTEMS_SMP) */
 
 /*
  * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
@@ -427,6 +310,23 @@ rtems_cache_disable_data( void )
  * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
  */
 
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
+  && defined(RTEMS_SMP) \
+  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
+
+static void smp_cache_inst_inv(void *arg)
+{
+  smp_cache_area *area = arg;
+
+  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
+}
+
+static void smp_cache_inst_inv_all(void *arg)
+{
+  _CPU_cache_invalidate_entire_instruction();
+}
+
+#endif
 
 
 /*
@@ -435,10 +335,10 @@ rtems_cache_disable_data( void )
  * and then perform the invalidations.
  */
 
-#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
-#if !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
+  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
 static void
-_invalidate_multiple_instruction_lines_no_range_functions(
+_CPU_cache_invalidate_instruction_range(
   const void * i_addr,
   size_t n_bytes
 )
@@ -463,7 +363,6 @@ _invalidate_multiple_instruction_lines_no_range_functions(
   }
 }
 #endif
-#endif
 
 void
 rtems_cache_invalidate_multiple_instruction_lines(
@@ -472,25 +371,12 @@ rtems_cache_invalidate_multiple_instruction_lines(
 )
 {
 #if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
-#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
-
 #if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
-  _Cache_manager_Send_smp_msg( 0, 0, _CPU_cache_invalidate_instruction_range,
-      i_addr, n_bytes );
-#else
-  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
-#endif
-
-#else
+  smp_cache_area area = { i_addr, n_bytes };
 
-#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
-  _Cache_manager_Send_smp_msg( 0, 0,
-      _invalidate_multiple_instruction_lines_no_range_functions, i_addr,
-      n_bytes );
+  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
 #else
-  _invalidate_multiple_instruction_lines_no_range_functions( i_addr, n_bytes );
-#endif
-
+  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
 #endif
 #endif
 }
@@ -504,14 +390,8 @@ void
 rtems_cache_invalidate_entire_instruction( void )
 {
 #if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
- /*
-  * Call the CPU-specific routine
-  */
-
 #if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
-  _Cache_manager_Send_smp_msg( 0, 0,
-      (Cache_manager_Function_ptr)_CPU_cache_invalidate_entire_instruction,
-      0, 0 );
+  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
 #else
  _CPU_cache_invalidate_entire_instruction();
 #endif
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index e84d4e5..487ccc0 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -139,6 +139,7 @@ libscore_a_SOURCES += src/schedulerprioritysmp.c
 libscore_a_SOURCES += src/schedulersimplesmp.c
 libscore_a_SOURCES += src/schedulersmpdebug.c
 libscore_a_SOURCES += src/smp.c
+libscore_a_SOURCES += src/smpmulticastaction.c
 libscore_a_SOURCES += src/cpuset.c
 libscore_a_SOURCES += src/cpusetprintsupport.c
 libscore_a_SOURCES += src/schedulerdefaultaskforhelp.c
diff --git a/cpukit/score/include/rtems/score/smpimpl.h b/cpukit/score/include/rtems/score/smpimpl.h
index 98e109c..97c78b0 100644
--- a/cpukit/score/include/rtems/score/smpimpl.h
+++ b/cpukit/score/include/rtems/score/smpimpl.h
@@ -52,11 +52,11 @@ extern "C" {
 #define SMP_MESSAGE_TEST 0x2UL
 
 /**
- * @brief SMP message to request a cache manager invocation.
+ * @brief SMP message to request a multicast action.
  *
  * @see _SMP_Send_message().
  */
-#define SMP_MESSAGE_CACHE_MANAGER 0x4UL
+#define SMP_MESSAGE_MULTICAST_ACTION 0x4UL
 
 /**
  * @brief SMP fatal codes.
@@ -135,10 +135,9 @@ static inline void _SMP_Set_test_message_handler(
 }
 
 /**
- * @brief Handles cache invalidation/flush requests from a remote processor.
- *
+ * @brief Processes all pending multicast actions.
  */
-void _SMP_Cache_manager_message_handler( void );
+void _SMP_Multicast_actions_process( void );
 
 /**
  * @brief Interrupt handler for inter-processor interrupts.
@@ -163,10 +162,9 @@ static inline void _SMP_Inter_processor_interrupt_handler( void )
       ( *_SMP_Test_message_handler )( cpu_self );
     }
 
-    if ( ( message & SMP_MESSAGE_CACHE_MANAGER ) != 0 ) {
-      _SMP_Cache_manager_message_handler();
+    if ( ( message & SMP_MESSAGE_MULTICAST_ACTION ) != 0 ) {
+      _SMP_Multicast_actions_process();
     }
-
   }
 }
 
@@ -220,6 +218,25 @@ void _SMP_Send_message_multicast(
   unsigned long message
 );
 
+typedef void ( *SMP_Multicast_action_handler )( void *arg );
+
+/**
+ *  @brief Initiates an SMP multicast action to a set of processors.
+ *
+ *  The current processor may be part of the set.
+ *
+ *  @param[in] setsize The size of the set of target processors of the action.
+ *  @param[in] cpus The set of target processors of the action.
+ *  @param[in] handler The multicast action handler.
+ *  @param[in] arg The multicast action argument.
+ */
+void _SMP_Multicast_action(
+  const size_t setsize,
+  const cpu_set_t *cpus,
+  SMP_Multicast_action_handler handler,
+  void *arg
+);
+
 #endif /* defined( RTEMS_SMP ) */
 
 /**
diff --git a/cpukit/score/src/smpmulticastaction.c b/cpukit/score/src/smpmulticastaction.c
new file mode 100644
index 0000000..2e59262
--- /dev/null
+++ b/cpukit/score/src/smpmulticastaction.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014 Aeroflex Gaisler AB.  All rights reserved.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#include <rtems/score/smpimpl.h>
+#include <rtems/score/smplock.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/sysstate.h>
+
+typedef struct {
+  Chain_Node Node;
+  SMP_Multicast_action_handler handler;
+  void *arg;
+  cpu_set_t *recipients;
+  size_t setsize;
+  Atomic_Ulong done;
+} SMP_Multicast_action;
+
+typedef struct {
+  SMP_lock_Control Lock;
+  Chain_Control List;
+} SMP_Multicast_action_context;
+
+static SMP_Multicast_action_context _SMP_Multicast_action_context = {
+  .Lock = SMP_LOCK_INITIALIZER("SMP Multicast Action"),
+  .List = CHAIN_INITIALIZER_EMPTY(_SMP_Multicast_action_context.List)
+};
+
+void
+_SMP_Multicast_actions_process(void)
+{
+  SMP_lock_Context lock_context;
+  SMP_Multicast_action *node;
+  SMP_Multicast_action *next;
+  uint32_t cpu_self_idx;
+
+  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast_action_context.Lock,
+      &lock_context );
+  cpu_self_idx = _SMP_Get_current_processor();
+
+  node = (SMP_Multicast_action*)_Chain_First(
+      &_SMP_Multicast_action_context.List );
+  while ( !_Chain_Is_tail( &_SMP_Multicast_action_context.List, &node->Node ) ) {
+    next = (SMP_Multicast_action*)_Chain_Next( &node->Node );
+    if ( CPU_ISSET_S ( cpu_self_idx, node->setsize, node->recipients ) ) {
+      CPU_CLR_S ( cpu_self_idx, node->setsize, node->recipients );
+
+      node->handler( node->arg );
+
+      if ( CPU_COUNT_S( node->setsize, node->recipients ) == 0 ) {
+        _Chain_Extract_unprotected( &node->Node );
+        _Atomic_Store_ulong( &node->done, 1, ATOMIC_ORDER_RELEASE );
+      }
+    }
+    node = next;
+  }
+
+  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast_action_context.Lock,
+      &lock_context );
+}
+
+static void
+_SMP_Multicast_actions_try_process( void )
+{
+  unsigned long message;
+  Per_CPU_Control *cpu_self;
+  ISR_Level isr_level;
+
+  _ISR_Disable_without_giant( isr_level );
+
+  cpu_self = _Per_CPU_Get();
+
+  message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED );
+
+  if ( message & SMP_MESSAGE_MULTICAST_ACTION ) {
+    if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message,
+        message & ~SMP_MESSAGE_MULTICAST_ACTION, ATOMIC_ORDER_RELAXED,
+        ATOMIC_ORDER_RELAXED ) ) {
+      _SMP_Multicast_actions_process();
+    }
+  }
+
+  _ISR_Enable_without_giant( isr_level );
+}
+
+void _SMP_Multicast_action(
+  const size_t setsize,
+  const cpu_set_t *cpus,
+  SMP_Multicast_action_handler handler,
+  void *arg
+)
+{
+  uint32_t i;
+  SMP_Multicast_action node;
+  size_t set_size = CPU_ALLOC_SIZE( _SMP_Get_processor_count() );
+  char cpu_set_copy[set_size];
+  SMP_lock_Context lock_context;
+
+  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
+    handler( arg );
+    return;
+  }
+
+  memset( cpu_set_copy, 0, set_size );
+  if( cpus == NULL ) {
+    for( i=0; i<_SMP_Get_processor_count(); ++i )
+      CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
+  } else {
+    for( i=0; i<_SMP_Get_processor_count(); ++i )
+      if( CPU_ISSET_S( i, set_size, cpus ) )
+        CPU_SET_S( i, set_size, (cpu_set_t *)cpu_set_copy );
+  }
+
+  node.handler = handler;
+  node.arg = arg;
+  node.setsize = set_size;
+  node.recipients = (cpu_set_t *)cpu_set_copy;
+  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );
+
+
+  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast_action_context.Lock,
+      &lock_context );
+  _Chain_Prepend_unprotected( &_SMP_Multicast_action_context.List, &node.Node );
+  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast_action_context.Lock,
+      &lock_context );
+
+  _SMP_Send_message_multicast( set_size, node.recipients,
+      SMP_MESSAGE_MULTICAST_ACTION );
+
+  _SMP_Multicast_actions_try_process();
+
+  while ( !_Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) );
+}
diff --git a/testsuites/smptests/smpcache01/init.c b/testsuites/smptests/smpcache01/init.c
index 01d6e1e..7ad2ef9 100644
--- a/testsuites/smptests/smpcache01/init.c
+++ b/testsuites/smptests/smpcache01/init.c
@@ -12,6 +12,7 @@
 
 #include <rtems/score/atomic.h>
 #include <rtems/score/smpbarrier.h>
+#include <rtems/score/smpimpl.h>
 #include <rtems.h>
 #include <limits.h>
 #include <string.h>
@@ -26,17 +27,6 @@ CPU_STRUCTURE_ALIGNMENT static int data_to_flush[1024];
 
 #define WORKER_PRIORITY 100
 
-typedef void (*Cache_manager_Function_ptr)(const void *d_addr, size_t n_bytes);
-
-void
-_Cache_manager_Send_smp_msg(
-    const size_t setsize,
-    const cpu_set_t *set,
-    Cache_manager_Function_ptr func,
-    const void * addr,
-    size_t size
-  );
-
 typedef struct {
   SMP_barrier_Control barrier;
   uint32_t count[CPU_COUNT];
@@ -51,10 +41,9 @@ static void function_to_flush( void )
   /* Does nothing. Used to give a pointer to instruction address space. */
 }
 
-static void test_cache_message( const void *d_addr, size_t n_bytes )
+static void test_cache_message( void *arg )
 {
-  rtems_test_assert(n_bytes == 123);
-  rtems_test_assert(d_addr == 0);
+  rtems_test_assert(arg == &ctx);
 
   ctx.count[rtems_get_current_processor()]++;
 }
@@ -111,7 +100,7 @@ static void test_func_test( size_t set_size, cpu_set_t *cpu_set,
   ctx.count[rtems_get_current_processor()] = 0;
   _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
 
-  _Cache_manager_Send_smp_msg( set_size, cpu_set, test_cache_message, 0, 123 );
+  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );
 
   _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
 
@@ -129,7 +118,7 @@ static void test_func_isrdisabled_test( size_t set_size, cpu_set_t *cpu_set,
 
   _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
 
-  _Cache_manager_Send_smp_msg( set_size, cpu_set, test_cache_message, 0, 123 );
+  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );
 
   _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
 
@@ -149,7 +138,7 @@ static void test_func_giant_taken_test( size_t set_size, cpu_set_t *cpu_set,
 
   _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
 
-  _Cache_manager_Send_smp_msg( set_size, cpu_set, test_cache_message, 0, 123 );
+  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );
 
   _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );
 
-- 
1.8.4.5


