[PATCH 3/3] score: Add SMP support to the cache manager

Daniel Cederman cederman at gaisler.com
Thu Jul 3 09:37:28 UTC 2014


Adds functions that allow the user to specify which cores should
perform the cache operation. SMP messages are sent to all the specified
cores and the caller waits until all cores have acknowledged that they
have flushed their caches. Implementation is shown using both function
pointers and function IDs together with a switch statement. One needs
to be dropped before committing. Any preference?
---
 c/src/lib/libcpu/shared/src/cache_manager.c |  166 +++++++++++++++++++++++++++
 cpukit/rtems/include/rtems/rtems/cache.h    |   84 ++++++++++++++
 cpukit/score/include/rtems/score/smpimpl.h  |   13 +++
 3 files changed, 263 insertions(+)

diff --git a/c/src/lib/libcpu/shared/src/cache_manager.c b/c/src/lib/libcpu/shared/src/cache_manager.c
index 420a013..91e9f72 100644
--- a/c/src/lib/libcpu/shared/src/cache_manager.c
+++ b/c/src/lib/libcpu/shared/src/cache_manager.c
@@ -37,6 +37,172 @@
 
 #include <rtems.h>
 #include "cache_.h"
+#include <rtems/score/smplock.h>
+#include <rtems/score/smpimpl.h>
+
+#if defined( RTEMS_SMP )
+
+typedef void (*cache_manager_func_t)(const void * d_addr, size_t n_bytes);
+
+typedef enum {
+  FLUSH_MULTIPLE_DATA_LINES,
+  INVALIDATE_MULTIPLE_DATA_LINES,
+  INVALIDATE_MULTIPLE_INSTRUCTION_LINES,
+  FLUSH_ENTIRE_DATA,
+  INVALIDATE_ENTIRE_INSTRUCTION,
+  INVALIDATE_ENTIRE_DATA
+} cache_manager_func_id_t;
+
+typedef struct {
+  SMP_lock_Control lock;
+  cache_manager_func_t func;
+  cache_manager_func_id_t func_id;
+  Atomic_Uint count;
+  const void *addr;
+  size_t size;
+} Cache_Manager_SMP;
+
+static Cache_Manager_SMP _CM_SMP = {
+  .lock = SMP_LOCK_INITIALIZER("CacheMgr"),
+  .count = CPU_ATOMIC_INITIALIZER_UINT(0)
+};
+
+void
+_SMP_Cache_manager_ipi_handler(void)
+{
+#ifdef USE_FUNCPTR
+  _CM_SMP.func( _CM_SMP.addr, _CM_SMP.size );
+#else
+  switch( _CM_SMP.func_id ) {
+    case FLUSH_MULTIPLE_DATA_LINES:
+      rtems_cache_flush_multiple_data_lines(
+          _CM_SMP.addr, _CM_SMP.size );
+      break;
+    case INVALIDATE_MULTIPLE_DATA_LINES:
+      rtems_cache_invalidate_multiple_data_lines(
+          _CM_SMP.addr, _CM_SMP.size );
+      break;
+    case INVALIDATE_MULTIPLE_INSTRUCTION_LINES:
+      rtems_cache_invalidate_multiple_instruction_lines(
+          _CM_SMP.addr, _CM_SMP.size );
+      break;
+    case FLUSH_ENTIRE_DATA:
+      rtems_cache_flush_entire_data();
+      break;
+    case INVALIDATE_ENTIRE_INSTRUCTION:
+      rtems_cache_invalidate_entire_instruction();
+      break;
+    case INVALIDATE_ENTIRE_DATA:
+      rtems_cache_invalidate_entire_data();
+      break;
+    default:
+      _Assert( 0 );
+      break;
+  }
+#endif
+
+  _Atomic_Fetch_sub_uint( &_CM_SMP.count, 1, ATOMIC_ORDER_RELEASE );
+}
+
+static void
+_cache_manager_send_ipi_msg( const cpu_set_t *set, cache_manager_func_t func,
+    cache_manager_func_id_t func_id, const void * addr, size_t size )
+{
+  uint32_t i;
+  uint32_t set_size = 0;
+  SMP_lock_Context lock_context;
+
+  _Assert( _System_state_Is_up( _System_state_Get() ) );
+
+  for( i=0; i < _SMP_Get_processor_count(); ++i ) {
+    set_size += CPU_ISSET(i, set);
+  }
+
+  _SMP_lock_Acquire( &_CM_SMP.lock, &lock_context );
+
+  _CM_SMP.func = func;
+  _CM_SMP.func_id = func_id;
+  _CM_SMP.addr = addr;
+  _CM_SMP.size = size;
+  _Atomic_Store_uint( &_CM_SMP.count, set_size, ATOMIC_ORDER_RELEASE );
+  _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
+  _SMP_Send_message_cpu_set( set, SMP_MESSAGE_CACHE_MANAGER );
+
+  while(_Atomic_Load_uint( &_CM_SMP.count, ATOMIC_ORDER_ACQUIRE ) != 0 );
+
+  _SMP_lock_Release( &_CM_SMP.lock, &lock_context );
+}
+
+void
+rtems_cache_invalidate_entire_instruction_cpu_set ( const cpu_set_t *set )
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+  _cache_manager_send_ipi_msg( set,
+      (cache_manager_func_t)rtems_cache_invalidate_entire_instruction,
+      INVALIDATE_ENTIRE_INSTRUCTION, 0, 0 );
+#endif
+}
+
+void
+rtems_cache_flush_multiple_data_lines_cpu_set( const void *addr,
+    size_t size,
+    const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _cache_manager_send_ipi_msg( set, rtems_cache_flush_multiple_data_lines,
+      FLUSH_MULTIPLE_DATA_LINES, addr, size );
+#endif
+}
+
+void
+rtems_cache_invalidate_multiple_data_lines_cpu_set(
+  const void *addr,
+  size_t size,
+  const cpu_set_t *set
+)
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _cache_manager_send_ipi_msg( set, rtems_cache_invalidate_multiple_data_lines,
+      INVALIDATE_MULTIPLE_DATA_LINES, addr, size );
+#endif
+}
+
+void
+rtems_cache_invalidate_multiple_instruction_lines_cpu_set(
+  const void *addr,
+  size_t size,
+  const cpu_set_t *set
+)
+{
+#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
+  _cache_manager_send_ipi_msg( set,
+      rtems_cache_invalidate_multiple_instruction_lines,
+      INVALIDATE_MULTIPLE_INSTRUCTION_LINES, addr, size );
+#endif
+}
+
+void
+rtems_cache_flush_entire_data_cpu_set( const cpu_set_t *set )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _cache_manager_send_ipi_msg( set,
+      (cache_manager_func_t)rtems_cache_flush_entire_data,
+      FLUSH_ENTIRE_DATA, 0, 0 );
+#endif
+}
+
+void
+rtems_cache_invalidate_entire_data_cpu_set( const cpu_set_t *set )
+{
+#if defined(CPU_DATA_CACHE_ALIGNMENT)
+  _cache_manager_send_ipi_msg( set,
+      (cache_manager_func_t)rtems_cache_invalidate_entire_data,
+      INVALIDATE_ENTIRE_DATA, 0, 0 );
+#endif
+}
+#endif
 
 /*
  * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
diff --git a/cpukit/rtems/include/rtems/rtems/cache.h b/cpukit/rtems/include/rtems/rtems/cache.h
index 05f6612..824a8c0 100644
--- a/cpukit/rtems/include/rtems/rtems/cache.h
+++ b/cpukit/rtems/include/rtems/rtems/cache.h
@@ -188,6 +188,90 @@ void rtems_cache_disable_instruction( void );
  */
 void *rtems_cache_aligned_malloc ( size_t nbytes );
 
+#if defined( RTEMS_SMP )
+
+/**
+ * @brief Handles cache invalidation/flush requests from a remote CPU
+ *
+ */
+void _SMP_Cache_manager_ipi_handler(void);
+
+/**
+ * @brief Invalidates the entire instruction cache for a set of CPUs
+ *
+ * @see rtems_cache_invalidate_multiple_instruction_lines().
+ */
+void rtems_cache_invalidate_entire_instruction_cpu_set( const cpu_set_t *set);
+
+/**
+ * @brief Flushes multiple data cache lines for a set of CPUs
+ *
+ * Dirty cache lines covering the area are transferred to memory.  Depending on
+ * the cache implementation this may mark the lines as invalid.
+ *
+ * @param[in] addr The start address of the area to flush.
+ * @param[in] size The size in bytes of the area to flush.
+ */
+void rtems_cache_flush_multiple_data_lines_cpu_set( const void *addr,
+    size_t size,
+    const cpu_set_t *set
+);
+
+/**
+ * @brief Invalidates multiple data cache lines for a set of CPUs
+ *
+ * The cache lines covering the area are marked as invalid.  A later read
+ * access in the area will load the data from memory.
+ *
+ * In case the area is not aligned on cache line boundaries, then this
+ * operation may destroy unrelated data.
+ *
+ * @param[in] addr The start address of the area to invalidate.
+ * @param[in] size The size in bytes of the area to invalidate.
+ */
+void rtems_cache_invalidate_multiple_data_lines_cpu_set(
+  const void *addr,
+  size_t size,
+  const cpu_set_t *set
+);
+
+/**
+ * @brief Invalidates multiple instruction cache lines for a set of CPUs
+ *
+ * The cache lines covering the area are marked as invalid.  A later
+ * instruction fetch from the area will result in a load from memory.
+ *
+ * @param[in] addr The start address of the area to invalidate.
+ * @param[in] size The size in bytes of the area to invalidate.
+ */
+void rtems_cache_invalidate_multiple_instruction_lines_cpu_set(
+  const void *addr,
+  size_t size,
+  const cpu_set_t *set
+);
+
+/**
+ * @brief Flushes the entire data cache for a set of CPUs
+ *
+ * @see rtems_cache_flush_multiple_data_lines().
+ */
+void rtems_cache_flush_entire_data_cpu_set( const cpu_set_t *set );
+
+/**
+ * @brief Invalidates the entire instruction cache for a set of CPUs
+ *
+ * @see rtems_cache_invalidate_multiple_instruction_lines().
+ */
+void rtems_cache_invalidate_entire_instruction_cpu_set( const cpu_set_t *set );
+
+/**
+ * @brief Invalidates the entire data cache for a set of CPUs
+ *
+ */
+void rtems_cache_invalidate_entire_data_cpu_set( const cpu_set_t *set );
+
+#endif
+
 /**@}*/
 
 #ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/smpimpl.h b/cpukit/score/include/rtems/score/smpimpl.h
index 51dd34e..5db89ac 100644
--- a/cpukit/score/include/rtems/score/smpimpl.h
+++ b/cpukit/score/include/rtems/score/smpimpl.h
@@ -21,6 +21,7 @@
 #include <rtems/score/smp.h>
 #include <rtems/score/percpu.h>
 #include <rtems/fatal.h>
+#include <rtems/rtems/cache.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -51,6 +52,13 @@ extern "C" {
 #define SMP_MESSAGE_TEST 0x2UL
 
 /**
+ * @brief SMP message to request a cache manager invocation.
+ *
+ * @see _SMP_Send_message().
+ */
+#define SMP_MESSAGE_CACHE_MANAGER 0x4UL
+
+/**
  * @brief SMP fatal codes.
  */
 typedef enum {
@@ -148,6 +156,11 @@ static inline void _SMP_Inter_processor_interrupt_handler( void )
     if ( ( message & SMP_MESSAGE_TEST ) != 0 ) {
       ( *_SMP_Test_message_handler )( cpu_self );
     }
+
+    if ( ( message & SMP_MESSAGE_CACHE_MANAGER ) != 0 ) {
+      _SMP_Cache_manager_ipi_handler();
+    }
+
   }
 }
 
-- 
1.7.9.5




More information about the devel mailing list