[PATCH 3/5] bsp/arm: Provide L2 cache locking

Ralf Kirchner ralf.kirchner at embedded-brains.de
Thu Apr 10 16:30:21 UTC 2014


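Serialize the L2C-310 maintenance register accesses with an
rtems_interrupt_lock and split the flush and invalidate range operations
into blocks of at most CACHE_MAX_LOCKING_BYTES (4 KiB), so that the lock
is held only for one block at a time instead of for an arbitrarily large
range.

The block splitting follows the pattern sketched below. This is a
standalone illustration only; split_into_blocks and operate_range are
hypothetical names, while CACHE_MIN and CACHE_MAX_LOCKING_BYTES are the
macros added by this patch:

    #include <stdint.h>

    #define CACHE_MAX_LOCKING_BYTES ( 4 * 1024 )
    #define CACHE_MIN( a, b ) ( ( a ) < ( b ) ? ( a ) : ( b ) )

    /* Walk [adx, ADDR_LAST] in bounded blocks; each block is handed to
     * operate_range(), which acquires and releases the interrupt lock. */
    static void split_into_blocks(
      uint32_t       adx,
      const uint32_t ADDR_LAST,
      void        ( *operate_range )( uint32_t first, uint32_t last )
    )
    {
      uint32_t block_end =
        CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES );

      for ( ;
            adx <= ADDR_LAST;
            adx       = block_end + 1,
            block_end = CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES ) ) {
        ( *operate_range )( adx, block_end );
      }
    }
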
---
 c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h |  289 +++++++++++++---------
 1 file changed, 174 insertions(+), 115 deletions(-)

diff --git a/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h b/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h
index 8af65b3..8e7404d 100644
--- a/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h
+++ b/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h
@@ -59,6 +59,7 @@
 #include <assert.h>
 #include <bsp.h>
 #include <libcpu/arm-cp15.h>
+#include <rtems/rtems/intr.h>
 #include <bsp/arm-release-id.h>
 #include <bsp/arm-errata.h>
 #include "../include/arm-cache-l1.h"
@@ -80,6 +81,11 @@ extern "C" {
 #define CACHE_l2C_310_NUM_WAYS 8
 #define CACHE_l2C_310_WAY_MASK ( ( 1 << CACHE_l2C_310_NUM_WAYS ) - 1 )
 
+#define CACHE_MIN( a, b ) \
+  ( ( a ) < ( b ) ? ( a ) : ( b ) )
+
+#define CACHE_MAX_LOCKING_BYTES (4 * 1024)
+
 
 /* RTL release number as can be read from cache_id register */
 typedef enum {
@@ -451,6 +457,8 @@ typedef struct {
   uint32_t power_ctrl;
 } L2CC;
 
+static rtems_interrupt_lock         cache_lock = RTEMS_INTERRUPT_LOCK_INITIALIZER( "cache" );
+static rtems_interrupt_lock_context cache_lock_context;
 
 static bool l2c_310_cache_errata_is_applicable_727913(
   void
@@ -972,20 +980,17 @@ cache_l2c_310_flush_1_line( const void *d_addr )
 }
 
 static inline void
-cache_l2c_310_flush_range( const void *addr, size_t n_bytes )
+cache_l2c_310_flush_range( uint32_t adx, const uint32_t ADDR_LAST )
 {
-  if ( n_bytes != 0 ) {
-    uint32_t       adx       = (uint32_t) addr
-                               & ~CACHE_L2C_310_DATA_LINE_MASK;
-    const uint32_t ADDR_LAST = (uint32_t) addr + n_bytes - 1;
-    volatile L2CC *l2cc      = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
 
-    CACHE_ARM_ERRATA_764369_HANDLER();
+  CACHE_ARM_ERRATA_764369_HANDLER();
 
-    for (; adx <= ADDR_LAST; adx += CPU_DATA_CACHE_ALIGNMENT ) {
-      l2cc->clean_pa = adx;
-    }
+  rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
+  for (; adx <= ADDR_LAST; adx += CPU_DATA_CACHE_ALIGNMENT ) {
+    l2cc->clean_pa = adx;
   }
+  rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
 }
 
 static inline void
@@ -999,11 +1004,12 @@ cache_l2c_310_flush_entire( void )
     /* ensure ordering with previous memory accesses */
     _ARM_Data_memory_barrier();
 
+    rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
     l2cc->clean_inv_way = CACHE_l2C_310_WAY_MASK;
 
     while ( l2cc->clean_inv_way & CACHE_l2C_310_WAY_MASK ) {};
 
-
+    rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
   }
 }
 
@@ -1017,22 +1023,18 @@ cache_l2c_310_invalidate_1_line( const void *d_addr )
 }
 
 static inline void
-cache_l2c_310_invalidate_range( const void *addr, size_t n_bytes )
+cache_l2c_310_invalidate_range( uint32_t adx, const uint32_t ADDR_LAST )
 {
-  if ( n_bytes != 0 ) {
-    uint32_t       adx       = (uint32_t) addr
-                         & ~CACHE_L2C_310_INSTRUCTION_LINE_MASK;
-    const uint32_t ADDR_LAST = addr + n_bytes - 1;
-    volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
-
-    /* Back starting address up to start of a line and invalidate until end */
-    for (;
-         adx <= ADDR_LAST;
-         adx += CPU_INSTRUCTION_CACHE_ALIGNMENT ) {
-      /* Invalidate L2 cache line */
-      l2cc->inv_pa = adx;
-    }
+  volatile L2CC *l2cc      = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+  rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
+  for (;
+       adx <= ADDR_LAST;
+       adx += CPU_INSTRUCTION_CACHE_ALIGNMENT ) {
+    /* Invalidate L2 cache line */
+    l2cc->inv_pa = adx;
   }
+  rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
 }
 
 static inline void
@@ -1045,10 +1047,12 @@ cache_l2c_310_invalidate_entire( void )
   /* ensure ordering with previous memory accesses */
   _ARM_Data_memory_barrier();
 
+  rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
   l2cc->inv_way = CACHE_l2C_310_WAY_MASK;
 
   while ( l2cc->inv_way & CACHE_l2C_310_WAY_MASK ) ;
 
+  rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
 }
 
 static inline void
@@ -1063,10 +1067,12 @@ cache_l2c_310_clean_and_invalidate_entire( void )
     /* ensure ordering with previous memory accesses */
     _ARM_Data_memory_barrier();
 
+    rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
     l2cc->clean_inv_way = CACHE_l2C_310_WAY_MASK;
 
     while ( l2cc->clean_inv_way & CACHE_l2C_310_WAY_MASK ) ;
 
+    rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
   }
 }
 
@@ -1076,8 +1082,10 @@ cache_l2c_310_store( const void *d_addr )
   volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
 
 
+  rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
   l2cc->clean_pa = (uint32_t) d_addr;
 
+  rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
 }
 
 static inline void
@@ -1120,98 +1128,99 @@ static void cache_l2c_310_unlock( void )
 static inline void
 cache_l2c_310_enable( void )
 {
-  volatile L2CC *l2cc     = (volatile L2CC *) BSP_ARM_L2CC_BASE;
-  uint32_t       cache_id = l2cc->cache_id & CACHE_L2C_310_L2CC_ID_PART_MASK;
-  int            ways     = 0;
-
-
-  /* Do we actually have an L2C-310 cache controller?
-   * Has BSP_ARM_L2CC_BASE been configured correctly? */
-  switch ( cache_id ) {
-    case CACHE_L2C_310_L2CC_ID_PART_L310:
-    {
-      const cache_l2c_310_rtl_release RTL_RELEASE = 
-        l2cc->cache_id & CACHE_L2C_310_L2CC_ID_RTL_MASK;
-      /* If this assertion fails, you have a release of the
-       * L2C-310 cache for which the l2c_310_cache_errata_is_applicable_ ...
-       * methods are not yet implemented. This means you will get incorrect
-       * errata handling */
-      assert(    RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
-              || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
-              || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
-              || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
-              || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
-              || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
-              || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
-      if ( l2cc->aux_ctrl & ( 1 << 16 ) ) {
-        ways = 16;
-      } else {
-        ways = 8;
+  rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
+  {
+    volatile L2CC *l2cc     = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+    uint32_t       cache_id = l2cc->cache_id & CACHE_L2C_310_L2CC_ID_PART_MASK;
+    int            ways     = 0;
+
+
+    /* Do we actually have an L2C-310 cache controller?
+     * Has BSP_ARM_L2CC_BASE been configured correctly? */
+    switch ( cache_id ) {
+      case CACHE_L2C_310_L2CC_ID_PART_L310:
+      {
+        const cache_l2c_310_rtl_release RTL_RELEASE =
+          l2cc->cache_id & CACHE_L2C_310_L2CC_ID_RTL_MASK;
+        /* If this assertion fails, you have a release of the
+         * L2C-310 cache for which the l2c_310_cache_errata_is_applicable_ ...
+         * methods are not yet implemented. This means you will get incorrect
+         * errata handling */
+        assert(    RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+                || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+                || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+                || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+                || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+                || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+                || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+        if ( l2cc->aux_ctrl & ( 1 << 16 ) ) {
+          ways = 16;
+        } else {
+          ways = 8;
+        }
+
+        assert( ways == CACHE_l2C_310_NUM_WAYS );
       }
-
-      assert( ways == CACHE_l2C_310_NUM_WAYS );
-    }
-    break;
-    case CACHE_L2C_310_L2CC_ID_PART_L210:
-
-      /* Invalid case */
-
-      /* Support for this type is not implemented in this driver.
-       * Either support needs to get added or a seperate driver needs to get
-       * implemented */
-      assert( cache_id != CACHE_L2C_310_L2CC_ID_PART_L210 );
       break;
-    default:
+      case CACHE_L2C_310_L2CC_ID_PART_L210:
 
-      /* Unknown case */
-      assert( cache_id == CACHE_L2C_310_L2CC_ID_PART_L310 );
-      break;
-  }
+        /* Invalid case */
 
-  if ( ways > 0 ) {
-    /* Only enable if L2CC is currently disabled */    
-    if ( ways != 0
-         && ( l2cc->ctrl & CACHE_L2C_310_L2CC_ENABLE_MASK ) == 0 ) {
-      rtems_interrupt_level level;
-      uint32_t              aux;
+        /* Support for this type is not implemented in this driver.
+         * Either support needs to be added or a separate driver needs to be
+         * implemented */
+        assert( cache_id != CACHE_L2C_310_L2CC_ID_PART_L210 );
+        break;
+      default:
 
-      rtems_interrupt_disable( level );
+        /* Unknown case */
+        assert( cache_id == CACHE_L2C_310_L2CC_ID_PART_L310 );
+        break;
+    }
 
-      /* Set up the way size */
-      aux  = l2cc->aux_ctrl;
-      aux &= CACHE_L2C_310_L2CC_AUX_REG_ZERO_MASK; /* Set way_size to 0 */
-      aux |= CACHE_L2C_310_L2CC_AUX_REG_DEFAULT_MASK;
+    if ( ways > 0 ) {
+      /* Only enable if L2CC is currently disabled */
+      if ( ways != 0
+          && ( l2cc->ctrl & CACHE_L2C_310_L2CC_ENABLE_MASK ) == 0 ) {
+        uint32_t              aux;
 
-      /* Make sure that I&D is not locked down when starting */
-      cache_l2c_310_unlock();
+        /* Set up the way size */
+        aux  = l2cc->aux_ctrl;
+        aux &= CACHE_L2C_310_L2CC_AUX_REG_ZERO_MASK; /* Set way_size to 0 */
+        aux |= CACHE_L2C_310_L2CC_AUX_REG_DEFAULT_MASK;
 
-      /* Level 2 configuration and control registers must not get written while
-       * background operations are pending */
-      while ( l2cc->inv_way & CACHE_l2C_310_WAY_MASK ) ;
+        /* Make sure that I&D is not locked down when starting */
+        cache_l2c_310_unlock();
 
-      while ( l2cc->clean_way & CACHE_l2C_310_WAY_MASK ) ;
+        /* Level 2 configuration and control registers must not get written while
+         * background operations are pending */
+        while ( l2cc->inv_way & CACHE_l2C_310_WAY_MASK ) ;
 
-      while ( l2cc->clean_inv_way & CACHE_l2C_310_WAY_MASK ) ;
+        while ( l2cc->clean_way & CACHE_l2C_310_WAY_MASK ) ;
 
-      l2cc->aux_ctrl = aux;
+        while ( l2cc->clean_inv_way & CACHE_l2C_310_WAY_MASK ) ;
 
-      /* Set up the latencies */
-      l2cc->tag_ram_ctrl  = CACHE_L2C_310_L2CC_TAG_RAM_DEFAULT_LAT;
-      l2cc->data_ram_ctrl = CACHE_L2C_310_L2CC_DATA_RAM_DEFAULT_MASK;
+        l2cc->aux_ctrl = aux;
 
-      cache_l2c_310_invalidate_entire();
+        /* Set up the latencies */
+        l2cc->tag_ram_ctrl  = CACHE_L2C_310_L2CC_TAG_RAM_DEFAULT_LAT;
+        l2cc->data_ram_ctrl = CACHE_L2C_310_L2CC_DATA_RAM_DEFAULT_MASK;
 
-      /* Clear the pending interrupts */
-      l2cc->int_clr = l2cc->int_raw_status;
+        rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
+        cache_l2c_310_invalidate_entire();
+        rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
 
-      l2c_310_cache_check_errata();
+        /* Clear the pending interrupts */
+        l2cc->int_clr = l2cc->int_raw_status;
 
-      /* Enable the L2CC */
-      l2cc->ctrl |= CACHE_L2C_310_L2CC_ENABLE_MASK;
+        l2c_310_cache_check_errata();
 
-      rtems_interrupt_enable( level );
+        /* Enable the L2CC */
+        l2cc->ctrl |= CACHE_L2C_310_L2CC_ENABLE_MASK;
+      }
     }
   }
+  rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
 }
 
 static inline void 
@@ -1223,6 +1232,7 @@ cache_l2c_310_disable( void )
   if ( l2cc->ctrl & CACHE_L2C_310_L2CC_ENABLE_MASK ) {
     /* Clean and Invalidate L2 Cache */
     cache_l2c_310_flush_entire();
+    rtems_interrupt_lock_acquire( &cache_lock, &cache_lock_context );
 
     /* Level 2 configuration and control registers must not get written while
      * background operations are pending */
@@ -1234,6 +1244,7 @@ cache_l2c_310_disable( void )
 
     /* Disable the L2 cache */
     l2cc->ctrl &= ~CACHE_L2C_310_L2CC_ENABLE_MASK;
+    rtems_interrupt_lock_release( &cache_lock, &cache_lock_context );
   }
 }
 
@@ -1272,14 +1283,28 @@ _CPU_cache_flush_data_range(
 )
 {
   if ( n_bytes != 0 ) {
+    /* Back starting address up to start of a line and flush until ADDR_LAST */
+    uint32_t       adx       = (uint32_t) d_addr
+      & ~CACHE_L2C_310_DATA_LINE_MASK;
+    const uint32_t ADDR_LAST =
+      (uint32_t)( (size_t)d_addr + n_bytes - 1 );
+    uint32_t       block_end =
+      CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES );
     arm_cache_l1_flush_data_range( 
       d_addr,
       n_bytes
     );
-    cache_l2c_310_flush_range(
-      d_addr,
-      n_bytes
-    );
+    /* The L2 cache operations are performed under an interrupt lock. Thus
+     * operate on at most CACHE_MAX_LOCKING_BYTES at a time */
+    for (;
+         adx      <= ADDR_LAST;
+         adx       = block_end + 1, 
+         block_end = CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES )) {
+      cache_l2c_310_flush_range(
+        adx,
+        block_end
+      );
+    }
   }
 }
 
@@ -1297,20 +1322,42 @@ _CPU_cache_invalidate_data_range(
 )
 {
   if ( n_bytes > 0 ) {
+    /* Back starting address up to start of a line and invalidate until ADDR_LAST */
+    uint32_t       adx       = (uint32_t) addr_first
+      & ~CACHE_L2C_310_DATA_LINE_MASK;
+    const uint32_t ADDR_LAST =
+      (uint32_t)( (size_t)addr_first + n_bytes - 1 );
+    uint32_t       block_end =
+      CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES );
     CACHE_ARM_ERRATA_764369_HANDLER();
     
-    cache_l2c_310_invalidate_range(
-      addr_first,
-      n_bytes
-    );
+    /* The L2 cache operations are performed under an interrupt lock. Thus
+     * operate on at most CACHE_MAX_LOCKING_BYTES at a time */
+    for (;
+         adx      <= ADDR_LAST;
+         adx       = block_end + 1,
+         block_end = CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES )) {
+      cache_l2c_310_invalidate_range(
+        adx,
+        block_end
+      );
+    }
     arm_cache_l1_invalidate_data_range(
       addr_first,
       n_bytes
     );
-    cache_l2c_310_invalidate_range(
-      addr_first,
-      n_bytes
-    );
+
+    adx       = (uint32_t)addr_first & ~CACHE_L2C_310_DATA_LINE_MASK;
+    block_end = CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES );
+    for (;
+         adx      <= ADDR_LAST;
+         adx       = block_end + 1,
+         block_end = CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES )) {
+      cache_l2c_310_invalidate_range(
+        adx,
+        block_end
+      );
+    }
     arm_cache_l1_invalidate_data_range(
       addr_first,
       n_bytes
@@ -1362,13 +1409,25 @@ _CPU_cache_invalidate_instruction_range(
 )
 {
   if ( n_bytes != 0 ) {
-   CACHE_ARM_ERRATA_764369_HANDLER();
+    uint32_t       adx       = (uint32_t) i_addr
+      & ~CACHE_L2C_310_INSTRUCTION_LINE_MASK;
+    const uint32_t ADDR_LAST =
+      (uint32_t)( (size_t)i_addr + n_bytes - 1 );
+    uint32_t       block_end =
+      CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES );
+
+    CACHE_ARM_ERRATA_764369_HANDLER();
     
     /* Invalidate L2 cache lines */
-    cache_l2c_310_invalidate_range(
-      i_addr,
-      n_bytes
-    );
+    for (;
+         adx      <= ADDR_LAST;
+         adx       = block_end + 1,
+         block_end = CACHE_MIN( ADDR_LAST, adx + CACHE_MAX_LOCKING_BYTES )) {
+      cache_l2c_310_invalidate_range(
+        adx,
+        block_end
+      );
+    }
     
     arm_cache_l1_invalidate_instruction_range(
       i_addr,
-- 
1.7.10.4



