[PATCH 11/17] bsp/arm: Add handling for level 2 L2C-310 cache controller
Ralf Kirchner
ralf.kirchner at embedded-brains.de
Wed Feb 26 10:51:58 UTC 2014
arm-l2c-310/cache_.h contains the handling for the L2C-310
level 2 cache controller from ARM. It references the ARM
level 1 cache handling in the new file arm-cache-l1.h.
---
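Note: a BSP opts in to this support by providing this cache_.h on its
include path and defining BSP_ARM_L2CC_BASE for the controller. A
minimal sketch of the BSP side (the base address is illustrative only,
not taken from this patch):

  /* in the BSP's bsp.h */
  #define BSP_ARM_L2CC_BASE 0xF8F02000 /* example controller base */

The CPU_DATA_CACHE_ALIGNMENT, CPU_INSTRUCTION_CACHE_ALIGNMENT and
CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS defines near the top of
cache_.h then give the rtems_cache_* functions their bodies.
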
c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h | 1480 ++++++++++++++++++++
c/src/lib/libbsp/arm/shared/include/arm-cache-l1.h | 528 +++++++
2 files changed, 2008 insertions(+)
create mode 100644 c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h
create mode 100644 c/src/lib/libbsp/arm/shared/include/arm-cache-l1.h
diff --git a/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h b/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h
new file mode 100644
index 0000000..d5555da
--- /dev/null
+++ b/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h
@@ -0,0 +1,1480 @@
+/**
+ * @file cache_.h
+ *
+ * @ingroup arm_shared
+ *
+ * @brief Cache definitions and functions.
+ *
+ * This file implements handling for the ARM L2C-310 cache controller
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef LIBBSP_ARM_SHARED_L2C_310_CACHE_H
+#define LIBBSP_ARM_SHARED_L2C_310_CACHE_H
+
+#include <assert.h>
+#include <bsp.h>
+#include <libcpu/arm-cp15.h>
+#include <bsp/arm-release-id.h>
+#include <bsp/arm-errata.h>
+#include "../include/arm-cache-l1.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* These defines also ensure that the rtems_cache_* functions have bodies */
+#define CPU_DATA_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_DATA_ALIGNMENT
+#define CPU_INSTRUCTION_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT
+#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+#define CACHE_L2C_310_L1_DATA_LINE_MASK ( CPU_DATA_CACHE_ALIGNMENT - 1 )
+#define CACHE_L2C_310_L1_INSTRUCTION_LINE_MASK \
+ ( CPU_INSTRUCTION_CACHE_ALIGNMENT \
+ - 1 )
+#define CACHE_l2C_310_L2_NUM_WAYS 8
+#define CACHE_l2C_310_L2_WAY_MASK ( ( 1 << CACHE_l2C_310_L2_NUM_WAYS ) - 1 )
+
+
+/* RTL release number as can be read from cache_id register */
+typedef enum {
+ CACHE_L2C_310_RTL_RELEASE_R0_P0 = 0x0,
+ CACHE_L2C_310_RTL_RELEASE_R1_P0 = 0x2,
+ CACHE_L2C_310_RTL_RELEASE_R2_P0 = 0x4,
+ CACHE_L2C_310_RTL_RELEASE_R3_P0 = 0x5,
+ CACHE_L2C_310_RTL_RELEASE_R3_P1 = 0x6,
+ CACHE_L2C_310_RTL_RELEASE_R3_P2 = 0x8,
+ CACHE_L2C_310_RTL_RELEASE_R3_P3 = 0x9
+} cache_l2c_310_rtl_release;
+
+/**
+ * @brief Cache Functions and Definitions
+ */
+
+/**
+ * @brief L2CC Register Offsets
+ */
+typedef struct {
+ uint32_t cache_id; /* Cache ID */
+#define L2X0_CACHE_ID_RTL_MASK 0x3f
+#define L2X0_CACHE_ID_PART_MASK ( 0xf << 6 )
+#define L2X0_CACHE_ID_PART_L210 ( 1 << 6 )
+#define L2X0_CACHE_ID_PART_L310 ( 3 << 6 )
+ uint32_t cache_type; /* Cache type */
+#define L2X0_CACHE_TYPE_DATA_BANKING_MASK 0x80000000 /* 1 if data banking implemented, 0 if not */
+#define L2X0_CACHE_TYPE_CTYPE_MASK 0x1E000000 /* 11xy, where: x=1 if pl310_LOCKDOWN_BY_MASTER is defined, otherwise 0 */
+#define L2X0_CACHE_TYPE_CTYPE_SHIFT 25 /* y=1 if pl310_LOCKDOWN_BY_LINE is defined, otherwise 0. */
+#define L2X0_CACHE_TYPE_HARVARD_MASK 0x01000000 /* 1 for Harvard architecture, 0 for unified architecture */
+#define L2X0_CACHE_TYPE_SIZE_D_WAYS_MASK 0x00700000 /* Data cache way size = 2 Exp(value + 2) KB */
+#define L2X0_CACHE_TYPE_SIZE_D_WAYS_SHIFT 20
+#define L2X0_CACHE_TYPE_NUM_D_WAYS_MASK 0x00040000 /* Associativity, i.e. number of data ways = (value * 8) + 8 */
+#define L2X0_CACHE_TYPE_NUM_D_WAYS_SHIFT 18
+#define L2X0_CACHE_TYPE_LENGTH_D_LINE_MASK 0x00003000 /* Data cache line length 00 - 32 */
+#define L2X0_CACHE_TYPE_LENGTH_D_LINE_SHIFT 12
+#define L2X0_CACHE_TYPE_LENGTH_D_LINE_VAL_32 0x0
+#define L2X0_CACHE_TYPE_SIZE_I_WAYS_MASK 0x00000700 /* Instruction cache way size = 2 Exp(value + 2) KB */
+#define L2X0_CACHE_TYPE_SIZE_I_WAYS_SHIFT 8
+#define L2X0_CACHE_TYPE_NUM_I_WAYS_MASK 0x00000040 /* Associativity, i.e. number of instruction ways = (value * 8) + 8 */
+#define L2X0_CACHE_TYPE_NUM_I_WAYS_SHIFT 6
+#define L2X0_CACHE_TYPE_LENGTH_I_LINE_MASK 0x00000003 /* Instruction cache line length 00 - 32 */
+#define L2X0_CACHE_TYPE_LENGTH_I_LINE_SHIFT 0
+#define L2X0_CACHE_TYPE_LENGTH_I_LINE_VAL_32 0x0
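+
+/* Illustrative decode sketch (not used by this driver): following the field
+ * comments above, the data way size in KB could be derived as
+ *
+ *   uint32_t ct = l2cc->cache_type;
+ *   uint32_t val = ( ct & L2X0_CACHE_TYPE_SIZE_D_WAYS_MASK )
+ *     >> L2X0_CACHE_TYPE_SIZE_D_WAYS_SHIFT;
+ *   uint32_t way_size_kb = 1U << ( val + 2 ); // 2 Exp(value + 2) KB
+ */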
+
+ uint8_t reserved_8[0x100 - 8];
+ uint32_t ctrl; /* Control */
+/** @brief Enables the L2CC */
+#define L2CC_ENABLE_MASK 0x00000001
+
+ /** @brief Auxiliary control */
+ uint32_t aux_ctrl;
+
+/** @brief Early BRESP Enable */
+#define L2CC_AUX_EBRESPE_MASK 0x40000000
+
+/** @brief Instruction Prefetch Enable */
+#define L2CC_AUX_IPFE_MASK 0x20000000
+
+/** @brief Data Prefetch Enable */
+#define L2CC_AUX_DPFE_MASK 0x10000000
+
+/** @brief Non-secure interrupt access control */
+#define L2CC_AUX_NSIC_MASK 0x08000000
+
+/** @brief Non-secure lockdown enable */
+#define L2CC_AUX_NSLE_MASK 0x04000000
+
+/** @brief Cache replacement policy */
+#define L2CC_AUX_CRP_MASK 0x02000000
+
+/** @brief Force write allocate */
+#define L2CC_AUX_FWE_MASK 0x01800000
+
+/** @brief Shared attribute override enable */
+#define L2CC_AUX_SAOE_MASK 0x00400000
+
+/** @brief Parity enable */
+#define L2CC_AUX_PE_MASK 0x00200000
+
+/** @brief Event monitor bus enable */
+#define L2CC_AUX_EMBE_MASK 0x00100000
+
+/** @brief Way-size */
+#define L2CC_AUX_WAY_SIZE_MASK 0x000E0000
+#define L2CC_AUX_WAY_SIZE_SHIFT 17
+
+/** @brief Associativity */
+#define L2CC_AUX_ASSOC_MASK 0x00010000
+
+/** @brief Shared attribute invalidate enable */
+#define L2CC_AUX_SAIE_MASK 0x00002000
+
+/** @brief Exclusive cache configuration */
+#define L2CC_AUX_EXCL_CACHE_MASK 0x00001000
+
+/** @brief Store buffer device limitation Enable */
+#define L2CC_AUX_SBDLE_MASK 0x00000800
+
+/** @brief High Priority for SO and Dev Reads Enable */
+#define L2CC_AUX_HPSODRE_MASK 0x00000400
+
+/** @brief Full line of zero enable */
+#define L2CC_AUX_FLZE_MASK 0x00000001
+
+/** @brief Enable all prefetching */
+#define L2CC_AUX_REG_DEFAULT_MASK \
+  ( ( L2CC_AUX_WAY_SIZE_MASK & ( 0x3 << L2CC_AUX_WAY_SIZE_SHIFT ) ) \
+    | L2CC_AUX_PE_MASK      /* Prefetch enable */ \
+    | L2CC_AUX_SAOE_MASK    /* Shared attribute override enable */ \
+    | L2CC_AUX_CRP_MASK     /* Cache replacement policy */ \
+    | L2CC_AUX_DPFE_MASK    /* Data prefetch enable */ \
+    | L2CC_AUX_IPFE_MASK    /* Instruction prefetch enable */ \
+    | L2CC_AUX_EBRESPE_MASK /* Early BRESP enable */ )
+
+#define L2CC_AUX_REG_ZERO_MASK 0xFFF1FFFF
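+
+/* These two masks work together; cache_l2c_310_enable() below applies them
+ * to the auxiliary control register as
+ *
+ *   aux = l2cc->aux_ctrl;
+ *   aux &= L2CC_AUX_REG_ZERO_MASK;    // clear the way-size field
+ *   aux |= L2CC_AUX_REG_DEFAULT_MASK; // default way size and prefetching
+ *   l2cc->aux_ctrl = aux;
+ */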
+
+#define L2CC_RAM_1_CYCLE_LAT_VAL 0x00000000 /* 1 cycle of latency, there is no additional latency for tag RAM */
+#define L2CC_RAM_2_CYCLE_LAT_VAL 0x00000001 /* 2 cycles of latency for tag RAM */
+#define L2CC_RAM_3_CYCLE_LAT_VAL 0x00000002 /* 3 cycles of latency for tag RAM */
+#define L2CC_RAM_4_CYCLE_LAT_VAL 0x00000003 /* 4 cycles of latency for tag RAM */
+#define L2CC_RAM_5_CYCLE_LAT_VAL 0x00000004 /* 5 cycles of latency for tag RAM */
+#define L2CC_RAM_6_CYCLE_LAT_VAL 0x00000005 /* 6 cycles of latency for tag RAM */
+#define L2CC_RAM_7_CYCLE_LAT_VAL 0x00000006 /* 7 cycles of latency for tag RAM */
+#define L2CC_RAM_8_CYCLE_LAT_VAL 0x00000007 /* 8 cycles of latency for tag RAM */
+#define L2CC_RAM_SETUP_SHIFT 0x00000000 /* Shift left setup latency values by this value */
+#define L2CC_RAM_READ_SHIFT 0x00000004 /* Shift left read latency values by this value */
+#define L2CC_RAM_WRITE_SHIFT 0x00000008 /* Shift left write latency values by this value */
+#define L2CC_RAM_SETUP_LAT_MASK 0x00000007 /* Mask for RAM setup latency */
+#define L2CC_RAM_READ_LAT_MASK 0x00000070 /* Mask for RAM read latency */
+#define L2CC_RAM_WRITE_LAT_MASK 0x00000700 /* Mask for RAM write latency */
+ /** @brief Latency for tag RAM */
+ uint32_t tag_ram_ctrl;
+#define L2CC_TAG_RAM_DEFAULT_LAT \
+ ( ( L2CC_RAM_2_CYCLE_LAT_VAL << L2CC_RAM_SETUP_SHIFT ) \
+ | ( L2CC_RAM_2_CYCLE_LAT_VAL << L2CC_RAM_READ_SHIFT ) \
+ | ( L2CC_RAM_2_CYCLE_LAT_VAL << L2CC_RAM_WRITE_SHIFT ) ) /* Latency for tag RAM */
+ /** @brief Latency for data RAM */
+ uint32_t data_ram_ctrl;
+#define L2CC_DATA_RAM_DEFAULT_MASK \
+ ( ( L2CC_RAM_2_CYCLE_LAT_VAL << L2CC_RAM_SETUP_SHIFT ) \
+ | ( L2CC_RAM_3_CYCLE_LAT_VAL << L2CC_RAM_READ_SHIFT ) \
+ | ( L2CC_RAM_2_CYCLE_LAT_VAL << L2CC_RAM_WRITE_SHIFT ) ) /* Latency for data RAM */
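+
+/* A board with slower RAMs could build its own latency value from the
+ * macros above; an illustrative (made-up) data RAM setting would be
+ *
+ *   l2cc->data_ram_ctrl =
+ *     ( L2CC_RAM_3_CYCLE_LAT_VAL << L2CC_RAM_SETUP_SHIFT )
+ *     | ( L2CC_RAM_4_CYCLE_LAT_VAL << L2CC_RAM_READ_SHIFT )
+ *     | ( L2CC_RAM_3_CYCLE_LAT_VAL << L2CC_RAM_WRITE_SHIFT );
+ */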
+
+ uint8_t reserved_110[0x200 - 0x110];
+
+ /** @brief Event counter control */
+ uint32_t ev_ctrl;
+
+ /** @brief Event counter 1 configuration */
+ uint32_t ev_cnt1_cfg;
+
+ /** @brief Event counter 0 configuration */
+ uint32_t ev_cnt0_cfg;
+
+ /** @brief Event counter 1 value */
+ uint32_t ev_cnt1;
+
+ /** @brief Event counter 0 value */
+ uint32_t ev_cnt0;
+
+ /** @brief Interrupt enable mask */
+ uint32_t int_mask;
+
+  /** @brief Masked interrupt status (read-only) */
+ uint32_t int_mask_status;
+
+ /** @brief Unmasked interrupt status */
+ uint32_t int_raw_status;
+
+ /** @brief Interrupt clear */
+ uint32_t int_clr;
+
+/**
+ * @name Interrupt bit masks
+ */
+
+/** @brief DECERR from L3 */
+#define L2CC_INT_DECERR_MASK 0x00000100
+
+/** @brief SLVERR from L3 */
+#define L2CC_INT_SLVERR_MASK 0x00000080
+
+/** @brief Error on L2 data RAM (Read) */
+#define L2CC_INT_ERRRD_MASK 0x00000040
+
+/** @brief Error on L2 tag RAM (Read) */
+#define L2CC_INT_ERRRT_MASK 0x00000020
+
+/** @brief Error on L2 data RAM (Write) */
+#define L2CC_INT_ERRWD_MASK 0x00000010
+
+/** @brief Error on L2 tag RAM (Write) */
+#define L2CC_INT_ERRWT_MASK 0x00000008
+
+/** @brief Parity Error on L2 data RAM (Read) */
+#define L2CC_INT_PARRD_MASK 0x00000004
+
+/** @brief Parity Error on L2 tag RAM (Read) */
+#define L2CC_INT_PARRT_MASK 0x00000002
+
+/** @brief Event Counter1/0 Overflow Increment */
+#define L2CC_INT_ECNTR_MASK 0x00000001
+
+ uint8_t reserved_224[0x730 - 0x224];
+
+ /** @brief Drain the STB */
+ uint32_t cache_sync;
+ uint8_t reserved_734[0x740 - 0x734];
+ uint32_t dummy_cache_sync_reg; /* ARM Errata 753970 for pl310-r3p0 */
+ uint8_t reserved_744[0x770 - 0x744];
+
+ /** @brief Invalidate line by PA */
+ uint32_t inv_pa;
+ uint8_t reserved_774[0x77c - 0x774];
+
+ /** @brief Invalidate by Way */
+ uint32_t inv_way;
+ uint8_t reserved_780[0x7b0 - 0x780];
+
+ /** @brief Clean Line by PA */
+ uint32_t clean_pa;
+ uint8_t reserved_7b4[0x7b8 - 0x7b4];
+
+ /** @brief Clean Line by Set/Way */
+ uint32_t clean_index;
+
+ /** @brief Clean by Way */
+ uint32_t clean_way;
+ uint8_t reserved_7c0[0x7f0 - 0x7c0];
+
+ /** @brief Clean and Invalidate Line by PA */
+ uint32_t clean_inv_pa;
+ uint8_t reserved_7f4[0x7f8 - 0x7f4];
+
+ /** @brief Clean and Invalidate Line by Set/Way */
+ uint32_t clean_inv_indx;
+
+ /** @brief Clean and Invalidate by Way */
+ uint32_t clean_inv_way;
+
+ /** @brief Data lock down 0 */
+ uint32_t d_lockdown_0;
+
+ /** @brief Instruction lock down 0 */
+ uint32_t i_lockdown_0;
+
+ /** @brief Data lock down 1 */
+ uint32_t d_lockdown_1;
+
+ /** @brief Instruction lock down 1 */
+ uint32_t i_lockdown_1;
+
+ /** @brief Data lock down 2 */
+ uint32_t d_lockdown_2;
+
+ /** @brief Instruction lock down 2 */
+ uint32_t i_lockdown_2;
+
+ /** @brief Data lock down 3 */
+ uint32_t d_lockdown_3;
+
+ /** @brief Instruction lock down 3 */
+ uint32_t i_lockdown_3;
+
+ /** @brief Data lock down 4 */
+ uint32_t d_lockdown_4;
+
+ /** @brief Instruction lock down 4 */
+ uint32_t i_lockdown_4;
+
+ /** @brief Data lock down 5 */
+ uint32_t d_lockdown_5;
+
+ /** @brief Instruction lock down 5 */
+ uint32_t i_lockdown_5;
+
+ /** @brief Data lock down 6 */
+ uint32_t d_lockdown_6;
+
+ /** @brief Instruction lock down 6 */
+ uint32_t i_lockdown_6;
+
+ /** @brief Data lock down 7 */
+ uint32_t d_lockdown_7;
+
+ /** @brief Instruction lock down 7 */
+ uint32_t i_lockdown_7;
+
+ uint8_t reserved_940[0x950 - 0x940];
+
+ /** @brief Lockdown by Line Enable */
+ uint32_t lock_line_en;
+
+ /** @brief Cache lockdown by way */
+ uint32_t unlock_way;
+
+ uint8_t reserved_958[0xc00 - 0x958];
+
+ /** @brief Address range redirect, part 1 */
+ uint32_t addr_filtering_start;
+
+ /** @brief Address range redirect, part 2 */
+ uint32_t addr_filtering_end;
+
+/** @brief Address filtering valid bits */
+#define L2CC_ADDR_FILTER_VALID_MASK 0xFFF00000
+
+/** @brief Address filtering enable bit */
+#define L2CC_ADDR_FILTER_ENABLE_MASK 0x00000001
+
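+/* This driver leaves the address filtering registers alone; for
+ * illustration only (the addresses are made up), redirecting a range
+ * would look like this, writing the end register before enabling via the
+ * start register:
+ *
+ *   l2cc->addr_filtering_end = 0x80000000 & L2CC_ADDR_FILTER_VALID_MASK;
+ *   l2cc->addr_filtering_start =
+ *     ( 0x40000000 & L2CC_ADDR_FILTER_VALID_MASK )
+ *     | L2CC_ADDR_FILTER_ENABLE_MASK;
+ */
+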
+ uint8_t reserved_c08[0xf40 - 0xc08];
+
+ /** @brief Debug control */
+ uint32_t debug_ctrl;
+
+/** @brief Debug SPIDEN bit */
+#define L2CC_DEBUG_SPIDEN_MASK 0x00000004
+
+/** @brief Debug DWB bit, forces write through */
+#define L2CC_DEBUG_DWB_MASK 0x00000002
+
+/** @brief Debug DCL bit, disables cache line fill */
+#define L2CC_DEBUG_DCL_MASK 0x00000001
+
+ uint8_t reserved_f44[0xf60 - 0xf44];
+
+  /** @brief Prefetch control */
+ uint32_t prefetch_ctrl;
+#define L2CC_PREFETCH_OFFSET_MASK 0x0000001F /* Prefetch offset */
+ uint8_t reserved_f64[0xf80 - 0xf64];
+
+  /** @brief Power control */
+ uint32_t power_ctrl;
+} L2CC;
+
+/* Errata table for the L2C-310 level 2 cache from ARM.
+ * Information taken from ARM's
+ * "CoreLink controllers and peripherals
+ * - System controllers
+ * - L2C-310 Level 2 Cache Controller
+ * - Revision r3p3
+ * - Software Developer Errata Notice
+ * - ARM CoreLink Level 2 Cache Controller (L2C-310 or PL310), r3 releases Software Developers Errata Notice"
+ * The corresponding link is: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0360f/BABJFIBA.html
+ * Please see this document for more information on these errata. */
+static bool l2c_310_cache_errata_is_applicable_753970(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+
+
+static bool l2c_310_cache_errata_is_applicable_588369(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_727913(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_727914(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_727915(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_729806(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_729815(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_742884(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_752271(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_765569(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+static bool l2c_310_cache_errata_is_applicable_769419(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ is_applicable = false;
+ break;
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+
+#ifdef CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS
+static bool l2c_310_cache_errata_is_applicable_754670(
+ void
+)
+{
+ volatile L2CC *l2cc =
+ (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ bool is_applicable = false;
+
+ switch( RTL_RELEASE ) {
+ case CACHE_L2C_310_RTL_RELEASE_R3_P3:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P2:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P1:
+ case CACHE_L2C_310_RTL_RELEASE_R3_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R2_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R1_P0:
+ case CACHE_L2C_310_RTL_RELEASE_R0_P0:
+ is_applicable = true;
+ break;
+ default:
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+ break;
+ }
+
+ return is_applicable;
+}
+#endif /* CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS */
+
+/* Errata Handlers */
+#if ( defined( RTEMS_SMP ) )
+ #define CACHE_ARM_ERRATA_764369_HANDLER() \
+ if( arm_errata_is_applicable_processor_errata_764369() ) { \
+ _ARM_Data_synchronization_barrier(); \
+ }
+#else /* #if ( defined( RTEMS_SMP ) ) */
+ #define CACHE_ARM_ERRATA_764369_HANDLER()
+#endif /* #if ( defined( RTEMS_SMP ) ) */
+
+/* The common workaround for this erratum would be to add a data
+ * synchronization barrier at the beginning of the abort handler. But for
+ * RTEMS a call of the abort handler means a fatal condition anyway, so
+ * there is no need to handle this erratum. */
+#define CACHE_ARM_ERRATA_775420_HANDLER() \
+  if( arm_errata_is_applicable_processor_errata_775420() ) { \
+  } \
+
+static void l2c_310_cache_check_errata( void )
+{
+  /* This erratum gets handled within the sources: 588369. Clean + inv may
+   * keep the cache line if it was clean. The recommended workaround,
+   * applied in cache_l2c_310_flush_1_line(), is to clean and then
+   * invalidate the cache line, with write-back and cache linefill
+   * disabled. */
+  /* assert( ! l2c_310_cache_errata_is_applicable_588369() ); */
+
+ /* Unhandled erratum present: 727913 Prefetch dropping feature can cause incorrect behavior when PL310 handles reads that cross cache line boundary */
+ assert( ! l2c_310_cache_errata_is_applicable_727913() );
+
+ /* Unhandled erratum present: 727914 Double linefill feature can cause deadlock */
+ assert( ! l2c_310_cache_errata_is_applicable_727914() );
+
+ /* Unhandled erratum present: 727915 Background Clean and Invalidate by Way operation can cause data corruption */
+ assert( ! l2c_310_cache_errata_is_applicable_727915() );
+
+ /* Unhandled erratum present: 729806 Speculative reads from the Cortex-A9 MPCore processor can cause deadlock */
+ assert( ! l2c_310_cache_errata_is_applicable_729806() );
+
+ if( l2c_310_cache_errata_is_applicable_729815() )
+ {
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+ assert( 0 == ( l2cc->aux_ctrl & L2CC_AUX_HPSODRE_MASK ) );
+
+    /* Erratum: 729815 The “High Priority for SO and Dev reads” feature can cause Quality of Service issues to cacheable read transactions */
+
+ /* Conditions
+ This problem occurs when the following conditions are met:
+ 1. Bit[10] “High Priority for SO and Dev reads enable” of the PL310 Auxiliary Control Register is set to 1.
+ 2. PL310 receives a cacheable read that misses in the L2 cache.
+ 3. PL310 receives a continuous flow of Strongly Ordered or Device reads that take all address slots in the master
+ interface.
+ Workaround
+ A workaround is only necessary in systems that are able to issue a continuous flow of Strongly Ordered or Device
+ reads. In such a case, the workaround is to disable the “High Priority for SO and Dev reads” feature. This is the
+ default behavior.*/
+ }
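+
+  /* A hedged sketch of the 729815 workaround, in case a BSP ever needs to
+   * force the feature off instead of asserting on it (the auxiliary control
+   * register may only be written while the cache is disabled):
+   *
+   *   l2cc->aux_ctrl &= ~L2CC_AUX_HPSODRE_MASK;
+   */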
+
+ /* Unhandled erratum present: 742884 Double linefill feature might introduce circular dependency and deadlock */
+ assert( ! l2c_310_cache_errata_is_applicable_742884() );
+
+ /* Unhandled erratum present: 752271 Double linefill feature can cause data corruption */
+ assert( ! l2c_310_cache_errata_is_applicable_752271() );
+
+  /* This erratum gets handled with a workaround: 753970 The Cache Sync
+   * operation prevents further bufferable writes from merging in the store
+   * buffer. Search for 753970 in cache_.h for detailed information. */
+
+ /* Conditions
+ This problem occurs when the following conditions are met:
+ 1. PL310 receives a Cache Sync operation.
+ Workaround
+ The proposed workaround to avoid this erratum is to replace the normal offset of Cache Sync operation (0x730) by
+ another offset targeting an unmapped PL310 register: 0x740.
+ More specifically, find below a pseudo code sequence illustrating the workaround:
+ Replace
+ // PL310 Cache Sync operation
+ LDR r1,=PL310_BASE
+ STR r2,[r1,#0x730]
+ by
+ // Workaround for PL310 Cache Sync operation
+ LDR r1,=PL310_BASE
+ STR r2,[r1,#0x740] ; write to an unmapped register
+ This write has the same effect as the Cache Sync operation: store buffer drained and waiting for all buffers empty.*/
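+  /* In this driver that workaround corresponds to the write performed in
+   * cache_l2c_310_sync() below:
+   *
+   *   l2cc->dummy_cache_sync_reg = 0; // offset 0x740 instead of 0x730
+   */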
+ /* assert( ! l2c_310_cache_errata_is_applicable_753970() ); */
+
+  /* This erratum cannot be worked around: 754670 A continuous write flow
+   * can stall a read targeting the same memory area.
+   * But this erratum does not lead to any data corruption */
+ /* assert( ! l2c_310_cache_errata_is_applicable_754670() ); */
+
+ if( l2c_310_cache_errata_is_applicable_765569() )
+ {
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+ assert( !( ( l2cc->aux_ctrl & L2CC_AUX_IPFE_MASK
+ || l2cc->aux_ctrl & L2CC_AUX_DPFE_MASK )
+ && ( ( l2cc->prefetch_ctrl & L2CC_PREFETCH_OFFSET_MASK )
+ == 23 ) ) );
+
+ /* Unhandled erratum present: 765569 Prefetcher can cross 4KB boundary if offset is programmed with value 23 */
+
+ /* Conditions
+ This problem occurs when the following conditions are met:
+ 1. One of the Prefetch Enable bits (bits [29:28] of the Auxiliary or Prefetch Control Register) is set HIGH.
+ 2. The prefetch offset bits are programmed with value 23 (5'b10111).
+ Workaround
+ A workaround for this erratum is to program the prefetch offset with any value except 23.*/
+ }
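+
+  /* A hedged sketch of the 765569 workaround, should reprogramming ever be
+   * preferred over the assert above (22 is an arbitrary legal offset):
+   *
+   *   uint32_t pf = l2cc->prefetch_ctrl;
+   *   if ( ( pf & L2CC_PREFETCH_OFFSET_MASK ) == 23 ) {
+   *     pf = ( pf & ~L2CC_PREFETCH_OFFSET_MASK ) | 22;
+   *     l2cc->prefetch_ctrl = pf;
+   *   }
+   */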
+
+  /* Unhandled erratum present: 769419 No automatic Store Buffer drain, visibility of written data requires an explicit Cache Sync operation */
+ assert( ! l2c_310_cache_errata_is_applicable_769419() );
+}
+
+static inline void cache_l2c_310_sync( const bool apply_erratum_l2c_310_753970 )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+ if( apply_erratum_l2c_310_753970 ) {
+ l2cc->dummy_cache_sync_reg = 0;
+ } else {
+ l2cc->cache_sync = 0;
+ }
+}
+
+static inline void cache_l2c_310_flush_1_line(
+ const void *d_addr,
+ const bool apply_erratum_l2c_310_753970,
+ const bool apply_erratum_l2c_310_588369
+)
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ if( apply_erratum_l2c_310_588369 ) {
+ /*
+ * Errata 588369 says that clean + inv may keep the
+     * cache line if it was clean, the recommended
+ * workaround is to clean then invalidate the cache
+ * line, with write-back and cache linefill disabled.
+ */
+ l2cc->clean_pa = (uint32_t) d_addr;
+ cache_l2c_310_sync( apply_erratum_l2c_310_753970 );
+ l2cc->inv_pa = (uint32_t) d_addr;
+ } else {
+ l2cc->clean_inv_pa = (uint32_t) d_addr;
+ }
+
+ cache_l2c_310_sync( apply_erratum_l2c_310_753970 );
+}
+
+#if 0
+static inline void cache_l2c_310_flush_range( const void *d_addr,
+                                              size_t n_bytes )
+{
+  if ( n_bytes != 0 ) {
+    uint32_t adx = (uint32_t) d_addr;
+    const uint32_t end = adx + n_bytes;
+    const bool IS_APPLICABLE_ERRATUM_L2C_310_753970 =
+      l2c_310_cache_errata_is_applicable_753970();
+    const bool IS_APPLICABLE_ERRATUM_L2C_310_588369 =
+      l2c_310_cache_errata_is_applicable_588369();
+
+    /* Back starting address up to start of a line and flush until end */
+    for ( adx &= ~CACHE_L2C_310_L1_DATA_LINE_MASK;
+          adx < end;
+          adx += CPU_DATA_CACHE_ALIGNMENT ) {
+      cache_l2c_310_flush_1_line(
+        (const void *) adx,
+        IS_APPLICABLE_ERRATUM_L2C_310_753970,
+        IS_APPLICABLE_ERRATUM_L2C_310_588369
+      );
+    }
+  }
+}
+
+#endif
+
+static inline void cache_l2c_310_flush_entire( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+ /* Only flush if level 2 cache is active */
+ if( ( l2cc->ctrl & L2CC_ENABLE_MASK ) != 0 ) {
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ l2cc->clean_inv_way = CACHE_l2C_310_L2_WAY_MASK;
+
+    l2cc->clean_inv_way = CACHE_l2C_310_L2_WAY_MASK;
+
+    while ( l2cc->clean_inv_way & CACHE_l2C_310_L2_WAY_MASK ) {
+      /* Wait for the background clean and invalidate to finish */
+    }
+
+ /* Wait for the flush to complete */
+ cache_l2c_310_sync(
+ l2c_310_cache_errata_is_applicable_753970()
+ );
+ }
+}
+
+static inline void cache_l2c_310_invalidate_1_line(
+ const void *d_addr,
+ const bool apply_erratum_l2c_310_753970
+)
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ l2cc->inv_pa = (uint32_t) d_addr;
+
+ cache_l2c_310_sync( apply_erratum_l2c_310_753970 );
+}
+
+#if 0
+static inline void cache_l2c_310_invalidate_range( const void *d_addr,
+                                                   size_t n_bytes )
+{
+  if ( n_bytes != 0 ) {
+    uint32_t adx = (uint32_t) d_addr;
+    const uint32_t end = adx + n_bytes;
+    const bool IS_APPLICABLE_ERRATUM_L2C_310_753970 =
+      l2c_310_cache_errata_is_applicable_753970();
+
+    /* Back starting address up to start of a line and invalidate until end */
+    for ( adx &= ~CACHE_L2C_310_L1_DATA_LINE_MASK;
+          adx < end;
+          adx += CPU_DATA_CACHE_ALIGNMENT ) {
+      cache_l2c_310_invalidate_1_line(
+        (const void *) adx,
+        IS_APPLICABLE_ERRATUM_L2C_310_753970
+      );
+    }
+  }
+}
+
+#endif
+
+static inline void cache_l2c_310_clean_and_invalidate_entire( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ if( ( l2cc->ctrl & L2CC_ENABLE_MASK ) != 0 ) {
+ /* Invalidate the caches */
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ l2cc->clean_inv_way = CACHE_l2C_310_L2_WAY_MASK;
+
+ while ( l2cc->clean_inv_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ /* Wait for the invalidate to complete */
+ cache_l2c_310_sync(
+ l2c_310_cache_errata_is_applicable_753970()
+ );
+ }
+}
+
+static inline void cache_l2c_310_invalidate_entire( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+ /* Invalidate the caches */
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+ l2cc->inv_way = CACHE_l2C_310_L2_WAY_MASK;
+
+ while ( l2cc->inv_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ /* Wait for the invalidate to complete */
+ cache_l2c_310_sync(
+ l2c_310_cache_errata_is_applicable_753970()
+ );
+}
+
+static inline void cache_l2c_310_store( const void *d_addr )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ l2cc->clean_pa = (uint32_t) d_addr;
+
+ cache_l2c_310_sync(
+ l2c_310_cache_errata_is_applicable_753970()
+ );
+}
+
+static inline void cache_l2c_310_freeze( void )
+{
+ /* TODO */
+}
+
+static inline void cache_l2c_310_unfreeze( void )
+{
+ /* TODO */
+}
+
+static void cache_l2c_310_unlock( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ l2cc->d_lockdown_0 = 0;
+ l2cc->i_lockdown_0 = 0;
+ l2cc->d_lockdown_1 = 0;
+ l2cc->i_lockdown_1 = 0;
+ l2cc->d_lockdown_2 = 0;
+ l2cc->i_lockdown_2 = 0;
+ l2cc->d_lockdown_3 = 0;
+ l2cc->i_lockdown_3 = 0;
+ l2cc->d_lockdown_4 = 0;
+ l2cc->i_lockdown_4 = 0;
+ l2cc->d_lockdown_5 = 0;
+ l2cc->i_lockdown_5 = 0;
+ l2cc->d_lockdown_6 = 0;
+ l2cc->i_lockdown_6 = 0;
+ l2cc->d_lockdown_7 = 0;
+ l2cc->i_lockdown_7 = 0;
+}
+
+static inline void cache_l2c_310_enable( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+ uint32_t cache_id = l2cc->cache_id & L2X0_CACHE_ID_PART_MASK;
+ int ways = 0;
+
+
+ /* Do we actually have an L2C-310 cache controller?
+ * Has BSP_ARM_L2CC_BASE been configured correctly? */
+ switch ( cache_id ) {
+ case L2X0_CACHE_ID_PART_L310:
+ {
+ const cache_l2c_310_rtl_release RTL_RELEASE =
+ l2cc->cache_id & L2X0_CACHE_ID_RTL_MASK;
+ /* If this assertion fails, you have a release of the
+ * L2C-310 cache for which the l2c_310_cache_errata_is_applicable_ ...
+ * methods are not yet implemented. This means you will get incorrect
+ * errata handling */
+ assert( RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P3
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P2
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P1
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R3_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R2_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R1_P0
+ || RTL_RELEASE == CACHE_L2C_310_RTL_RELEASE_R0_P0 );
+      if ( l2cc->aux_ctrl & L2CC_AUX_ASSOC_MASK ) {
+ ways = 16;
+ } else {
+ ways = 8;
+ }
+
+ assert( ways == CACHE_l2C_310_L2_NUM_WAYS );
+ }
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+
+ /* Invalid case */
+
+      /* Support for this type is not implemented in this driver.
+       * Either support needs to get added or a separate driver needs to get implemented */
+ assert( cache_id != L2X0_CACHE_ID_PART_L210 );
+ break;
+ default:
+
+ /* Unknown case */
+ assert( cache_id == L2X0_CACHE_ID_PART_L310 );
+ break;
+ }
+
+  if ( ways > 0 ) {
+    /* Only enable if L2CC is currently disabled */
+    if ( ( l2cc->ctrl & L2CC_ENABLE_MASK ) == 0 ) {
+ rtems_interrupt_level level;
+ uint32_t aux;
+
+ rtems_interrupt_disable( level );
+
+ /* Set up the way size */
+ aux = l2cc->aux_ctrl;
+ aux &= L2CC_AUX_REG_ZERO_MASK; /* Set way_size to 0 */
+ aux |= L2CC_AUX_REG_DEFAULT_MASK;
+
+ /* Make sure that I&D is not locked down when starting */
+ cache_l2c_310_unlock();
+
+ /* Level 2 configuration and control registers must not get written while
+ * background operations are pending */
+ while ( l2cc->inv_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ while ( l2cc->clean_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ while ( l2cc->clean_inv_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ l2cc->aux_ctrl = aux;
+
+ /* Set up the latencies */
+ l2cc->tag_ram_ctrl = L2CC_TAG_RAM_DEFAULT_LAT;
+ l2cc->data_ram_ctrl = L2CC_DATA_RAM_DEFAULT_MASK;
+
+ cache_l2c_310_invalidate_entire();
+
+ /*cache_l2c_310_flush_entire();*/
+ /*l2c_310_cache_flush_entire();*/
+
+ /* Clear the pending interrupts */
+ l2cc->int_clr = l2cc->int_raw_status;
+
+ l2c_310_cache_check_errata();
+
+ /* Enable the L2CC */
+ l2cc->ctrl |= L2CC_ENABLE_MASK;
+
+ /* Synchronize the processor */
+ _ARM_Data_synchronization_barrier();
+
+ rtems_interrupt_enable( level );
+ }
+ }
+}
+
+static inline void cache_l2c_310_disable( void )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ if ( l2cc->ctrl & L2CC_ENABLE_MASK ) {
+ /* Clean and Invalidate L2 Cache */
+ cache_l2c_310_flush_entire();
+
+ /* Level 2 configuration and control registers must not get written while
+ * background operations are pending */
+ while ( l2cc->inv_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ while ( l2cc->clean_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ while ( l2cc->clean_inv_way & CACHE_l2C_310_L2_WAY_MASK ) ;
+
+ /* Disable the L2 cache */
+ l2cc->ctrl &= ~L2CC_ENABLE_MASK;
+
+ /* Synchronize the processor */
+ /* TODO: This should not be required! */
+ _ARM_Data_synchronization_barrier();
+ }
+}
+
+static inline void _CPU_cache_enable_data( void )
+{
+ cache_l2c_310_enable();
+ arm_cache_l1_enable_data();
+}
+
+static inline void _CPU_cache_disable_data( void )
+{
+ arm_cache_l1_disable_data();
+ cache_l2c_310_disable();
+}
+
+static inline void _CPU_cache_enable_instruction( void )
+{
+ cache_l2c_310_enable();
+ arm_cache_l1_enable_instruction();
+}
+
+static inline void _CPU_cache_disable_instruction( void )
+{
+ arm_cache_l1_disable_instruction();
+ cache_l2c_310_disable();
+}
+
+static inline void _CPU_cache_flush_entire_data( void )
+{
+ arm_cache_l1_flush_entire_data();
+ cache_l2c_310_flush_entire();
+}
+
+static inline void _CPU_cache_invalidate_entire_data( void )
+{
+  /* This is broadcast within the cluster */
+  arm_cache_l1_flush_entire_data();
+
+  /* Forces the data out past level 2 */
+  cache_l2c_310_clean_and_invalidate_entire();
+
+  /* This is broadcast within the cluster */
+  arm_cache_l1_clean_and_invalidate_entire_data();
+}
+
+static inline void _CPU_cache_store_data_line( const void *d_addr )
+{
+ const void *ADX =
+ (const void *) ( (uint32_t) d_addr & ~CACHE_L2C_310_L1_DATA_LINE_MASK );
+
+
+ CACHE_ARM_ERRATA_764369_HANDLER();
+
+ arm_cache_l1_store_data( ADX );
+ cache_l2c_310_store( ADX );
+}
+
+static inline void _CPU_cache_freeze_data( void )
+{
+ arm_cache_l1_freeze_data();
+ cache_l2c_310_freeze();
+}
+
+static inline void _CPU_cache_unfreeze_data( void )
+{
+ arm_cache_l1_unfreeze_data();
+ cache_l2c_310_unfreeze();
+}
+
+static inline void _CPU_cache_invalidate_entire_instruction( void )
+{
+ cache_l2c_310_invalidate_entire();
+ arm_cache_l1_invalidate_entire_instruction();
+}
+
+static inline void _CPU_cache_freeze_instruction( void )
+{
+ arm_cache_l1_freeze_instruction();
+ cache_l2c_310_freeze();
+}
+
+static inline void _CPU_cache_unfreeze_instruction( void )
+{
+ arm_cache_l1_unfreeze_instruction();
+ cache_l2c_310_unfreeze();
+}
+
+static inline void _CPU_cache_flush_data_range( const void *d_addr,
+ size_t n_bytes )
+{
+ if ( n_bytes != 0 ) {
+ uint32_t adx = (uint32_t) d_addr
+ & ~CACHE_L2C_310_L1_DATA_LINE_MASK;
+ const uint32_t ADDR_LAST =
+ ( (uint32_t) d_addr + n_bytes - 1 ) & ~CACHE_L2C_310_L1_DATA_LINE_MASK;
+
+ for (; adx <= ADDR_LAST; adx += CPU_DATA_CACHE_ALIGNMENT ) {
+ _CPU_cache_store_data_line( (const void *) adx );
+ }
+ }
+}
+
+static inline void _CPU_cache_invalidate_data_range( const void *addr_first,
+ size_t n_bytes )
+{
+ if ( n_bytes > 0 ) {
+ const uint32_t ADDR_LAST =
+ ( (uint32_t) addr_first + n_bytes
+ - 1 ) & ~CACHE_L2C_310_L1_DATA_LINE_MASK;
+ uint32_t addr = (uint32_t) addr_first
+ & ~CACHE_L2C_310_L1_DATA_LINE_MASK;
+ const bool IS_APPLICABLE_ERRATUM_L2C_310_753970 =
+ l2c_310_cache_errata_is_applicable_753970();
+ const bool IS_APPLICABLE_ERRATUM_L2C_310_588369 =
+ l2c_310_cache_errata_is_applicable_588369();
+
+ CACHE_ARM_ERRATA_764369_HANDLER();
+
+ for (; addr <= ADDR_LAST; addr += CPU_DATA_CACHE_ALIGNMENT ) {
+ /* This is broadcast within the cluster */
+ arm_cache_l1_store_data( (const void *) addr );
+
+ /* forces the address out past level 2 */
+ cache_l2c_310_flush_1_line(
+ (const void *) addr,
+ IS_APPLICABLE_ERRATUM_L2C_310_753970,
+ IS_APPLICABLE_ERRATUM_L2C_310_588369
+ );
+
+ /* This is broadcast within the cluster */
+ arm_cache_l1_flush_1_data_line( (const void *) addr );
+ }
+ }
+
+/* _CPU_cache_flush_data_range( addr_first, n_bytes );*/
+}
+
+#if 0
+static inline void _CPU_cache_invalidate_data_range( const void *d_addr,
+ size_t n_bytes )
+{
+ volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2CC_BASE;
+
+
+ if ( n_bytes != 0 ) {
+ uint32_t adx = (uint32_t) d_addr;
+ const uint32_t end = adx + n_bytes;
+ const bool IS_APPLICABLE_ERRATUM_L2C_310_753970 =
+ l2c_310_cache_errata_is_applicable_753970();
+
+ /* Select cache Level 1 and Data cache in CSSELR */
+ arm_cp15_set_cache_size_selection( 0 );
+
+ /* Back starting address up to start of a line and invalidate until end */
+
+ for ( adx &= ~( CPU_DATA_CACHE_ALIGNMENT - 1 );
+ adx < end;
+ adx += CPU_DATA_CACHE_ALIGNMENT ) {
+ /* Invalidate L2 cache line */
+ l2cc->inv_pa = adx;
+ cache_l2c_310_sync(
+ IS_APPLICABLE_ERRATUM_L2C_310_753970
+ );
+
+ /* Invalidate L1 Data cache line */
+ arm_cp15_data_cache_invalidate_line( (const void *) adx );
+ }
+ }
+
+ /* Wait for L1 and L2 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+#endif
+
+static inline void _CPU_cache_invalidate_instruction_range( const void *i_addr,
+ size_t n_bytes )
+{
+  if ( n_bytes != 0 ) {
+    uint32_t adx = (uint32_t) i_addr
+                   & ~CACHE_L2C_310_L1_INSTRUCTION_LINE_MASK;
+    const uint32_t ADDR_LAST =
+      ( (uint32_t) i_addr + n_bytes - 1 )
+      & ~CACHE_L2C_310_L1_INSTRUCTION_LINE_MASK;
+    const bool IS_APPLICABLE_ERRATUM_L2C_310_753970 =
+      l2c_310_cache_errata_is_applicable_753970();
+
+    CACHE_ARM_ERRATA_764369_HANDLER();
+
+    /* Back starting address up to start of a line and invalidate until end */
+    for (;
+         adx <= ADDR_LAST;
+         adx += CPU_INSTRUCTION_CACHE_ALIGNMENT ) {
+ /* Invalidate L2 cache line */
+ cache_l2c_310_invalidate_1_line(
+ (const void *) adx,
+ IS_APPLICABLE_ERRATUM_L2C_310_753970
+ );
+
+ /* Invalidate L1 I-cache line */
+ arm_cache_l1_invalidate_1_instruction_line( (const void *) adx );
+ }
+
+ _ARM_Data_synchronization_barrier();
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* LIBBSP_ARM_SHARED_L2C_310_CACHE_H */
\ No newline at end of file
diff --git a/c/src/lib/libbsp/arm/shared/include/arm-cache-l1.h b/c/src/lib/libbsp/arm/shared/include/arm-cache-l1.h
new file mode 100644
index 0000000..09ddf13
--- /dev/null
+++ b/c/src/lib/libbsp/arm/shared/include/arm-cache-l1.h
@@ -0,0 +1,528 @@
+/**
+ * @file arm-cache-l1.h
+ *
+ * @ingroup arm_shared
+ *
+ * @brief Level 1 Cache definitions and functions.
+ *
+ * This file implements handling for the ARM Level 1 cache controller
+ */
+
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems at embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef LIBBSP_ARM_SHARED_CACHE_L1_H
+#define LIBBSP_ARM_SHARED_CACHE_L1_H
+
+#include <assert.h>
+#include <bsp.h>
+#include <libcpu/arm-cp15.h>
+//#include <bsp/arm-release-id.h>
+//#include <bsp/arm-errata.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* These defines also ensure that the rtems_cache_* functions have bodies */
+#define ARM_CACHE_L1_CPU_DATA_ALIGNMENT 32
+#define ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT 32
+#define ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
+
+#define ARM_CACHE_L1_CSS_ID_DATA 0
+#define ARM_CACHE_L1_CSS_ID_INSTRUCTION 1
+#define ARM_CACHE_L1_DATA_LINE_MASK ( ARM_CACHE_L1_CPU_DATA_ALIGNMENT - 1 )
+#define ARM_CACHE_L1_INSTRUCTION_LINE_MASK \
+ ( ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT \
+ - 1 )
+
+
+
+/* Errata Handlers */
+#if ( defined( RTEMS_SMP ) )
+ #define ARM_CACHE_L1_ARM_ERRATA_764369_HANDLER() \
+ if( arm_errata_is_applicable_processor_errata_764369() ) { \
+ _ARM_Data_synchronization_barrier(); \
+ }
+#else /* #if ( defined( RTEMS_SMP ) ) */
+ #define ARM_CACHE_L1_ARM_ERRATA_764369_HANDLER()
+#endif /* #if ( defined( RTEMS_SMP ) ) */
+
+/* The common workaround for this erratum would be to add a data
+ * synchronization barrier at the beginning of the abort handler. But for
+ * RTEMS a call of the abort handler means a fatal condition anyway, so
+ * there is no need to handle this erratum. */
+#define ARM_CACHE_L1_ARM_ERRATA_775420_HANDLER() \
+  if( arm_errata_is_applicable_processor_errata_775420() ) { \
+  } \
+
+
+static void arm_cache_l1_check_errata( void )
+{
+  #ifndef ARM_CACHE_L1_ARM_ERRATA_764369_HANDLER
+  /* Unhandled erratum present: 764369 Affecting Cortex-A9 MPCore with two
+   * or more processors: under certain timing circumstances, a data cache
+   * line maintenance operation by MVA targeting an Inner Shareable memory
+   * region may fail to proceed up to either the Point of Coherency or the
+   * Point of Unification of the system. The workaround adds a DSB
+   * instruction before the relevant cache maintenance functions and sets a
+   * specific bit in the diagnostic control register of the SCU. */
+  assert( ! arm_errata_is_applicable_processor_errata_764369() );
+  #endif
+
+  #ifndef ARM_CACHE_L1_ARM_ERRATA_775420_HANDLER
+  /* Unhandled erratum present: 775420 A data cache maintenance operation
+   * which aborts, followed by an ISB, without any DSB in-between, might
+   * lead to deadlock. */
+  assert( ! arm_errata_is_applicable_processor_errata_775420() );
+  #endif
+}
+
+
+
+static void arm_cache_l1_select( const uint32_t selection )
+{
+  /* Select current cache level in CSSELR */
+  arm_cp15_set_cache_size_selection( selection );
+
+  /* ISB to sync the new CSSELR and CCSIDR */
+  _ARM_Instruction_synchronization_barrier();
+}
+
+/*
+ * @param l1LineSize Log2 of the number of bytes per cache line
+ * @param l1Associativity Associativity of the cache. The associativity does not have to be a power of 2.
+ * @param l1NumSets Number of sets in the cache
+ */
+
+static inline void arm_cache_l1_properties(
+ uint32_t *l1LineSize,
+ uint32_t *l1Associativity,
+ uint32_t *l1NumSets )
+{
+ uint32_t id;
+
+
+ _ARM_Instruction_synchronization_barrier();
+ id = arm_cp15_get_cache_size_id();
+
+  *l1LineSize = ( id & 0x0007U ) + 2 + 2;           /* Log2 of line size in bytes: field is log2( words ) - 2, plus 2 for bytes per word */
+  *l1Associativity = ( ( id >> 3 ) & 0x03ffU ) + 1; /* Number of Ways */
+  *l1NumSets = ( ( id >> 13 ) & 0x7fffU ) + 1;      /* Number of Sets */
+}
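+
+/* Worked example (assumed geometry, not read from hardware): for a 32 KiB,
+ * 4-way L1 data cache with 32-byte lines the CCSIDR fields decode as
+ *
+ *   line size field 1 -> l1LineSize = 1 + 2 + 2 = 5 (log2 of 32 bytes)
+ *   ways field 3      -> l1Associativity = 4
+ *   sets field 255    -> l1NumSets = 256 ( 32768 / ( 32 * 4 ) )
+ */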
+
+/*
+ * @param log_2_line_bytes The number of bytes per cache line expressed in log2
+ * @param associativity The associativity of the cache being operated on
+ * @param cache_level_idx The level of the cache being operated on minus 1, e.g. 0 for cache level 1
+ * @param set Number of the set to operate on
+ * @param way Number of the way to operate on
+ */
+
+static inline uint32_t arm_cache_l1_get_set_way_param(
+ const uint32_t log_2_line_bytes,
+ const uint32_t associativity,
+ const uint32_t cache_level_idx,
+ const uint32_t set,
+ const uint32_t way )
+{
+  uint32_t way_shift = __builtin_clz( associativity - 1 );
+
+
+  return ( way << way_shift )
+         | ( set << log_2_line_bytes )
+         | ( cache_level_idx << 1 );
+}
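+
+/* Worked example with the geometry above: for associativity 4, way_shift is
+ * __builtin_clz( 3 ) = 30, so the way number lands in bits [31:30], the set
+ * number in bits [12:5] (log_2_line_bytes = 5) and the level index in bits
+ * [3:1], matching the set/way format of the CP15 clean/invalidate by
+ * set/way operations. */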
+
+static inline void arm_cache_l1_flush_1_data_line( const void *d_addr )
+{
+ /* Flush the Data cache */
+ arm_cp15_data_cache_clean_and_invalidate_line( d_addr );
+
+ /* Wait for L1 flush to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+#if 0
+static inline void arm_cache_l1_cache_flush_data_range(
+ const void *d_addr,
+ size_t n_bytes
+)
+{
+ const void *final_address;
+
+
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be pushed. Increment d_addr and push
+ * the resulting line until final_address is passed.
+ */
+
+ if ( n_bytes == 0 )
+ /* Do nothing if number of bytes to flush is zero */
+ return;
+
+ final_address = (void *) ( (size_t) d_addr + n_bytes - 1 );
+ d_addr =
+ (void *) ( (size_t) d_addr & ~( ARM_CACHE_L1_CPU_DATA_ALIGNMENT - 1 ) );
+
+ while ( d_addr <= final_address ) {
+ arm_cache_l1_flush_1_data_line( d_addr );
+ d_addr = (void *) ( (size_t) d_addr + ARM_CACHE_L1_CPU_DATA_ALIGNMENT );
+ }
+}
+
+#endif
+
+static inline void arm_cache_l1_flush_entire_data( void )
+{
+ uint32_t l1LineSize, l1Associativity, l1NumSets;
+ uint32_t s, w;
+ uint32_t set_way_param;
+ rtems_interrupt_level level;
+
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+  /* Make the CSSELR/CCSIDR read sequence atomic */
+ rtems_interrupt_disable( level );
+ arm_cache_l1_select( ARM_CACHE_L1_CSS_ID_DATA );
+
+ /* Get the L1 cache properties */
+ arm_cache_l1_properties( &l1LineSize, &l1Associativity,
+ &l1NumSets );
+ rtems_interrupt_enable( level );
+
+ for ( w = 0; w < l1Associativity; ++w ) {
+ for ( s = 0; s < l1NumSets; ++s ) {
+ set_way_param = arm_cache_l1_get_set_way_param(
+ l1LineSize,
+ l1Associativity,
+ 0,
+ s,
+ w
+ );
+ arm_cp15_data_cache_clean_line_by_set_and_way( set_way_param );
+ }
+ }
+
+ /* Wait for L1 flush to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+#if 0
+static inline void arm_cache_l1_invalidate_data_range(
+ const void *d_addr,
+ size_t n_bytes )
+{
+ const void *final_address;
+
+
+ /*
+ * Set d_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be invalidated. Increment d_addr and
+ * invalidate the resulting line until final_address is passed.
+ */
+
+ if ( n_bytes == 0 )
+ /* Do nothing if number of bytes to invalidate is zero */
+ return;
+
+ /* Select cache Level 1 and Data cache in CSSELR */
+ arm_cp15_set_cache_size_selection( 0 );
+
+  /* ISB to sync the new CSSELR and CCSIDR */
+ _ARM_Instruction_synchronization_barrier();
+
+ final_address = (void *) ( (size_t) d_addr + n_bytes - 1 );
+ d_addr =
+ (void *) ( (size_t) d_addr & ~( ARM_CACHE_L1_CPU_DATA_ALIGNMENT - 1 ) );
+
+ while ( final_address >= d_addr ) {
+ arm_cp15_data_cache_invalidate_line( d_addr );
+ d_addr = (void *) ( (size_t) d_addr + ARM_CACHE_L1_CPU_DATA_ALIGNMENT );
+ }
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+#endif
+
+static inline void arm_cache_l1_invalidate_entire_data( void )
+{
+ uint32_t l1LineSize, l1Associativity, l1NumSets;
+ uint32_t s, w;
+ uint32_t set_way_param;
+ rtems_interrupt_level level;
+
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+  /* Make the CSSELR/CCSIDR read sequence atomic */
+ rtems_interrupt_disable( level );
+ arm_cache_l1_select( ARM_CACHE_L1_CSS_ID_DATA );
+
+ /* Get the L1 cache properties */
+ arm_cache_l1_properties( &l1LineSize, &l1Associativity,
+ &l1NumSets );
+ rtems_interrupt_enable( level );
+
+ for ( w = 0; w < l1Associativity; ++w ) {
+ for ( s = 0; s < l1NumSets; ++s ) {
+ set_way_param = arm_cache_l1_get_set_way_param(
+ l1LineSize,
+ l1Associativity,
+ 0,
+ s,
+ w
+ );
+ arm_cp15_data_cache_invalidate_line_by_set_and_way( set_way_param );
+ }
+ }
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_clean_and_invalidate_entire_data( void )
+{
+ uint32_t l1LineSize, l1Associativity, l1NumSets;
+ uint32_t s, w;
+ uint32_t set_way_param;
+ rtems_interrupt_level level;
+
+
+ /* ensure ordering with previous memory accesses */
+ _ARM_Data_memory_barrier();
+
+  /* Make the CSSELR/CCSIDR read sequence atomic */
+ rtems_interrupt_disable( level );
+ arm_cache_l1_select( ARM_CACHE_L1_CSS_ID_DATA );
+
+ /* Get the L1 cache properties */
+ arm_cache_l1_properties( &l1LineSize, &l1Associativity,
+ &l1NumSets );
+ rtems_interrupt_enable( level );
+
+ for ( w = 0; w < l1Associativity; ++w ) {
+ for ( s = 0; s < l1NumSets; ++s ) {
+ set_way_param = arm_cache_l1_get_set_way_param(
+ l1LineSize,
+ l1Associativity,
+ 0,
+ s,
+ w
+ );
+ arm_cp15_data_cache_clean_and_invalidate_line_by_set_and_way(
+ set_way_param );
+ }
+ }
+
+  /* Wait for L1 clean and invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_store_data( const void *d_addr )
+{
+ /* Store the Data cache line */
+ arm_cp15_data_cache_clean_line( d_addr );
+
+ /* Wait for L1 store to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+static inline void arm_cache_l1_freeze_data( void )
+{
+ /* TODO */
+}
+
+static inline void arm_cache_l1_unfreeze_data( void )
+{
+ /* TODO */
+}
+
+static inline void arm_cache_l1_invalidate_1_instruction_line(
+ const void *i_addr )
+{
+ /* Invalidate the Instruction cache line */
+ arm_cp15_instruction_cache_invalidate_line( i_addr );
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+#if 0
+static inline void arm_cache_l1_invalidate_instruction_range(
+ const void *i_addr,
+ size_t n_bytes )
+{
+ const void *final_address;
+
+
+ /*
+ * Set i_addr to the beginning of the cache line; final_address indicates
+ * the last address_t which needs to be invalidated. Increment i_addr and
+ * invalidate the resulting line until final_address is passed.
+ */
+
+ if ( n_bytes == 0 )
+ /* Do nothing if number of bytes to invalidate is zero */
+ return;
+
+ /* Select cache Level 1 and Instruction cache in CSSELR */
+ arm_cp15_set_cache_size_selection( 1 );
+
+  /* ISB to sync the new CSSELR and CCSIDR */
+  _ARM_Instruction_synchronization_barrier();
+
+  final_address = (void *) ( (size_t) i_addr + n_bytes - 1 );
+  i_addr = (void *)
+    ( (size_t) i_addr & ~( ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT - 1 ) );
+
+  while ( final_address >= i_addr ) {
+    arm_cache_l1_invalidate_1_instruction_line( i_addr );
+    i_addr = (void *)
+      ( (size_t) i_addr + ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT );
+  }
+
+ /* Wait for L1 invalidate to complete */
+ _ARM_Data_synchronization_barrier();
+}
+
+#endif
+
+static inline void arm_cache_l1_invalidate_entire_instruction( void )
+{
+ uint32_t ctrl = arm_cp15_get_control();
+
+
+ #ifdef RTEMS_SMP
+
+ /* invalidate I-cache inner shareable */
+ arm_cp15_instruction_cache_inner_shareable_invalidate_all();
+
+ /* I+BTB cache invalidate */
+ arm_cp15_instruction_cache_invalidate();
+ #else /* RTEMS_SMP */
+ /* I+BTB cache invalidate */
+ arm_cp15_instruction_cache_invalidate();
+ #endif /* RTEMS_SMP */
+
+ if ( ( ctrl & ARM_CP15_CTRL_Z ) == 0 ) {
+ arm_cp15_branch_predictor_inner_shareable_invalidate_all();
+ arm_cp15_branch_predictor_invalidate_all();
+ }
+}
+
+static inline void arm_cache_l1_freeze_instruction( void )
+{
+ /* TODO */
+}
+
+static inline void arm_cache_l1_unfreeze_instruction( void )
+{
+ /* TODO */
+}
+
+static inline void arm_cache_l1_enable_data( void )
+{
+ rtems_interrupt_level level;
+ uint32_t ctrl;
+
+
+ arm_cache_l1_select( ARM_CACHE_L1_CSS_ID_DATA );
+
+ assert( ARM_CACHE_L1_CPU_DATA_ALIGNMENT == arm_cp15_get_data_cache_line_size() );
+
+ rtems_interrupt_disable( level );
+ ctrl = arm_cp15_get_control();
+ rtems_interrupt_enable( level );
+
+ /* Only enable the cache if it is disabled */
+ if ( !( ctrl & ARM_CP15_CTRL_C ) ) {
+ /* Clean and invalidate the Data cache */
+ arm_cache_l1_invalidate_entire_data();
+
+ /* Enable the Data cache */
+ ctrl |= ARM_CP15_CTRL_C;
+
+ rtems_interrupt_disable( level );
+ arm_cp15_set_control( ctrl );
+ rtems_interrupt_enable( level );
+ }
+
+ arm_cache_l1_check_errata();
+}
+
+static inline void arm_cache_l1_disable_data( void )
+{
+ rtems_interrupt_level level;
+
+
+ /* Clean and invalidate the Data cache */
+ arm_cache_l1_flush_entire_data();
+
+ rtems_interrupt_disable( level );
+
+ /* Disable the Data cache */
+ arm_cp15_set_control( arm_cp15_get_control() & ~ARM_CP15_CTRL_C );
+
+ rtems_interrupt_enable( level );
+}
+
+static inline void arm_cache_l1_disable_instruction( void )
+{
+ rtems_interrupt_level level;
+
+
+ rtems_interrupt_disable( level );
+
+ /* Synchronize the processor */
+ _ARM_Data_synchronization_barrier();
+
+ /* Invalidate the Instruction cache */
+ arm_cache_l1_invalidate_entire_instruction();
+
+ /* Disable the Instruction cache */
+ arm_cp15_set_control( arm_cp15_get_control() & ~ARM_CP15_CTRL_I );
+
+ rtems_interrupt_enable( level );
+}
+
+static inline void arm_cache_l1_enable_instruction( void )
+{
+ rtems_interrupt_level level;
+ uint32_t ctrl;
+
+
+ arm_cache_l1_select( ARM_CACHE_L1_CSS_ID_INSTRUCTION );
+
+ assert( ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT
+ == arm_cp15_get_data_cache_line_size() );
+
+ rtems_interrupt_disable( level );
+
+ /* Enable Instruction cache only if it is disabled */
+ ctrl = arm_cp15_get_control();
+
+ if ( !( ctrl & ARM_CP15_CTRL_I ) ) {
+ /* Invalidate the Instruction cache */
+ arm_cache_l1_invalidate_entire_instruction();
+
+ /* Enable the Instruction cache */
+ ctrl |= ARM_CP15_CTRL_I;
+
+ arm_cp15_set_control( ctrl );
+ }
+
+ rtems_interrupt_enable( level );
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* LIBBSP_ARM_SHARED_CACHE_L1_H */
\ No newline at end of file
--
1.7.10.4