[rtems commit] bsps: Import Xilinx support code

Joel Sherrill joel at rtems.org
Fri Dec 23 18:52:44 UTC 2022


Module:    rtems
Branch:    master
Commit:    50539ba881f00cc9328cf7677f0c1fcd73259031
Changeset: http://git.rtems.org/rtems/commit/?id=50539ba881f00cc9328cf7677f0c1fcd73259031

Author:    Kinsey Moore <kinsey.moore at oarcorp.com>
Date:      Fri Dec  2 12:19:19 2022 -0600

bsps: Import Xilinx support code

This support code is necessary for many Xilinx-provided bare-metal device
drivers supported on ARM, AArch64, and MicroBlaze platforms. Support for
all of these architectures is kept under bsps/include because multiple
architecture variants are supported, which requires complex logic in the
build system. The imported files are unmodified and should be able to
remain so. Import information is kept in bsps/shared/xil/VERSION.

---

 bsps/include/xil/arm/ARMv8/32bit/xil_cache.h      |  60 ++
 bsps/include/xil/arm/ARMv8/32bit/xpseudo_asm.h    |  53 ++
 bsps/include/xil/arm/ARMv8/32bit/xreg_cortexa53.h | 394 ++++++++++++
 bsps/include/xil/arm/ARMv8/64bit/xil_cache.h      |  75 +++
 bsps/include/xil/arm/ARMv8/64bit/xpseudo_asm.h    |  56 ++
 bsps/include/xil/arm/ARMv8/64bit/xreg_cortexa53.h | 163 +++++
 bsps/include/xil/arm/cortexa9/xil_cache.h         | 105 ++++
 bsps/include/xil/arm/cortexa9/xpseudo_asm.h       |  60 ++
 bsps/include/xil/arm/cortexa9/xreg_cortexa9.h     | 573 +++++++++++++++++
 bsps/include/xil/arm/cortexr5/xil_cache.h         |  95 +++
 bsps/include/xil/arm/cortexr5/xpseudo_asm.h       |  60 ++
 bsps/include/xil/arm/cortexr5/xreg_cortexr5.h     | 429 +++++++++++++
 bsps/include/xil/bspconfig.h                      |   1 +
 bsps/include/xil/microblaze/xil_cache.h           | 392 ++++++++++++
 bsps/include/xil/sleep.h                          |  99 +++
 bsps/include/xil/xbasic_types.h                   | 113 ++++
 bsps/include/xil/xil_assert.h                     | 176 ++++++
 bsps/include/xil/xil_exception.h                  |   1 +
 bsps/include/xil/xil_io.h                         | 412 ++++++++++++
 bsps/include/xil/xil_mem.h                        |  47 ++
 bsps/include/xil/xil_printf.h                     |  44 ++
 bsps/include/xil/xil_smc.h                        |   1 +
 bsps/include/xil/xil_types.h                      | 203 ++++++
 bsps/include/xil/xparameters.h                    |  44 ++
 bsps/include/xil/xpseudo_asm_gcc.h                | 240 +++++++
 bsps/include/xil/xstatus.h                        | 522 +++++++++++++++
 bsps/shared/xil/VERSION                           |  20 +
 bsps/shared/xil/xil_assert.c                      | 126 ++++
 bsps/shared/xil/xil_cache.c                       | 732 ++++++++++++++++++++++
 bsps/shared/xil/xil_mem.c                         |  70 +++
 spec/build/bsps/objxilinxsupport.yml              |  45 ++
 spec/build/bsps/objxilinxsupporta9.yml            |  20 +
 spec/build/bsps/objxilinxsupportilp32.yml         |  20 +
 spec/build/bsps/objxilinxsupportlp64.yml          |  22 +
 spec/build/bsps/objxilinxsupportmb.yml            |  17 +
 spec/build/bsps/objxilinxsupportr5.yml            |  18 +
 spec/build/bsps/optxilsupportpath.yml             |  34 +
 37 files changed, 5542 insertions(+)

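As a rough illustration of how drivers built on the imported headers typically
use the cache API declared below, the sketch that follows flushes and
invalidates a DMA buffer around a transfer. It is not part of this commit; the
buffer name, size, and helper function are hypothetical.

    #include "xil_cache.h"  /* variant-specific header from bsps/include/xil */

    /* Hypothetical DMA buffer; name and size are illustrative only. */
    static u8 dma_buffer[1024];

    void dma_cache_example(void)
    {
      /*
       * Before handing the buffer to the device, flush any dirty cache
       * lines so the DMA engine sees the data written by the CPU.
       */
      Xil_DCacheFlushRange((INTPTR)dma_buffer, sizeof(dma_buffer));

      /* ... start the DMA transfer and wait for it to complete ... */

      /*
       * After the device has written the buffer, invalidate the range so
       * the CPU does not read stale cache lines.
       */
      Xil_DCacheInvalidateRange((INTPTR)dma_buffer, sizeof(dma_buffer));
    }

Real drivers also keep such buffers cache-line aligned so that the flush and
invalidate operations do not touch neighbouring data.
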
diff --git a/bsps/include/xil/arm/ARMv8/32bit/xil_cache.h b/bsps/include/xil/arm/ARMv8/32bit/xil_cache.h
new file mode 100644
index 0000000000..0fe994b093
--- /dev/null
+++ b/bsps/include/xil/arm/ARMv8/32bit/xil_cache.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+* Copyright (c) 2015 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.h
+*
+* @addtogroup a53_32_cache_apis Cortex A53 32bit Processor Cache Functions
+*
+* Cache functions provide access to cache related operations such as flush
+* and invalidate for instruction and data caches. It gives option to perform
+* the cache operations on a single cacheline, a range of memory and an entire
+* cache.
+*
+* @{
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.2	pkp  28/05/15 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XIL_CACHE_H
+#define XIL_CACHE_H
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void Xil_DCacheEnable(void);
+void Xil_DCacheDisable(void);
+void Xil_DCacheInvalidate(void);
+void Xil_DCacheInvalidateRange(INTPTR adr, u32 len);
+void Xil_DCacheFlush(void);
+void Xil_DCacheFlushRange(INTPTR adr, u32 len);
+void Xil_DCacheInvalidateLine(u32 adr);
+void Xil_DCacheFlushLine(u32 adr);
+
+void Xil_ICacheInvalidateLine(u32 adr);
+void Xil_ICacheEnable(void);
+void Xil_ICacheDisable(void);
+void Xil_ICacheInvalidate(void);
+void Xil_ICacheInvalidateRange(INTPTR adr, u32 len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/**
+* @} End of "addtogroup a53_64_cache_apis".
+*/
diff --git a/bsps/include/xil/arm/ARMv8/32bit/xpseudo_asm.h b/bsps/include/xil/arm/ARMv8/32bit/xpseudo_asm.h
new file mode 100644
index 0000000000..41c9c9c944
--- /dev/null
+++ b/bsps/include/xil/arm/ARMv8/32bit/xpseudo_asm.h
@@ -0,0 +1,53 @@
+/******************************************************************************
+* Copyright (c) 2015 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xpseudo_asm.h
+*
+* @addtogroup a53_32_specific Cortex A53 32bit Processor Specific Include Files
+*
+* The xpseudo_asm.h includes xreg_cortexa53.h and xpseudo_asm_gcc.h.
+* The xreg_cortexa53.h file contains definitions for inline assembler code.
+* It provides inline definitions for Cortex A53 GPRs, SPRs, co-processor
+* registers and floating point registers.
+*
+* The xpseudo_asm_gcc.h contains the definitions for the most often used inline
+* assembler instructions, available as macros. These can be very useful for
+* tasks such as setting or getting special purpose registers, synchronization,
+* or cache manipulation etc. These inline assembler instructions can be used
+* from drivers and user applications written in C.
+*
+* @{
+*
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.2	pkp  	28/05/15 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XPSEUDO_ASM_H
+#define XPSEUDO_ASM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xreg_cortexa53.h"
+#include "xpseudo_asm_gcc.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* XPSEUDO_ASM_H */
+/**
+* @} End of "addtogroup a53_32_specific".
+*/
diff --git a/bsps/include/xil/arm/ARMv8/32bit/xreg_cortexa53.h b/bsps/include/xil/arm/ARMv8/32bit/xreg_cortexa53.h
new file mode 100644
index 0000000000..e811686fe5
--- /dev/null
+++ b/bsps/include/xil/arm/ARMv8/32bit/xreg_cortexa53.h
@@ -0,0 +1,394 @@
+/******************************************************************************
+* Copyright (c) 2015 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xreg_cortexa53.h
+*
+* This header file contains definitions for using inline assembler code. It is
+* written specifically for the GNU.
+*
+* All of the ARM Cortex A53 GPRs, SPRs, and Debug Registers are defined along
+* with the positions of the bits within the registers.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.2	pkp  	 28/05/15 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XREG_CORTEXA53_H
+#define XREG_CORTEXA53_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ *@cond nocomments
+ */
+
+/* GPRs */
+#define XREG_GPR0				r0
+#define XREG_GPR1				r1
+#define XREG_GPR2				r2
+#define XREG_GPR3				r3
+#define XREG_GPR4				r4
+#define XREG_GPR5				r5
+#define XREG_GPR6				r6
+#define XREG_GPR7				r7
+#define XREG_GPR8				r8
+#define XREG_GPR9				r9
+#define XREG_GPR10				r10
+#define XREG_GPR11				r11
+#define XREG_GPR12				r12
+#define XREG_GPR13				r13
+#define XREG_GPR14				r14
+#define XREG_GPR15				r15
+#define XREG_CPSR				cpsr
+
+/* Coprocessor number defines */
+#define XREG_CP0				0
+#define XREG_CP1				1
+#define XREG_CP2				2
+#define XREG_CP3				3
+#define XREG_CP4				4
+#define XREG_CP5				5
+#define XREG_CP6				6
+#define XREG_CP7				7
+#define XREG_CP8				8
+#define XREG_CP9				9
+#define XREG_CP10				10
+#define XREG_CP11				11
+#define XREG_CP12				12
+#define XREG_CP13				13
+#define XREG_CP14				14
+#define XREG_CP15				15
+
+/* Coprocessor control register defines */
+#define XREG_CR0				cr0
+#define XREG_CR1				cr1
+#define XREG_CR2				cr2
+#define XREG_CR3				cr3
+#define XREG_CR4				cr4
+#define XREG_CR5				cr5
+#define XREG_CR6				cr6
+#define XREG_CR7				cr7
+#define XREG_CR8				cr8
+#define XREG_CR9				cr9
+#define XREG_CR10				cr10
+#define XREG_CR11				cr11
+#define XREG_CR12				cr12
+#define XREG_CR13				cr13
+#define XREG_CR14				cr14
+#define XREG_CR15				cr15
+
+/* Current Processor Status Register (CPSR) Bits */
+#define XREG_CPSR_THUMB_MODE			0x20
+#define XREG_CPSR_MODE_BITS			0x1F
+#define XREG_CPSR_SYSTEM_MODE			0x1F
+#define XREG_CPSR_UNDEFINED_MODE		0x1B
+#define XREG_CPSR_DATA_ABORT_MODE		0x17
+#define XREG_CPSR_SVC_MODE			0x13
+#define XREG_CPSR_IRQ_MODE			0x12
+#define XREG_CPSR_FIQ_MODE			0x11
+#define XREG_CPSR_USER_MODE			0x10
+
+#define XREG_CPSR_IRQ_ENABLE			0x80
+#define XREG_CPSR_FIQ_ENABLE			0x40
+
+#define XREG_CPSR_N_BIT				0x80000000
+#define XREG_CPSR_Z_BIT				0x40000000
+#define XREG_CPSR_C_BIT				0x20000000
+#define XREG_CPSR_V_BIT				0x10000000
+
+
+/* CP15 defines */
+
+/* C0 Register defines */
+#define XREG_CP15_MAIN_ID			"p15, 0, %0,  c0,  c0, 0"
+#define XREG_CP15_CACHE_TYPE			"p15, 0, %0,  c0,  c0, 1"
+#define XREG_CP15_TCM_TYPE			"p15, 0, %0,  c0,  c0, 2"
+#define XREG_CP15_TLB_TYPE			"p15, 0, %0,  c0,  c0, 3"
+#define XREG_CP15_MULTI_PROC_AFFINITY		"p15, 0, %0,  c0,  c0, 5"
+
+#define XREG_CP15_PROC_FEATURE_0		"p15, 0, %0,  c0,  c1, 0"
+#define XREG_CP15_PROC_FEATURE_1		"p15, 0, %0,  c0,  c1, 1"
+#define XREG_CP15_DEBUG_FEATURE_0		"p15, 0, %0,  c0,  c1, 2"
+#define XREG_CP15_MEMORY_FEATURE_0		"p15, 0, %0,  c0,  c1, 4"
+#define XREG_CP15_MEMORY_FEATURE_1		"p15, 0, %0,  c0,  c1, 5"
+#define XREG_CP15_MEMORY_FEATURE_2		"p15, 0, %0,  c0,  c1, 6"
+#define XREG_CP15_MEMORY_FEATURE_3		"p15, 0, %0,  c0,  c1, 7"
+
+#define XREG_CP15_INST_FEATURE_0		"p15, 0, %0,  c0,  c2, 0"
+#define XREG_CP15_INST_FEATURE_1		"p15, 0, %0,  c0,  c2, 1"
+#define XREG_CP15_INST_FEATURE_2		"p15, 0, %0,  c0,  c2, 2"
+#define XREG_CP15_INST_FEATURE_3		"p15, 0, %0,  c0,  c2, 3"
+#define XREG_CP15_INST_FEATURE_4		"p15, 0, %0,  c0,  c2, 4"
+
+#define XREG_CP15_CACHE_SIZE_ID			"p15, 1, %0,  c0,  c0, 0"
+#define XREG_CP15_CACHE_LEVEL_ID		"p15, 1, %0,  c0,  c0, 1"
+#define XREG_CP15_AUXILARY_ID			"p15, 1, %0,  c0,  c0, 7"
+
+#define XREG_CP15_CACHE_SIZE_SEL		"p15, 2, %0,  c0,  c0, 0"
+
+/* C1 Register Defines */
+#define XREG_CP15_SYS_CONTROL			"p15, 0, %0,  c1,  c0, 0"
+#define XREG_CP15_AUX_CONTROL			"p15, 0, %0,  c1,  c0, 1"
+#define XREG_CP15_CP_ACCESS_CONTROL		"p15, 0, %0,  c1,  c0, 2"
+
+#define XREG_CP15_SECURE_CONFIG			"p15, 0, %0,  c1,  c1, 0"
+#define XREG_CP15_SECURE_DEBUG_ENABLE		"p15, 0, %0,  c1,  c1, 1"
+#define XREG_CP15_NS_ACCESS_CONTROL		"p15, 0, %0,  c1,  c1, 2"
+#define XREG_CP15_VIRTUAL_CONTROL		"p15, 0, %0,  c1,  c1, 3"
+
+
+/* XREG_CP15_CONTROL bit defines */
+#define XREG_CP15_CONTROL_TE_BIT		0x40000000U
+#define XREG_CP15_CONTROL_AFE_BIT		0x20000000U
+#define XREG_CP15_CONTROL_TRE_BIT		0x10000000U
+#define XREG_CP15_CONTROL_NMFI_BIT		0x08000000U
+#define XREG_CP15_CONTROL_EE_BIT		0x02000000U
+#define XREG_CP15_CONTROL_HA_BIT		0x00020000U
+#define XREG_CP15_CONTROL_RR_BIT		0x00004000U
+#define XREG_CP15_CONTROL_V_BIT			0x00002000U
+#define XREG_CP15_CONTROL_I_BIT			0x00001000U
+#define XREG_CP15_CONTROL_Z_BIT			0x00000800U
+#define XREG_CP15_CONTROL_SW_BIT		0x00000400U
+#define XREG_CP15_CONTROL_B_BIT			0x00000080U
+#define XREG_CP15_CONTROL_C_BIT			0x00000004U
+#define XREG_CP15_CONTROL_A_BIT			0x00000002U
+#define XREG_CP15_CONTROL_M_BIT			0x00000001U
+
+
+/* C2 Register Defines */
+#define XREG_CP15_TTBR0				"p15, 0, %0,  c2,  c0, 0"
+#define XREG_CP15_TTBR1				"p15, 0, %0,  c2,  c0, 1"
+#define XREG_CP15_TTB_CONTROL			"p15, 0, %0,  c2,  c0, 2"
+
+/* C3 Register Defines */
+#define XREG_CP15_DOMAIN_ACCESS_CTRL		"p15, 0, %0,  c3,  c0, 0"
+
+/* C4 Register Defines */
+/* Not Used */
+
+/* C5 Register Defines */
+#define XREG_CP15_DATA_FAULT_STATUS		"p15, 0, %0,  c5,  c0, 0"
+#define XREG_CP15_INST_FAULT_STATUS		"p15, 0, %0,  c5,  c0, 1"
+
+#define XREG_CP15_AUX_DATA_FAULT_STATUS		"p15, 0, %0,  c5,  c1, 0"
+#define XREG_CP15_AUX_INST_FAULT_STATUS		"p15, 0, %0,  c5,  c1, 1"
+
+/* C6 Register Defines */
+#define XREG_CP15_DATA_FAULT_ADDRESS		"p15, 0, %0,  c6,  c0, 0"
+#define XREG_CP15_INST_FAULT_ADDRESS		"p15, 0, %0,  c6,  c0, 2"
+
+/* C7 Register Defines */
+#define XREG_CP15_NOP				"p15, 0, %0,  c7,  c0, 4"
+
+#define XREG_CP15_INVAL_IC_POU_IS		"p15, 0, %0,  c7,  c1, 0"
+#define XREG_CP15_INVAL_BRANCH_ARRAY_IS		"p15, 0, %0,  c7,  c1, 6"
+
+#define XREG_CP15_PHYS_ADDR			"p15, 0, %0,  c7,  c4, 0"
+
+#define XREG_CP15_INVAL_IC_POU			"p15, 0, %0,  c7,  c5, 0"
+#define XREG_CP15_INVAL_IC_LINE_MVA_POU		"p15, 0, %0,  c7,  c5, 1"
+
+/* The CP15 register access below has been deprecated in favor of the new
+ * isb instruction in Cortex A53.
+ */
+#define XREG_CP15_INST_SYNC_BARRIER		"p15, 0, %0,  c7,  c5, 4"
+#define XREG_CP15_INVAL_BRANCH_ARRAY		"p15, 0, %0,  c7,  c5, 6"
+
+#define XREG_CP15_INVAL_DC_LINE_MVA_POC		"p15, 0, %0,  c7,  c6, 1"
+#define XREG_CP15_INVAL_DC_LINE_SW		"p15, 0, %0,  c7,  c6, 2"
+
+#define XREG_CP15_VA_TO_PA_CURRENT_0		"p15, 0, %0,  c7,  c8, 0"
+#define XREG_CP15_VA_TO_PA_CURRENT_1		"p15, 0, %0,  c7,  c8, 1"
+#define XREG_CP15_VA_TO_PA_CURRENT_2		"p15, 0, %0,  c7,  c8, 2"
+#define XREG_CP15_VA_TO_PA_CURRENT_3		"p15, 0, %0,  c7,  c8, 3"
+
+#define XREG_CP15_VA_TO_PA_OTHER_0		"p15, 0, %0,  c7,  c8, 4"
+#define XREG_CP15_VA_TO_PA_OTHER_1		"p15, 0, %0,  c7,  c8, 5"
+#define XREG_CP15_VA_TO_PA_OTHER_2		"p15, 0, %0,  c7,  c8, 6"
+#define XREG_CP15_VA_TO_PA_OTHER_3		"p15, 0, %0,  c7,  c8, 7"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POC		"p15, 0, %0,  c7, c10, 1"
+#define XREG_CP15_CLEAN_DC_LINE_SW		"p15, 0, %0,  c7, c10, 2"
+
+/* The next two CP15 register accesses below have been deprecated in favor
+ * of the new dsb and dmb instructions in Cortex A53.
+ */
+#define XREG_CP15_DATA_SYNC_BARRIER		"p15, 0, %0,  c7, c10, 4"
+#define XREG_CP15_DATA_MEMORY_BARRIER		"p15, 0, %0,  c7, c10, 5"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POU		"p15, 0, %0,  c7, c11, 1"
+
+#define XREG_CP15_NOP2				"p15, 0, %0,  c7, c13, 1"
+
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC	"p15, 0, %0,  c7, c14, 1"
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_SW	"p15, 0, %0,  c7, c14, 2"
+
+/* C8 Register Defines */
+#define XREG_CP15_INVAL_TLB_IS			"p15, 0, %0,  c8,  c3, 0"
+#define XREG_CP15_INVAL_TLB_MVA_IS		"p15, 0, %0,  c8,  c3, 1"
+#define XREG_CP15_INVAL_TLB_ASID_IS		"p15, 0, %0,  c8,  c3, 2"
+#define XREG_CP15_INVAL_TLB_MVA_ASID_IS		"p15, 0, %0,  c8,  c3, 3"
+
+#define XREG_CP15_INVAL_ITLB_UNLOCKED		"p15, 0, %0,  c8,  c5, 0"
+#define XREG_CP15_INVAL_ITLB_MVA		"p15, 0, %0,  c8,  c5, 1"
+#define XREG_CP15_INVAL_ITLB_ASID		"p15, 0, %0,  c8,  c5, 2"
+
+#define XREG_CP15_INVAL_DTLB_UNLOCKED		"p15, 0, %0,  c8,  c6, 0"
+#define XREG_CP15_INVAL_DTLB_MVA		"p15, 0, %0,  c8,  c6, 1"
+#define XREG_CP15_INVAL_DTLB_ASID		"p15, 0, %0,  c8,  c6, 2"
+
+#define XREG_CP15_INVAL_UTLB_UNLOCKED		"p15, 0, %0,  c8,  c7, 0"
+#define XREG_CP15_INVAL_UTLB_MVA		"p15, 0, %0,  c8,  c7, 1"
+#define XREG_CP15_INVAL_UTLB_ASID		"p15, 0, %0,  c8,  c7, 2"
+#define XREG_CP15_INVAL_UTLB_MVA_ASID		"p15, 0, %0,  c8,  c7, 3"
+
+/* C9 Register Defines */
+#define XREG_CP15_PERF_MONITOR_CTRL		"p15, 0, %0,  c9, c12, 0"
+#define XREG_CP15_COUNT_ENABLE_SET		"p15, 0, %0,  c9, c12, 1"
+#define XREG_CP15_COUNT_ENABLE_CLR		"p15, 0, %0,  c9, c12, 2"
+#define XREG_CP15_V_FLAG_STATUS			"p15, 0, %0,  c9, c12, 3"
+#define XREG_CP15_SW_INC			"p15, 0, %0,  c9, c12, 4"
+#define XREG_CP15_EVENT_CNTR_SEL		"p15, 0, %0,  c9, c12, 5"
+
+#define XREG_CP15_PERF_CYCLE_COUNTER		"p15, 0, %0,  c9, c13, 0"
+#define XREG_CP15_EVENT_TYPE_SEL		"p15, 0, %0,  c9, c13, 1"
+#define XREG_CP15_PERF_MONITOR_COUNT		"p15, 0, %0,  c9, c13, 2"
+
+#define XREG_CP15_USER_ENABLE			"p15, 0, %0,  c9, c14, 0"
+#define XREG_CP15_INTR_ENABLE_SET		"p15, 0, %0,  c9, c14, 1"
+#define XREG_CP15_INTR_ENABLE_CLR		"p15, 0, %0,  c9, c14, 2"
+
+/* C10 Register Defines */
+#define XREG_CP15_TLB_LOCKDWN			"p15, 0, %0, c10,  c0, 0"
+
+#define XREG_CP15_PRI_MEM_REMAP			"p15, 0, %0, c10,  c2, 0"
+#define XREG_CP15_NORM_MEM_REMAP		"p15, 0, %0, c10,  c2, 1"
+
+/* C11 Register Defines */
+/* Not used */
+
+/* C12 Register Defines */
+#define XREG_CP15_VEC_BASE_ADDR			"p15, 0, %0, c12,  c0, 0"
+#define XREG_CP15_MONITOR_VEC_BASE_ADDR		"p15, 0, %0, c12,  c0, 1"
+
+#define XREG_CP15_INTERRUPT_STATUS		"p15, 0, %0, c12,  c1, 0"
+#define XREG_CP15_VIRTUALIZATION_INTR		"p15, 0, %0, c12,  c1, 1"
+
+/* C13 Register Defines */
+#define XREG_CP15_CONTEXT_ID			"p15, 0, %0, c13,  c0, 1"
+#define USER_RW_THREAD_PID			"p15, 0, %0, c13,  c0, 2"
+#define USER_RO_THREAD_PID			"p15, 0, %0, c13,  c0, 3"
+#define USER_PRIV_THREAD_PID			"p15, 0, %0, c13,  c0, 4"
+
+/* C14 Register Defines */
+/* not used */
+
+/* C15 Register Defines */
+#define XREG_CP15_POWER_CTRL			"p15, 0, %0, c15,  c0, 0"
+#define XREG_CP15_CONFIG_BASE_ADDR		"p15, 4, %0, c15,  c0, 0"
+
+#define XREG_CP15_READ_TLB_ENTRY		"p15, 5, %0, c15,  c4, 2"
+#define XREG_CP15_WRITE_TLB_ENTRY		"p15, 5, %0, c15,  c4, 4"
+
+#define XREG_CP15_MAIN_TLB_VA			"p15, 5, %0, c15,  c5, 2"
+
+#define XREG_CP15_MAIN_TLB_PA			"p15, 5, %0, c15,  c6, 2"
+
+#define XREG_CP15_MAIN_TLB_ATTR			"p15, 5, %0, c15,  c7, 2"
+
+/* MPE register definitions */
+#define XREG_FPSID				c0
+#define XREG_FPSCR				c1
+#define XREG_MVFR1				c6
+#define XREG_MVFR0				c7
+#define XREG_FPEXC				c8
+#define XREG_FPINST				c9
+#define XREG_FPINST2				c10
+
+/* FPSID bits */
+#define XREG_FPSID_IMPLEMENTER_BIT	(24)
+#define XREG_FPSID_IMPLEMENTER_MASK	(0xFF << FPSID_IMPLEMENTER_BIT)
+#define XREG_FPSID_SOFTWARE		(1<<23)
+#define XREG_FPSID_ARCH_BIT		(16)
+#define XREG_FPSID_ARCH_MASK		(0xF  << FPSID_ARCH_BIT)
+#define XREG_FPSID_PART_BIT		(8)
+#define XREG_FPSID_PART_MASK		(0xFF << FPSID_PART_BIT)
+#define XREG_FPSID_VARIANT_BIT		(4)
+#define XREG_FPSID_VARIANT_MASK		(0xF  << FPSID_VARIANT_BIT)
+#define XREG_FPSID_REV_BIT		(0)
+#define XREG_FPSID_REV_MASK		(0xF  << FPSID_REV_BIT)
+
+/* FPSCR bits */
+#define XREG_FPSCR_N_BIT		(1 << 31)
+#define XREG_FPSCR_Z_BIT		(1 << 30)
+#define XREG_FPSCR_C_BIT		(1 << 29)
+#define XREG_FPSCR_V_BIT		(1 << 28)
+#define XREG_FPSCR_QC			(1 << 27)
+#define XREG_FPSCR_AHP			(1 << 26)
+#define XREG_FPSCR_DEFAULT_NAN		(1 << 25)
+#define XREG_FPSCR_FLUSHTOZERO		(1 << 24)
+#define XREG_FPSCR_ROUND_NEAREST	(0 << 22)
+#define XREG_FPSCR_ROUND_PLUSINF	(1 << 22)
+#define XREG_FPSCR_ROUND_MINUSINF	(2 << 22)
+#define XREG_FPSCR_ROUND_TOZERO		(3 << 22)
+#define XREG_FPSCR_RMODE_BIT		(22)
+#define XREG_FPSCR_RMODE_MASK		(3 << FPSCR_RMODE_BIT)
+#define XREG_FPSCR_STRIDE_BIT		(20)
+#define XREG_FPSCR_STRIDE_MASK		(3 << FPSCR_STRIDE_BIT)
+#define XREG_FPSCR_LENGTH_BIT		(16)
+#define XREG_FPSCR_LENGTH_MASK		(7 << FPSCR_LENGTH_BIT)
+#define XREG_FPSCR_IDC			(1 << 7)
+#define XREG_FPSCR_IXC			(1 << 4)
+#define XREG_FPSCR_UFC			(1 << 3)
+#define XREG_FPSCR_OFC			(1 << 2)
+#define XREG_FPSCR_DZC			(1 << 1)
+#define XREG_FPSCR_IOC			(1 << 0)
+
+/* MVFR0 bits */
+#define XREG_MVFR0_RMODE_BIT		(28)
+#define XREG_MVFR0_RMODE_MASK		(0xF << XREG_MVFR0_RMODE_BIT)
+#define XREG_MVFR0_SHORT_VEC_BIT	(24)
+#define XREG_MVFR0_SHORT_VEC_MASK	(0xF << XREG_MVFR0_SHORT_VEC_BIT)
+#define XREG_MVFR0_SQRT_BIT		(20)
+#define XREG_MVFR0_SQRT_MASK		(0xF << XREG_MVFR0_SQRT_BIT)
+#define XREG_MVFR0_DIVIDE_BIT		(16)
+#define XREG_MVFR0_DIVIDE_MASK		(0xF << XREG_MVFR0_DIVIDE_BIT)
+#define XREG_MVFR0_EXEC_TRAP_BIT	(12)
+#define XREG_MVFR0_EXEC_TRAP_MASK	(0xF << XREG_MVFR0_EXEC_TRAP_BIT)
+#define XREG_MVFR0_DP_BIT		(8)
+#define XREG_MVFR0_DP_MASK		(0xF << XREG_MVFR0_DP_BIT)
+#define XREG_MVFR0_SP_BIT		(4)
+#define XREG_MVFR0_SP_MASK		(0xF << XREG_MVFR0_SP_BIT)
+#define XREG_MVFR0_A_SIMD_BIT		(0)
+#define XREG_MVFR0_A_SIMD_MASK		(0xF << MVFR0_A_SIMD_BIT)
+
+/* FPEXC bits */
+#define XREG_FPEXC_EX			(1 << 31)
+#define XREG_FPEXC_EN			(1 << 30)
+#define XREG_FPEXC_DEX			(1 << 29)
+
+
+#define XREG_CONTROL_DCACHE_BIT	(0X00000001U<<2U)
+#define XREG_CONTROL_ICACHE_BIT	(0X00000001U<<12U)
+
+/**
+ *@endcond
+ */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* XREG_CORTEXA53_H */
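The XREG_CP15_* values above are operand strings intended to be spliced into
GCC inline assembly after an mrc or mcr mnemonic, as the asm_cp15_* macros
further below do. A hedged sketch of reading the Multiprocessor Affinity
Register (MPIDR) this way; the helper name is hypothetical and the code is not
part of the import.

    #include "xil_types.h"
    #include "xreg_cortexa53.h"  /* 32-bit ARMv8 variant shown above */

    /* Read MPIDR via CP15; expands to: mrc p15, 0, %0, c0, c0, 5 */
    static inline u32 read_mpidr(void)
    {
      u32 value;

      __asm__ __volatile__("mrc " XREG_CP15_MULTI_PROC_AFFINITY
                           : "=r" (value));
      return value;
    }
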
diff --git a/bsps/include/xil/arm/ARMv8/64bit/xil_cache.h b/bsps/include/xil/arm/ARMv8/64bit/xil_cache.h
new file mode 100644
index 0000000000..b878d05299
--- /dev/null
+++ b/bsps/include/xil/arm/ARMv8/64bit/xil_cache.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.h
+*
+* @addtogroup a53_64_cache_apis Cortex A53 64bit Processor Cache Functions
+*
+* Cache functions provide access to cache related operations such as flush
+* and invalidate for instruction and data caches. It gives option to perform
+* the cache operations on a single cacheline, a range of memory and an entire
+* cache.
+*
+* @{
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00 	pkp  05/29/14 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XIL_CACHE_H
+#define XIL_CACHE_H
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *@cond nocomments
+ */
+
+/************************** Constant Definitions *****************************/
+#define L1_DATA_PREFETCH_CONTROL_MASK  0xE000
+#define L1_DATA_PREFETCH_CONTROL_SHIFT  13
+
+/**
+ *@endcond
+ */
+
+/***************** Macros (Inline Functions) Definitions *********************/
+#define Xil_DCacheFlushRange Xil_DCacheInvalidateRange
+
+/************************** Function Prototypes ******************************/
+void Xil_DCacheEnable(void);
+void Xil_DCacheDisable(void);
+void Xil_DCacheInvalidate(void);
+void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len);
+void Xil_DCacheInvalidateLine(INTPTR adr);
+void Xil_DCacheFlush(void);
+void Xil_DCacheFlushLine(INTPTR adr);
+
+void Xil_ICacheEnable(void);
+void Xil_ICacheDisable(void);
+void Xil_ICacheInvalidate(void);
+void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len);
+void Xil_ICacheInvalidateLine(INTPTR adr);
+void Xil_ConfigureL1Prefetch(u8 num);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/**
+* @} End of "addtogroup a53_64_cache_apis".
+*/
diff --git a/bsps/include/xil/arm/ARMv8/64bit/xpseudo_asm.h b/bsps/include/xil/arm/ARMv8/64bit/xpseudo_asm.h
new file mode 100644
index 0000000000..3c79b0b991
--- /dev/null
+++ b/bsps/include/xil/arm/ARMv8/64bit/xpseudo_asm.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xpseudo_asm.h
+*
+* @addtogroup a53_64_specific Cortex A53 64bit Processor Specific Include Files
+*
+* The xpseudo_asm.h includes xreg_cortexa53.h and xpseudo_asm_gcc.h.
+* The xreg_cortexa53.h file contains definitions for inline assembler code.
+* It provides inline definitions for Cortex A53 GPRs, SPRs and floating point
+* registers.
+*
+* The xpseudo_asm_gcc.h contains the definitions for the most often used inline
+* assembler instructions, available as macros. These can be very useful for
+* tasks such as setting or getting special purpose registers, synchronization,
+* or cache manipulation etc. These inline assembler instructions can be used
+* from drivers and user applications written in C.
+*
+* @{
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00 	pkp  05/29/14 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XPSEUDO_ASM_H
+#define XPSEUDO_ASM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xreg_cortexa53.h"
+#ifdef __clang__
+#include "xpseudo_asm_armclang.h"
+#else
+#include "xpseudo_asm_gcc.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* XPSEUDO_ASM_H */
+/**
+* @} End of "addtogroup a53_64_specific".
+*/
diff --git a/bsps/include/xil/arm/ARMv8/64bit/xreg_cortexa53.h b/bsps/include/xil/arm/ARMv8/64bit/xreg_cortexa53.h
new file mode 100644
index 0000000000..b8ea1eac72
--- /dev/null
+++ b/bsps/include/xil/arm/ARMv8/64bit/xreg_cortexa53.h
@@ -0,0 +1,163 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xreg_cortexa53.h
+*
+* This header file contains definitions for using inline assembler code. It is
+* written specifically for the GNU compiler.
+*
+* All of the ARM Cortex A53 GPRs, SPRs, and Debug Registers are defined along
+* with the positions of the bits within the registers.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.00 	pkp  05/29/14 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XREG_CORTEXA53_H
+#define XREG_CORTEXA53_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ *@cond nocomments
+ */
+
+/* GPRs */
+#define XREG_GPR0				x0
+#define XREG_GPR1				x1
+#define XREG_GPR2				x2
+#define XREG_GPR3				x3
+#define XREG_GPR4				x4
+#define XREG_GPR5				x5
+#define XREG_GPR6				x6
+#define XREG_GPR7				x7
+#define XREG_GPR8				x8
+#define XREG_GPR9				x9
+#define XREG_GPR10				x10
+#define XREG_GPR11				x11
+#define XREG_GPR12				x12
+#define XREG_GPR13				x13
+#define XREG_GPR14				x14
+#define XREG_GPR15				x15
+#define XREG_GPR16				x16
+#define XREG_GPR17				x17
+#define XREG_GPR18				x18
+#define XREG_GPR19				x19
+#define XREG_GPR20				x20
+#define XREG_GPR21				x21
+#define XREG_GPR22				x22
+#define XREG_GPR23				x23
+#define XREG_GPR24				x24
+#define XREG_GPR25				x25
+#define XREG_GPR26				x26
+#define XREG_GPR27				x27
+#define XREG_GPR28				x28
+#define XREG_GPR29				x29
+#define XREG_GPR30				x30
+#define XREG_CPSR				cpsr
+
+/* Current Processor Status Register (CPSR) Bits */
+#define XREG_CPSR_MODE_BITS			0x1FU
+#define XREG_CPSR_EL3h_MODE			0xDU
+#define XREG_CPSR_EL3t_MODE			0xCU
+#define XREG_CPSR_EL2h_MODE			0x9U
+#define XREG_CPSR_EL2t_MODE			0x8U
+#define XREG_CPSR_EL1h_MODE			0x5U
+#define XREG_CPSR_EL1t_MODE			0x4U
+#define XREG_CPSR_EL0t_MODE			0x0U
+
+#define XREG_CPSR_IRQ_ENABLE		0x80U
+#define XREG_CPSR_FIQ_ENABLE		0x40U
+
+#define XREG_CPSR_N_BIT				0x80000000U
+#define XREG_CPSR_Z_BIT				0x40000000U
+#define XREG_CPSR_C_BIT				0x20000000U
+#define XREG_CPSR_V_BIT				0x10000000U
+
+/* FPSID bits */
+#define XREG_FPSID_IMPLEMENTER_BIT	(24U)
+#define XREG_FPSID_IMPLEMENTER_MASK	(0x000000FFU << FPSID_IMPLEMENTER_BIT)
+#define XREG_FPSID_SOFTWARE		(0X00000001U<<23U)
+#define XREG_FPSID_ARCH_BIT		(16U)
+#define XREG_FPSID_ARCH_MASK		(0x0000000FU  << FPSID_ARCH_BIT)
+#define XREG_FPSID_PART_BIT		(8U)
+#define XREG_FPSID_PART_MASK		(0x000000FFU << FPSID_PART_BIT)
+#define XREG_FPSID_VARIANT_BIT		(4U)
+#define XREG_FPSID_VARIANT_MASK		(0x0000000FU  << FPSID_VARIANT_BIT)
+#define XREG_FPSID_REV_BIT		(0U)
+#define XREG_FPSID_REV_MASK		(0x0000000FU  << FPSID_REV_BIT)
+
+/* FPSCR bits */
+#define XREG_FPSCR_N_BIT		(0X00000001U << 31U)
+#define XREG_FPSCR_Z_BIT		(0X00000001U << 30U)
+#define XREG_FPSCR_C_BIT		(0X00000001U << 29U)
+#define XREG_FPSCR_V_BIT		(0X00000001U << 28U)
+#define XREG_FPSCR_QC			(0X00000001U << 27U)
+#define XREG_FPSCR_AHP			(0X00000001U << 26U)
+#define XREG_FPSCR_DEFAULT_NAN		(0X00000001U << 25U)
+#define XREG_FPSCR_FLUSHTOZERO		(0X00000001U << 24U)
+#define XREG_FPSCR_ROUND_NEAREST	(0X00000000U << 22U)
+#define XREG_FPSCR_ROUND_PLUSINF	(0X00000001U << 22U)
+#define XREG_FPSCR_ROUND_MINUSINF	(0X00000002U << 22U)
+#define XREG_FPSCR_ROUND_TOZERO		(0X00000003U << 22U)
+#define XREG_FPSCR_RMODE_BIT		(22U)
+#define XREG_FPSCR_RMODE_MASK		(0X00000003U << FPSCR_RMODE_BIT)
+#define XREG_FPSCR_STRIDE_BIT		(20U)
+#define XREG_FPSCR_STRIDE_MASK		(0X00000003U << FPSCR_STRIDE_BIT)
+#define XREG_FPSCR_LENGTH_BIT		(16U)
+#define XREG_FPSCR_LENGTH_MASK		(0X00000007U << FPSCR_LENGTH_BIT)
+#define XREG_FPSCR_IDC			(0X00000001U << 7U)
+#define XREG_FPSCR_IXC			(0X00000001U << 4U)
+#define XREG_FPSCR_UFC			(0X00000001U << 3U)
+#define XREG_FPSCR_OFC			(0X00000001U << 2U)
+#define XREG_FPSCR_DZC			(0X00000001U << 1U)
+#define XREG_FPSCR_IOC			(0X00000001U << 0U)
+
+/* MVFR0 bits */
+#define XREG_MVFR0_RMODE_BIT		(28U)
+#define XREG_MVFR0_RMODE_MASK		(0x0000000FU << XREG_MVFR0_RMODE_BIT)
+#define XREG_MVFR0_SHORT_VEC_BIT	(24U)
+#define XREG_MVFR0_SHORT_VEC_MASK	(0x0000000FU << XREG_MVFR0_SHORT_VEC_BIT)
+#define XREG_MVFR0_SQRT_BIT		(20U)
+#define XREG_MVFR0_SQRT_MASK		(0x0000000FU << XREG_MVFR0_SQRT_BIT)
+#define XREG_MVFR0_DIVIDE_BIT		(16U)
+#define XREG_MVFR0_DIVIDE_MASK		(0x0000000FU << XREG_MVFR0_DIVIDE_BIT)
+#define XREG_MVFR0_EXEC_TRAP_BIT	(0X00000012U)
+#define XREG_MVFR0_EXEC_TRAP_MASK	(0X0000000FU << XREG_MVFR0_EXEC_TRAP_BIT)
+#define XREG_MVFR0_DP_BIT		(8U)
+#define XREG_MVFR0_DP_MASK		(0x0000000FU << XREG_MVFR0_DP_BIT)
+#define XREG_MVFR0_SP_BIT		(4U)
+#define XREG_MVFR0_SP_MASK		(0x0000000FU << XREG_MVFR0_SP_BIT)
+#define XREG_MVFR0_A_SIMD_BIT		(0U)
+#define XREG_MVFR0_A_SIMD_MASK		(0x0000000FU << MVFR0_A_SIMD_BIT)
+
+/* FPEXC bits */
+#define XREG_FPEXC_EX			(0X00000001U << 31U)
+#define XREG_FPEXC_EN			(0X00000001U << 30U)
+#define XREG_FPEXC_DEX			(0X00000001U << 29U)
+
+
+#define XREG_CONTROL_DCACHE_BIT	(0X00000001U<<2U)
+#define XREG_CONTROL_ICACHE_BIT	(0X00000001U<<12U)
+
+/**
+ *@endcond
+ */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* XREG_CORTEXA53_H */
diff --git a/bsps/include/xil/arm/cortexa9/xil_cache.h b/bsps/include/xil/arm/cortexa9/xil_cache.h
new file mode 100644
index 0000000000..75cd6f6a8b
--- /dev/null
+++ b/bsps/include/xil/arm/cortexa9/xil_cache.h
@@ -0,0 +1,105 @@
+/******************************************************************************
+* Copyright (c) 2010 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.h
+*
+* @addtogroup a9_cache_apis Cortex A9 Processor Cache Functions
+*
+* Cache functions provide access to cache related operations such as flush
+* and invalidate for instruction and data caches. It gives option to perform
+* the cache operations on a single cacheline, a range of memory and an entire
+* cache.
+*
+* @{
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ecm  01/29/10 First release
+* 3.04a sdm  01/02/12 Remove redundant dsb/dmb instructions in cache maintenance
+*		      APIs.
+* 6.8   aru  09/06/18 Removed compilation warnings for ARMCC toolchain.
+* </pre>
+*
+******************************************************************************/
+
+/**
+*@cond nocomments
+*/
+
+#ifndef XIL_CACHE_H
+#define XIL_CACHE_H
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __GNUC__
+
+#define asm_cp15_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
+			XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param));
+
+#define asm_cp15_clean_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
+			XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param));
+
+#define asm_cp15_inval_ic_line_mva_pou(param) __asm__ __volatile__("mcr " \
+			XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param));
+
+#define asm_cp15_inval_dc_line_sw(param) __asm__ __volatile__("mcr " \
+			XREG_CP15_INVAL_DC_LINE_SW :: "r" (param));
+
+#define asm_cp15_clean_inval_dc_line_sw(param) __asm__ __volatile__("mcr " \
+			XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param));
+
+#elif defined (__ICCARM__)
+
+#define asm_cp15_inval_dc_line_mva_poc(param) __asm volatile ("mcr " \
+			XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param));
+
+#define asm_cp15_clean_inval_dc_line_mva_poc(param) __asm volatile ("mcr " \
+			XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param));
+
+#define asm_cp15_inval_ic_line_mva_pou(param) __asm volatile ("mcr " \
+			XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param));
+
+#define asm_cp15_inval_dc_line_sw(param) __asm volatile ("mcr " \
+			XREG_CP15_INVAL_DC_LINE_SW :: "r" (param));
+
+#define asm_cp15_clean_inval_dc_line_sw(param) __asm volatile ("mcr " \
+			XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param));
+
+#endif
+
+/**
+*@endcond
+*/
+
+void Xil_DCacheEnable(void);
+void Xil_DCacheDisable(void);
+void Xil_DCacheInvalidate(void);
+void Xil_DCacheInvalidateRange(INTPTR adr, u32 len);
+void Xil_DCacheFlush(void);
+void Xil_DCacheFlushRange(INTPTR adr, u32 len);
+
+void Xil_ICacheEnable(void);
+void Xil_ICacheDisable(void);
+void Xil_ICacheInvalidate(void);
+void Xil_ICacheInvalidateRange(INTPTR adr, u32 len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/**
+* @} End of "addtogroup a9_cache_apis".
+*/
diff --git a/bsps/include/xil/arm/cortexa9/xpseudo_asm.h b/bsps/include/xil/arm/cortexa9/xpseudo_asm.h
new file mode 100644
index 0000000000..6d07851fa6
--- /dev/null
+++ b/bsps/include/xil/arm/cortexa9/xpseudo_asm.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xpseudo_asm.h
+*
+* @addtogroup a9_specific Cortex A9 Processor Specific Include Files
+*
+* The xpseudo_asm.h includes xreg_cortexa9.h and xpseudo_asm_gcc.h.
+*
+* The xreg_cortexa9.h file contains definitions for inline assembler code.
+* It provides inline definitions for Cortex A9 GPRs, SPRs, MPE registers,
+* co-processor registers and Debug registers.
+*
+* The xpseudo_asm_gcc.h contains the definitions for the most often used inline
+* assembler instructions, available as macros. These can be very useful for
+* tasks such as setting or getting special purpose registers, synchronization,
+* or cache manipulation etc. These inline assembler instructions can be used
+* from drivers and user applications written in C.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 1.00a ecm  10/18/09 First release
+* 3.04a sdm  01/02/12 Remove redundant dsb in mcr instruction.
+* 6.8   aru  09/06/18 Removed compilation warnings for ARMCC toolchain.
+* </pre>
+*
+******************************************************************************/
+#ifndef XPSEUDO_ASM_H
+#define XPSEUDO_ASM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xreg_cortexa9.h"
+#ifdef __GNUC__
+ #include "xpseudo_asm_gcc.h"
+#elif defined (__ICCARM__)
+ #include "xpseudo_asm_iccarm.h"
+#else
+ #include "xpseudo_asm_rvct.h"
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* XPSEUDO_ASM_H */
+/**
+* @} End of "addtogroup a9_specific".
+*/
diff --git a/bsps/include/xil/arm/cortexa9/xreg_cortexa9.h b/bsps/include/xil/arm/cortexa9/xreg_cortexa9.h
new file mode 100644
index 0000000000..2a4fff23f0
--- /dev/null
+++ b/bsps/include/xil/arm/cortexa9/xreg_cortexa9.h
@@ -0,0 +1,573 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xreg_cortexa9.h
+*
+* This header file contains definitions for using inline assembler code. It is
+* written specifically for the GNU, ARMCC compiler.
+*
+* All of the ARM Cortex A9 GPRs, SPRs, and Debug Registers are defined along
+* with the positions of the bits within the registers.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 1.00a ecm/sdm  10/20/09 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XREG_CORTEXA9_H
+#define XREG_CORTEXA9_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ *@cond nocomments
+ */
+
+/* GPRs */
+#define XREG_GPR0				r0
+#define XREG_GPR1				r1
+#define XREG_GPR2				r2
+#define XREG_GPR3				r3
+#define XREG_GPR4				r4
+#define XREG_GPR5				r5
+#define XREG_GPR6				r6
+#define XREG_GPR7				r7
+#define XREG_GPR8				r8
+#define XREG_GPR9				r9
+#define XREG_GPR10				r10
+#define XREG_GPR11				r11
+#define XREG_GPR12				r12
+#define XREG_GPR13				r13
+#define XREG_GPR14				r14
+#define XREG_GPR15				r15
+#define XREG_CPSR				cpsr
+
+/* Coprocessor number defines */
+#define XREG_CP0				0
+#define XREG_CP1				1
+#define XREG_CP2				2
+#define XREG_CP3				3
+#define XREG_CP4				4
+#define XREG_CP5				5
+#define XREG_CP6				6
+#define XREG_CP7				7
+#define XREG_CP8				8
+#define XREG_CP9				9
+#define XREG_CP10				10
+#define XREG_CP11				11
+#define XREG_CP12				12
+#define XREG_CP13				13
+#define XREG_CP14				14
+#define XREG_CP15				15
+
+/* Coprocessor control register defines */
+#define XREG_CR0				cr0
+#define XREG_CR1				cr1
+#define XREG_CR2				cr2
+#define XREG_CR3				cr3
+#define XREG_CR4				cr4
+#define XREG_CR5				cr5
+#define XREG_CR6				cr6
+#define XREG_CR7				cr7
+#define XREG_CR8				cr8
+#define XREG_CR9				cr9
+#define XREG_CR10				cr10
+#define XREG_CR11				cr11
+#define XREG_CR12				cr12
+#define XREG_CR13				cr13
+#define XREG_CR14				cr14
+#define XREG_CR15				cr15
+
+/* Current Processor Status Register (CPSR) Bits */
+#define XREG_CPSR_THUMB_MODE			0x20
+#define XREG_CPSR_MODE_BITS			0x1F
+#define XREG_CPSR_SYSTEM_MODE			0x1F
+#define XREG_CPSR_UNDEFINED_MODE		0x1B
+#define XREG_CPSR_DATA_ABORT_MODE		0x17
+#define XREG_CPSR_SVC_MODE			0x13
+#define XREG_CPSR_IRQ_MODE			0x12
+#define XREG_CPSR_FIQ_MODE			0x11
+#define XREG_CPSR_USER_MODE			0x10
+
+#define XREG_CPSR_IRQ_ENABLE			0x80
+#define XREG_CPSR_FIQ_ENABLE			0x40
+
+#define XREG_CPSR_N_BIT				0x80000000
+#define XREG_CPSR_Z_BIT				0x40000000
+#define XREG_CPSR_C_BIT				0x20000000
+#define XREG_CPSR_V_BIT				0x10000000
+
+
+/* CP15 defines */
+#if defined (__GNUC__) || defined (__ICCARM__)
+/* C0 Register defines */
+#define XREG_CP15_MAIN_ID			"p15, 0, %0,  c0,  c0, 0"
+#define XREG_CP15_CACHE_TYPE			"p15, 0, %0,  c0,  c0, 1"
+#define XREG_CP15_TCM_TYPE			"p15, 0, %0,  c0,  c0, 2"
+#define XREG_CP15_TLB_TYPE			"p15, 0, %0,  c0,  c0, 3"
+#define XREG_CP15_MULTI_PROC_AFFINITY		"p15, 0, %0,  c0,  c0, 5"
+
+#define XREG_CP15_PROC_FEATURE_0		"p15, 0, %0,  c0,  c1, 0"
+#define XREG_CP15_PROC_FEATURE_1		"p15, 0, %0,  c0,  c1, 1"
+#define XREG_CP15_DEBUG_FEATURE_0		"p15, 0, %0,  c0,  c1, 2"
+#define XREG_CP15_MEMORY_FEATURE_0		"p15, 0, %0,  c0,  c1, 4"
+#define XREG_CP15_MEMORY_FEATURE_1		"p15, 0, %0,  c0,  c1, 5"
+#define XREG_CP15_MEMORY_FEATURE_2		"p15, 0, %0,  c0,  c1, 6"
+#define XREG_CP15_MEMORY_FEATURE_3		"p15, 0, %0,  c0,  c1, 7"
+
+#define XREG_CP15_INST_FEATURE_0		"p15, 0, %0,  c0,  c2, 0"
+#define XREG_CP15_INST_FEATURE_1		"p15, 0, %0,  c0,  c2, 1"
+#define XREG_CP15_INST_FEATURE_2		"p15, 0, %0,  c0,  c2, 2"
+#define XREG_CP15_INST_FEATURE_3		"p15, 0, %0,  c0,  c2, 3"
+#define XREG_CP15_INST_FEATURE_4		"p15, 0, %0,  c0,  c2, 4"
+
+#define XREG_CP15_CACHE_SIZE_ID			"p15, 1, %0,  c0,  c0, 0"
+#define XREG_CP15_CACHE_LEVEL_ID		"p15, 1, %0,  c0,  c0, 1"
+#define XREG_CP15_AUXILARY_ID			"p15, 1, %0,  c0,  c0, 7"
+
+#define XREG_CP15_CACHE_SIZE_SEL		"p15, 2, %0,  c0,  c0, 0"
+
+/* C1 Register Defines */
+#define XREG_CP15_SYS_CONTROL			"p15, 0, %0,  c1,  c0, 0"
+#define XREG_CP15_AUX_CONTROL			"p15, 0, %0,  c1,  c0, 1"
+#define XREG_CP15_CP_ACCESS_CONTROL		"p15, 0, %0,  c1,  c0, 2"
+
+#define XREG_CP15_SECURE_CONFIG			"p15, 0, %0,  c1,  c1, 0"
+#define XREG_CP15_SECURE_DEBUG_ENABLE		"p15, 0, %0,  c1,  c1, 1"
+#define XREG_CP15_NS_ACCESS_CONTROL		"p15, 0, %0,  c1,  c1, 2"
+#define XREG_CP15_VIRTUAL_CONTROL		"p15, 0, %0,  c1,  c1, 3"
+
+#else /* RVCT */
+/* C0 Register defines */
+#define XREG_CP15_MAIN_ID			"cp15:0:c0:c0:0"
+#define XREG_CP15_CACHE_TYPE			"cp15:0:c0:c0:1"
+#define XREG_CP15_TCM_TYPE			"cp15:0:c0:c0:2"
+#define XREG_CP15_TLB_TYPE			"cp15:0:c0:c0:3"
+#define XREG_CP15_MULTI_PROC_AFFINITY		"cp15:0:c0:c0:5"
+
+#define XREG_CP15_PROC_FEATURE_0		"cp15:0:c0:c1:0"
+#define XREG_CP15_PROC_FEATURE_1		"cp15:0:c0:c1:1"
+#define XREG_CP15_DEBUG_FEATURE_0		"cp15:0:c0:c1:2"
+#define XREG_CP15_MEMORY_FEATURE_0		"cp15:0:c0:c1:4"
+#define XREG_CP15_MEMORY_FEATURE_1		"cp15:0:c0:c1:5"
+#define XREG_CP15_MEMORY_FEATURE_2		"cp15:0:c0:c1:6"
+#define XREG_CP15_MEMORY_FEATURE_3		"cp15:0:c0:c1:7"
+
+#define XREG_CP15_INST_FEATURE_0		"cp15:0:c0:c2:0"
+#define XREG_CP15_INST_FEATURE_1		"cp15:0:c0:c2:1"
+#define XREG_CP15_INST_FEATURE_2		"cp15:0:c0:c2:2"
+#define XREG_CP15_INST_FEATURE_3		"cp15:0:c0:c2:3"
+#define XREG_CP15_INST_FEATURE_4		"cp15:0:c0:c2:4"
+
+#define XREG_CP15_CACHE_SIZE_ID			"cp15:1:c0:c0:0"
+#define XREG_CP15_CACHE_LEVEL_ID		"cp15:1:c0:c0:1"
+#define XREG_CP15_AUXILARY_ID			"cp15:1:c0:c0:7"
+
+#define XREG_CP15_CACHE_SIZE_SEL		"cp15:2:c0:c0:0"
+
+/* C1 Register Defines */
+#define XREG_CP15_SYS_CONTROL			"cp15:0:c1:c0:0"
+#define XREG_CP15_AUX_CONTROL			"cp15:0:c1:c0:1"
+#define XREG_CP15_CP_ACCESS_CONTROL		"cp15:0:c1:c0:2"
+
+#define XREG_CP15_SECURE_CONFIG			"cp15:0:c1:c1:0"
+#define XREG_CP15_SECURE_DEBUG_ENABLE		"cp15:0:c1:c1:1"
+#define XREG_CP15_NS_ACCESS_CONTROL		"cp15:0:c1:c1:2"
+#define XREG_CP15_VIRTUAL_CONTROL		"cp15:0:c1:c1:3"
+#endif
+
+/* XREG_CP15_CONTROL bit defines */
+#define XREG_CP15_CONTROL_TE_BIT		0x40000000U
+#define XREG_CP15_CONTROL_AFE_BIT		0x20000000U
+#define XREG_CP15_CONTROL_TRE_BIT		0x10000000U
+#define XREG_CP15_CONTROL_NMFI_BIT		0x08000000U
+#define XREG_CP15_CONTROL_EE_BIT		0x02000000U
+#define XREG_CP15_CONTROL_HA_BIT		0x00020000U
+#define XREG_CP15_CONTROL_RR_BIT		0x00004000U
+#define XREG_CP15_CONTROL_V_BIT			0x00002000U
+#define XREG_CP15_CONTROL_I_BIT			0x00001000U
+#define XREG_CP15_CONTROL_Z_BIT			0x00000800U
+#define XREG_CP15_CONTROL_SW_BIT		0x00000400U
+#define XREG_CP15_CONTROL_B_BIT			0x00000080U
+#define XREG_CP15_CONTROL_C_BIT			0x00000004U
+#define XREG_CP15_CONTROL_A_BIT			0x00000002U
+#define XREG_CP15_CONTROL_M_BIT			0x00000001U
+
+#if defined (__GNUC__) || defined (__ICCARM__)
+/* C2 Register Defines */
+#define XREG_CP15_TTBR0				"p15, 0, %0,  c2,  c0, 0"
+#define XREG_CP15_TTBR1				"p15, 0, %0,  c2,  c0, 1"
+#define XREG_CP15_TTB_CONTROL			"p15, 0, %0,  c2,  c0, 2"
+
+/* C3 Register Defines */
+#define XREG_CP15_DOMAIN_ACCESS_CTRL		"p15, 0, %0,  c3,  c0, 0"
+
+/* C4 Register Defines */
+/* Not Used */
+
+/* C5 Register Defines */
+#define XREG_CP15_DATA_FAULT_STATUS		"p15, 0, %0,  c5,  c0, 0"
+#define XREG_CP15_INST_FAULT_STATUS		"p15, 0, %0,  c5,  c0, 1"
+
+#define XREG_CP15_AUX_DATA_FAULT_STATUS		"p15, 0, %0,  c5,  c1, 0"
+#define XREG_CP15_AUX_INST_FAULT_STATUS		"p15, 0, %0,  c5,  c1, 1"
+
+/* C6 Register Defines */
+#define XREG_CP15_DATA_FAULT_ADDRESS		"p15, 0, %0,  c6,  c0, 0"
+#define XREG_CP15_INST_FAULT_ADDRESS		"p15, 0, %0,  c6,  c0, 2"
+
+/* C7 Register Defines */
+#define XREG_CP15_NOP				"p15, 0, %0,  c7,  c0, 4"
+
+#define XREG_CP15_INVAL_IC_POU_IS		"p15, 0, %0,  c7,  c1, 0"
+#define XREG_CP15_INVAL_BRANCH_ARRAY_IS		"p15, 0, %0,  c7,  c1, 6"
+
+#define XREG_CP15_PHYS_ADDR			"p15, 0, %0,  c7,  c4, 0"
+
+#define XREG_CP15_INVAL_IC_POU			"p15, 0, %0,  c7,  c5, 0"
+#define XREG_CP15_INVAL_IC_LINE_MVA_POU		"p15, 0, %0,  c7,  c5, 1"
+
+/* The CP15 register access below has been deprecated in favor of the new
+ * isb instruction in Cortex A9.
+ */
+#define XREG_CP15_INST_SYNC_BARRIER		"p15, 0, %0,  c7,  c5, 4"
+#define XREG_CP15_INVAL_BRANCH_ARRAY		"p15, 0, %0,  c7,  c5, 6"
+
+#define XREG_CP15_INVAL_DC_LINE_MVA_POC		"p15, 0, %0,  c7,  c6, 1"
+#define XREG_CP15_INVAL_DC_LINE_SW		"p15, 0, %0,  c7,  c6, 2"
+
+#define XREG_CP15_VA_TO_PA_CURRENT_0		"p15, 0, %0,  c7,  c8, 0"
+#define XREG_CP15_VA_TO_PA_CURRENT_1		"p15, 0, %0,  c7,  c8, 1"
+#define XREG_CP15_VA_TO_PA_CURRENT_2		"p15, 0, %0,  c7,  c8, 2"
+#define XREG_CP15_VA_TO_PA_CURRENT_3		"p15, 0, %0,  c7,  c8, 3"
+
+#define XREG_CP15_VA_TO_PA_OTHER_0		"p15, 0, %0,  c7,  c8, 4"
+#define XREG_CP15_VA_TO_PA_OTHER_1		"p15, 0, %0,  c7,  c8, 5"
+#define XREG_CP15_VA_TO_PA_OTHER_2		"p15, 0, %0,  c7,  c8, 6"
+#define XREG_CP15_VA_TO_PA_OTHER_3		"p15, 0, %0,  c7,  c8, 7"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POC		"p15, 0, %0,  c7, c10, 1"
+#define XREG_CP15_CLEAN_DC_LINE_SW		"p15, 0, %0,  c7, c10, 2"
+
+/* The next two CP15 register accesses below have been deprecated in favor
+ * of the new dsb and dmb instructions in Cortex A9.
+ */
+#define XREG_CP15_DATA_SYNC_BARRIER		"p15, 0, %0,  c7, c10, 4"
+#define XREG_CP15_DATA_MEMORY_BARRIER		"p15, 0, %0,  c7, c10, 5"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POU		"p15, 0, %0,  c7, c11, 1"
+
+#define XREG_CP15_NOP2				"p15, 0, %0,  c7, c13, 1"
+
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC	"p15, 0, %0,  c7, c14, 1"
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_SW	"p15, 0, %0,  c7, c14, 2"
+
+/* C8 Register Defines */
+#define XREG_CP15_INVAL_TLB_IS			"p15, 0, %0,  c8,  c3, 0"
+#define XREG_CP15_INVAL_TLB_MVA_IS		"p15, 0, %0,  c8,  c3, 1"
+#define XREG_CP15_INVAL_TLB_ASID_IS		"p15, 0, %0,  c8,  c3, 2"
+#define XREG_CP15_INVAL_TLB_MVA_ASID_IS		"p15, 0, %0,  c8,  c3, 3"
+
+#define XREG_CP15_INVAL_ITLB_UNLOCKED		"p15, 0, %0,  c8,  c5, 0"
+#define XREG_CP15_INVAL_ITLB_MVA		"p15, 0, %0,  c8,  c5, 1"
+#define XREG_CP15_INVAL_ITLB_ASID		"p15, 0, %0,  c8,  c5, 2"
+
+#define XREG_CP15_INVAL_DTLB_UNLOCKED		"p15, 0, %0,  c8,  c6, 0"
+#define XREG_CP15_INVAL_DTLB_MVA		"p15, 0, %0,  c8,  c6, 1"
+#define XREG_CP15_INVAL_DTLB_ASID		"p15, 0, %0,  c8,  c6, 2"
+
+#define XREG_CP15_INVAL_UTLB_UNLOCKED		"p15, 0, %0,  c8,  c7, 0"
+#define XREG_CP15_INVAL_UTLB_MVA		"p15, 0, %0,  c8,  c7, 1"
+#define XREG_CP15_INVAL_UTLB_ASID		"p15, 0, %0,  c8,  c7, 2"
+#define XREG_CP15_INVAL_UTLB_MVA_ASID		"p15, 0, %0,  c8,  c7, 3"
+
+/* C9 Register Defines */
+#define XREG_CP15_PERF_MONITOR_CTRL		"p15, 0, %0,  c9, c12, 0"
+#define XREG_CP15_COUNT_ENABLE_SET		"p15, 0, %0,  c9, c12, 1"
+#define XREG_CP15_COUNT_ENABLE_CLR		"p15, 0, %0,  c9, c12, 2"
+#define XREG_CP15_V_FLAG_STATUS			"p15, 0, %0,  c9, c12, 3"
+#define XREG_CP15_SW_INC			"p15, 0, %0,  c9, c12, 4"
+#define XREG_CP15_EVENT_CNTR_SEL		"p15, 0, %0,  c9, c12, 5"
+
+#define XREG_CP15_PERF_CYCLE_COUNTER		"p15, 0, %0,  c9, c13, 0"
+#define XREG_CP15_EVENT_TYPE_SEL		"p15, 0, %0,  c9, c13, 1"
+#define XREG_CP15_PERF_MONITOR_COUNT		"p15, 0, %0,  c9, c13, 2"
+
+#define XREG_CP15_USER_ENABLE			"p15, 0, %0,  c9, c14, 0"
+#define XREG_CP15_INTR_ENABLE_SET		"p15, 0, %0,  c9, c14, 1"
+#define XREG_CP15_INTR_ENABLE_CLR		"p15, 0, %0,  c9, c14, 2"
+
+/* C10 Register Defines */
+#define XREG_CP15_TLB_LOCKDWN			"p15, 0, %0, c10,  c0, 0"
+
+#define XREG_CP15_PRI_MEM_REMAP			"p15, 0, %0, c10,  c2, 0"
+#define XREG_CP15_NORM_MEM_REMAP		"p15, 0, %0, c10,  c2, 1"
+
+/* C11 Register Defines */
+/* Not used */
+
+/* C12 Register Defines */
+#define XREG_CP15_VEC_BASE_ADDR			"p15, 0, %0, c12,  c0, 0"
+#define XREG_CP15_MONITOR_VEC_BASE_ADDR		"p15, 0, %0, c12,  c0, 1"
+
+#define XREG_CP15_INTERRUPT_STATUS		"p15, 0, %0, c12,  c1, 0"
+#define XREG_CP15_VIRTUALIZATION_INTR		"p15, 0, %0, c12,  c1, 1"
+
+/* C13 Register Defines */
+#define XREG_CP15_CONTEXT_ID			"p15, 0, %0, c13,  c0, 1"
+#define USER_RW_THREAD_PID			"p15, 0, %0, c13,  c0, 2"
+#define USER_RO_THREAD_PID			"p15, 0, %0, c13,  c0, 3"
+#define USER_PRIV_THREAD_PID			"p15, 0, %0, c13,  c0, 4"
+
+/* C14 Register Defines */
+/* not used */
+
+/* C15 Register Defines */
+#define XREG_CP15_POWER_CTRL			"p15, 0, %0, c15,  c0, 0"
+#define XREG_CP15_CONFIG_BASE_ADDR		"p15, 4, %0, c15,  c0, 0"
+
+#define XREG_CP15_READ_TLB_ENTRY		"p15, 5, %0, c15,  c4, 2"
+#define XREG_CP15_WRITE_TLB_ENTRY		"p15, 5, %0, c15,  c4, 4"
+
+#define XREG_CP15_MAIN_TLB_VA			"p15, 5, %0, c15,  c5, 2"
+
+#define XREG_CP15_MAIN_TLB_PA			"p15, 5, %0, c15,  c6, 2"
+
+#define XREG_CP15_MAIN_TLB_ATTR			"p15, 5, %0, c15,  c7, 2"
+
+#else
+/* C2 Register Defines */
+#define XREG_CP15_TTBR0				"cp15:0:c2:c0:0"
+#define XREG_CP15_TTBR1				"cp15:0:c2:c0:1"
+#define XREG_CP15_TTB_CONTROL			"cp15:0:c2:c0:2"
+
+/* C3 Register Defines */
+#define XREG_CP15_DOMAIN_ACCESS_CTRL		"cp15:0:c3:c0:0"
+
+/* C4 Register Defines */
+/* Not Used */
+
+/* C5 Register Defines */
+#define XREG_CP15_DATA_FAULT_STATUS		"cp15:0:c5:c0:0"
+#define XREG_CP15_INST_FAULT_STATUS		"cp15:0:c5:c0:1"
+
+#define XREG_CP15_AUX_DATA_FAULT_STATUS		"cp15:0:c5:c1:0"
+#define XREG_CP15_AUX_INST_FAULT_STATUS		"cp15:0:c5:c1:1"
+
+/* C6 Register Defines */
+#define XREG_CP15_DATA_FAULT_ADDRESS		"cp15:0:c6:c0:0"
+#define XREG_CP15_INST_FAULT_ADDRESS		"cp15:0:c6:c0:2"
+
+/* C7 Register Defines */
+#define XREG_CP15_NOP				"cp15:0:c7:c0:4"
+
+#define XREG_CP15_INVAL_IC_POU_IS		"cp15:0:c7:c1:0"
+#define XREG_CP15_INVAL_BRANCH_ARRAY_IS		"cp15:0:c7:c1:6"
+
+#define XREG_CP15_PHYS_ADDR			"cp15:0:c7:c4:0"
+
+#define XREG_CP15_INVAL_IC_POU			"cp15:0:c7:c5:0"
+#define XREG_CP15_INVAL_IC_LINE_MVA_POU		"cp15:0:c7:c5:1"
+
+/* The CP15 register access below has been deprecated in favor of the new
+ * isb instruction in Cortex A9.
+ */
+#define XREG_CP15_INST_SYNC_BARRIER		"cp15:0:c7:c5:4"
+#define XREG_CP15_INVAL_BRANCH_ARRAY		"cp15:0:c7:c5:6"
+
+#define XREG_CP15_INVAL_DC_LINE_MVA_POC		"cp15:0:c7:c6:1"
+#define XREG_CP15_INVAL_DC_LINE_SW		"cp15:0:c7:c6:2"
+
+#define XREG_CP15_VA_TO_PA_CURRENT_0		"cp15:0:c7:c8:0"
+#define XREG_CP15_VA_TO_PA_CURRENT_1		"cp15:0:c7:c8:1"
+#define XREG_CP15_VA_TO_PA_CURRENT_2		"cp15:0:c7:c8:2"
+#define XREG_CP15_VA_TO_PA_CURRENT_3		"cp15:0:c7:c8:3"
+
+#define XREG_CP15_VA_TO_PA_OTHER_0		"cp15:0:c7:c8:4"
+#define XREG_CP15_VA_TO_PA_OTHER_1		"cp15:0:c7:c8:5"
+#define XREG_CP15_VA_TO_PA_OTHER_2		"cp15:0:c7:c8:6"
+#define XREG_CP15_VA_TO_PA_OTHER_3		"cp15:0:c7:c8:7"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POC		"cp15:0:c7:c10:1"
+#define XREG_CP15_CLEAN_DC_LINE_SW		"cp15:0:c7:c10:2"
+
+/* The next two CP15 register accesses below have been deprecated in favor
+ * of the new dsb and dmb instructions in Cortex A9.
+ */
+#define XREG_CP15_DATA_SYNC_BARRIER		"cp15:0:c7:c10:4"
+#define XREG_CP15_DATA_MEMORY_BARRIER		"cp15:0:c7:c10:5"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POU		"cp15:0:c7:c11:1"
+
+#define XREG_CP15_NOP2				"cp15:0:c7:c13:1"
+
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC	"cp15:0:c7:c14:1"
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_SW	"cp15:0:c7:c14:2"
+
+/* C8 Register Defines */
+#define XREG_CP15_INVAL_TLB_IS			"cp15:0:c8:c3:0"
+#define XREG_CP15_INVAL_TLB_MVA_IS		"cp15:0:c8:c3:1"
+#define XREG_CP15_INVAL_TLB_ASID_IS		"cp15:0:c8:c3:2"
+#define XREG_CP15_INVAL_TLB_MVA_ASID_IS		"cp15:0:c8:c3:3"
+
+#define XREG_CP15_INVAL_ITLB_UNLOCKED		"cp15:0:c8:c5:0"
+#define XREG_CP15_INVAL_ITLB_MVA		"cp15:0:c8:c5:1"
+#define XREG_CP15_INVAL_ITLB_ASID		"cp15:0:c8:c5:2"
+
+#define XREG_CP15_INVAL_DTLB_UNLOCKED		"cp15:0:c8:c6:0"
+#define XREG_CP15_INVAL_DTLB_MVA		"cp15:0:c8:c6:1"
+#define XREG_CP15_INVAL_DTLB_ASID		"cp15:0:c8:c6:2"
+
+#define XREG_CP15_INVAL_UTLB_UNLOCKED		"cp15:0:c8:c7:0"
+#define XREG_CP15_INVAL_UTLB_MVA		"cp15:0:c8:c7:1"
+#define XREG_CP15_INVAL_UTLB_ASID		"cp15:0:c8:c7:2"
+#define XREG_CP15_INVAL_UTLB_MVA_ASID		"cp15:0:c8:c7:3"
+
+/* C9 Register Defines */
+#define XREG_CP15_PERF_MONITOR_CTRL		"cp15:0:c9:c12:0"
+#define XREG_CP15_COUNT_ENABLE_SET		"cp15:0:c9:c12:1"
+#define XREG_CP15_COUNT_ENABLE_CLR		"cp15:0:c9:c12:2"
+#define XREG_CP15_V_FLAG_STATUS			"cp15:0:c9:c12:3"
+#define XREG_CP15_SW_INC			"cp15:0:c9:c12:4"
+#define XREG_CP15_EVENT_CNTR_SEL		"cp15:0:c9:c12:5"
+
+#define XREG_CP15_PERF_CYCLE_COUNTER		"cp15:0:c9:c13:0"
+#define XREG_CP15_EVENT_TYPE_SEL		"cp15:0:c9:c13:1"
+#define XREG_CP15_PERF_MONITOR_COUNT		"cp15:0:c9:c13:2"
+
+#define XREG_CP15_USER_ENABLE			"cp15:0:c9:c14:0"
+#define XREG_CP15_INTR_ENABLE_SET		"cp15:0:c9:c14:1"
+#define XREG_CP15_INTR_ENABLE_CLR		"cp15:0:c9:c14:2"
+
+/* C10 Register Defines */
+#define XREG_CP15_TLB_LOCKDWN			"cp15:0:c10:c0:0"
+
+#define XREG_CP15_PRI_MEM_REMAP			"cp15:0:c10:c2:0"
+#define XREG_CP15_NORM_MEM_REMAP		"cp15:0:c10:c2:1"
+
+/* C11 Register Defines */
+/* Not used */
+
+/* C12 Register Defines */
+#define XREG_CP15_VEC_BASE_ADDR			"cp15:0:c12:c0:0"
+#define XREG_CP15_MONITOR_VEC_BASE_ADDR		"cp15:0:c12:c0:1"
+
+#define XREG_CP15_INTERRUPT_STATUS		"cp15:0:c12:c1:0"
+#define XREG_CP15_VIRTUALIZATION_INTR		"cp15:0:c12:c1:1"
+
+/* C13 Register Defines */
+#define XREG_CP15_CONTEXT_ID			"cp15:0:c13:c0:1"
+#define USER_RW_THREAD_PID			"cp15:0:c13:c0:2"
+#define USER_RO_THREAD_PID			"cp15:0:c13:c0:3"
+#define USER_PRIV_THREAD_PID			"cp15:0:c13:c0:4"
+
+/* C14 Register Defines */
+/* not used */
+
+/* C15 Register Defines */
+#define XREG_CP15_POWER_CTRL			"cp15:0:c15:c0:0"
+#define XREG_CP15_CONFIG_BASE_ADDR		"cp15:4:c15:c0:0"
+
+#define XREG_CP15_READ_TLB_ENTRY		"cp15:5:c15:c4:2"
+#define XREG_CP15_WRITE_TLB_ENTRY		"cp15:5:c15:c4:4"
+
+#define XREG_CP15_MAIN_TLB_VA			"cp15:5:c15:c5:2"
+
+#define XREG_CP15_MAIN_TLB_PA			"cp15:5:c15:c6:2"
+
+#define XREG_CP15_MAIN_TLB_ATTR			"cp15:5:c15:c7:2"
+#endif
+
+
+/* MPE register definitions */
+#define XREG_FPSID				c0
+#define XREG_FPSCR				c1
+#define XREG_MVFR1				c6
+#define XREG_MVFR0				c7
+#define XREG_FPEXC				c8
+#define XREG_FPINST				c9
+#define XREG_FPINST2				c10
+
+/* FPSID bits */
+#define XREG_FPSID_IMPLEMENTER_BIT	(24)
+#define XREG_FPSID_IMPLEMENTER_MASK	(0xFF << FPSID_IMPLEMENTER_BIT)
+#define XREG_FPSID_SOFTWARE		(1<<23)
+#define XREG_FPSID_ARCH_BIT		(16)
+#define XREG_FPSID_ARCH_MASK		(0xF  << FPSID_ARCH_BIT)
+#define XREG_FPSID_PART_BIT		(8)
+#define XREG_FPSID_PART_MASK		(0xFF << FPSID_PART_BIT)
+#define XREG_FPSID_VARIANT_BIT		(4)
+#define XREG_FPSID_VARIANT_MASK		(0xF  << FPSID_VARIANT_BIT)
+#define XREG_FPSID_REV_BIT		(0)
+#define XREG_FPSID_REV_MASK		(0xF  << FPSID_REV_BIT)
+
+/* FPSCR bits */
+#define XREG_FPSCR_N_BIT		(1 << 31)
+#define XREG_FPSCR_Z_BIT		(1 << 30)
+#define XREG_FPSCR_C_BIT		(1 << 29)
+#define XREG_FPSCR_V_BIT		(1 << 28)
+#define XREG_FPSCR_QC			(1 << 27)
+#define XREG_FPSCR_AHP			(1 << 26)
+#define XREG_FPSCR_DEFAULT_NAN		(1 << 25)
+#define XREG_FPSCR_FLUSHTOZERO		(1 << 24)
+#define XREG_FPSCR_ROUND_NEAREST	(0 << 22)
+#define XREG_FPSCR_ROUND_PLUSINF	(1 << 22)
+#define XREG_FPSCR_ROUND_MINUSINF	(2 << 22)
+#define XREG_FPSCR_ROUND_TOZERO		(3 << 22)
+#define XREG_FPSCR_RMODE_BIT		(22)
+#define XREG_FPSCR_RMODE_MASK		(3 << FPSCR_RMODE_BIT)
+#define XREG_FPSCR_STRIDE_BIT		(20)
+#define XREG_FPSCR_STRIDE_MASK		(3 << FPSCR_STRIDE_BIT)
+#define XREG_FPSCR_LENGTH_BIT		(16)
+#define XREG_FPSCR_LENGTH_MASK		(7 << XREG_FPSCR_LENGTH_BIT)
+#define XREG_FPSCR_IDC			(1 << 7)
+#define XREG_FPSCR_IXC			(1 << 4)
+#define XREG_FPSCR_UFC			(1 << 3)
+#define XREG_FPSCR_OFC			(1 << 2)
+#define XREG_FPSCR_DZC			(1 << 1)
+#define XREG_FPSCR_IOC			(1 << 0)
+
+/* MVFR0 bits */
+#define XREG_MVFR0_RMODE_BIT		(28)
+#define XREG_MVFR0_RMODE_MASK		(0xF << XREG_MVFR0_RMODE_BIT)
+#define XREG_MVFR0_SHORT_VEC_BIT	(24)
+#define XREG_MVFR0_SHORT_VEC_MASK	(0xF << XREG_MVFR0_SHORT_VEC_BIT)
+#define XREG_MVFR0_SQRT_BIT		(20)
+#define XREG_MVFR0_SQRT_MASK		(0xF << XREG_MVFR0_SQRT_BIT)
+#define XREG_MVFR0_DIVIDE_BIT		(16)
+#define XREG_MVFR0_DIVIDE_MASK		(0xF << XREG_MVFR0_DIVIDE_BIT)
+#define XREG_MVFR0_EXEC_TRAP_BIT	(12)
+#define XREG_MVFR0_EXEC_TRAP_MASK	(0xF << XREG_MVFR0_EXEC_TRAP_BIT)
+#define XREG_MVFR0_DP_BIT		(8)
+#define XREG_MVFR0_DP_MASK		(0xF << XREG_MVFR0_DP_BIT)
+#define XREG_MVFR0_SP_BIT		(4)
+#define XREG_MVFR0_SP_MASK		(0xF << XREG_MVFR0_SP_BIT)
+#define XREG_MVFR0_A_SIMD_BIT		(0)
+#define XREG_MVFR0_A_SIMD_MASK		(0xF << XREG_MVFR0_A_SIMD_BIT)
+
+/* FPEXC bits */
+#define XREG_FPEXC_EX			(1 << 31)
+#define XREG_FPEXC_EN			(1 << 30)
+#define XREG_FPEXC_DEX			(1 << 29)
+
+
+/**
+ *@endcond
+ */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* XREG_CORTEXA9_H */
diff --git a/bsps/include/xil/arm/cortexr5/xil_cache.h b/bsps/include/xil/arm/cortexr5/xil_cache.h
new file mode 100644
index 0000000000..fcc74504db
--- /dev/null
+++ b/bsps/include/xil/arm/cortexr5/xil_cache.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.h
+*
+* @addtogroup r5_cache_apis Cortex R5 Processor Cache Functions
+*
+* Cache functions provide access to cache-related operations such as flush
+* and invalidate for the instruction and data caches. They give the option to
+* perform cache operations on a single cacheline, a range of memory, or the
+* entire cache.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00 	pkp  02/20/14 First release
+* 6.2   mus  01/27/17 Updated to support IAR compiler
+* </pre>
+*
+******************************************************************************/
+#ifndef XIL_CACHE_H
+#define XIL_CACHE_H
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *@cond nocomments
+ */
+
+#if defined (__GNUC__)
+#define asm_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
+		XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param))
+
+#define asm_clean_inval_dc_line_sw(param) __asm__ __volatile__("mcr " \
+		XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param))
+
+#define asm_clean_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
+		XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param))
+
+#define asm_inval_ic_line_mva_pou(param) __asm__ __volatile__("mcr " \
+		XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param))
+#elif defined (__ICCARM__)
+#define asm_inval_dc_line_mva_poc(param) __asm volatile("mcr " \
+		XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param))
+
+#define asm_clean_inval_dc_line_sw(param) __asm volatile("mcr " \
+		XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param))
+
+#define asm_clean_inval_dc_line_mva_poc(param) __asm volatile("mcr " \
+		XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param))
+
+#define asm_inval_ic_line_mva_pou(param) __asm volatile("mcr " \
+		XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param))
+#endif
+
+/**
+ *@endcond
+ */
+
+void Xil_DCacheEnable(void);
+void Xil_DCacheDisable(void);
+void Xil_DCacheInvalidate(void);
+void Xil_DCacheInvalidateRange(INTPTR adr, u32 len);
+void Xil_DCacheFlush(void);
+void Xil_DCacheFlushRange(INTPTR adr, u32 len);
+void Xil_DCacheInvalidateLine(INTPTR adr);
+void Xil_DCacheFlushLine(INTPTR adr);
+void Xil_DCacheStoreLine(INTPTR adr);
+
+void Xil_ICacheEnable(void);
+void Xil_ICacheDisable(void);
+void Xil_ICacheInvalidate(void);
+void Xil_ICacheInvalidateRange(INTPTR adr, u32 len);
+void Xil_ICacheInvalidateLine(INTPTR adr);
+
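+/*
+ * Illustrative usage sketch, not part of the imported Xilinx sources. A driver
+ * would typically flush a buffer before a device reads it (e.g. via DMA) and
+ * invalidate it before the CPU reads data written by the device; DmaBuffer
+ * below is a hypothetical buffer.
+ *
+ * @code
+ *   static u8 DmaBuffer[1024];
+ *
+ *   Xil_DCacheFlushRange((INTPTR)DmaBuffer, sizeof(DmaBuffer));
+ *   ... start the DMA transfer and wait for it to complete ...
+ *   Xil_DCacheInvalidateRange((INTPTR)DmaBuffer, sizeof(DmaBuffer));
+ * @endcode
+ */
+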
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/**
+* @} End of "addtogroup r5_cache_apis".
+*/
diff --git a/bsps/include/xil/arm/cortexr5/xpseudo_asm.h b/bsps/include/xil/arm/cortexr5/xpseudo_asm.h
new file mode 100644
index 0000000000..46b704539c
--- /dev/null
+++ b/bsps/include/xil/arm/cortexr5/xpseudo_asm.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xpseudo_asm.h
+*
+* @addtogroup r5_specific Cortex R5 Processor Specific Include Files
+*
+* The xpseudo_asm.h includes xreg_cortexr5.h and xpseudo_asm_gcc.h.
+*
+* The xreg_cortexr5.h file contains definitions for inline assembler code.
+* It provides inline definitions for the Cortex R5 GPRs, SPRs, co-processor
+* registers, and Debug registers.
+*
+* The xpseudo_asm_gcc.h file contains definitions for the most often used
+* inline assembler instructions, available as macros. These can be very
+* useful for tasks such as setting or getting special purpose registers,
+* synchronization, or cache manipulation. These inline assembler instructions
+* can be used from drivers and user applications written in C.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00  pkp  02/10/14 Initial version
+* 6.2   mus  01/27/17 Updated to support IAR compiler
+* 7.3   dp   06/25/20 Initial version for armclang
+* </pre>
+*
+******************************************************************************/
+#ifndef XPSEUDO_ASM_H /* prevent circular inclusions */
+#define XPSEUDO_ASM_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xreg_cortexr5.h"
+#if defined (__clang__)
+#include "xpseudo_asm_armclang.h"
+#elif defined (__GNUC__)
+#include "xpseudo_asm_gcc.h"
+#elif defined (__ICCARM__)
+#include "xpseudo_asm_iccarm.h"
+#endif
+
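+/*
+ * Illustrative usage sketch, not part of the imported Xilinx sources. With the
+ * GCC variant included above, the helper macros can be used directly from C,
+ * for example:
+ *
+ * @code
+ *   u32 Affinity = mfcp(XREG_CP15_MULTI_PROC_AFFINITY);
+ *   dsb();
+ *   isb();
+ * @endcode
+ */
+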
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* XPSEUDO_ASM_H */
+/**
+* @} End of "addtogroup r5_specific".
+*/
diff --git a/bsps/include/xil/arm/cortexr5/xreg_cortexr5.h b/bsps/include/xil/arm/cortexr5/xreg_cortexr5.h
new file mode 100644
index 0000000000..8034672636
--- /dev/null
+++ b/bsps/include/xil/arm/cortexr5/xreg_cortexr5.h
@@ -0,0 +1,429 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2022 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xreg_cortexr5.h
+*
+* This header file contains definitions for using inline assembler code. It is
+* written specifically for the GNU, IAR, and ARMCC compilers.
+*
+* All of the ARM Cortex R5 GPRs, SPRs, and Debug Registers are defined along
+* with the positions of the bits within the registers.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.00  pkp  02/10/14 Initial version
+* 7.7	sk   01/10/22 Update PRIV_RW_USER_RW macro from unsigned to unsigned
+* 		      long to fix misra_c_2012_rule_12_2 violation.
+* </pre>
+*
+******************************************************************************/
+/**
+ *@cond nocomments
+ */
+
+#ifndef XREG_CORTEXR5_H	/* prevent circular inclusions */
+#define XREG_CORTEXR5_H	/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* GPRs */
+#define XREG_GPR0				r0
+#define XREG_GPR1				r1
+#define XREG_GPR2				r2
+#define XREG_GPR3				r3
+#define XREG_GPR4				r4
+#define XREG_GPR5				r5
+#define XREG_GPR6				r6
+#define XREG_GPR7				r7
+#define XREG_GPR8				r8
+#define XREG_GPR9				r9
+#define XREG_GPR10				r10
+#define XREG_GPR11				r11
+#define XREG_GPR12				r12
+#define XREG_GPR13				r13
+#define XREG_GPR14				r14
+#define XREG_GPR15				r15
+#define XREG_CPSR				cpsr
+
+/* Coprocessor number defines */
+#define XREG_CP0				0
+#define XREG_CP1				1
+#define XREG_CP2				2
+#define XREG_CP3				3
+#define XREG_CP4				4
+#define XREG_CP5				5
+#define XREG_CP6				6
+#define XREG_CP7				7
+#define XREG_CP8				8
+#define XREG_CP9				9
+#define XREG_CP10				10
+#define XREG_CP11				11
+#define XREG_CP12				12
+#define XREG_CP13				13
+#define XREG_CP14				14
+#define XREG_CP15				15
+
+/* Coprocessor control register defines */
+#define XREG_CR0				cr0
+#define XREG_CR1				cr1
+#define XREG_CR2				cr2
+#define XREG_CR3				cr3
+#define XREG_CR4				cr4
+#define XREG_CR5				cr5
+#define XREG_CR6				cr6
+#define XREG_CR7				cr7
+#define XREG_CR8				cr8
+#define XREG_CR9				cr9
+#define XREG_CR10				cr10
+#define XREG_CR11				cr11
+#define XREG_CR12				cr12
+#define XREG_CR13				cr13
+#define XREG_CR14				cr14
+#define XREG_CR15				cr15
+
+/* Current Processor Status Register (CPSR) Bits */
+#define XREG_CPSR_THUMB_MODE			0x20U
+#define XREG_CPSR_MODE_BITS			0x1FU
+#define XREG_CPSR_SYSTEM_MODE			0x1FU
+#define XREG_CPSR_UNDEFINED_MODE		0x1BU
+#define XREG_CPSR_DATA_ABORT_MODE		0x17U
+#define XREG_CPSR_SVC_MODE			0x13U
+#define XREG_CPSR_IRQ_MODE			0x12U
+#define XREG_CPSR_FIQ_MODE			0x11U
+#define XREG_CPSR_USER_MODE			0x10U
+
+#define XREG_CPSR_IRQ_ENABLE			0x80U
+#define XREG_CPSR_FIQ_ENABLE			0x40U
+
+#define XREG_CPSR_N_BIT				0x80000000U
+#define XREG_CPSR_Z_BIT				0x40000000U
+#define XREG_CPSR_C_BIT				0x20000000U
+#define XREG_CPSR_V_BIT				0x10000000U
+
+/*MPU region definitions*/
+#define REGION_32B     0x00000004U
+#define REGION_64B     0x00000005U
+#define REGION_128B    0x00000006U
+#define REGION_256B    0x00000007U
+#define REGION_512B    0x00000008U
+#define REGION_1K      0x00000009U
+#define REGION_2K      0x0000000AU
+#define REGION_4K      0x0000000BU
+#define REGION_8K      0x0000000CU
+#define REGION_16K     0x0000000DU
+#define REGION_32K     0x0000000EU
+#define REGION_64K     0x0000000FU
+#define REGION_128K    0x00000010U
+#define REGION_256K    0x00000011U
+#define REGION_512K    0x00000012U
+#define REGION_1M      0x00000013U
+#define REGION_2M      0x00000014U
+#define REGION_4M      0x00000015U
+#define REGION_8M      0x00000016U
+#define REGION_16M     0x00000017U
+#define REGION_32M     0x00000018U
+#define REGION_64M     0x00000019U
+#define REGION_128M    0x0000001AU
+#define REGION_256M    0x0000001BU
+#define REGION_512M    0x0000001CU
+#define REGION_1G      0x0000001DU
+#define REGION_2G      0x0000001EU
+#define REGION_4G      0x0000001FU
+
+#define REGION_EN  0x00000001U
+
+
+
+#define SHAREABLE				0x00000004U 	/*shareable */
+#define STRONG_ORDERD_SHARED	0x00000000U	/*strongly ordered, always shareable*/
+
+#define DEVICE_SHARED			0x00000001U	/*device, shareable*/
+#define DEVICE_NONSHARED		0x00000010U	/*device, non shareable*/
+
+#define NORM_NSHARED_WT_NWA		0x00000002U 	/*Outer and Inner write-through, no write-allocate non-shareable*/
+#define NORM_SHARED_WT_NWA		0x00000006U 	/*Outer and Inner write-through, no write-allocate shareable*/
+
+#define NORM_NSHARED_WB_NWA 	0x00000003U 	/*Outer and Inner write-back, no write-allocate non shareable*/
+#define NORM_SHARED_WB_NWA 		0x00000007U 	/*Outer and Inner write-back, no write-allocate shareable*/
+
+#define NORM_NSHARED_NCACHE 	0x00000008U 	/*Outer and Inner Non cacheable  non shareable*/
+#define NORM_SHARED_NCACHE 		0x0000000CU 	/*Outer and Inner Non cacheable shareable*/
+
+#define NORM_NSHARED_WB_WA 		0x0000000BU 	/*Outer and Inner write-back non shared*/
+#define NORM_SHARED_WB_WA 		0x0000000FU 	/*Outer and Inner write-back shared*/
+
+/* inner and outer cache policies can be combined for different combinations */
+
+#define NORM_IN_POLICY_NCACHE	0x00000020U	/*inner non cacheable*/
+#define NORM_IN_POLICY_WB_WA	0x00000021U	/*inner write back write allocate*/
+#define NORM_IN_POLICY_WT_NWA	0x00000022U	/*inner write through no write allocate*/
+#define NORM_IN_POLICY_WB_NWA	0x00000023U	/*inner write back no write allocate*/
+
+#define NORM_OUT_POLICY_NCACHE	0x00000020U	/*outer non cacheable*/
+#define NORM_OUT_POLICY_WB_WA	0x00000028U	/*outer write back write allocate*/
+#define NORM_OUT_POLICY_WT_NWA	0x00000030U	/*outer write through no write allocate*/
+#define NORM_OUT_POLICY_WB_NWA	0x00000038U	/*outer write back no write allocate*/
+
+#define NO_ACCESS				(0x00000000U<<8U)	/*No access*/
+#define PRIV_RW_USER_NA			(0x00000001U<<8U) /*Privileged access only*/
+#define PRIV_RW_USER_RO			(0x00000002U<<8U) /*Writes in User mode generate permission faults*/
+#define	PRIV_RW_USER_RW			(0x00000003UL<<8U)	/*Full Access*/
+#define PRIV_RO_USER_NA			(0x00000005U<<8U) /*Privileged read only*/
+#define PRIV_RO_USER_RO			(0x00000006U<<8U) /*Privileged/User read-only*/
+
+#define EXECUTE_NEVER  			(0x00000001U<<12U)  /* Bit 12*/
+
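+/*
+ * Illustrative sketch, not part of the imported Xilinx sources. The region
+ * size, memory attribute, access permission and execute-never values above
+ * are normally OR-ed together when MPU configuration code (not part of this
+ * import) describes a region, for example a 1 MB normal write-back region
+ * with full access:
+ *
+ * @code
+ *   u32 RegionAttr = NORM_NSHARED_WB_WA | PRIV_RW_USER_RW;
+ *   u32 RegionSize = REGION_1M | REGION_EN;
+ * @endcode
+ */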
+
+/* CP15 defines */
+
+/* C0 Register defines */
+#define XREG_CP15_MAIN_ID			"p15, 0, %0,  c0,  c0, 0"
+#define XREG_CP15_CACHE_TYPE			"p15, 0, %0,  c0,  c0, 1"
+#define XREG_CP15_TCM_TYPE			"p15, 0, %0,  c0,  c0, 2"
+#define XREG_CP15_TLB_TYPE			"p15, 0, %0,  c0,  c0, 3"
+#define XREG_CP15_MPU_TYPE			"p15, 0, %0,  c0,  c0, 4"
+#define XREG_CP15_MULTI_PROC_AFFINITY		"p15, 0, %0,  c0,  c0, 5"
+
+#define XREG_CP15_PROC_FEATURE_0		"p15, 0, %0,  c0,  c1, 0"
+#define XREG_CP15_PROC_FEATURE_1		"p15, 0, %0,  c0,  c1, 1"
+#define XREG_CP15_DEBUG_FEATURE_0		"p15, 0, %0,  c0,  c1, 2"
+#define XREG_CP15_MEMORY_FEATURE_0		"p15, 0, %0,  c0,  c1, 4"
+#define XREG_CP15_MEMORY_FEATURE_1		"p15, 0, %0,  c0,  c1, 5"
+#define XREG_CP15_MEMORY_FEATURE_2		"p15, 0, %0,  c0,  c1, 6"
+#define XREG_CP15_MEMORY_FEATURE_3		"p15, 0, %0,  c0,  c1, 7"
+
+#define XREG_CP15_INST_FEATURE_0		"p15, 0, %0,  c0,  c2, 0"
+#define XREG_CP15_INST_FEATURE_1		"p15, 0, %0,  c0,  c2, 1"
+#define XREG_CP15_INST_FEATURE_2		"p15, 0, %0,  c0,  c2, 2"
+#define XREG_CP15_INST_FEATURE_3		"p15, 0, %0,  c0,  c2, 3"
+#define XREG_CP15_INST_FEATURE_4		"p15, 0, %0,  c0,  c2, 4"
+#define XREG_CP15_INST_FEATURE_5		"p15, 0, %0,  c0,  c2, 5"
+
+#define XREG_CP15_CACHE_SIZE_ID			"p15, 1, %0,  c0,  c0, 0"
+#define XREG_CP15_CACHE_LEVEL_ID		"p15, 1, %0,  c0,  c0, 1"
+#define XREG_CP15_AUXILARY_ID			"p15, 1, %0,  c0,  c0, 7"
+
+#define XREG_CP15_CACHE_SIZE_SEL		"p15, 2, %0,  c0,  c0, 0"
+
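+/*
+ * Illustrative sketch, not part of the imported Xilinx sources. These operand
+ * strings are intended to be pasted into mrc/mcr inline assembly, e.g. through
+ * the mfcp()/mtcp() helpers from xpseudo_asm_gcc.h when building with GCC:
+ *
+ * @code
+ *   u32 MainId = mfcp(XREG_CP15_MAIN_ID);
+ *   mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
+ * @endcode
+ */
+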
+/* C1 Register Defines */
+#define XREG_CP15_SYS_CONTROL			"p15, 0, %0,  c1,  c0, 0"
+#define XREG_CP15_AUX_CONTROL			"p15, 0, %0,  c1,  c0, 1"
+#define XREG_CP15_CP_ACCESS_CONTROL		"p15, 0, %0,  c1,  c0, 2"
+
+
+/* XREG_CP15_CONTROL bit defines */
+#define XREG_CP15_CONTROL_TE_BIT		0x40000000U
+#define XREG_CP15_CONTROL_AFE_BIT		0x20000000U
+#define XREG_CP15_CONTROL_TRE_BIT		0x10000000U
+#define XREG_CP15_CONTROL_NMFI_BIT		0x08000000U
+#define XREG_CP15_CONTROL_EE_BIT		0x02000000U
+#define XREG_CP15_CONTROL_HA_BIT		0x00020000U
+#define XREG_CP15_CONTROL_RR_BIT		0x00004000U
+#define XREG_CP15_CONTROL_V_BIT			0x00002000U
+#define XREG_CP15_CONTROL_I_BIT			0x00001000U
+#define XREG_CP15_CONTROL_Z_BIT			0x00000800U
+#define XREG_CP15_CONTROL_SW_BIT		0x00000400U
+#define XREG_CP15_CONTROL_B_BIT			0x00000080U
+#define XREG_CP15_CONTROL_C_BIT			0x00000004U
+#define XREG_CP15_CONTROL_A_BIT			0x00000002U
+#define XREG_CP15_CONTROL_M_BIT			0x00000001U
+/* C2 Register Defines */
+/* Not Used */
+
+/* C3 Register Defines */
+/* Not Used */
+
+/* C4 Register Defines */
+/* Not Used */
+
+/* C5 Register Defines */
+#define XREG_CP15_DATA_FAULT_STATUS		"p15, 0, %0,  c5,  c0, 0"
+#define XREG_CP15_INST_FAULT_STATUS		"p15, 0, %0,  c5,  c0, 1"
+
+#define XREG_CP15_AUX_DATA_FAULT_STATUS		"p15, 0, %0,  c5,  c1, 0"
+#define XREG_CP15_AUX_INST_FAULT_STATUS		"p15, 0, %0,  c5,  c1, 1"
+
+/* C6 Register Defines */
+#define XREG_CP15_DATA_FAULT_ADDRESS		"p15, 0, %0,  c6,  c0, 0"
+#define XREG_CP15_INST_FAULT_ADDRESS		"p15, 0, %0,  c6,  c0, 2"
+
+#define XREG_CP15_MPU_REG_BASEADDR			"p15, 0, %0,  c6,  c1, 0"
+#define XREG_CP15_MPU_REG_SIZE_EN			"p15, 0, %0,  c6,  c1, 2"
+#define XREG_CP15_MPU_REG_ACCESS_CTRL		"p15, 0, %0,  c6,  c1, 4"
+
+#define XREG_CP15_MPU_MEMORY_REG_NUMBER			"p15, 0, %0,  c6,  c2, 0"
+
+/* C7 Register Defines */
+#define XREG_CP15_NOP				"p15, 0, %0,  c7,  c0, 4"
+
+#define XREG_CP15_INVAL_IC_POU			"p15, 0, %0,  c7,  c5, 0"
+#define XREG_CP15_INVAL_IC_LINE_MVA_POU		"p15, 0, %0,  c7,  c5, 1"
+
+/* The CP15 register access below has been deprecated in favor of the new
+ * isb instruction in Cortex R5.
+ */
+#define XREG_CP15_INST_SYNC_BARRIER		"p15, 0, %0,  c7,  c5, 4"
+#define XREG_CP15_INVAL_BRANCH_ARRAY		"p15, 0, %0,  c7,  c5, 6"
+#define XREG_CP15_INVAL_BRANCH_ARRAY_LINE		"p15, 0, %0,  c7,  c5, 7"
+
+#define XREG_CP15_INVAL_DC_LINE_MVA_POC		"p15, 0, %0,  c7,  c6, 1"
+#define XREG_CP15_INVAL_DC_LINE_SW		"p15, 0, %0,  c7,  c6, 2"
+
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POC		"p15, 0, %0,  c7, c10, 1"
+#define XREG_CP15_CLEAN_DC_LINE_SW		"p15, 0, %0,  c7, c10, 2"
+
+#define XREG_CP15_INVAL_DC_ALL		"p15, 0, %0,  c15, c5, 0"
+/* The next two CP15 register accesses below have been deprecated in favor
+ * of the new dsb and dmb instructions in Cortex R5.
+ */
+#define XREG_CP15_DATA_SYNC_BARRIER		"p15, 0, %0,  c7, c10, 4"
+#define XREG_CP15_DATA_MEMORY_BARRIER		"p15, 0, %0,  c7, c10, 5"
+
+#define XREG_CP15_CLEAN_DC_LINE_MVA_POU		"p15, 0, %0,  c7, c11, 1"
+
+#define XREG_CP15_NOP2				"p15, 0, %0,  c7, c13, 1"
+
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC	"p15, 0, %0,  c7, c14, 1"
+#define XREG_CP15_CLEAN_INVAL_DC_LINE_SW	"p15, 0, %0,  c7, c14, 2"
+
+/* C8 Register Defines */
+/* Not Used */
+
+
+/* C9 Register Defines */
+
+#define XREG_CP15_ATCM_REG_SIZE_ADDR		"p15, 0, %0,  c9, c1, 1"
+#define XREG_CP15_BTCM_REG_SIZE_ADDR		"p15, 0, %0,  c9, c1, 0"
+#define XREG_CP15_TCM_SELECTION				"p15, 0, %0,  c9, c2, 0"
+
+#define XREG_CP15_PERF_MONITOR_CTRL		"p15, 0, %0,  c9, c12, 0"
+#define XREG_CP15_COUNT_ENABLE_SET		"p15, 0, %0,  c9, c12, 1"
+#define XREG_CP15_COUNT_ENABLE_CLR		"p15, 0, %0,  c9, c12, 2"
+#define XREG_CP15_V_FLAG_STATUS			"p15, 0, %0,  c9, c12, 3"
+#define XREG_CP15_SW_INC			"p15, 0, %0,  c9, c12, 4"
+#define XREG_CP15_EVENT_CNTR_SEL		"p15, 0, %0,  c9, c12, 5"
+
+#define XREG_CP15_PERF_CYCLE_COUNTER		"p15, 0, %0,  c9, c13, 0"
+#define XREG_CP15_EVENT_TYPE_SEL		"p15, 0, %0,  c9, c13, 1"
+#define XREG_CP15_PERF_MONITOR_COUNT		"p15, 0, %0,  c9, c13, 2"
+
+#define XREG_CP15_USER_ENABLE			"p15, 0, %0,  c9, c14, 0"
+#define XREG_CP15_INTR_ENABLE_SET		"p15, 0, %0,  c9, c14, 1"
+#define XREG_CP15_INTR_ENABLE_CLR		"p15, 0, %0,  c9, c14, 2"
+
+/* C10 Register Defines */
+/* Not used */
+
+/* C11 Register Defines */
+/* Not used */
+
+/* C12 Register Defines */
+/* Not used */
+
+/* C13 Register Defines */
+#define XREG_CP15_CONTEXT_ID			"p15, 0, %0, c13,  c0, 1"
+#define USER_RW_THREAD_PID			"p15, 0, %0, c13,  c0, 2"
+#define USER_RO_THREAD_PID			"p15, 0, %0, c13,  c0, 3"
+#define USER_PRIV_THREAD_PID			"p15, 0, %0, c13,  c0, 4"
+
+/* C14 Register Defines */
+/* not used */
+
+/* C15 Register Defines */
+#define XREG_CP15_SEC_AUX_CTRL			"p15, 0, %0, c15,  c0, 0"
+
+
+
+
+/* MPE register definitions */
+#define XREG_FPSID				c0
+#define XREG_FPSCR				c1
+#define XREG_MVFR1				c6
+#define XREG_MVFR0				c7
+#define XREG_FPEXC				c8
+#define XREG_FPINST				c9
+#define XREG_FPINST2			c10
+
+/* FPSID bits */
+#define XREG_FPSID_IMPLEMENTER_BIT	(24U)
+#define XREG_FPSID_IMPLEMENTER_MASK	(0x000000FFU << XREG_FPSID_IMPLEMENTER_BIT)
+#define XREG_FPSID_SOFTWARE		(0X00000001U << 23U)
+#define XREG_FPSID_ARCH_BIT		(16U)
+#define XREG_FPSID_ARCH_MASK		(0x0000000FU  << XREG_FPSID_ARCH_BIT)
+#define XREG_FPSID_PART_BIT		(8U)
+#define XREG_FPSID_PART_MASK		(0x000000FFU << XREG_FPSID_PART_BIT)
+#define XREG_FPSID_VARIANT_BIT		(4U)
+#define XREG_FPSID_VARIANT_MASK		(0x0000000FU  << XREG_FPSID_VARIANT_BIT)
+#define XREG_FPSID_REV_BIT		(0U)
+#define XREG_FPSID_REV_MASK		(0x0000000FU  << XREG_FPSID_REV_BIT)
+
+/* FPSCR bits */
+#define XREG_FPSCR_N_BIT		(0X00000001U << 31U)
+#define XREG_FPSCR_Z_BIT		(0X00000001U << 30U)
+#define XREG_FPSCR_C_BIT		(0X00000001U << 29U)
+#define XREG_FPSCR_V_BIT		(0X00000001U << 28U)
+#define XREG_FPSCR_QC			(0X00000001U << 27U)
+#define XREG_FPSCR_AHP			(0X00000001U << 26U)
+#define XREG_FPSCR_DEFAULT_NAN		(0X00000001U << 25U)
+#define XREG_FPSCR_FLUSHTOZERO		(0X00000001U << 24U)
+#define XREG_FPSCR_ROUND_NEAREST	(0X00000000U << 22U)
+#define XREG_FPSCR_ROUND_PLUSINF	(0X00000001U << 22U)
+#define XREG_FPSCR_ROUND_MINUSINF	(0X00000002U << 22U)
+#define XREG_FPSCR_ROUND_TOZERO		(0X00000003U << 22U)
+#define XREG_FPSCR_RMODE_BIT		(22U)
+#define XREG_FPSCR_RMODE_MASK		(0X00000003U << XREG_FPSCR_RMODE_BIT)
+#define XREG_FPSCR_STRIDE_BIT		(20U)
+#define XREG_FPSCR_STRIDE_MASK		(0X00000003U << XREG_FPSCR_STRIDE_BIT)
+#define XREG_FPSCR_LENGTH_BIT		(16U)
+#define XREG_FPSCR_LENGTH_MASK		(0X00000007U << XREG_FPSCR_LENGTH_BIT)
+#define XREG_FPSCR_IDC			(0X00000001U << 7U)
+#define XREG_FPSCR_IXC			(0X00000001U << 4U)
+#define XREG_FPSCR_UFC			(0X00000001U << 3U)
+#define XREG_FPSCR_OFC			(0X00000001U << 2U)
+#define XREG_FPSCR_DZC			(0X00000001U << 1U)
+#define XREG_FPSCR_IOC			(0X00000001U << 0U)
+
+/* MVFR0 bits */
+#define XREG_MVFR0_RMODE_BIT		(28U)
+#define XREG_MVFR0_RMODE_MASK		(0x0000000FU << XREG_MVFR0_RMODE_BIT)
+#define XREG_MVFR0_SHORT_VEC_BIT	(24U)
+#define XREG_MVFR0_SHORT_VEC_MASK	(0x0000000FU << XREG_MVFR0_SHORT_VEC_BIT)
+#define XREG_MVFR0_SQRT_BIT		(20U)
+#define XREG_MVFR0_SQRT_MASK		(0x0000000FU << XREG_MVFR0_SQRT_BIT)
+#define XREG_MVFR0_DIVIDE_BIT		(16U)
+#define XREG_MVFR0_DIVIDE_MASK		(0x0000000FU << XREG_MVFR0_DIVIDE_BIT)
+#define XREG_MVFR0_EXEC_TRAP_BIT	(12U)
+#define XREG_MVFR0_EXEC_TRAP_MASK	(0x0000000FU << XREG_MVFR0_EXEC_TRAP_BIT)
+#define XREG_MVFR0_DP_BIT		(8U)
+#define XREG_MVFR0_DP_MASK		(0x0000000FU << XREG_MVFR0_DP_BIT)
+#define XREG_MVFR0_SP_BIT		(4U)
+#define XREG_MVFR0_SP_MASK		(0x0000000FU << XREG_MVFR0_SP_BIT)
+#define XREG_MVFR0_A_SIMD_BIT		(0U)
+#define XREG_MVFR0_A_SIMD_MASK		(0x0000000FU << XREG_MVFR0_A_SIMD_BIT)
+
+/* FPEXC bits */
+#define XREG_FPEXC_EX			(0X00000001U << 31U)
+#define XREG_FPEXC_EN			(0X00000001U << 30U)
+#define XREG_FPEXC_DEX			(0X00000001U << 29U)
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* XREG_CORTEXR5_H */
+
+/**
+ *@endcond
+ */
diff --git a/bsps/include/xil/bspconfig.h b/bsps/include/xil/bspconfig.h
new file mode 100644
index 0000000000..55a8df3add
--- /dev/null
+++ b/bsps/include/xil/bspconfig.h
@@ -0,0 +1 @@
+/* Intentional blank stub file for Xilinx driver compatibility. */
diff --git a/bsps/include/xil/microblaze/xil_cache.h b/bsps/include/xil/microblaze/xil_cache.h
new file mode 100644
index 0000000000..d279665751
--- /dev/null
+++ b/bsps/include/xil/microblaze/xil_cache.h
@@ -0,0 +1,392 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.h
+*
+* @addtogroup microblaze_cache_apis Microblaze Cache APIs
+* @{
+*
+*
+* The xil_cache.h file contains cache related driver functions (or macros)
+* that can be used to access the device.  The user should refer to the
+* hardware device specification for more details of the device operation.
+* The functions in this header file can be used across all Xilinx supported
+* processors.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00  hbm  07/28/09 Initial release
+* 3.02a sdm  10/24/11 Updated the file to include xparameters.h so that
+*                     the correct cache flush routines are used based on
+*                     whether the write-back or write-through caches are
+*                     used (cr #630532).
+* 3.10a asa  05/04/13 This version of MicroBlaze BSP adds support for system
+*					  cache/L2 cache. The existing/old APIs/macros in this
+*					  file are renamed to imply that they deal with L1 cache.
+*					  New macros/APIs are added to address similar features for
+*					  L2 cache. Users can include this file in their application
+*					  to use the various cache related APIs. These changes are
+*					  done for implementing PR #697214.
+*
+* </pre>
+*
+*
+******************************************************************************/
+
+#ifndef XIL_CACHE_H
+#define XIL_CACHE_H
+
+#if defined XENV_VXWORKS
+/* VxWorks environment */
+#error "Unknown processor / architecture. Must be PPC for VxWorks."
+#else
+/* standalone environment */
+
+#include "mb_interface.h"
+#include "xil_types.h"
+#include "xparameters.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the entire L1 data cache. If the cacheline is modified
+*            (dirty), the modified contents are lost.
+*
+*
+* @return   None.
+*
+* @note		Processor must be in real mode.
+****************************************************************************/
+#define Xil_L1DCacheInvalidate() microblaze_invalidate_dcache()
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the entire L2 data cache. If the cacheline is modified
+*           (dirty), the modified contents are lost.
+*
+* @return   None.
+*
+* @note		Processor must be in real mode.
+****************************************************************************/
+#define Xil_L2CacheInvalidate() microblaze_invalidate_cache_ext()
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the L1 data cache for the given address range.
+*           If the bytes specified by the address (Addr) are cached by the L1
+*           data cache, the cacheline containing that byte is invalidated. If
+*           the cacheline is modified (dirty), the modified contents are lost.
+*
+* @param    Addr is address of range to be invalidated.
+* @param    Len is the length in bytes to be invalidated.
+*
+* @return   None.
+*
+* @note     Processor must be in real mode.
+****************************************************************************/
+#define Xil_L1DCacheInvalidateRange(Addr, Len) \
+			microblaze_invalidate_dcache_range((Addr), (Len))
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the L1 data cache for the given address range.
+*           If the bytes specified by the address (Addr) are cached by the
+*           L1 data cache, the cacheline containing that byte is invalidated.
+*           If the cacheline is modified (dirty), the modified contents are lost.
+*
+* @param    Addr: address of range to be invalidated.
+* @param    Len: length in bytes to be invalidated.
+*
+* @return   None.
+*
+* @note     Processor must be in real mode.
+****************************************************************************/
+#define Xil_L2CacheInvalidateRange(Addr, Len) \
+		microblaze_invalidate_cache_ext_range((Addr), (Len))
+
+/****************************************************************************/
+/**
+* @brief   Flush the L1 data cache for the given address range.
+*          If the bytes specified by the address (Addr) are cached by the
+*          data cache and modified (dirty), the cacheline will be written
+*          to system memory. The cacheline will also be invalidated.
+*
+* @param    Addr: the starting address of the range to be flushed.
+* @param    Len: length in byte to be flushed.
+*
+* @return   None.
+*
+****************************************************************************/
+#if (XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK == 1)
+#   define Xil_L1DCacheFlushRange(Addr, Len) \
+		microblaze_flush_dcache_range((Addr), (Len))
+#else
+#   define Xil_L1DCacheFlushRange(Addr, Len) \
+		microblaze_invalidate_dcache_range((Addr), (Len))
+#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK */
+
+/****************************************************************************/
+/**
+* @brief    Flush the L2 data cache for the given address range.
+*           If the bytes specified by the address (Addr) are cached by the
+*           data cache and modified (dirty), the cacheline will be
+*           written to system memory. The cacheline will also be invalidated.
+*
+* @param   Addr: the starting address of the range to be flushed.
+* @param   Len: length in byte to be flushed.
+*
+* @return   None.
+*
+****************************************************************************/
+#define Xil_L2CacheFlushRange(Addr, Len) \
+		microblaze_flush_cache_ext_range((Addr), (Len))
+
+/****************************************************************************/
+/**
+* @brief    Flush the entire L1 data cache. If any cacheline is dirty, the
+*           cacheline will be written to system memory. The entire data cache
+*           will be invalidated.
+*
+* @return   None.
+*
+****************************************************************************/
+#if (XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK == 1)
+#   define Xil_L1DCacheFlush() microblaze_flush_dcache()
+#else
+#   define Xil_L1DCacheFlush() microblaze_invalidate_dcache()
+#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK */
+
+/****************************************************************************/
+/**
+* @brief    Flush the entire L2 data cache. If any cacheline is dirty, the
+*           cacheline will be written to system memory. The entire data cache
+*           will be invalidated.
+*
+* @return   None.
+*
+****************************************************************************/
+#define Xil_L2CacheFlush() microblaze_flush_cache_ext()
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the instruction cache for the given address range.
+*
+* @param    Addr is the address of the range to be invalidated.
+* @param    Len is the length in bytes to be invalidated.
+*
+* @return   None.
+*
+****************************************************************************/
+#define Xil_L1ICacheInvalidateRange(Addr, Len) \
+			microblaze_invalidate_icache_range((Addr), (Len))
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the entire instruction cache.
+*
+* @return   None.
+*
+****************************************************************************/
+#define Xil_L1ICacheInvalidate() \
+			microblaze_invalidate_icache()
+
+
+/****************************************************************************/
+/**
+*
+* @brief    Enable the L1 data cache.
+*
+* @return   None.
+*
+* @note     This is processor specific.
+*
+****************************************************************************/
+#define Xil_L1DCacheEnable() \
+			microblaze_enable_dcache()
+
+/****************************************************************************/
+/**
+*
+* @brief    Disable the L1 data cache.
+*
+* @return   None.
+*
+* @note     This is processor specific.
+*
+****************************************************************************/
+#define Xil_L1DCacheDisable() \
+			microblaze_disable_dcache()
+
+/****************************************************************************/
+/**
+*
+* @brief    Enable the instruction cache.
+*
+* @return   None.
+*
+* @note     This is processor specific.
+*
+****************************************************************************/
+#define Xil_L1ICacheEnable() \
+			microblaze_enable_icache()
+
+/****************************************************************************/
+/**
+*
+* @brief    Disable the L1 Instruction cache.
+*
+* @return   None.
+*
+* @note     This is processor specific.
+*
+****************************************************************************/
+#define Xil_L1ICacheDisable() \
+			microblaze_disable_icache()
+
+/****************************************************************************/
+/**
+*
+* @brief    Enable the data cache.
+*
+* @return   None.
+*
+****************************************************************************/
+#define Xil_DCacheEnable() Xil_L1DCacheEnable()
+
+/****************************************************************************/
+/**
+*
+* @brief    Enable the instruction cache.
+*
+* @return   None.
+*
+*
+****************************************************************************/
+#define Xil_ICacheEnable() Xil_L1ICacheEnable()
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the entire Data cache.
+*
+* @return	None.
+*
+****************************************************************************/
+#define Xil_DCacheInvalidate() \
+	Xil_L2CacheInvalidate(); \
+	Xil_L1DCacheInvalidate();
+
+
+/****************************************************************************/
+/**
+*
+* @brief    Invalidate the Data cache for the given address range.
+*           If the bytes specified by the address (adr) are cached by the
+*           Data cache, the cacheline containing that byte is invalidated.
+*           If the cacheline is modified (dirty), the modified contents are
+*           lost and are NOT written to system memory before the line is
+*           invalidated.
+*
+* @param	Addr: Start address of range to be invalidated.
+* @param	Len: Length of range to be invalidated in bytes.
+*
+* @return	None.
+*
+****************************************************************************/
+#define Xil_DCacheInvalidateRange(Addr, Len) \
+	Xil_L2CacheInvalidateRange((Addr), (Len)); \
+	Xil_L1DCacheInvalidateRange((Addr), (Len));
+
+
+/****************************************************************************/
+/**
+*
+* @brief    Flush the entire Data cache.
+*
+* @return	None.
+*
+****************************************************************************/
+#define Xil_DCacheFlush() \
+	Xil_L2CacheFlush(); \
+	Xil_L1DCacheFlush();
+
+/****************************************************************************/
+/**
+* @brief     Flush the Data cache for the given address range.
+*            If the bytes specified by the address (adr) are cached by the
+*            Data cache, the cacheline containing that byte is invalidated.
+*            If the cacheline is modified (dirty), it is written to system
+*            memory first before the line is invalidated.
+*
+* @param	Addr: Start address of range to be flushed.
+* @param	Len: Length of range to be flushed in bytes.
+*
+* @return	None.
+*
+****************************************************************************/
+#define Xil_DCacheFlushRange(Addr, Len) \
+	Xil_L2CacheFlushRange((Addr), (Len)); \
+	Xil_L1DCacheFlushRange((Addr), (Len));
+
+
+/****************************************************************************/
+/**
+* @brief    Invalidate the entire instruction cache.
+*
+* @return	None.
+*
+****************************************************************************/
+#define Xil_ICacheInvalidate() \
+	Xil_L2CacheInvalidate(); \
+	Xil_L1ICacheInvalidate();
+
+
+/****************************************************************************/
+/**
+* @brief     Invalidate the instruction cache for the given address range.
+*            If the bytes specified by the address (adr) are cached by the
+*            instruction cache, the cacheline containing that byte is invalidated.
+*            If the cacheline is modified (dirty), the modified contents are
+*            lost and are NOT written to system memory before the line is
+*            invalidated.
+*
+* @param	Addr: Start address of range to be invalidated.
+* @param	Len: Length of range to be invalidated in bytes.
+*
+* @return	None.
+*
+****************************************************************************/
+#define Xil_ICacheInvalidateRange(Addr, Len) \
+	Xil_L2CacheInvalidateRange((Addr), (Len)); \
+	Xil_L1ICacheInvalidateRange((Addr), (Len));
+
+void Xil_DCacheDisable(void);
+void Xil_ICacheDisable(void);
+
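+/*
+ * Illustrative sketch, not part of the imported Xilinx sources. Note that the
+ * combined L1/L2 macros above expand to two statements, so they should only be
+ * used as complete statements (for example inside braces), not as the body of
+ * an unbraced if/else. Typical usage with a hypothetical buffer:
+ *
+ * @code
+ *   static u8 RxBuffer[4096];
+ *
+ *   Xil_DCacheFlushRange((UINTPTR)RxBuffer, sizeof(RxBuffer));
+ *   ... let the device write into RxBuffer ...
+ *   Xil_DCacheInvalidateRange((UINTPTR)RxBuffer, sizeof(RxBuffer));
+ * @endcode
+ */
+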
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+#endif
+/**
+* @} End of "addtogroup microblaze_cache_apis".
+*/
diff --git a/bsps/include/xil/sleep.h b/bsps/include/xil/sleep.h
new file mode 100644
index 0000000000..73b2ea026d
--- /dev/null
+++ b/bsps/include/xil/sleep.h
@@ -0,0 +1,99 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2022 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+
+/*****************************************************************************/
+/**
+* @file sleep.h
+*
+*  This header file contains ARM Cortex A53, A9, R5, and MicroBlaze specific
+*  sleep-related APIs.
+*
+* <pre>
+* MODIFICATION HISTORY :
+*
+* Ver   Who  Date	 Changes
+* ----- ---- -------- -------------------------------------------------------
+* 6.6   srm  11/02/17 Added processor specific sleep routines
+*								 function prototypes.
+* 7.7	sk   01/10/22 Typecast sleep declaration argument from unsigned int to
+* 		      u32 to fix misra_c_2012_directive_4_6 violation.
+* 7.7	sk   01/10/22 Modify the return type of sleep_R5 and usleep_R5 from
+* 		      unsigned to void to fix misra_c_2012_rule_17_7 violation.
+* 7.7	sk   03/02/22 Update usleep_R5 and usleep parameter types to fix misra_
+*		      c_2012_directive_4_6 violation.
+*
+* </pre>
+*
+******************************************************************************/
+
+#ifndef SLEEP_H
+#define SLEEP_H
+
+#include "xil_types.h"
+#include "xil_io.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*****************************************************************************/
+/**
+*
+* This macro polls an address periodically until a condition is met or the
+* timeout expires.
+* The timeout is handled in units of 100us: the minimum timeout is 100us, so a
+* timeout of less than 100us still waits for 100us, and a timeout that is not
+* a multiple of 100us is rounded up to the next multiple of 100us.
+*
+* @param            IO_func - accessor function to read the register contents.
+*                   Depends on the register width.
+* @param            ADDR - Address to be polled
+* @param            VALUE - variable to read the value
+* @param            COND - Condition to checked (usually involves VALUE)
+* @param            TIMEOUT_US - timeout in micro seconds
+*
+* @return           0 - when the condition is met
+*                   -1 - when the condition is not met till the timeout period
+*
+* @note             none
+*
+*****************************************************************************/
+#define Xil_poll_timeout(IO_func, ADDR, VALUE, COND, TIMEOUT_US) \
+ ( {	  \
+	u64 timeout = TIMEOUT_US/100;    \
+	if(TIMEOUT_US%100!=0)	\
+		timeout++;   \
+	for(;;) { \
+		VALUE = IO_func(ADDR); \
+		if(COND) \
+			break; \
+		else {    \
+			usleep(100);  \
+			timeout--; \
+			if(timeout==0) \
+			break;  \
+		}  \
+	}    \
+	(timeout>0) ? 0 : -1;  \
+ }  )
+
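+/*
+ * Illustrative sketch, not part of the imported Xilinx sources. Polling a
+ * hypothetical status register until a (hypothetical) done bit is set, with a
+ * timeout of roughly 1 ms:
+ *
+ * @code
+ *   u32 RegVal;
+ *   int TimedOut = Xil_poll_timeout(Xil_In32, STATUS_REG_ADDR, RegVal,
+ *                                   (RegVal & STATUS_DONE_MASK) != 0U, 1000U);
+ * @endcode
+ */
+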
+void usleep(ULONG useconds);
+void sleep(u32 seconds);
+void usleep_R5(ULONG useconds);
+void sleep_R5(u32 seconds);
+int usleep_MB(unsigned long useconds);
+unsigned sleep_MB(unsigned int seconds);
+int usleep_A53(unsigned long useconds);
+unsigned sleep_A53(unsigned int seconds);
+int usleep_A9(unsigned long useconds);
+unsigned sleep_A9(unsigned int seconds);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/bsps/include/xil/xbasic_types.h b/bsps/include/xil/xbasic_types.h
new file mode 100644
index 0000000000..99b137503e
--- /dev/null
+++ b/bsps/include/xil/xbasic_types.h
@@ -0,0 +1,113 @@
+/******************************************************************************
+* Copyright (c) 2010 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xbasic_types.h
+*
+*
+* @note  Dummy File for backwards compatibility
+*
+
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a adk   1/31/14  Added in bsp common folder for backward compatibility
+* 7.0   aru   01/21/19 Modified the typedef of u32,u16,u8
+* 7.0 	aru   02/06/19 Included stdint.h and stddef.h
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XBASIC_TYPES_H	/* prevent circular inclusions */
+#define XBASIC_TYPES_H	/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stddef.h>
+
+/** @name Legacy types
+ * Deprecated legacy types.
+ * @{
+ */
+typedef uint8_t	Xuint8;		/**< unsigned 8-bit */
+typedef char		Xint8;		/**< signed 8-bit */
+typedef uint16_t	Xuint16;	/**< unsigned 16-bit */
+typedef short		Xint16;		/**< signed 16-bit */
+typedef uint32_t	Xuint32;	/**< unsigned 32-bit */
+typedef long		Xint32;		/**< signed 32-bit */
+typedef float		Xfloat32;	/**< 32-bit floating point */
+typedef double		Xfloat64;	/**< 64-bit double precision FP */
+typedef unsigned long	Xboolean;	/**< boolean (XTRUE or XFALSE) */
+
+#if !defined __XUINT64__
+typedef struct
+{
+	Xuint32 Upper;
+	Xuint32 Lower;
+} Xuint64;
+#endif
+
+/** @name New types
+ * New simple types.
+ * @{
+ */
+#ifndef __KERNEL__
+#ifndef XIL_TYPES_H
+typedef Xuint32         u32;
+typedef Xuint16         u16;
+typedef Xuint8          u8;
+#endif
+#else
+#include <linux/types.h>
+#endif
+
+#ifndef TRUE
+#  define TRUE		1U
+#endif
+
+#ifndef FALSE
+#  define FALSE		0U
+#endif
+
+#ifndef NULL
+#define NULL		0U
+#endif
+
+/*
+ * Xilinx NULL, TRUE and FALSE legacy support. Deprecated.
+ * Please use NULL, TRUE and FALSE
+ */
+#define XNULL		NULL
+#define XTRUE		TRUE
+#define XFALSE		FALSE
+
+/*
+ * This file is deprecated and users
+ * should use xil_types.h and xil_assert.h\n\r
+ */
+#warning  The xbasic_types.h file is deprecated and users should use xil_types.h and xil_assert.h
+#warning  Please refer to the Standalone BSP UG647 for further details
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* end of protection macro */
+
+/**
+ *@endcond
+ */
diff --git a/bsps/include/xil/xil_assert.h b/bsps/include/xil/xil_assert.h
new file mode 100644
index 0000000000..e8b87b59f2
--- /dev/null
+++ b/bsps/include/xil/xil_assert.h
@@ -0,0 +1,176 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_assert.h
+*
+* @addtogroup common_assert_apis Assert APIs and Macros
+*
+* The xil_assert.h file contains assert related functions and macros.
+* Assert APIs/Macros specify that an application program satisfies certain
+* conditions at particular points in its execution. These functions can be
+* used by application programs to ensure that application code satisfies
+* certain conditions.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm  07/14/09 First release
+* 6.0   kvn  05/31/16 Make Xil_AsserWait a global variable
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XIL_ASSERT_H	/* prevent circular inclusions */
+#define XIL_ASSERT_H	/* by using protection macros */
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+
+/************************** Constant Definitions *****************************/
+
+#define XIL_ASSERT_NONE     0U
+#define XIL_ASSERT_OCCURRED 1U
+#define XNULL NULL
+
+extern u32 Xil_AssertStatus;
+extern s32 Xil_AssertWait;
+extern void Xil_Assert(const char8 *File, s32 Line);
+/**
+ *@endcond
+ */
+void XNullHandler(void *NullParameter);
+
+/**
+ * This data type defines a callback to be invoked when an
+ * assert occurs. The callback is invoked only when asserts are enabled
+ */
+typedef void (*Xil_AssertCallback) (const char8 *File, s32 Line);
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+#ifndef NDEBUG
+
+/*****************************************************************************/
+/**
+* @brief    This assert macro is to be used for void functions. This in
+*           conjunction with the Xil_AssertWait boolean can be used to
+*           accommodate tests so that asserts which fail allow execution to
+*           continue.
+*
+* @param    Expression: expression to be evaluated. If it evaluates to
+*           false, the assert occurs.
+*
+* @return   Returns void unless the Xil_AssertWait variable is true, in which
+*           case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertVoid(Expression)                \
+{                                                  \
+    if (Expression) {                              \
+        Xil_AssertStatus = XIL_ASSERT_NONE;       \
+    } else {                                       \
+        Xil_Assert(__FILE__, __LINE__);            \
+        Xil_AssertStatus = XIL_ASSERT_OCCURRED;   \
+        return;                                    \
+    }                                              \
+}
+
+/*****************************************************************************/
+/**
+* @brief    This assert macro is to be used for functions that do return a
+*           value. This in conjunction with the Xil_AssertWait boolean can be
+*           used to accommodate tests so that asserts which fail allow execution
+*           to continue.
+*
+* @param    Expression: expression to be evaluated. If it evaluates to false,
+*           the assert occurs.
+*
+* @return   Returns 0 unless the Xil_AssertWait variable is true, in which
+* 	        case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertNonvoid(Expression)             \
+{                                                  \
+    if (Expression) {                              \
+        Xil_AssertStatus = XIL_ASSERT_NONE;       \
+    } else {                                       \
+        Xil_Assert(__FILE__, __LINE__);            \
+        Xil_AssertStatus = XIL_ASSERT_OCCURRED;   \
+        return 0;                                  \
+    }                                              \
+}
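+
+/*
+ * Illustrative sketch, not part of the imported Xilinx sources. A typical
+ * driver entry point validates its arguments with these macros; the function,
+ * type and constant names below are hypothetical:
+ *
+ * @code
+ *   s32 XDev_SetOption(XDev *InstancePtr, u32 Option)
+ *   {
+ *       Xil_AssertNonvoid(InstancePtr != NULL);
+ *       Xil_AssertNonvoid(Option <= XDEV_MAX_OPTION);
+ *       ...
+ *   }
+ * @endcode
+ */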
+
+/*****************************************************************************/
+/**
+* @brief     Always assert. This assert macro is to be used for void functions.
+*            Use for instances where an assert should always occur.
+*
+* @return    Returns void unless the Xil_AssertWait variable is true, in which
+*	         case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertVoidAlways()                   \
+{                                                  \
+   Xil_Assert(__FILE__, __LINE__);                 \
+   Xil_AssertStatus = XIL_ASSERT_OCCURRED;        \
+   return;                                         \
+}
+
+/*****************************************************************************/
+/**
+* @brief   Always assert. This assert macro is to be used for functions that
+*          do return a value. Use for instances where an assert should always
+*          occur.
+*
+* @return Returns void unless the Xil_AssertWait variable is true, in which
+*	      case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertNonvoidAlways()                \
+{                                                  \
+   Xil_Assert(__FILE__, __LINE__);                 \
+   Xil_AssertStatus = XIL_ASSERT_OCCURRED;        \
+   return 0;                                       \
+}
+
+
+#else
+
+#define Xil_AssertVoid(Expression)
+#define Xil_AssertVoidAlways()
+#define Xil_AssertNonvoid(Expression)
+#define Xil_AssertNonvoidAlways()
+
+#endif
+
+/************************** Function Prototypes ******************************/
+
+void Xil_AssertSetCallback(Xil_AssertCallback Routine);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* end of protection macro */
+/**
+* @} End of "addtogroup common_assert_apis".
+*/
diff --git a/bsps/include/xil/xil_exception.h b/bsps/include/xil/xil_exception.h
new file mode 100644
index 0000000000..55a8df3add
--- /dev/null
+++ b/bsps/include/xil/xil_exception.h
@@ -0,0 +1 @@
+/* Intentional blank stub file for Xilinx driver compatibility. */
diff --git a/bsps/include/xil/xil_io.h b/bsps/include/xil/xil_io.h
new file mode 100644
index 0000000000..853ef6bc76
--- /dev/null
+++ b/bsps/include/xil/xil_io.h
@@ -0,0 +1,412 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_io.h
+*
+* @addtogroup common_io_interfacing_apis Register IO interfacing APIs
+*
+* The xil_io.h file contains the interface for the general I/O component, which
+* encapsulates the Input/Output functions for the processors that do not
+* require any special I/O handling.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.00 	pkp  	 05/29/14 First release
+* 6.00  mus      08/19/16 Remove checking of __LITTLE_ENDIAN__ flag for
+*                         ARM processors
+* 7.20  har      01/03/20 Added Xil_SecureOut32 for avoiding blindwrite for
+*                         CR-1049218
+* 7.30  kpt      09/21/20 Moved Xil_EndianSwap16 and Xil_EndianSwap32 to
+*                         xil_io.h and made them as static inline
+*       am       10/13/20 Changed the return type of Xil_SecureOut32 function
+*                         from u32 to int
+* 7.50  dp       02/12/21 Fix compilation error in Xil_EndianSwap32() that occur
+*                         when -Werror=conversion compiler flag is enabled
+* 7.5   mus      05/17/21 Update the functions with comments. It fixes CR#1067739.
+*
+* </pre>
+******************************************************************************/
+
+#ifndef XIL_IO_H           /* prevent circular inclusions */
+#define XIL_IO_H           /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_printf.h"
+#include "xstatus.h"
+
+#if defined (__MICROBLAZE__)
+#include "mb_interface.h"
+#else
+#include "xpseudo_asm.h"
+#endif
+
+/************************** Function Prototypes ******************************/
+#ifdef ENABLE_SAFETY
+extern u32 XStl_RegUpdate(u32 RegAddr, u32 RegVal);
+#endif
+
+/***************** Macros (Inline Functions) Definitions *********************/
+#if defined __GNUC__
+#if defined (__MICROBLAZE__)
+#  define INST_SYNC		mbar(0)
+#  define DATA_SYNC		mbar(1)
+# else
+#  define SYNCHRONIZE_IO	dmb()
+#  define INST_SYNC		isb()
+#  define DATA_SYNC		dsb()
+# endif
+#else
+# define SYNCHRONIZE_IO
+# define INST_SYNC
+# define DATA_SYNC
+#endif
+
+#if defined (__GNUC__) || defined (__ICCARM__) || defined (__MICROBLAZE__)
+#define INLINE inline
+#else
+#define INLINE __inline
+#endif
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an input operation for a memory location by reading
+*           from the specified address and returning the 8 bit Value read from
+*            that address.
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 8 bit Value read from the specified input address.
+
+*
+******************************************************************************/
+static INLINE u8 Xil_In8(UINTPTR Addr)
+{
+	return *(volatile u8 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an input operation for a memory location by reading from
+*           the specified address and returning the 16 bit Value read from that
+*           address.
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 16 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u16 Xil_In16(UINTPTR Addr)
+{
+	return *(volatile u16 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an input operation for a memory location by
+*           reading from the specified address and returning the 32 bit Value
+*           read  from that address.
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 32 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u32 Xil_In32(UINTPTR Addr)
+{
+	return *(volatile u32 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief     Performs an input operation for a memory location by reading the
+*            64 bit Value from that address.
+*
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 64 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u64 Xil_In64(UINTPTR Addr)
+{
+	return *(volatile u64 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by
+*           writing the 8 bit Value to the specified address.
+*
+* @param	Addr: contains the address to perform the output operation
+* @param	Value: contains the 8 bit Value to be written at the specified
+*           address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out8(UINTPTR Addr, u8 Value)
+{
+	/* write 8 bit value to specified address */
+	volatile u8 *LocalAddr = (volatile u8 *)Addr;
+	*LocalAddr = Value;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by writing the
+*            16 bit Value to the specified address.
+*
+* @param	Addr contains the address to perform the output operation
+* @param	Value contains the Value to be written at the specified address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out16(UINTPTR Addr, u16 Value)
+{
+	/* write 16 bit value to specified address */
+	volatile u16 *LocalAddr = (volatile u16 *)Addr;
+	*LocalAddr = Value;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by writing the
+*           32 bit Value to the specified address.
+*
+* @param	Addr contains the address to perform the output operation
+* @param	Value contains the 32 bit Value to be written at the specified
+*           address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out32(UINTPTR Addr, u32 Value)
+{
+	/* write 32 bit value to specified address */
+#ifndef ENABLE_SAFETY
+	volatile u32 *LocalAddr = (volatile u32 *)Addr;
+	*LocalAddr = Value;
+#else
+	XStl_RegUpdate(Addr, Value);
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by writing the
+*           64 bit Value to the specified address.
+*
+* @param	Addr contains the address to perform the output operation
+* @param	Value contains 64 bit Value to be written at the specified address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out64(UINTPTR Addr, u64 Value)
+{
+	/* write 64 bit value to specified address */
+	volatile u64 *LocalAddr = (volatile u64 *)Addr;
+	*LocalAddr = Value;
+}
+
+/*****************************************************************************/
+/**
+ *
+ * @brief	Performs an output operation for a memory location by writing the
+ *       	32 bit Value to the specified address and then reading it
+ *       	back to verify the value written in the register.
+ *
+ * @param	Addr contains the address to perform the output operation
+ * @param	Value contains 32 bit Value to be written at the specified address
+ *
+ * @return	Returns Status
+ *        	- XST_SUCCESS on success
+ *        	- XST_FAILURE on failure
+ *
+ *****************************************************************************/
+static INLINE int Xil_SecureOut32(UINTPTR Addr, u32 Value)
+{
+	int Status = XST_FAILURE;
+	u32 ReadReg;
+	u32 ReadRegTemp;
+
+	/* writing 32 bit value to specified address */
+	Xil_Out32(Addr, Value);
+
+	/* verify value written to specified address with multiple reads */
+	ReadReg = Xil_In32(Addr);
+	ReadRegTemp = Xil_In32(Addr);
+
+	if( (ReadReg == Value) && (ReadRegTemp == Value) ) {
+		Status = XST_SUCCESS;
+	}
+
+	return Status;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Perform a 16-bit endian conversion.
+*
+* @param	Data: 16 bit value to be converted
+*
+* @return	16 bit Data with converted endianness
+*
+******************************************************************************/
+static INLINE __attribute__((always_inline)) u16 Xil_EndianSwap16(u16 Data)
+{
+	return (u16) (((Data & 0xFF00U) >> 8U) | ((Data & 0x00FFU) << 8U));
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Perform a 32-bit endian conversion.
+*
+* @param	Data: 32 bit value to be converted
+*
+* @return	32 bit data with converted endianness
+*
+******************************************************************************/
+static INLINE __attribute__((always_inline)) u32 Xil_EndianSwap32(u32 Data)
+{
+	u16 LoWord;
+	u16 HiWord;
+
+	/* get each of the half words from the 32 bit word */
+
+	LoWord = (u16) (Data & 0x0000FFFFU);
+	HiWord = (u16) ((Data & 0xFFFF0000U) >> 16U);
+
+	/* byte swap each of the 16 bit half words */
+
+	LoWord = (u16)(((LoWord & 0xFF00U) >> 8U) | ((LoWord & 0x00FFU) << 8U));
+	HiWord = (u16)(((HiWord & 0xFF00U) >> 8U) | ((HiWord & 0x00FFU) << 8U));
+
+	/* swap the half words before returning the value */
+
+	return ((((u32)LoWord) << (u32)16U) | (u32)HiWord);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+# define Xil_In16LE	Xil_In16
+# define Xil_In32LE	Xil_In32
+# define Xil_Out16LE	Xil_Out16
+# define Xil_Out32LE	Xil_Out32
+# define Xil_Htons	Xil_EndianSwap16
+# define Xil_Htonl	Xil_EndianSwap32
+# define Xil_Ntohs	Xil_EndianSwap16
+# define Xil_Ntohl	Xil_EndianSwap32
+# else
+# define Xil_In16BE	Xil_In16
+# define Xil_In32BE	Xil_In32
+# define Xil_Out16BE	Xil_Out16
+# define Xil_Out32BE	Xil_Out32
+# define Xil_Htons(Data) (Data)
+# define Xil_Htonl(Data) (Data)
+# define Xil_Ntohs(Data) (Data)
+# define Xil_Ntohl(Data) (Data)
+#endif
+#else
+# define Xil_In16LE	Xil_In16
+# define Xil_In32LE	Xil_In32
+# define Xil_Out16LE	Xil_Out16
+# define Xil_Out32LE	Xil_Out32
+# define Xil_Htons	Xil_EndianSwap16
+# define Xil_Htonl	Xil_EndianSwap32
+# define Xil_Ntohs	Xil_EndianSwap16
+# define Xil_Ntohl	Xil_EndianSwap32
+#endif
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE u16 Xil_In16BE(UINTPTR Addr)
+#else
+static INLINE u16 Xil_In16LE(UINTPTR Addr)
+#endif
+#else
+static INLINE u16 Xil_In16BE(UINTPTR Addr)
+#endif
+{
+	u16 value = Xil_In16(Addr);
+	return Xil_EndianSwap16(value);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE u32 Xil_In32BE(UINTPTR Addr)
+#else
+static INLINE u32 Xil_In32LE(UINTPTR Addr)
+#endif
+#else
+static INLINE u32 Xil_In32BE(UINTPTR Addr)
+#endif
+{
+	u32 value = Xil_In32(Addr);
+	return Xil_EndianSwap32(value);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE void Xil_Out16BE(UINTPTR Addr, u16 Value)
+#else
+static INLINE void Xil_Out16LE(UINTPTR Addr, u16 Value)
+#endif
+#else
+static INLINE void Xil_Out16BE(UINTPTR Addr, u16 Value)
+#endif
+{
+	Value = Xil_EndianSwap16(Value);
+	Xil_Out16(Addr, Value);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE void Xil_Out32BE(UINTPTR Addr, u32 Value)
+#else
+static INLINE void Xil_Out32LE(UINTPTR Addr, u32 Value)
+#endif
+#else
+static INLINE void Xil_Out32BE(UINTPTR Addr, u32 Value)
+#endif
+{
+	Value = Xil_EndianSwap32(Value);
+	Xil_Out32(Addr, Value);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+/**
+* @} End of "addtogroup common_io_interfacing_apis".
+*/
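
The accessors in xil_io.h are plain volatile loads and stores, so a typical driver
does a read-modify-write of a memory-mapped register and, where the write must be
confirmed, uses Xil_SecureOut32() and checks its status. A minimal sketch follows;
the base address, register offset, and bit name are made up for illustration and
do not come from any real device:

    #include "xil_io.h"
    #include "xstatus.h"

    #define MY_DEV_BASE  0x40000000U   /* hypothetical device base address */
    #define MY_CTRL_OFF  0x00000004U   /* hypothetical control register offset */
    #define MY_CTRL_EN   0x00000001U   /* hypothetical enable bit */

    static int my_dev_enable(void)
    {
        u32 reg;

        /* read-modify-write of the memory-mapped control register */
        reg = Xil_In32(MY_DEV_BASE + MY_CTRL_OFF);
        reg |= MY_CTRL_EN;

        /* write back, then read the register twice to confirm the value stuck */
        return Xil_SecureOut32(MY_DEV_BASE + MY_CTRL_OFF, reg);
    }

The return value is an ordinary status code: XST_SUCCESS when both read-backs
match the written value, XST_FAILURE otherwise.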
diff --git a/bsps/include/xil/xil_mem.h b/bsps/include/xil/xil_mem.h
new file mode 100644
index 0000000000..d6bc637a94
--- /dev/null
+++ b/bsps/include/xil/xil_mem.h
@@ -0,0 +1,47 @@
+/******************************************************************************/
+/**
+* Copyright (c) 2015 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/****************************************************************************/
+/**
+* @file xil_mem.h
+*
+* @addtogroup common_mem_operation_api Customized APIs for Memory Operations
+*
+* The xil_mem.h file contains prototype for functions related
+* to memory operations. These APIs are applicable for all processors supported
+* by Xilinx.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 6.1   nsk      11/07/16 First release.
+* 7.0   mus      01/07/19 Add cpp extern macro
+*
+* </pre>
+*
+*****************************************************************************/
+#ifndef XIL_MEM_H		/* prevent circular inclusions */
+#define XIL_MEM_H		/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************** Function Prototypes *****************************/
+
+void Xil_MemCpy(void* dst, const void* src, u32 cnt);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* XIL_MEM_H */
+/**
+* @} End of "addtogroup common_mem_operation_api".
+*/
\ No newline at end of file
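
Xil_MemCpy() is the word-oriented copy some of the imported drivers use in place
of memcpy(). A short usage sketch, assuming (as with memcpy()) that the cnt
argument is a byte count; the buffer name and size are illustrative only:

    #include "xil_types.h"
    #include "xil_mem.h"

    static u32 RxShadow[64];

    static void copy_rx_snapshot(const u32 *RxRing)
    {
        /* copy the ring contents into a local shadow buffer */
        Xil_MemCpy(RxShadow, RxRing, sizeof(RxShadow));
    }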
diff --git a/bsps/include/xil/xil_printf.h b/bsps/include/xil/xil_printf.h
new file mode 100644
index 0000000000..462b7c50db
--- /dev/null
+++ b/bsps/include/xil/xil_printf.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2022 On-Line Applications Research Corporation (OAR)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XIL_PRINTF_H
+#define XIL_PRINTF_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+
+#define xil_printf(args...) printf(args)
+#define print(args...) printf(args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* XIL_PRINTF_H */
diff --git a/bsps/include/xil/xil_smc.h b/bsps/include/xil/xil_smc.h
new file mode 100644
index 0000000000..55a8df3add
--- /dev/null
+++ b/bsps/include/xil/xil_smc.h
@@ -0,0 +1 @@
+/* Intentional blank stub file for Xilinx driver compatibility. */
diff --git a/bsps/include/xil/xil_types.h b/bsps/include/xil/xil_types.h
new file mode 100644
index 0000000000..1d18bfbbca
--- /dev/null
+++ b/bsps/include/xil/xil_types.h
@@ -0,0 +1,203 @@
+/******************************************************************************
+* Copyright (c) 2010 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_types.h
+*
+* @addtogroup common_types Basic Data types for Xilinx® Software IP
+*
+* The xil_types.h file contains basic types for Xilinx software IP. These data types
+* are applicable for all processors supported by Xilinx.
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm  07/14/09 First release
+* 3.03a sdm  05/30/11 Added Xuint64 typedef and XUINT64_MSW/XUINT64_LSW macros
+* 5.00 	pkp  05/29/14 Made changes for 64 bit architecture
+*	srt  07/14/14 Use standard definitions from stdint.h and stddef.h
+*		      Define LONG and ULONG datatypes and mask values
+* 7.00  mus  01/07/19 Add cpp extern macro
+* 7.1   aru  08/19/19 Shift the value in UPPER_32_BITS only if it
+*                     is 64-bit processor
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+#ifndef XIL_TYPES_H	/* prevent circular inclusions */
+#define XIL_TYPES_H	/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stddef.h>
+
+/************************** Constant Definitions *****************************/
+
+#ifndef TRUE
+#  define TRUE		1U
+#endif
+
+#ifndef FALSE
+#  define FALSE		0U
+#endif
+
+#ifndef NULL
+#define NULL		0U
+#endif
+
+#define XIL_COMPONENT_IS_READY     0x11111111U  /**< In device drivers, this macro will be
+                                                 assigned to the "IsReady" member of a driver
+                                                 instance to indicate that the driver
+                                                 instance is initialized and ready to use. */
+#define XIL_COMPONENT_IS_STARTED   0x22222222U  /**< In device drivers, this macro will be assigned to
+                                                 the "IsStarted" member of a driver instance
+                                                 to indicate that the driver instance is
+                                                 started and it can be enabled. */
+
+/* @name New types
+ * New simple types.
+ * @{
+ */
+#ifndef __KERNEL__
+#ifndef XBASIC_TYPES_H
+/*
+ * guarded against xbasic_types.h.
+ */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+/** @}*/
+#define __XUINT64__
+typedef struct
+{
+	u32 Upper;
+	u32 Lower;
+} Xuint64;
+
+
+/*****************************************************************************/
+/**
+* @brief    Return the most significant half of the 64 bit data type.
+*
+* @param    x is the 64 bit word.
+*
+* @return   The upper 32 bits of the 64 bit word.
+*
+******************************************************************************/
+#define XUINT64_MSW(x) ((x).Upper)
+
+/*****************************************************************************/
+/**
+* @brief    Return the least significant half of the 64 bit data type.
+*
+* @param    x is the 64 bit word.
+*
+* @return   The lower 32 bits of the 64 bit word.
+*
+******************************************************************************/
+#define XUINT64_LSW(x) ((x).Lower)
+
+#endif /* XBASIC_TYPES_H */
+
+/*
+ * xbasic_types.h does not typedef s* or u64
+ */
+/** @{ */
+typedef char char8;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+typedef int sint32;
+
+typedef intptr_t INTPTR;
+typedef uintptr_t UINTPTR;
+typedef ptrdiff_t PTRDIFF;
+/** @}*/
+#if !defined(LONG) || !defined(ULONG)
+typedef long LONG;
+typedef unsigned long ULONG;
+#endif
+
+#define ULONG64_HI_MASK	0xFFFFFFFF00000000U
+#define ULONG64_LO_MASK	~ULONG64_HI_MASK
+
+#else
+#include <linux/types.h>
+#endif
+
+/** @{ */
+/**
+ * This data type defines an interrupt handler for a device.
+ * The argument points to the instance of the component
+ */
+typedef void (*XInterruptHandler) (void *InstancePtr);
+
+/**
+ * This data type defines an exception handler for a processor.
+ * The argument points to the instance of the component
+ */
+typedef void (*XExceptionHandler) (void *InstancePtr);
+
+/**
+ * @brief  Returns 32-63 bits of a number.
+ * @param  n : Number being accessed.
+ * @return Bits 32-63 of number.
+ *
+ * @note    A basic shift-right of a 64- or 32-bit quantity.
+ *          Use this to suppress the "right shift count >= width of type"
+ *          warning when that quantity is 32-bits.
+ */
+#if defined (__aarch64__) || defined (__arch64__)
+#define UPPER_32_BITS(n) ((u32)(((n) >> 16) >> 16))
+#else
+#define UPPER_32_BITS(n) 0U
+#endif
+/**
+ * @brief  Returns 0-31 bits of a number
+ * @param  n : Number being accessed.
+ * @return Bits 0-31 of number
+ */
+#define LOWER_32_BITS(n) ((u32)(n))
+
+
+
+
+/************************** Constant Definitions *****************************/
+
+#ifndef TRUE
+#define TRUE		1U
+#endif
+
+#ifndef FALSE
+#define FALSE		0U
+#endif
+
+#ifndef NULL
+#define NULL		0U
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* end of protection macro */
+/**
+ *@endcond
+ */
+/**
+* @} End of "addtogroup common_types".
+*/
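
The UPPER_32_BITS()/LOWER_32_BITS() helpers exist mainly so that 64-bit buffer
addresses can be split across 32-bit register pairs without triggering shift-width
warnings on 32-bit builds. A hedged sketch of that pattern; the DMA register names
and addresses are hypothetical:

    #include "xil_types.h"
    #include "xil_io.h"

    #define MY_DMA_ADDR_LO  0x40000010U   /* hypothetical low-address register */
    #define MY_DMA_ADDR_HI  0x40000014U   /* hypothetical high-address register */

    static void my_dma_set_addr(UINTPTR BufAddr)
    {
        /* program the buffer address as two 32-bit writes;
         * UPPER_32_BITS() evaluates to 0 on 32-bit processors */
        Xil_Out32(MY_DMA_ADDR_LO, LOWER_32_BITS(BufAddr));
        Xil_Out32(MY_DMA_ADDR_HI, UPPER_32_BITS(BufAddr));
    }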
diff --git a/bsps/include/xil/xparameters.h b/bsps/include/xil/xparameters.h
new file mode 100644
index 0000000000..b665810643
--- /dev/null
+++ b/bsps/include/xil/xparameters.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2022 On-Line Applications Research Corporation (OAR)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XPARAMETERS_H
+#define XPARAMETERS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <bspopts.h>
+
+#define EL3 1
+#define EL1_NONSECURE 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* XPARAMETERS_H */
diff --git a/bsps/include/xil/xpseudo_asm_gcc.h b/bsps/include/xil/xpseudo_asm_gcc.h
new file mode 100644
index 0000000000..d986349072
--- /dev/null
+++ b/bsps/include/xil/xpseudo_asm_gcc.h
@@ -0,0 +1,240 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xpseudo_asm_gcc.h
+*
+* This header file contains macros for using inline assembler code. It is
+* written specifically for the GNU compiler.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.00 	pkp		 05/21/14 First release
+* 6.0   mus      07/27/16 Consolidated file for a53,a9 and r5 processors
+* 7.2   asa      04/03/20 Renamed the str macro to strw.
+* 7.2   dp       04/30/20 Added clobber "cc" to mtcpsr for aarch32 processors
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XPSEUDO_ASM_GCC_H  /* prevent circular inclusions */
+#define XPSEUDO_ASM_GCC_H  /* by using protection macros */
+
+/***************************** Include Files ********************************/
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/************************** Constant Definitions ****************************/
+
+/**************************** Type Definitions ******************************/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/* necessary for pre-processor */
+#define stringify(s)	tostring(s)
+#define tostring(s)	#s
+
+#if defined (__aarch64__)
+/* pseudo assembler instructions */
+#define mfcpsr()	({u32 rval = 0U; \
+			   asm volatile("mrs %0,  DAIF" : "=r" (rval));\
+			  rval;\
+			 })
+
+#define mtcpsr(v) __asm__ __volatile__ ("msr DAIF, %0" : : "r" (v))
+
+#define cpsiei()	//__asm__ __volatile__("cpsie	i\n")
+#define cpsidi()	//__asm__ __volatile__("cpsid	i\n")
+
+#define cpsief()	//__asm__ __volatile__("cpsie	f\n")
+#define cpsidf()	//__asm__ __volatile__("cpsid	f\n")
+
+
+
+#define mtgpr(rn, v)	/*__asm__ __volatile__(\
+			  "mov r" stringify(rn) ", %0 \n"\
+			  : : "r" (v)\
+			)*/
+
+#define mfgpr(rn)	/*({u32 rval; \
+			  __asm__ __volatile__(\
+			    "mov %0,r" stringify(rn) "\n"\
+			    : "=r" (rval)\
+			  );\
+			  rval;\
+			 })*/
+
+/* memory synchronization operations */
+
+/* Instruction Synchronization Barrier */
+#define isb() __asm__ __volatile__ ("isb sy")
+
+/* Data Synchronization Barrier */
+#define dsb() __asm__ __volatile__("dsb sy")
+
+/* Data Memory Barrier */
+#define dmb() __asm__ __volatile__("dmb sy")
+
+
+/* Memory Operations */
+#define ldr(adr)	({u64 rval; \
+			  __asm__ __volatile__(\
+			    "ldr	%0,[%1]"\
+			    : "=r" (rval) : "r" (adr)\
+			  );\
+			  rval;\
+			 })
+
+#define mfelrel3() ({u64 rval = 0U; \
+                   asm volatile("mrs %0,  ELR_EL3" : "=r" (rval));\
+                  rval;\
+                 })
+
+#define mtelrel3(v) __asm__ __volatile__ ("msr ELR_EL3, %0" : : "r" (v))
+
+#else
+
+/* pseudo assembler instructions */
+#define mfcpsr()	({u32 rval = 0U; \
+			  __asm__ __volatile__(\
+			    "mrs	%0, cpsr\n"\
+			    : "=r" (rval)\
+			  );\
+			  rval;\
+			 })
+
+#define mtcpsr(v)	__asm__ __volatile__(\
+			  "msr	cpsr,%0\n"\
+			  : : "r" (v) : "cc" \
+			)
+
+#define cpsiei()	__asm__ __volatile__("cpsie	i\n")
+#define cpsidi()	__asm__ __volatile__("cpsid	i\n")
+
+#define cpsief()	__asm__ __volatile__("cpsie	f\n")
+#define cpsidf()	__asm__ __volatile__("cpsid	f\n")
+
+
+
+#define mtgpr(rn, v)	__asm__ __volatile__(\
+			  "mov r" stringify(rn) ", %0 \n"\
+			  : : "r" (v)\
+			)
+
+#define mfgpr(rn)	({u32 rval; \
+			  __asm__ __volatile__(\
+			    "mov %0,r" stringify(rn) "\n"\
+			    : "=r" (rval)\
+			  );\
+			  rval;\
+			 })
+
+/* memory synchronization operations */
+
+/* Instruction Synchronization Barrier */
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+
+/* Data Synchronization Barrier */
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+
+/* Data Memory Barrier */
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+
+/* Memory Operations */
+#define ldr(adr)	({u32 rval; \
+			  __asm__ __volatile__(\
+			    "ldr	%0,[%1]"\
+			    : "=r" (rval) : "r" (adr)\
+			  );\
+			  rval;\
+			 })
+
+#endif
+
+#define ldrb(adr)	({u8 rval; \
+			  __asm__ __volatile__(\
+			    "ldrb	%0,[%1]"\
+			    : "=r" (rval) : "r" (adr)\
+			  );\
+			  rval;\
+			 })
+
+#define strw(adr, val)	__asm__ __volatile__(\
+			  "str	%0,[%1]\n"\
+			  : : "r" (val), "r" (adr)\
+			)
+
+#define strb(adr, val)	__asm__ __volatile__(\
+			  "strb	%0,[%1]\n"\
+			  : : "r" (val), "r" (adr)\
+			)
+
+/* Count leading zeroes (clz) */
+#define clz(arg)	({u8 rval; \
+			  __asm__ __volatile__(\
+			    "clz	%0,%1"\
+			    : "=r" (rval) : "r" (arg)\
+			  );\
+			  rval;\
+			 })
+
+#if defined (__aarch64__)
+#define mtcpdc(reg,val)	__asm__ __volatile__("dc " #reg ",%0"  : : "r" (val))
+#define mtcpic(reg,val)	__asm__ __volatile__("ic " #reg ",%0"  : : "r" (val))
+
+#define mtcpicall(reg)	__asm__ __volatile__("ic " #reg)
+#define mtcptlbi(reg)	__asm__ __volatile__("tlbi " #reg)
+#define mtcpat(reg,val)	__asm__ __volatile__("at " #reg ",%0"  : : "r" (val))
+/* CP15 operations */
+#define mfcp(reg)	({u64 rval = 0U;\
+			__asm__ __volatile__("mrs	%0, " #reg : "=r" (rval));\
+			rval;\
+			})
+
+#define mtcp(reg,val)	__asm__ __volatile__("msr " #reg ",%0"  : : "r" (val))
+
+#else
+/* CP15 operations */
+#define mtcp(rn, v)	__asm__ __volatile__(\
+			 "mcr " rn "\n"\
+			 : : "r" (v)\
+			);
+
+#define mfcp(rn)	({u32 rval = 0U; \
+			 __asm__ __volatile__(\
+			   "mrc " rn "\n"\
+			   : "=r" (rval)\
+			 );\
+			 rval;\
+			 })
+#endif
+
+/************************** Variable Definitions ****************************/
+
+/************************** Function Prototypes *****************************/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+/**
+ *@endcond
+ */
+
+#endif /* XPSEUDO_ASM_GCC_H */
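
The mfcpsr()/mtcpsr() pair is how the shared code further below briefly masks
interrupts around cache maintenance by set/way. A minimal sketch of that
save/mask/restore pattern, reusing the same IRQ_FIQ_MASK value that xil_cache.c
defines (0xC0 covers the I and F bits in both the AArch32 CPSR and the AArch64
DAIF register):

    #include "xpseudo_asm.h"

    #define IRQ_FIQ_MASK 0xC0U

    static void do_uninterruptible_work(void)
    {
        u32 saved;

        /* save the current interrupt state and mask IRQ/FIQ */
        saved = mfcpsr();
        mtcpsr(saved | IRQ_FIQ_MASK);

        /* ... critical section, e.g. cache maintenance by set/way ... */

        /* restore the previous interrupt state */
        mtcpsr(saved);
    }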
diff --git a/bsps/include/xil/xstatus.h b/bsps/include/xil/xstatus.h
new file mode 100644
index 0000000000..1e9e6fbffc
--- /dev/null
+++ b/bsps/include/xil/xstatus.h
@@ -0,0 +1,522 @@
+/******************************************************************************
+* Copyright (c) 2002 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xstatus.h
+*
+* @addtogroup common_status_codes Xilinx software status codes
+*
+* The xstatus.h file contains the Xilinx software status codes. These codes are
+* used throughout the Xilinx device drivers.
+*
+* @{
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XSTATUS_H		/* prevent circular inclusions */
+#define XSTATUS_H		/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/*********************** Common statuses 0 - 500 *****************************/
+/**
+@name Common Status Codes for All Device Drivers
+@{
+*/
+#define XST_SUCCESS                     0L
+#define XST_FAILURE                     1L
+#define XST_DEVICE_NOT_FOUND            2L
+#define XST_DEVICE_BLOCK_NOT_FOUND      3L
+#define XST_INVALID_VERSION             4L
+#define XST_DEVICE_IS_STARTED           5L
+#define XST_DEVICE_IS_STOPPED           6L
+#define XST_FIFO_ERROR                  7L	/*!< An error occurred during an
+						   operation with a FIFO such as
+						   an underrun or overrun, this
+						   error requires the device to
+						   be reset */
+#define XST_RESET_ERROR                 8L	/*!< An error occurred which requires
+						   the device to be reset */
+#define XST_DMA_ERROR                   9L	/*!< A DMA error occurred, this error
+						   typically requires the device
+						   using the DMA to be reset */
+#define XST_NOT_POLLED                  10L	/*!< The device is not configured for
+						   polled mode operation */
+#define XST_FIFO_NO_ROOM                11L	/*!< A FIFO did not have room to put
+						   the specified data into */
+#define XST_BUFFER_TOO_SMALL            12L	/*!< The buffer is not large enough
+						   to hold the expected data */
+#define XST_NO_DATA                     13L	/*!< There was no data available */
+#define XST_REGISTER_ERROR              14L	/*!< A register did not contain the
+						   expected value */
+#define XST_INVALID_PARAM               15L	/*!< An invalid parameter was passed
+						   into the function */
+#define XST_NOT_SGDMA                   16L	/*!< The device is not configured for
+						   scatter-gather DMA operation */
+#define XST_LOOPBACK_ERROR              17L	/*!< A loopback test failed */
+#define XST_NO_CALLBACK                 18L	/*!< A callback has not yet been
+						   registered */
+#define XST_NO_FEATURE                  19L	/*!< Device is not configured with
+						   the requested feature */
+#define XST_NOT_INTERRUPT               20L	/*!< Device is not configured for
+						   interrupt mode operation */
+#define XST_DEVICE_BUSY                 21L	/*!< Device is busy */
+#define XST_ERROR_COUNT_MAX             22L	/*!< The error counters of a device
+						   have maxed out */
+#define XST_IS_STARTED                  23L	/*!< Used when part of device is
+						   already started i.e.
+						   sub channel */
+#define XST_IS_STOPPED                  24L	/*!< Used when part of device is
+						   already stopped i.e.
+						   sub channel */
+#define XST_DATA_LOST                   26L	/*!< Driver defined error */
+#define XST_RECV_ERROR                  27L	/*!< Generic receive error */
+#define XST_SEND_ERROR                  28L	/*!< Generic transmit error */
+#define XST_NOT_ENABLED                 29L	/*!< A requested service is not
+						   available because it has not
+						   been enabled */
+#define XST_NO_ACCESS			30L	/* Generic access error */
+#define XST_TIMEOUT                     31L	/*!< Event timeout occurred */
+
+/** @} */
+/***************** Utility Component statuses 401 - 500  *********************/
+/**
+@name Utility Component Status Codes 401 - 500
+@{
+*/
+#define XST_MEMTEST_FAILED              401L	/*!< Memory test failed */
+
+/** @} */
+/***************** Common Components statuses 501 - 1000 *********************/
+/**
+@name Packet Fifo Status Codes 501 - 510
+@{
+*/
+/********************* Packet Fifo statuses 501 - 510 ************************/
+
+#define XST_PFIFO_LACK_OF_DATA          501L	/*!< Not enough data in FIFO   */
+#define XST_PFIFO_NO_ROOM               502L	/*!< Not enough room in FIFO   */
+#define XST_PFIFO_BAD_REG_VALUE         503L	/*!< Self test, a register value
+						   was invalid after reset */
+#define XST_PFIFO_ERROR                 504L	/*!< Generic packet FIFO error */
+#define XST_PFIFO_DEADLOCK              505L	/*!< Packet FIFO is reporting
+						 * empty and full simultaneously
+						 */
+/** @} */
+/**
+@name DMA Status Codes 511 - 530
+@{
+*/
+/************************** DMA statuses 511 - 530 ***************************/
+
+#define XST_DMA_TRANSFER_ERROR          511L	/*!< Self test, DMA transfer
+						   failed */
+#define XST_DMA_RESET_REGISTER_ERROR    512L	/*!< Self test, a register value
+						   was invalid after reset */
+#define XST_DMA_SG_LIST_EMPTY           513L	/*!< Scatter gather list contains
+						   no buffer descriptors ready
+						   to be processed */
+#define XST_DMA_SG_IS_STARTED           514L	/*!< Scatter gather not stopped */
+#define XST_DMA_SG_IS_STOPPED           515L	/*!< Scatter gather not running */
+#define XST_DMA_SG_LIST_FULL            517L	/*!< All the buffer descriptors of
+						   the scatter gather list are
+						   being used */
+#define XST_DMA_SG_BD_LOCKED            518L	/*!< The scatter gather buffer
+						   descriptor which is to be
+						   copied over in the scatter
+						   list is locked */
+#define XST_DMA_SG_NOTHING_TO_COMMIT    519L	/*!< No buffer descriptors have been
+						   put into the scatter gather
+						   list to be committed */
+#define XST_DMA_SG_COUNT_EXCEEDED       521L	/*!< The packet count threshold
+						   specified was larger than the
+						   total # of buffer descriptors
+						   in the scatter gather list */
+#define XST_DMA_SG_LIST_EXISTS          522L	/*!< The scatter gather list has
+						   already been created */
+#define XST_DMA_SG_NO_LIST              523L	/*!< No scatter gather list has
+						   been created */
+#define XST_DMA_SG_BD_NOT_COMMITTED     524L	/*!< The buffer descriptor which was
+						   being started was not committed
+						   to the list */
+#define XST_DMA_SG_NO_DATA              525L	/*!< The buffer descriptor to start
+						   has already been used by the
+						   hardware so it can't be reused
+						 */
+#define XST_DMA_SG_LIST_ERROR           526L	/*!< General purpose list access
+						   error */
+#define XST_DMA_BD_ERROR                527L	/*!< General buffer descriptor
+						   error */
+/** @} */
+/**
+@name IPIF Status Codes 531 - 550
+@{
+*/
+/************************** IPIF statuses 531 - 550 ***************************/
+
+#define XST_IPIF_REG_WIDTH_ERROR        531L	/*!< An invalid register width
+						   was passed into the function */
+#define XST_IPIF_RESET_REGISTER_ERROR   532L	/*!< The value of a register at
+						   reset was not valid */
+#define XST_IPIF_DEVICE_STATUS_ERROR    533L	/*!< A write to the device interrupt
+						   status register did not read
+						   back correctly */
+#define XST_IPIF_DEVICE_ACK_ERROR       534L	/*!< The device interrupt status
+						   register did not reset when
+						   acked */
+#define XST_IPIF_DEVICE_ENABLE_ERROR    535L	/*!< The device interrupt enable
+						   register was not updated when
+						   other registers changed */
+#define XST_IPIF_IP_STATUS_ERROR        536L	/*!< A write to the IP interrupt
+						   status register did not read
+						   back correctly */
+#define XST_IPIF_IP_ACK_ERROR           537L	/*!< The IP interrupt status register
+						   did not reset when acked */
+#define XST_IPIF_IP_ENABLE_ERROR        538L	/*!< IP interrupt enable register was
+						   not updated correctly when other
+						   registers changed */
+#define XST_IPIF_DEVICE_PENDING_ERROR   539L	/*!< The device interrupt pending
+						   register did not indicate the
+						   expected value */
+#define XST_IPIF_DEVICE_ID_ERROR        540L	/*!< The device interrupt ID register
+						   did not indicate the expected
+						   value */
+#define XST_IPIF_ERROR                  541L	/*!< Generic ipif error */
+/** @} */
+
+/****************** Device specific statuses 1001 - 4095 *********************/
+/**
+@name Ethernet Status Codes 1001 - 1050
+@{
+*/
+/********************* Ethernet statuses 1001 - 1050 *************************/
+
+#define XST_EMAC_MEMORY_SIZE_ERROR  1001L	/*!< Memory space is not big enough
+						 * to hold the minimum number of
+						 * buffers or descriptors */
+#define XST_EMAC_MEMORY_ALLOC_ERROR 1002L	/*!< Memory allocation failed */
+#define XST_EMAC_MII_READ_ERROR     1003L	/*!< MII read error */
+#define XST_EMAC_MII_BUSY           1004L	/*!< An MII operation is in progress */
+#define XST_EMAC_OUT_OF_BUFFERS     1005L	/*!< Driver is out of buffers */
+#define XST_EMAC_PARSE_ERROR        1006L	/*!< Invalid driver init string */
+#define XST_EMAC_COLLISION_ERROR    1007L	/*!< Excess deferral or late
+						 * collision on polled send */
+/** @} */
+/**
+@name UART Status Codes 1051 - 1075
+@{
+*/
+/*********************** UART statuses 1051 - 1075 ***************************/
+#define XST_UART
+
+#define XST_UART_INIT_ERROR         1051L
+#define XST_UART_START_ERROR        1052L
+#define XST_UART_CONFIG_ERROR       1053L
+#define XST_UART_TEST_FAIL          1054L
+#define XST_UART_BAUD_ERROR         1055L
+#define XST_UART_BAUD_RANGE         1056L
+
+/** @} */
+/**
+@name IIC Status Codes 1076 - 1100
+@{
+*/
+/************************ IIC statuses 1076 - 1100 ***************************/
+
+#define XST_IIC_SELFTEST_FAILED         1076	/*!< self test failed            */
+#define XST_IIC_BUS_BUSY                1077	/*!< bus found busy              */
+#define XST_IIC_GENERAL_CALL_ADDRESS    1078	/*!< mastersend attempted with   */
+					     /* general call address        */
+#define XST_IIC_STAND_REG_RESET_ERROR   1079	/*!< A non parameterizable reg   */
+					     /* value after reset not valid */
+#define XST_IIC_TX_FIFO_REG_RESET_ERROR 1080	/*!< Tx fifo included in design  */
+					     /* value after reset not valid */
+#define XST_IIC_RX_FIFO_REG_RESET_ERROR 1081	/*!< Rx fifo included in design  */
+					     /* value after reset not valid */
+#define XST_IIC_TBA_REG_RESET_ERROR     1082	/*!< 10 bit addr incl in design  */
+					     /* value after reset not valid */
+#define XST_IIC_CR_READBACK_ERROR       1083	/*!< Read of the control register */
+					     /* didn't return value written */
+#define XST_IIC_DTR_READBACK_ERROR      1084	/*!< Read of the data Tx reg     */
+					     /* didn't return value written */
+#define XST_IIC_DRR_READBACK_ERROR      1085	/*!< Read of the data Receive reg */
+					     /* didn't return value written */
+#define XST_IIC_ADR_READBACK_ERROR      1086	/*!< Read of the data Tx reg     */
+					     /* didn't return value written */
+#define XST_IIC_TBA_READBACK_ERROR      1087	/*!< Read of the 10 bit addr reg */
+					     /* didn't return written value */
+#define XST_IIC_NOT_SLAVE               1088	/*!< The device isn't a slave    */
+#define XST_IIC_ARB_LOST 				1089 	/*!< Arbitration lost for master	*/
+/** @} */
+/**
+@name ATMC Status Codes 1101 - 1125
+@{
+*/
+/*********************** ATMC statuses 1101 - 1125 ***************************/
+
+#define XST_ATMC_ERROR_COUNT_MAX    1101L	/*!< the error counters in the ATM
+						   controller hit the max value
+						   which requires the statistics
+						   to be cleared */
+/** @} */
+/**
+@name Flash Status Codes 1126 - 1150
+@{
+*/
+/*********************** Flash statuses 1126 - 1150 **************************/
+
+#define XST_FLASH_BUSY                1126L	/*!< Flash is erasing or programming
+						 */
+#define XST_FLASH_READY               1127L	/*!< Flash is ready for commands */
+#define XST_FLASH_ERROR               1128L	/*!< Flash had detected an internal
+						   error. Use XFlash_DeviceControl
+						   to retrieve device specific codes
+						 */
+#define XST_FLASH_ERASE_SUSPENDED     1129L	/*!< Flash is in suspended erase state
+						 */
+#define XST_FLASH_WRITE_SUSPENDED     1130L	/*!< Flash is in suspended write state
+						 */
+#define XST_FLASH_PART_NOT_SUPPORTED  1131L	/*!< Flash type not supported by
+						   driver */
+#define XST_FLASH_NOT_SUPPORTED       1132L	/*!< Operation not supported */
+#define XST_FLASH_TOO_MANY_REGIONS    1133L	/*!< Too many erase regions */
+#define XST_FLASH_TIMEOUT_ERROR       1134L	/*!< Programming or erase operation
+						   aborted due to a timeout */
+#define XST_FLASH_ADDRESS_ERROR       1135L	/*!< Accessed flash outside its
+						   addressable range */
+#define XST_FLASH_ALIGNMENT_ERROR     1136L	/*!< Write alignment error */
+#define XST_FLASH_BLOCKING_CALL_ERROR 1137L	/*!< Couldn't return immediately from
+						   write/erase function with
+						   XFL_NON_BLOCKING_WRITE/ERASE
+						   option cleared */
+#define XST_FLASH_CFI_QUERY_ERROR     1138L	/*!< Failed to query the device */
+/** @} */
+/**
+@name SPI Status Codes 1151 - 1175
+@{
+*/
+/*********************** SPI statuses 1151 - 1175 ****************************/
+
+#define XST_SPI_MODE_FAULT          1151	/*!< master was selected as slave */
+#define XST_SPI_TRANSFER_DONE       1152	/*!< data transfer is complete */
+#define XST_SPI_TRANSMIT_UNDERRUN   1153	/*!< slave underruns transmit register */
+#define XST_SPI_RECEIVE_OVERRUN     1154	/*!< device overruns receive register */
+#define XST_SPI_NO_SLAVE            1155	/*!< no slave has been selected yet */
+#define XST_SPI_TOO_MANY_SLAVES     1156	/*!< more than one slave is being
+						 * selected */
+#define XST_SPI_NOT_MASTER          1157	/*!< operation is valid only as master */
+#define XST_SPI_SLAVE_ONLY          1158	/*!< device is configured as slave-only
+						 */
+#define XST_SPI_SLAVE_MODE_FAULT    1159	/*!< slave was selected while disabled */
+#define XST_SPI_SLAVE_MODE          1160	/*!< device has been addressed as slave */
+#define XST_SPI_RECEIVE_NOT_EMPTY   1161	/*!< device received data in slave mode */
+
+#define XST_SPI_COMMAND_ERROR       1162	/*!< unrecognised command - qspi only */
+#define XST_SPI_POLL_DONE           1163        /*!< controller completed polling the
+						   device for status */
+/** @} */
+/**
+@name OPB Arbiter Status Codes 1176 - 1200
+@{
+*/
+/********************** OPB Arbiter statuses 1176 - 1200 *********************/
+
+#define XST_OPBARB_INVALID_PRIORITY  1176	/*!< the priority registers have either
+						 * one master assigned to two or more
+						 * priorities, or one master not
+						 * assigned to any priority
+						 */
+#define XST_OPBARB_NOT_SUSPENDED     1177	/*!< an attempt was made to modify the
+						 * priority levels without first
+						 * suspending the use of priority
+						 * levels
+						 */
+#define XST_OPBARB_PARK_NOT_ENABLED  1178	/*!< bus parking by id was enabled but
+						 * bus parking was not enabled
+						 */
+#define XST_OPBARB_NOT_FIXED_PRIORITY 1179	/*!< the arbiter must be in fixed
+						 * priority mode to allow the
+						 * priorities to be changed
+						 */
+/** @} */
+/**
+@name INTC Status Codes 1201 - 1225
+@{
+*/
+/************************ Intc statuses 1201 - 1225 **************************/
+
+#define XST_INTC_FAIL_SELFTEST      1201	/*!< self test failed */
+#define XST_INTC_CONNECT_ERROR      1202	/*!< interrupt already in use */
+/** @} */
+/**
+@name TmrCtr Status Codes 1226 - 1250
+@{
+*/
+/********************** TmrCtr statuses 1226 - 1250 **************************/
+
+#define XST_TMRCTR_TIMER_FAILED     1226	/*!< self test failed */
+/** @} */
+/**
+@name WdtTb Status Codes 1251 - 1275
+@{
+*/
+/********************** WdtTb statuses 1251 - 1275 ***************************/
+
+#define XST_WDTTB_TIMER_FAILED      1251L
+/** @} */
+/**
+@name PlbArb status Codes 1276 - 1300
+@{
+*/
+/********************** PlbArb statuses 1276 - 1300 **************************/
+
+#define XST_PLBARB_FAIL_SELFTEST    1276L
+/** @} */
+/**
+@name Plb2Opb Status Codes 1301 - 1325
+@{
+*/
+/********************** Plb2Opb statuses 1301 - 1325 *************************/
+
+#define XST_PLB2OPB_FAIL_SELFTEST   1301L
+/** @} */
+/**
+@name Opb2Plb Status 1326 - 1350
+@{
+*/
+/********************** Opb2Plb statuses 1326 - 1350 *************************/
+
+#define XST_OPB2PLB_FAIL_SELFTEST   1326L
+/** @} */
+/**
+@name SysAce Status Codes 1351 - 1360
+@{
+*/
+/********************** SysAce statuses 1351 - 1360 **************************/
+
+#define XST_SYSACE_NO_LOCK          1351L	/*!< No MPU lock has been granted */
+/** @} */
+/**
+@name PCI Bridge Status Codes 1361 - 1375
+@{
+*/
+/********************** PCI Bridge statuses 1361 - 1375 **********************/
+
+#define XST_PCI_INVALID_ADDRESS     1361L
+/** @} */
+/**
+@name FlexRay Constants 1400 - 1409
+@{
+*/
+/********************** FlexRay constants 1400 - 1409 *************************/
+
+#define XST_FR_TX_ERROR			1400
+#define XST_FR_TX_BUSY			1401
+#define XST_FR_BUF_LOCKED		1402
+#define XST_FR_NO_BUF			1403
+/** @} */
+/**
+@name USB constants 1410 - 1420
+@{
+*/
+/****************** USB constants 1410 - 1420  *******************************/
+
+#define XST_USB_ALREADY_CONFIGURED	1410
+#define XST_USB_BUF_ALIGN_ERROR		1411
+#define XST_USB_NO_DESC_AVAILABLE	1412
+#define XST_USB_BUF_TOO_BIG		1413
+#define XST_USB_NO_BUF			1414
+/** @} */
+/**
+@name HWICAP constants 1421 - 1429
+@{
+*/
+/****************** HWICAP constants 1421 - 1429  *****************************/
+
+#define XST_HWICAP_WRITE_DONE		1421
+
+/** @} */
+/**
+@name AXI VDMA constants 1430 - 1440
+@{
+*/
+/****************** AXI VDMA constants 1430 - 1440  *****************************/
+
+#define XST_VDMA_MISMATCH_ERROR		1430
+/** @} */
+/**
+@name NAND Flash Status Codes 1441 - 1459
+@{
+*/
+/*********************** NAND Flash statuses 1441 - 1459  *********************/
+
+#define XST_NAND_BUSY			1441L	/*!< Flash is erasing or
+						 * programming
+						 */
+#define XST_NAND_READY			1442L	/*!< Flash is ready for commands
+						 */
+#define XST_NAND_ERROR			1443L	/*!< Flash had detected an
+						 * internal error.
+						 */
+#define XST_NAND_PART_NOT_SUPPORTED	1444L	/*!< Flash type not supported by
+						 * driver
+						 */
+#define XST_NAND_OPT_NOT_SUPPORTED	1445L	/*!< Operation not supported
+						 */
+#define XST_NAND_TIMEOUT_ERROR		1446L	/*!< Programming or erase
+						 * operation aborted due to a
+						 * timeout
+						 */
+#define XST_NAND_ADDRESS_ERROR		1447L	/*!< Accessed flash outside its
+						 * addressable range
+						 */
+#define XST_NAND_ALIGNMENT_ERROR	1448L	/*!< Write alignment error
+						 */
+#define XST_NAND_PARAM_PAGE_ERROR	1449L	/*!< Failed to read parameter
+						 * page of the device
+						 */
+#define XST_NAND_CACHE_ERROR		1450L	/*!< Flash page buffer error
+						 */
+
+#define XST_NAND_WRITE_PROTECTED	1451L	/*!< Flash is write protected
+						 */
+/** @} */
+
+/**************************** Type Definitions *******************************/
+
+typedef s32 XStatus;
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+
+/**
+ *@endcond
+ */
+
+/**
+* @} End of "addtogroup common_status_codes".
+*/
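
Drivers built on these codes return XStatus (or a plain int holding one of the
XST_* values) and callers typically compare against XST_SUCCESS, so the more
specific codes stay meaningful. A small sketch under assumed names; the
scratch-register self test is purely illustrative:

    #include "xil_io.h"
    #include "xstatus.h"

    static XStatus my_dev_selftest(UINTPTR BaseAddr)
    {
        /* a scratch register should read back exactly what was written */
        Xil_Out32(BaseAddr, 0xA5A5A5A5U);
        if (Xil_In32(BaseAddr) != 0xA5A5A5A5U) {
            return XST_REGISTER_ERROR;
        }

        return XST_SUCCESS;
    }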
diff --git a/bsps/shared/xil/VERSION b/bsps/shared/xil/VERSION
new file mode 100644
index 0000000000..d94f574255
--- /dev/null
+++ b/bsps/shared/xil/VERSION
@@ -0,0 +1,20 @@
+The information in this file describes the source of files in
+bsps/shared/xil/ and bsps/include/xil/.
+
+Import from:
+
+https://github.com/Xilinx/embeddedsw.git
+
+commit 8a89579489c88ea5acd23d7d439ac928659c26cf
+Author:     msreeram <manikanta.sreeram at xilinx.com>
+AuthorDate: Wed Apr 6 23:24:38 2022 -0600
+Commit:     Siva Addepalli <sivaprasad.addepalli at xilinx.com>
+CommitDate: Fri Apr 8 16:47:15 2022 +0530
+
+    update license file for EmbeddedSW 2022.1 release
+
+    Update license file for EmbeddedSW 2022.1 release
+
+    Signed-off-by: Manikanta Sreeram <msreeram at xilinx.com>
+
+    Acked-by : Meena Paleti <meena.paleti at xilinx.com>
diff --git a/bsps/shared/xil/xil_assert.c b/bsps/shared/xil/xil_assert.c
new file mode 100644
index 0000000000..b3dd7e9718
--- /dev/null
+++ b/bsps/shared/xil/xil_assert.c
@@ -0,0 +1,126 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_assert.c
+* @addtogroup common_assert_apis Assert APIs and Macros
+* @{
+*
+* This file contains basic assert related functions for Xilinx software IP.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm  07/14/09 Initial release
+* 6.0   kvn  05/31/16 Make Xil_AssertWait a global variable
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Variable Definitions *****************************/
+
+/**
+ * @brief This variable allows testing to be done more easily with asserts. An assert
+ * sets this variable such that a driver can evaluate this variable
+ * to determine if an assert occurred.
+ */
+u32 Xil_AssertStatus;
+
+/**
+ * @brief This variable allows the assert functionality to be changed for testing
+ * such that it does not wait infinitely. Use the debugger to disable the
+ * waiting during testing of asserts.
+ */
+s32 Xil_AssertWait = 1;
+
+/* The callback function to be invoked when an assert is taken */
+static Xil_AssertCallback Xil_AssertCallbackRoutine = NULL;
+
+/************************** Function Prototypes ******************************/
+
+/*****************************************************************************/
+/**
+*
+* @brief    Implement assert. Currently, it calls a user-defined callback
+*           function if one has been set.  Then, it potentially enters an
+*           infinite loop depending on the value of the Xil_AssertWait
+*           variable.
+*
+* @param    File: filename of the source
+* @param    Line: linenumber within File
+*
+* @return   None.
+*
+* @note     None.
+*
+******************************************************************************/
+void Xil_Assert(const char8 *File, s32 Line)
+{
+	/* if the callback has been set then invoke it */
+	if (Xil_AssertCallbackRoutine != 0) {
+		(*Xil_AssertCallbackRoutine)(File, Line);
+	}
+
+	/* if specified, wait indefinitely such that the assert will show up
+	 * in testing
+	 */
+	while (Xil_AssertWait != 0) {
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Set up a callback function to be invoked when an assert occurs.
+*           If a callback is already installed, then it will be replaced.
+*
+* @param    Routine: callback to be invoked when an assert is taken
+*
+* @return   None.
+*
+* @note     This function has no effect if NDEBUG is set
+*
+******************************************************************************/
+void Xil_AssertSetCallback(Xil_AssertCallback Routine)
+{
+	Xil_AssertCallbackRoutine = Routine;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Null handler function. This follows the XInterruptHandler
+*           signature for interrupt handlers. It can be used to assign a null
+*           handler (a stub) to an interrupt controller vector table.
+*
+* @param    NullParameter: arbitrary void pointer that is not used.
+*
+* @return   None.
+*
+* @note     None.
+*
+******************************************************************************/
+void XNullHandler(void *NullParameter)
+{
+	(void) NullParameter;
+}
+/**
+* @} End of "addtogroup common_assert_apis".
+*/
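
In a test or bring-up environment the assert hooks above are usually wired up so
that a failed Xil_AssertVoid()/Xil_AssertNonvoid() check reports its location
instead of silently spinning. A hedged sketch; the reporting function and its
output format are illustrative:

    #include "xil_assert.h"
    #include "xil_printf.h"

    static void my_assert_report(const char8 *File, s32 Line)
    {
        xil_printf("ASSERT at %s:%d\r\n", File, (int)Line);
    }

    void my_test_setup(void)
    {
        /* report asserts and let Xil_Assert() return instead of looping,
         * since Xil_AssertWait is cleared */
        Xil_AssertSetCallback(my_assert_report);
        Xil_AssertWait = 0;
    }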
diff --git a/bsps/shared/xil/xil_cache.c b/bsps/shared/xil/xil_cache.c
new file mode 100644
index 0000000000..aef64b310a
--- /dev/null
+++ b/bsps/shared/xil/xil_cache.c
@@ -0,0 +1,732 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.c
+*
+* Contains required functions for the ARM cache functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver    Who Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.0 	pkp  05/29/14 First release
+* 5.5	pkp	 04/15/16 Updated the Xil_DCacheInvalidate,
+*					  Xil_DCacheInvalidateLine and Xil_DCacheInvalidateRange
+*					  functions description for proper explanation
+* 6.2   pkp	 01/22/17 Added support for EL1 non-secure
+* 6.2   asa  01/31/17 The existing Xil_DCacheDisable API first flushes the
+*					  D caches and then disables it. The problem with that is,
+*					  potentially there will be a small window after the cache
+*					  flush operation and before we disable D caches where
+*					  we might have valid data in cache lines. In such a
+*					  scenario disabling the D cache can lead to unknown behavior.
+*					  The ideal solution to this is to use assembly code for
+*					  the complete API and avoid any memory accesses. But with
+*					  that we will end up having a huge amount of assembly code
+*					  which is not maintainable. Changes are done to use a mix
+*					  of assembly and C code. All local variables are put in
+*					  registers. Also function calls are avoided in the API to
+*					  avoid using stack memory.
+*					  These changes fix CR#966220.
+* 6.2  mus  02/13/17  The new api Xil_ConfigureL1Prefetch is added to disable pre-fetching/configure
+*                     the maximum number of outstanding data prefetches allowed in
+*                     L1 cache system. It fixes CR#967864.
+* 6.6  mus  02/27/18  Updated Xil_DCacheInvalidateRange and
+*					  Xil_ICacheInvalidateRange APIs to change the data type of
+*					  "cacheline" variable as "INTPTR", This change has been done
+*					  to avoid the truncation of upper DDR addresses to 32 bit. It
+*					  fixes CR#995581.
+* 6.6  mus  03/15/18  By default CPUACTLR_EL1 is accessible only from EL3, it
+*					  results into abort if accessed from EL1 non secure privilege
+*					  level. Updated Xil_ConfigureL1Prefetch function to access
+*					  CPUACTLR_EL1 only for EL3.
+* 6.8  mn   08/01/18  Optimize the Xil_DCacheInvalidateRange() function to remove
+*                     redundant operations
+* 6.8  asa  09/15/18  Fix bug in the Xil_DCacheInvalidateRange API introduced while
+*                     making optimizations in the previous patch. This change fixes
+*                     CR-1008926.
+* 7.0 mus  10/12/18  Updated Xil_DCacheInvalidateLine and Xil_DCacheInvalidateRange
+*                    APIs to replace IVAC instruction with CIVAC. So that, these
+*                    APIs will always do flush + invalidate in case of Cortexa53 as
+*                    well as Cortexa72 processor.
+* 7.1 mus  09/17/19  Xil_DCacheFlushRange and Xil_DCacheInvalidateRange are executing
+*                    same functionality (clean + invalidate). Removed
+*                    Xil_DCacheFlushRange function implementation and defined it as
+*                    macro. Xil_DCacheFlushRange macro points to the
+*                    Xil_DCacheInvalidateRange API to avoid code duplication.
+*
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_cache.h"
+#include "xil_io.h"
+#include "xpseudo_asm.h"
+#include "xparameters.h"
+#include "xreg_cortexa53.h"
+#include "xil_exception.h"
+#include "bspconfig.h"
+
+/************************** Function Prototypes ******************************/
+
+/************************** Variable Definitions *****************************/
+#define IRQ_FIQ_MASK 0xC0U	/* Mask IRQ and FIQ interrupts in cpsr */
+
+/****************************************************************************/
+/**
+* @brief	Enable the Data cache.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_DCacheEnable(void)
+{
+	u32 CtrlReg;
+
+	if (EL3 == 1) {
+		CtrlReg = mfcp(SCTLR_EL3);
+	} else if (EL1_NONSECURE == 1) {
+		CtrlReg = mfcp(SCTLR_EL1);
+	} else {
+		CtrlReg = 0U;
+	}
+
+	/* enable caches only if they are disabled */
+	if((CtrlReg & XREG_CONTROL_DCACHE_BIT) == 0X00000000U){
+
+		/* invalidate the Data cache */
+		Xil_DCacheInvalidate();
+
+		CtrlReg |= XREG_CONTROL_DCACHE_BIT;
+
+		if (EL3 == 1) {
+			/* enable the Data cache for el3*/
+			mtcp(SCTLR_EL3,CtrlReg);
+		} else if (EL1_NONSECURE == 1) {
+			/* enable the Data cache for el1*/
+			mtcp(SCTLR_EL1,CtrlReg);
+		}
+	}
+}
+
+/****************************************************************************/
+/**
+* @brief	Disable the Data cache.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_DCacheDisable(void)
+{
+	register u32 CsidReg;
+	register u32 C7Reg;
+	register u32 LineSize;
+	register u32 NumWays;
+	register u32 Way;
+	register u32 WayIndex;
+	register u32 WayAdjust;
+	register u32 Set;
+	register u32 SetIndex;
+	register u32 NumSet;
+	register u32 CacheLevel;
+
+	dsb();
+	asm(
+	"mov 	x0, #0\n\t"
+#if EL3==1
+	"mrs	x0, sctlr_el3 \n\t"
+	"and	w0, w0, #0xfffffffb\n\t"
+	"msr	sctlr_el3, x0\n\t"
+#elif EL1_NONSECURE==1
+	"mrs	x0, sctlr_el1 \n\t"
+	"and	w0, w0, #0xfffffffb\n\t"
+	"msr	sctlr_el1, x0\n\t"
+#endif
+	"dsb sy\n\t"
+	);
+
+	/* Number of level of cache*/
+	CacheLevel = 0U;
+	/* Select cache level 0 and D cache in CSSR */
+	mtcp(CSSELR_EL1,CacheLevel);
+	isb();
+
+	CsidReg = mfcp(CCSIDR_EL1);
+
+	/* Get the cacheline size, way size, index size from csidr */
+	LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+	/* Number of Ways */
+	NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+	NumWays += 0x00000001U;
+
+	/*Number of Set*/
+	NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+	NumSet += 0x00000001U;
+
+	WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+	Way = 0U;
+	Set = 0U;
+
+	/* Flush all the cachelines */
+	for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+		for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+			C7Reg = Way | Set | CacheLevel;
+			mtcpdc(CISW,C7Reg);
+			Set += (0x00000001U << LineSize);
+		}
+		Set = 0U;
+		Way += (0x00000001U << WayAdjust);
+	}
+
+	/* Wait for Flush to complete */
+	dsb();
+
+	/* Select cache level 1 and D cache in CSSR */
+	CacheLevel += (0x00000001U << 1U);
+	mtcp(CSSELR_EL1,CacheLevel);
+	isb();
+
+	CsidReg = mfcp(CCSIDR_EL1);
+
+	/* Get the cacheline size, way size, index size from csidr */
+	LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+	/* Number of Ways */
+	NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+	NumWays += 0x00000001U;
+
+	/* Number of Sets */
+	NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+	NumSet += 0x00000001U;
+
+	WayAdjust=clz(NumWays) - (u32)0x0000001FU;
+
+	Way = 0U;
+	Set = 0U;
+
+	/* Flush all the cachelines */
+	for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
+		for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
+			C7Reg = Way | Set | CacheLevel;
+			mtcpdc(CISW,C7Reg);
+			Set += (0x00000001U << LineSize);
+		}
+		Set=0U;
+		Way += (0x00000001U<<WayAdjust);
+	}
+	/* Wait for Flush to complete */
+	dsb();
+
+	asm(
+#if EL3==1
+		"tlbi 	ALLE3\n\t"
+#elif EL1_NONSECURE==1
+		"tlbi 	VMALLE1\n\t"
+#endif
+		"dsb sy\r\n"
+		"isb\n\t"
+	);
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate the Data cache. The contents present in the cache are
+* 			cleaned and invalidated.
+*
+* @return	None.
+*
+* @note		In Cortex-A53, functionality to simply invalidate the cachelines
+*  			is not present. Such operations are a problem for an environment
+* 			that supports virtualisation. It would allow one OS to invalidate
+* 			a line belonging to another OS. This could lead to the other OS
+* 			crashing because of the loss of essential data. Hence, such
+* 			operations are promoted to clean and invalidate which avoids such
+*			corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidate(void)
+{
+	register u32 CsidReg, C7Reg;
+	u32 LineSize, NumWays;
+	u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
+	u32 currmask;
+
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+
+	/* Both cache levels are processed, starting with level 0 */
+	CacheLevel = 0U;
+	/* Select cache level 0 and D cache in CSSR */
+	mtcp(CSSELR_EL1,CacheLevel);
+	isb();
+
+	CsidReg = mfcp(CCSIDR_EL1);
+
+	/* Get the cacheline size, way size, index size from csidr */
+	LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+	/* Number of Ways */
+	NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+	NumWays += 0x00000001U;
+
+	/* Number of Sets */
+	NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+	NumSet += 0x00000001U;
+
+	WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+	Way = 0U;
+	Set = 0U;
+
+	/* Invalidate all the cachelines */
+	for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+		for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+			C7Reg = Way | Set | CacheLevel;
+			mtcpdc(ISW,C7Reg);
+			Set += (0x00000001U << LineSize);
+		}
+		Set = 0U;
+		Way += (0x00000001U << WayAdjust);
+	}
+
+	/* Wait for invalidate to complete */
+	dsb();
+
+	/* Select cache level 1 and D cache in CSSR */
+	CacheLevel += (0x00000001U << 1U);
+	mtcp(CSSELR_EL1,CacheLevel);
+	isb();
+
+	CsidReg = mfcp(CCSIDR_EL1);
+
+	/* Get the cacheline size, way size, index size from csidr */
+	LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+	/* Number of Ways */
+	NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+	NumWays += 0x00000001U;
+
+	/* Number of Sets */
+	NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+	NumSet += 0x00000001U;
+
+	WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+	Way = 0U;
+	Set = 0U;
+
+	/* Invalidate all the cachelines */
+	for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+		for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+			C7Reg = Way | Set | CacheLevel;
+			mtcpdc(ISW,C7Reg);
+			Set += (0x00000001U << LineSize);
+		}
+		Set = 0U;
+		Way += (0x00000001U << WayAdjust);
+	}
+	/* Wait for invalidate to complete */
+	dsb();
+
+	mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate a Data cache line. The cacheline is cleaned and
+*			invalidated.
+*
+* @param	adr: 64bit address of the data to be invalidated.
+*
+* @return	None.
+*
+* @note		In Cortex-A53, functionality to simply invalidate the cachelines
+*  			is not present. Such operations are a problem for an environment
+* 			that supports virtualisation. They would allow one OS to invalidate
+* 			a line belonging to another OS. This could lead to the other OS
+* 			crashing because of the loss of essential data. Hence, such
+* 			operations are promoted to clean and invalidate, which avoids such
+*			corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateLine(INTPTR adr)
+{
+
+	u32 currmask;
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+
+	/* Select cache level 0 and D cache in CSSR */
+	mtcp(CSSELR_EL1,0x0);
+	mtcpdc(CIVAC,(adr & (~0x3F)));
+	/* Wait for invalidate to complete */
+	dsb();
+	/* Select cache level 1 and D cache in CSSR */
+	mtcp(CSSELR_EL1,0x2);
+	mtcpdc(IVAC,(adr & (~0x3F)));
+	/* Wait for invalidate to complete */
+	dsb();
+	mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate the Data cache for the given address range.
+* 			The cachelines present in the address range are cleaned and
+*			invalidated.
+*
+* @param	adr: 64bit start address of the range to be invalidated.
+* @param	len: Length of the range to be invalidated in bytes.
+*
+* @return	None.
+*
+* @note		In Cortex-A53, functionality to simply invalidate the cachelines
+*  			is not present. Such operations are a problem for an environment
+* 			that supports virtualisation. They would allow one OS to invalidate
+* 			a line belonging to another OS. This could lead to the other OS
+* 			crashing because of the loss of essential data. Hence, such
+* 			operations are promoted to clean and invalidate, which avoids such
+*			corruption.
+*
+****************************************************************************/
+void Xil_DCacheInvalidateRange(INTPTR  adr, INTPTR len)
+{
+	const INTPTR cacheline = 64U;
+	INTPTR end = adr + len;
+	adr = adr & (~0x3F);
+	u32 currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+	if (len != 0U) {
+		while (adr < end) {
+			mtcpdc(CIVAC,adr);
+			adr += cacheline;
+		}
+	}
+	/* Wait for invalidate to complete */
+	dsb();
+	mtcpsr(currmask);
+}
+
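+/*
+ * Editor's illustration, not part of the imported Xilinx file: a hedged
+ * usage sketch for a driver receiving data by DMA into a hypothetical
+ * buffer. The range is invalidated before the CPU reads it; on Cortex-A53
+ * this is promoted to clean+invalidate as described in the note above.
+ */
+static u8 ExampleRxBuffer[1024] __attribute__((aligned(64)));
+
+static void Example_DmaReceiveComplete(void)
+{
+	/* Discard any stale cached copy so the CPU observes what the DMA
+	 * engine wrote to memory. */
+	Xil_DCacheInvalidateRange((INTPTR)ExampleRxBuffer,
+				  sizeof(ExampleRxBuffer));
+
+	/* ... the buffer contents can now be read safely ... */
+}
+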
+/****************************************************************************/
+/**
+* @brief	Flush the Data cache.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_DCacheFlush(void)
+{
+	register u32 CsidReg, C7Reg;
+	u32 LineSize, NumWays;
+	u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
+	u32 currmask;
+
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+
+	/* Both cache levels are processed, starting with level 0 */
+	CacheLevel = 0U;
+	/* Select cache level 0 and D cache in CSSR */
+	mtcp(CSSELR_EL1,CacheLevel);
+	isb();
+
+	CsidReg = mfcp(CCSIDR_EL1);
+
+	/* Get the cacheline size, way size, index size from csidr */
+	LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+	/* Number of Ways */
+	NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+	NumWays += 0x00000001U;
+
+	/* Number of Sets */
+	NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+	NumSet += 0x00000001U;
+
+	WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+	Way = 0U;
+	Set = 0U;
+
+	/* Flush all the cachelines */
+	for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+		for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+			C7Reg = Way | Set | CacheLevel;
+			mtcpdc(CISW,C7Reg);
+			Set += (0x00000001U << LineSize);
+		}
+		Set = 0U;
+		Way += (0x00000001U << WayAdjust);
+	}
+
+	/* Wait for Flush to complete */
+	dsb();
+
+	/* Select cache level 1 and D cache in CSSR */
+	CacheLevel += (0x00000001U << 1U);
+	mtcp(CSSELR_EL1,CacheLevel);
+	isb();
+
+	CsidReg = mfcp(CCSIDR_EL1);
+
+	/* Get the cacheline size, way size, index size from csidr */
+	LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
+
+	/* Number of Ways */
+	NumWays = (CsidReg & 0x00001FFFU) >> 3U;
+	NumWays += 0x00000001U;
+
+	/* Number of Sets */
+	NumSet = (CsidReg >> 13U) & 0x00007FFFU;
+	NumSet += 0x00000001U;
+
+	WayAdjust = clz(NumWays) - (u32)0x0000001FU;
+
+	Way = 0U;
+	Set = 0U;
+
+	/* Flush all the cachelines */
+	for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
+		for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
+			C7Reg = Way | Set | CacheLevel;
+			mtcpdc(CISW,C7Reg);
+			Set += (0x00000001U << LineSize);
+		}
+		Set = 0U;
+		Way += (0x00000001U << WayAdjust);
+	}
+	/* Wait for Flush to complete */
+	dsb();
+
+	mtcpsr(currmask);
+}
+
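+/*
+ * Editor's illustration, not part of the imported Xilinx file: a full flush
+ * is typically used when the entire memory image must become visible to a
+ * non-coherent agent, e.g. in a hypothetical warm-restart hook.
+ */
+static void Example_PrepareWarmRestart(void)
+{
+	/* Write every dirty line back to memory before control is lost. */
+	Xil_DCacheFlush();
+}
+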
+/****************************************************************************/
+/**
+* @brief	Flush a Data cache line. If the byte specified by the address (adr)
+* 			is cached by the Data cache, the cacheline containing that byte is
+*			invalidated. If the cacheline is modified (dirty), the entire
+*			contents of the cacheline are written to system memory before the
+* 			line is invalidated.
+*
+* @param	adr: 64bit address of the data to be flushed.
+*
+* @return	None.
+*
+* @note		The bottom 6 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_DCacheFlushLine(INTPTR  adr)
+{
+	u32 currmask;
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+	/* Select cache level 0 and D cache in CSSR */
+	mtcp(CSSELR_EL1,0x0);
+	mtcpdc(CIVAC,(adr & (~0x3F)));
+	/* Wait for flush to complete */
+	dsb();
+	/* Select cache level 1 and D cache in CSSR */
+	mtcp(CSSELR_EL1,0x2);
+	mtcpdc(CIVAC,(adr & (~0x3F)));
+	/* Wait for flush to complete */
+	dsb();
+	mtcpsr(currmask);
+}
+
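+/*
+ * Editor's illustration, not part of the imported Xilinx file: a hedged
+ * sketch of flushing the single cache line that holds a hypothetical
+ * 64-byte DMA descriptor before handing it to the device.
+ */
+typedef struct {
+	u64 BufferAddr;
+	u32 Length;
+	u32 Control;
+	u8  Reserved[48];
+} ExampleDmaDesc;
+
+static ExampleDmaDesc ExampleDesc __attribute__((aligned(64)));
+
+static void Example_PostDescriptor(u64 Buffer, u32 Length)
+{
+	ExampleDesc.BufferAddr = Buffer;
+	ExampleDesc.Length = Length;
+	ExampleDesc.Control = 0x1U; /* hypothetical "owned by device" bit */
+
+	/* Clean and invalidate the line so the device sees the descriptor. */
+	Xil_DCacheFlushLine((INTPTR)&ExampleDesc);
+}
+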
+/****************************************************************************/
+/**
+* @brief	Enable the instruction cache.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_ICacheEnable(void)
+{
+	u32 CtrlReg;
+
+	if (EL3 == 1) {
+		CtrlReg = mfcp(SCTLR_EL3);
+	} else if (EL1_NONSECURE == 1) {
+		CtrlReg = mfcp(SCTLR_EL1);
+	} else {
+		CtrlReg = 0U;
+	}
+
+	/* enable the instruction cache only if it is disabled */
+	if ((CtrlReg & XREG_CONTROL_ICACHE_BIT) == 0x00000000U) {
+		/* invalidate the instruction cache */
+		Xil_ICacheInvalidate();
+
+		CtrlReg |= XREG_CONTROL_ICACHE_BIT;
+
+		if (EL3 == 1) {
+			/* enable the instruction cache for el3*/
+			mtcp(SCTLR_EL3,CtrlReg);
+		} else if (EL1_NONSECURE == 1) {
+			/* enable the instruction cache for el1*/
+			mtcp(SCTLR_EL1,CtrlReg);
+		}
+	}
+}
+
+/****************************************************************************/
+/**
+* @brief	Disable the instruction cache.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_ICacheDisable(void)
+{
+	u32 CtrlReg;
+
+	if (EL3 == 1) {
+		CtrlReg = mfcp(SCTLR_EL3);
+	} else if (EL1_NONSECURE == 1) {
+		CtrlReg = mfcp(SCTLR_EL1);
+	} else {
+		CtrlReg = 0U;
+	}
+	/* invalidate the instruction cache */
+	Xil_ICacheInvalidate();
+	CtrlReg &= ~(XREG_CONTROL_ICACHE_BIT);
+
+	if (EL3 == 1) {
+		/* disable the instruction cache */
+		mtcp(SCTLR_EL3,CtrlReg);
+	} else if (EL1_NONSECURE == 1) {
+		/* disable the instruction cache */
+		mtcp(SCTLR_EL1,CtrlReg);
+	}
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate the entire instruction cache.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidate(void)
+{
+	unsigned int currmask;
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+	mtcp(CSSELR_EL1,0x1);
+	dsb();
+	/* invalidate the instruction cache */
+	mtcpicall(IALLU);
+	/* Wait for invalidate to complete */
+	dsb();
+	mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate an instruction cache line. If the instruction specified
+*			by the parameter adr is cached by the instruction cache, the
+*			cacheline containing that instruction is invalidated.
+*
+* @param	adr: 64bit address of the instruction to be invalidated.
+*
+* @return	None.
+*
+* @note		The bottom 6 bits are set to 0, forced by architecture.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateLine(INTPTR  adr)
+{
+	u32 currmask;
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+
+	mtcp(CSSELR_EL1,0x1);
+	/* Invalidate I-cache line */
+	mtcpic(IVAU,adr & (~0x3F));
+	/* Wait for invalidate to complete */
+	dsb();
+	mtcpsr(currmask);
+}
+
+/****************************************************************************/
+/**
+* @brief	Invalidate the instruction cache for the given address range.
+* 			If the instructions specified by the address range are cached by
+* 			the instruction cache, the cachelines containing those
+*			instructions are invalidated.
+*
+* @param	adr: 64bit start address of the range to be invalidated.
+* @param	len: Length of the range to be invalidated in bytes.
+*
+* @return	None.
+*
+****************************************************************************/
+void Xil_ICacheInvalidateRange(INTPTR  adr, INTPTR len)
+{
+	const INTPTR cacheline = 64U;
+	INTPTR end;
+	INTPTR tempadr = adr;
+	INTPTR tempend;
+	u32 currmask;
+	currmask = mfcpsr();
+	mtcpsr(currmask | IRQ_FIQ_MASK);
+
+	if (len != 0x00000000U) {
+		end = tempadr + len;
+		tempend = end;
+		tempadr &= ~(cacheline - 0x00000001U);
+
+		/* Select cache Level 0 I-cache in CSSR */
+		mtcp(CSSELR_EL1,0x1);
+		while (tempadr < tempend) {
+			/* Invalidate I-cache line */
+			mtcpic(IVAU,tempadr & (~0x3F));
+
+			tempadr += cacheline;
+		}
+	}
+	/* Wait for invalidate to complete */
+	dsb();
+	mtcpsr(currmask);
+}
+
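+/*
+ * Editor's illustration, not part of the imported Xilinx file: a hedged
+ * sketch of the usual sequence after a hypothetical loader copies code into
+ * RAM. The data cache is flushed line by line and the instruction cache is
+ * invalidated for the same range before the new code is executed.
+ */
+static void Example_SyncLoadedCode(INTPTR Dest, INTPTR Len)
+{
+	INTPTR Addr;
+
+	/* Push the freshly written instructions out to memory. */
+	for (Addr = Dest & ~(INTPTR)0x3F; Addr < (Dest + Len); Addr += 64) {
+		Xil_DCacheFlushLine(Addr);
+	}
+
+	/* Drop any stale instructions cached for the same range. */
+	Xil_ICacheInvalidateRange(Dest, Len);
+}
+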
+/****************************************************************************/
+/**
+* @brief	Configure the maximum number of outstanding data prefetches
+*               allowed in L1 cache.
+*
+* @param	num: maximum number of outstanding data prefetches allowed,
+*                    valid values are 0-7.
+*
+* @return	None.
+*
+* @note		This function is implemented only for EL3 privilege level.
+*
+*****************************************************************************/
+void Xil_ConfigureL1Prefetch(u8 num)
+{
+#if EL3
+	u64 val = 0;
+
+	val = mfcp(S3_1_C15_C2_0);
+	val &= ~(L1_DATA_PREFETCH_CONTROL_MASK);
+	val |= (num << L1_DATA_PREFETCH_CONTROL_SHIFT);
+	mtcp(S3_1_C15_C2_0,val);
+#endif
+}
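+
+/*
+ * Editor's illustration, not part of the imported Xilinx file: at EL3 a BSP
+ * could, for example, restrict L1 data prefetching entirely while chasing a
+ * suspected prefetch-related problem.
+ */
+static void Example_DisableL1Prefetch(void)
+{
+	Xil_ConfigureL1Prefetch(0U); /* 0 outstanding prefetches allowed */
+}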
diff --git a/bsps/shared/xil/xil_mem.c b/bsps/shared/xil/xil_mem.c
new file mode 100644
index 0000000000..44e7d9a0c4
--- /dev/null
+++ b/bsps/shared/xil/xil_mem.c
@@ -0,0 +1,70 @@
+/******************************************************************************/
+/**
+* Copyright (c) 2015 - 2022 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/****************************************************************************/
+/**
+* @file xil_mem.c
+*
+* This file contains the Xilinx memory copy function intended for word-aligned
+* data copies.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 6.1   nsk      11/07/16 First release.
+* 7.7	sk	 01/10/22 Update Xil_MemCpy functions variables typecast
+* 			  from int to s32 to fix misra_c_2012_directive_4_6
+* 			  violations.
+* 7.7	sk	 01/10/22 Include xil_mem.h header file to fix Xil_MemCpy
+* 			  prototype misra_c_2012_rule_8_4 violation.
+*
+* </pre>
+*
+*****************************************************************************/
+
+/***************************** Include Files ********************************/
+
+#include "xil_types.h"
+#include "xil_mem.h"
+
+/***************** Inline Functions Definitions ********************/
+/*****************************************************************************/
+/**
+* @brief       This function copies memory from one location to another.
+*
+* @param       dst: pointer to the destination memory
+*
+* @param       src: pointer to the source memory
+*
+* @param       cnt: number of bytes to be copied (32-bit count)
+*
+*****************************************************************************/
+void Xil_MemCpy(void* dst, const void* src, u32 cnt)
+{
+	char *d = (char*)(void *)dst;
+	const char *s = src;
+
+	while (cnt >= sizeof (s32)) {
+		*(s32*)d = *(s32*)s;
+		d += sizeof (s32);
+		s += sizeof (s32);
+		cnt -= sizeof (s32);
+	}
+	while (cnt >= sizeof (u16)) {
+		*(u16*)d = *(u16*)s;
+		d += sizeof (u16);
+		s += sizeof (u16);
+		cnt -= sizeof (u16);
+	}
+	while (cnt > 0U) {
+		*d = *s;
+		d += 1U;
+		s += 1U;
+		cnt -= 1U;
+	}
+}
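+
+/*
+ * Editor's illustration, not part of the imported Xilinx file: Xil_MemCpy is
+ * meant for word-aligned copies; with 4-byte-aligned buffers such as these
+ * hypothetical arrays, the s32-wide loop does the bulk of the work.
+ */
+static u32 ExampleSrc[64];
+static u32 ExampleDst[64];
+
+static void Example_CopyWords(void)
+{
+	Xil_MemCpy(ExampleDst, ExampleSrc, sizeof(ExampleSrc));
+}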
diff --git a/spec/build/bsps/objxilinxsupport.yml b/spec/build/bsps/objxilinxsupport.yml
new file mode 100644
index 0000000000..cf70d57c9b
--- /dev/null
+++ b/spec/build/bsps/objxilinxsupport.yml
@@ -0,0 +1,45 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by: true
+includes:
+- bsps/include/xil/
+- bsps/include/xil/${XIL_SUPPORT_PATH}/
+install:
+- destination: ${BSP_INCLUDEDIR}
+  source:
+  - bsps/include/xil/bspconfig.h
+  - bsps/include/xil/sleep.h
+  - bsps/include/xil/xbasic_types.h
+  - bsps/include/xil/xil_assert.h
+  - bsps/include/xil/xil_exception.h
+  - bsps/include/xil/xil_io.h
+  - bsps/include/xil/xil_mem.h
+  - bsps/include/xil/xil_printf.h
+  - bsps/include/xil/xil_smc.h
+  - bsps/include/xil/xil_types.h
+  - bsps/include/xil/xparameters.h
+  - bsps/include/xil/xpseudo_asm_gcc.h
+  - bsps/include/xil/xstatus.h
+links:
+- role: build-dependency
+  uid: objxilinxsupportmb
+- role: build-dependency
+  uid: objxilinxsupportr5
+- role: build-dependency
+  uid: objxilinxsupporta9
+- role: build-dependency
+  uid: objxilinxsupportilp32
+- role: build-dependency
+  uid: objxilinxsupportlp64
+- role: build-dependency
+  uid: optxilsupportpath
+source:
+- bsps/shared/xil/xil_cache.c
+- bsps/shared/xil/xil_assert.c
+- bsps/shared/xil/xil_mem.c
+type: build
diff --git a/spec/build/bsps/objxilinxsupporta9.yml b/spec/build/bsps/objxilinxsupporta9.yml
new file mode 100644
index 0000000000..291545bb69
--- /dev/null
+++ b/spec/build/bsps/objxilinxsupporta9.yml
@@ -0,0 +1,20 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by:
+- bsps/arm/xilinx-zynq
+- bsps/arm/xilinx-zynqmp
+includes: []
+install:
+- destination: ${BSP_INCLUDEDIR}
+  source:
+  - bsps/include/xil/arm/cortexa9/xil_cache.h
+  - bsps/include/xil/arm/cortexa9/xpseudo_asm.h
+  - bsps/include/xil/arm/cortexa9/xreg_cortexa9.h
+links: []
+source: []
+type: build
diff --git a/spec/build/bsps/objxilinxsupportilp32.yml b/spec/build/bsps/objxilinxsupportilp32.yml
new file mode 100644
index 0000000000..f82094d3db
--- /dev/null
+++ b/spec/build/bsps/objxilinxsupportilp32.yml
@@ -0,0 +1,20 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by:
+- aarch64/xilinx_zynqmp_ilp32_qemu
+- aarch64/xilinx_zynqmp_ilp32_zu3eg
+includes: []
+install:
+- destination: ${BSP_INCLUDEDIR}
+  source:
+  - bsps/include/xil/arm/ARMv8/32bit/xil_cache.h
+  - bsps/include/xil/arm/ARMv8/32bit/xpseudo_asm.h
+  - bsps/include/xil/arm/ARMv8/32bit/xreg_cortexa53.h
+links: []
+source: []
+type: build
diff --git a/spec/build/bsps/objxilinxsupportlp64.yml b/spec/build/bsps/objxilinxsupportlp64.yml
new file mode 100644
index 0000000000..b1aa5c86fc
--- /dev/null
+++ b/spec/build/bsps/objxilinxsupportlp64.yml
@@ -0,0 +1,22 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by:
+- bsps/aarch64/xilinx_versal
+- aarch64/xilinx_zynqmp_lp64_cfc400x
+- aarch64/xilinx_zynqmp_lp64_qemu
+- aarch64/xilinx_zynqmp_lp64_zu3eg
+includes: []
+install:
+- destination: ${BSP_INCLUDEDIR}
+  source:
+  - bsps/include/xil/arm/ARMv8/64bit/xil_cache.h
+  - bsps/include/xil/arm/ARMv8/64bit/xpseudo_asm.h
+  - bsps/include/xil/arm/ARMv8/64bit/xreg_cortexa53.h
+links: []
+source: []
+type: build
diff --git a/spec/build/bsps/objxilinxsupportmb.yml b/spec/build/bsps/objxilinxsupportmb.yml
new file mode 100644
index 0000000000..4efac3bd5f
--- /dev/null
+++ b/spec/build/bsps/objxilinxsupportmb.yml
@@ -0,0 +1,17 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by:
+- microblaze
+includes: []
+install:
+- destination: ${BSP_INCLUDEDIR}
+  source:
+  - bsps/include/xil/microblaze/xil_cache.h
+links: []
+source: []
+type: build
diff --git a/spec/build/bsps/objxilinxsupportr5.yml b/spec/build/bsps/objxilinxsupportr5.yml
new file mode 100644
index 0000000000..40bbcbd6bf
--- /dev/null
+++ b/spec/build/bsps/objxilinxsupportr5.yml
@@ -0,0 +1,18 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by: false
+includes: []
+install:
+- destination: ${BSP_INCLUDEDIR}
+  source:
+  - bsps/include/xil/arm/cortexr5/xil_cache.h
+  - bsps/include/xil/arm/cortexr5/xpseudo_asm.h
+  - bsps/include/xil/arm/cortexr5/xreg_cortexr5.h
+links: []
+source: []
+type: build
diff --git a/spec/build/bsps/optxilsupportpath.yml b/spec/build/bsps/optxilsupportpath.yml
new file mode 100644
index 0000000000..5ddf3d1cd5
--- /dev/null
+++ b/spec/build/bsps/optxilsupportpath.yml
@@ -0,0 +1,34 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+actions:
+- get-string: null
+- env-assign: null
+build-type: option
+copyrights:
+- Copyright (C) 2022 On-Line Applications Research (OAR)
+default: null
+default-by-variant:
+- value: arm/cortexr5
+  variants: []
+- value: microblaze
+  variants:
+  - bsps/microblaze/microblaze_fpga
+- value: arm/cortexa9
+  variants:
+  - bsps/arm/xilinx_zynq
+  - bsps/arm/xilinx_zynqmp
+- value: arm/ARMv8/32bit
+  variants:
+  - aarch64/xilinx_zynqmp_ilp32_qemu
+  - aarch64/xilinx_zynqmp_ilp32_zu3eg
+- value: arm/ARMv8/64bit
+  variants:
+  - bsps/aarch64/xilinx_versal
+  - aarch64/xilinx_zynqmp_lp64_cfc400x
+  - aarch64/xilinx_zynqmp_lp64_qemu
+  - aarch64/xilinx_zynqmp_lp64_zu3eg
+description: 'Set the Xilinx support path'
+enabled-by: true
+format: '{}'
+links: []
+name: XIL_SUPPORT_PATH
+type: build
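
The net effect of these spec files, as an editor's illustration rather than
part of the commit: for an LP64 ZynqMP or Versal BSP the XIL_SUPPORT_PATH
option above resolves to arm/ARMv8/64bit, so objxilinxsupport.yml compiles the
shared sources with bsps/include/xil/ and bsps/include/xil/arm/ARMv8/64bit/ on
the include path, and the matching per-architecture headers are installed into
${BSP_INCLUDEDIR}. Driver or BSP code built against such a BSP can then include
the Xilinx headers directly, for example:

    #include "xil_cache.h"

    void example_flush_all(void)
    {
        Xil_DCacheFlush();
    }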



More information about the vc mailing list