[PATCH rtems-lwip v1 5/9] embeddedsw: Import Xilinx driver source

Kinsey Moore <kinsey.moore@oarcorp.com>
Fri Jul 1 22:31:08 UTC 2022


Import driver source for axi_ethernetlite, axi_ethernet, and CGEM. CGEM is
also known as emacps and ps7_ethernet. These sources were imported from the
Xilinx embeddedsw repository at revision
b9b64f53e11723c8df0dfda1c59742428b6f1df1.
---
 COPYING.embeddedsw                            |   29 +
 ORIGIN.embeddedsw                             |    2 +
 .../ports/xilinx/include/netif/xadapter.h     |   87 ++
 .../ports/xilinx/include/netif/xemacpsif.h    |  166 +++
 .../ports/xilinx/include/netif/xpqueue.h      |   54 +
 .../ports/xilinx/include/netif/xtopology.h    |   60 +
 .../src/contrib/ports/xilinx/netif/xadapter.c |  425 +++++++
 .../ports/xilinx/netif/xemac_ieee_reg.h       |  101 ++
 .../contrib/ports/xilinx/netif/xemacpsif.c    |  795 ++++++++++++
 .../ports/xilinx/netif/xemacpsif_dma.c        |  930 ++++++++++++++
 .../contrib/ports/xilinx/netif/xemacpsif_hw.c |  276 ++++
 .../ports/xilinx/netif/xemacpsif_physpeed.c   | 1037 +++++++++++++++
 .../src/contrib/ports/xilinx/netif/xpqueue.c  |   93 ++
 .../drivers/emacps/src/xemacps.c              |  487 +++++++
 .../drivers/emacps/src/xemacps.h              |  843 ++++++++++++
 .../drivers/emacps/src/xemacps_bd.h           |  762 +++++++++++
 .../drivers/emacps/src/xemacps_bdring.c       | 1076 ++++++++++++++++
 .../drivers/emacps/src/xemacps_bdring.h       |  215 ++++
 .../drivers/emacps/src/xemacps_control.c      | 1133 +++++++++++++++++
 .../drivers/emacps/src/xemacps_hw.h           |  646 ++++++++++
 .../drivers/emacps/src/xemacps_intr.c         |  242 ++++
 .../drivers/scugic/src/xscugic.h              |  601 +++++++++
 .../drivers/scugic/src/xscugic_hw.h           |  697 ++++++++++
 .../src/arm/ARMv8/64bit/xil_cache.h           |   75 ++
 .../standalone/src/arm/ARMv8/64bit/xil_mmu.h  |   94 ++
 .../standalone/src/arm/common/xil_exception.h |  408 ++++++
 .../bsp/standalone/src/common/xil_assert.c    |  126 ++
 .../bsp/standalone/src/common/xil_assert.h    |  176 +++
 .../lib/bsp/standalone/src/common/xil_io.h    |  412 ++++++
 .../lib/bsp/standalone/src/common/xil_types.h |  203 +++
 .../lib/bsp/standalone/src/common/xstatus.h   |  522 ++++++++
 31 files changed, 12773 insertions(+)
 create mode 100644 COPYING.embeddedsw
 create mode 100644 ORIGIN.embeddedsw
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xadapter.h
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xemacpsif.h
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xpqueue.h
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xtopology.h
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xadapter.c
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemac_ieee_reg.h
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif.c
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_dma.c
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_hw.c
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_physpeed.c
 create mode 100644 embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xpqueue.c
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.c
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.h
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bd.h
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.c
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.h
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_control.c
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_hw.h
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_intr.c
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic.h
 create mode 100644 embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic_hw.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_cache.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_mmu.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/arm/common/xil_exception.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/common/xil_assert.c
 create mode 100644 embeddedsw/lib/bsp/standalone/src/common/xil_assert.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/common/xil_io.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/common/xil_types.h
 create mode 100644 embeddedsw/lib/bsp/standalone/src/common/xstatus.h

diff --git a/COPYING.embeddedsw b/COPYING.embeddedsw
new file mode 100644
index 0000000..814be7b
--- /dev/null
+++ b/COPYING.embeddedsw
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2007 - 2021 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
diff --git a/ORIGIN.embeddedsw b/ORIGIN.embeddedsw
new file mode 100644
index 0000000..b5ad15e
--- /dev/null
+++ b/ORIGIN.embeddedsw
@@ -0,0 +1,2 @@
+The files under the embeddedsw/ directory are sourced from:
+https://github.com/Xilinx/embeddedsw.git
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xadapter.h b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xadapter.h
new file mode 100644
index 0000000..4d32b7f
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xadapter.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2007 - 2021 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#ifndef __XADAPTER_H_
+#define __XADAPTER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "lwipopts.h"
+
+#if !NO_SYS
+#include "lwip/sys.h"
+#endif
+
+#include "lwip/netif.h"
+#include "lwip/ip.h"
+
+#include "netif/xtopology.h"
+
+struct xemac_s {
+	enum xemac_types type;
+	int  topology_index;
+	void *state;
+#if !NO_SYS
+    sys_sem_t sem_rx_data_available;
+#if defined(__arm__) && !defined(ARMR5)
+	TimerHandle_t xTimer;
+#endif
+#endif
+};
+
+enum ethernet_link_status {
+	ETH_LINK_UNDEFINED = 0,
+	ETH_LINK_UP,
+	ETH_LINK_DOWN,
+	ETH_LINK_NEGOTIATING
+};
+
+void eth_link_detect(struct netif *netif);
+void 		lwip_raw_init();
+int 		xemacif_input(struct netif *netif);
+void 		xemacif_input_thread(struct netif *netif);
+struct netif *	xemac_add(struct netif *netif,
+	ip_addr_t *ipaddr, ip_addr_t *netmask, ip_addr_t *gw,
+	unsigned char *mac_ethernet_address,
+	UINTPTR mac_baseaddr);
+#if defined (__arm__) || defined (__aarch64__)
+void xemacpsif_resetrx_on_no_rxdata(struct netif *netif);
+#endif
+
+/* global lwip debug variable used for debugging */
+extern int lwip_runtime_debug;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xemacpsif.h b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xemacpsif.h
new file mode 100644
index 0000000..f108920
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xemacpsif.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010 - 2019 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#ifndef __NETIF_XEMACPSIF_H__
+#define __NETIF_XEMACPSIF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xlwipconfig.h"
+#include "lwip/netif.h"
+#include "netif/etharp.h"
+#include "lwip/sys.h"
+#include "netif/xadapter.h"
+
+#include "xstatus.h"
+#include "sleep.h"
+#include "xparameters.h"
+#include "xparameters_ps.h"	/* defines XPAR values */
+#include "xil_types.h"
+#include "xil_assert.h"
+#include "xil_io.h"
+#include "xil_exception.h"
+#include "xpseudo_asm.h"
+#include "xil_cache.h"
+#include "xil_printf.h"
+#include "xscugic.h"
+#include "xemacps.h"		/* defines XEmacPs API */
+
+#include "netif/xpqueue.h"
+#include "xlwipconfig.h"
+
+#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+#include "xil_smc.h"
+#endif
+
+#define ZYNQ_EMACPS_0_BASEADDR 0xE000B000
+#define ZYNQ_EMACPS_1_BASEADDR 0xE000C000
+
+#define ZYNQMP_EMACPS_0_BASEADDR 0xFF0B0000
+#define ZYNQMP_EMACPS_1_BASEADDR 0xFF0C0000
+#define ZYNQMP_EMACPS_2_BASEADDR 0xFF0D0000
+#define ZYNQMP_EMACPS_3_BASEADDR 0xFF0E0000
+
+#define CRL_APB_GEM0_REF_CTRL	0xFF5E0050
+#define CRL_APB_GEM1_REF_CTRL	0xFF5E0054
+#define CRL_APB_GEM2_REF_CTRL	0xFF5E0058
+#define CRL_APB_GEM3_REF_CTRL	0xFF5E005C
+
+#define CRL_APB_GEM_DIV0_MASK	0x00003F00
+#define CRL_APB_GEM_DIV0_SHIFT	8
+#define CRL_APB_GEM_DIV1_MASK	0x003F0000
+#define CRL_APB_GEM_DIV1_SHIFT	16
+
+#define VERSAL_EMACPS_0_BASEADDR 0xFF0C0000
+#define VERSAL_EMACPS_1_BASEADDR 0xFF0D0000
+
+#define VERSAL_CRL_GEM0_REF_CTRL	0xFF5E0118
+#define VERSAL_CRL_GEM1_REF_CTRL	0xFF5E011C
+
+#define VERSAL_CRL_GEM_DIV_MASK		0x0003FF00
+#define VERSAL_CRL_APB_GEM_DIV_SHIFT	8
+
+#if defined (ARMR5) || (__aarch64__) || (ARMA53_32) || (__MICROBLAZE__)
+#if defined (USE_JUMBO_FRAMES)
+#define ZYNQMP_USE_JUMBO
+#endif
+#endif
+
+#define GEM_VERSION_ZYNQMP	7
+#define GEM_VERSION_VERSAL	0x107
+
+#define MAX_FRAME_SIZE_JUMBO (XEMACPS_MTU_JUMBO + XEMACPS_HDR_SIZE + XEMACPS_TRL_SIZE)
+
+void 	xemacpsif_setmac(u32_t index, u8_t *addr);
+u8_t*	xemacpsif_getmac(u32_t index);
+err_t 	xemacpsif_init(struct netif *netif);
+s32_t 	xemacpsif_input(struct netif *netif);
+
+/* xaxiemacif_hw.c */
+void 	xemacps_error_handler(XEmacPs * Temac);
+
+/* structure within each netif, encapsulating all information required for
+ * using a particular temac instance
+ */
+typedef struct {
+	XEmacPs emacps;
+
+	/* queue to store overflow packets */
+	pq_queue_t *recv_q;
+	pq_queue_t *send_q;
+
+	/* pointers to memory holding buffer descriptors (used only with SDMA) */
+	void *rx_bdspace;
+	void *tx_bdspace;
+
+	unsigned int last_rx_frms_cntr;
+
+} xemacpsif_s;
+
+extern xemacpsif_s xemacpsif;
+
+s32_t	is_tx_space_available(xemacpsif_s *emac);
+
+/* xemacpsif_dma.c */
+
+void  process_sent_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *txring);
+u32_t phy_setup_emacps (XEmacPs *xemacpsp, u32_t phy_addr);
+void detect_phy(XEmacPs *xemacpsp);
+void emacps_send_handler(void *arg);
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p,
+		u32_t block_till_tx_complete, u32_t *to_block_index);
+#else
+XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p);
+#endif
+void emacps_recv_handler(void *arg);
+void emacps_error_handler(void *arg,u8 Direction, u32 ErrorWord);
+void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring);
+void HandleTxErrors(struct xemac_s *xemac);
+void HandleEmacPsError(struct xemac_s *xemac);
+XEmacPs_Config *xemacps_lookup_config(unsigned mac_base);
+void init_emacps(xemacpsif_s *xemacps, struct netif *netif);
+void setup_isr (struct xemac_s *xemac);
+XStatus init_dma(struct xemac_s *xemac);
+void start_emacps (xemacpsif_s *xemacps);
+void free_txrx_pbufs(xemacpsif_s *xemacpsif);
+void free_onlytx_pbufs(xemacpsif_s *xemacpsif);
+void init_emacps_on_error (xemacpsif_s *xemacps, struct netif *netif);
+void clean_dma_txdescs(struct xemac_s *xemac);
+void resetrx_on_no_rxdata(xemacpsif_s *xemacpsif);
+void reset_dma(struct xemac_s *xemac);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __NETIF_XAXIEMACIF_H__ */
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xpqueue.h b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xpqueue.h
new file mode 100644
index 0000000..132f02f
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xpqueue.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2007 - 2019 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#ifndef __LWIP_PBUF_QUEUE_H_
+#define __LWIP_PBUF_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PQ_QUEUE_SIZE 4096
+
+typedef struct {
+	void *data[PQ_QUEUE_SIZE];
+	int head, tail, len;
+} pq_queue_t;
+
+pq_queue_t*	pq_create_queue();
+int 		pq_enqueue(pq_queue_t *q, void *p);
+void*		pq_dequeue(pq_queue_t *q);
+int		pq_qlength(pq_queue_t *q);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xtopology.h b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xtopology.h
new file mode 100644
index 0000000..51718f1
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/include/netif/xtopology.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2007 - 2019 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#ifndef __XTOPOLOGY_H_
+#define __XTOPOLOGY_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "xil_types.h"
+
+enum xemac_types { xemac_type_unknown = -1, xemac_type_xps_emaclite, xemac_type_xps_ll_temac, xemac_type_axi_ethernet, xemac_type_emacps };
+
+struct xtopology_t {
+	UINTPTR emac_baseaddr;
+	enum xemac_types emac_type;
+	UINTPTR intc_baseaddr;
+	unsigned intc_emac_intr;	/* valid only for xemac_type_xps_emaclite */
+	UINTPTR scugic_baseaddr; /* valid only for Zynq */
+	unsigned scugic_emac_intr; /* valid only for GEM */
+};
+
+extern int xtopology_n_emacs;
+extern struct xtopology_t xtopology[];
+
+int xtopology_find_index(unsigned base);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xadapter.c b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xadapter.c
new file mode 100644
index 0000000..9594ff5
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xadapter.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2007 - 2021 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwipopts.h"
+#include "xlwipconfig.h"
+#include "xemac_ieee_reg.h"
+
+#if !NO_SYS
+#endif
+
+#include "lwip/mem.h"
+#include "lwip/stats.h"
+#include "lwip/sys.h"
+#include "lwip/ip.h"
+#include "lwip/tcp.h"
+#include "lwip/udp.h"
+#include "lwip/priv/tcp_priv.h"
+
+#include "netif/etharp.h"
+#include "netif/xadapter.h"
+
+#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
+#include "netif/xemacliteif.h"
+#endif
+
+#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
+#include "netif/xaxiemacif.h"
+#endif
+
+#ifdef XLWIP_CONFIG_INCLUDE_GEM
+#include "netif/xemacpsif.h"
+#endif
+
+#if !NO_SYS
+#include "lwip/tcpip.h"
+
+#define THREAD_STACKSIZE 256
+#define LINK_DETECT_THREAD_INTERVAL 1000 /* one second */
+
+void link_detect_thread(void *p);
+#endif
+
+/* global lwip debug variable used for debugging */
+int lwip_runtime_debug = 0;
+
+enum ethernet_link_status eth_link_status = ETH_LINK_UNDEFINED;
+u32_t phyaddrforemac;
+
+void
+lwip_raw_init()
+{
+	ip_init();	/* Doesn't do much, it should be called to handle future changes. */
+#if LWIP_UDP
+	udp_init();	/* Clears the UDP PCB list. */
+#endif
+#if LWIP_TCP
+	tcp_init();	/* Clears the TCP PCB list and clears some internal TCP timers. */
+			/* Note: you must call tcp_fasttmr() and tcp_slowtmr() at the */
+			/* predefined regular intervals after this initialization. */
+#endif
+}
+
+static enum xemac_types
+find_mac_type(unsigned base)
+{
+	int i;
+
+	for (i = 0; i < xtopology_n_emacs; i++) {
+		if (xtopology[i].emac_baseaddr == base)
+			return xtopology[i].emac_type;
+	}
+
+	return xemac_type_unknown;
+}
+
+int
+xtopology_find_index(unsigned base)
+{
+	int i;
+
+	for (i = 0; i < xtopology_n_emacs; i++) {
+		if (xtopology[i].emac_baseaddr == base)
+			return i;
+	}
+
+	return -1;
+}
+
+/*
+ * xemac_add: this is a wrapper around lwIP's netif_add function.
+ * The objective is to provide portability between the different Xilinx MAC's
+ * This function can be used to add both xps_ethernetlite and xps_ll_temac
+ * based interfaces
+ */
+struct netif *
+xemac_add(struct netif *netif,
+	ip_addr_t *ipaddr, ip_addr_t *netmask, ip_addr_t *gw,
+	unsigned char *mac_ethernet_address,
+	UINTPTR mac_baseaddr)
+{
+	int i;
+
+#if !NO_SYS
+	/* Start thread to detect link periodically for Hot Plug autodetect */
+	sys_thread_new("link_detect_thread", link_detect_thread, netif,
+			THREAD_STACKSIZE, tskIDLE_PRIORITY);
+#endif
+
+	/* set mac address */
+	netif->hwaddr_len = 6;
+	for (i = 0; i < 6; i++)
+		netif->hwaddr[i] = mac_ethernet_address[i];
+
+	/* initialize based on MAC type */
+		switch (find_mac_type(mac_baseaddr)) {
+			case xemac_type_xps_emaclite:
+#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
+				return netif_add(netif, ipaddr, netmask, gw,
+					(void*)mac_baseaddr,
+					xemacliteif_init,
+#if NO_SYS
+					ethernet_input
+#else
+					tcpip_input
+#endif
+					);
+#else
+				return NULL;
+#endif
+			case xemac_type_axi_ethernet:
+#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
+				return netif_add(netif, ipaddr, netmask, gw,
+					(void*)mac_baseaddr,
+					xaxiemacif_init,
+#if NO_SYS
+					ethernet_input
+#else
+					tcpip_input
+#endif
+					);
+#else
+				return NULL;
+#endif
+#if defined (__arm__) || defined (__aarch64__)
+			case xemac_type_emacps:
+#ifdef XLWIP_CONFIG_INCLUDE_GEM
+				return netif_add(netif, ipaddr, netmask, gw,
+						(void*)mac_baseaddr,
+						xemacpsif_init,
+#if NO_SYS
+						ethernet_input
+#else
+						tcpip_input
+#endif
+
+						);
+#endif
+#endif
+			default:
+				xil_printf("unable to determine type of EMAC with baseaddress 0x%08x\r\n",
+						mac_baseaddr);
+				return NULL;
+	}
+}
+
+#if !NO_SYS
+/*
+ * The input thread calls lwIP to process any received packets.
+ * This thread waits until a packet is received (sem_rx_data_available),
+ * and then calls xemacif_input which processes 1 packet at a time.
+ */
+void
+xemacif_input_thread(struct netif *netif)
+{
+	struct xemac_s *emac = (struct xemac_s *)netif->state;
+	while (1) {
+		/* sleep until there are packets to process
+		 * This semaphore is set by the packet receive interrupt
+		 * routine.
+		 */
+		sys_sem_wait(&emac->sem_rx_data_available);
+
+		/* move all received packets to lwIP */
+		xemacif_input(netif);
+	}
+}
+#endif
+
+int
+xemacif_input(struct netif *netif)
+{
+	struct xemac_s *emac = (struct xemac_s *)netif->state;
+
+	int n_packets = 0;
+
+	switch (emac->type) {
+		case xemac_type_xps_emaclite:
+#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
+			n_packets = xemacliteif_input(netif);
+			break;
+#else
+			print("incorrect configuration: xps_ethernetlite drivers not present?");
+			while(1);
+			return 0;
+#endif
+		case xemac_type_axi_ethernet:
+#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
+			n_packets = xaxiemacif_input(netif);
+			break;
+#else
+			print("incorrect configuration: axi_ethernet drivers not present?");
+			while(1);
+			return 0;
+#endif
+#if defined (__arm__) || defined (__aarch64__)
+		case xemac_type_emacps:
+#ifdef XLWIP_CONFIG_INCLUDE_GEM
+			n_packets = xemacpsif_input(netif);
+			break;
+#else
+			xil_printf("incorrect configuration: ps7_ethernet drivers not present?\r\n");
+			while(1);
+			return 0;
+#endif
+#endif
+		default:
+			print("incorrect configuration: unknown temac type");
+			while(1);
+			return 0;
+	}
+
+	return n_packets;
+}
+
+#if defined(XLWIP_CONFIG_INCLUDE_GEM)
+static u32_t phy_link_detect(XEmacPs *xemacp, u32_t phy_addr)
+{
+	u16_t status;
+
+	/* Read Phy Status register twice to get the confirmation of the current
+	 * link status.
+	 */
+	XEmacPs_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	XEmacPs_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	if (status & IEEE_STAT_LINK_STATUS)
+		return 1;
+	return 0;
+}
+#elif defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET)
+static u32_t phy_link_detect(XAxiEthernet *xemacp, u32_t phy_addr)
+{
+	u16_t status;
+
+	/* Read Phy Status register twice to get the confirmation of the current
+	 * link status.
+	 */
+	XAxiEthernet_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	XAxiEthernet_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	if (status & IEEE_STAT_LINK_STATUS)
+		return 1;
+	return 0;
+}
+#elif defined(XLWIP_CONFIG_INCLUDE_EMACLITE)
+static u32_t phy_link_detect(XEmacLite *xemacp, u32_t phy_addr)
+{
+	u16_t status;
+
+	/* Read Phy Status register twice to get the confirmation of the current
+	 * link status.
+	 */
+	XEmacLite_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	XEmacLite_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	if (status & IEEE_STAT_LINK_STATUS)
+		return 1;
+	return 0;
+}
+#endif
+
+#if defined(XLWIP_CONFIG_INCLUDE_GEM)
+static u32_t phy_autoneg_status(XEmacPs *xemacp, u32_t phy_addr)
+{
+	u16_t status;
+
+	/* Read Phy Status register twice to get the confirmation of the current
+	 * link status.
+	 */
+	XEmacPs_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	XEmacPs_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	if (status & IEEE_STAT_AUTONEGOTIATE_COMPLETE)
+		return 1;
+	return 0;
+}
+#elif defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET)
+static u32_t phy_autoneg_status(XAxiEthernet *xemacp, u32_t phy_addr)
+{
+	u16_t status;
+
+	/* Read Phy Status register twice to get the confirmation of the current
+	 * link status.
+	 */
+	XAxiEthernet_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	XAxiEthernet_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	if (status & IEEE_STAT_AUTONEGOTIATE_COMPLETE)
+		return 1;
+	return 0;
+}
+#elif defined(XLWIP_CONFIG_INCLUDE_EMACLITE)
+static u32_t phy_autoneg_status(XEmacLite *xemacp, u32_t phy_addr)
+{
+	u16_t status;
+
+	/* Read Phy Status register twice to get the confirmation of the current
+	 * link status.
+	 */
+	XEmacLite_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	XEmacLite_PhyRead(xemacp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	if (status & IEEE_STAT_AUTONEGOTIATE_COMPLETE)
+		return 1;
+	return 0;
+}
+#endif
+
+void eth_link_detect(struct netif *netif)
+{
+	u32_t link_speed, phy_link_status;
+	struct xemac_s *xemac = (struct xemac_s *)(netif->state);
+
+#if defined(XLWIP_CONFIG_INCLUDE_GEM)
+	xemacpsif_s *xemacs = (xemacpsif_s *)(xemac->state);
+	XEmacPs *xemacp = &xemacs->emacps;
+#elif defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET)
+	xaxiemacif_s *xemacs = (xaxiemacif_s *)(xemac->state);
+	XAxiEthernet *xemacp = &xemacs->axi_ethernet;
+#elif defined(XLWIP_CONFIG_INCLUDE_EMACLITE)
+	xemacliteif_s *xemacs = (xemacliteif_s *)(xemac->state);
+	XEmacLite *xemacp = xemacs->instance;
+#endif
+
+	if ((xemacp->IsReady != (u32)XIL_COMPONENT_IS_READY) ||
+			(eth_link_status == ETH_LINK_UNDEFINED))
+		return;
+
+	phy_link_status = phy_link_detect(xemacp, phyaddrforemac);
+
+	if ((eth_link_status == ETH_LINK_UP) && (!phy_link_status))
+		eth_link_status = ETH_LINK_DOWN;
+
+	switch (eth_link_status) {
+		case ETH_LINK_UNDEFINED:
+		case ETH_LINK_UP:
+			return;
+		case ETH_LINK_DOWN:
+			netif_set_link_down(netif);
+			eth_link_status = ETH_LINK_NEGOTIATING;
+			xil_printf("Ethernet Link down\r\n");
+			break;
+		case ETH_LINK_NEGOTIATING:
+			if (phy_link_status &&
+				phy_autoneg_status(xemacp, phyaddrforemac)) {
+
+				/* Initiate Phy setup to get link speed */
+#if defined(XLWIP_CONFIG_INCLUDE_GEM)
+				link_speed = phy_setup_emacps(xemacp,
+								phyaddrforemac);
+				XEmacPs_SetOperatingSpeed(xemacp, link_speed);
+#elif defined(XLWIP_CONFIG_INCLUDE_AXI_ETHERNET)
+				link_speed = phy_setup_axiemac(xemacp);
+				XAxiEthernet_SetOperatingSpeed(xemacp,
+							       link_speed);
+#endif
+				netif_set_link_up(netif);
+				eth_link_status = ETH_LINK_UP;
+				xil_printf("Ethernet Link up\r\n");
+			}
+			break;
+	}
+}
+
+#if !NO_SYS
+void link_detect_thread(void *p)
+{
+	struct netif *netif = (struct netif *) p;
+
+	while (1) {
+		/* Call eth_link_detect() every second to detect Ethernet link
+		 * change.
+		 */
+		eth_link_detect(netif);
+		vTaskDelay(LINK_DETECT_THREAD_INTERVAL / portTICK_RATE_MS);
+	}
+}
+#endif
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemac_ieee_reg.h b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemac_ieee_reg.h
new file mode 100644
index 0000000..4c240a9
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemac_ieee_reg.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2018 - 2019 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#ifndef __XEMAC_IEEE_REGS_H_
+#define __XEMAC_IEEE_REGS_H_
+
+/* Advertisement control register. */
+#define ADVERTISE_10HALF            0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_1000XFULL         0x0020  /* Try for 1000BASE-X full-duplex */
+#define ADVERTISE_10FULL            0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_1000XHALF         0x0040  /* Try for 1000BASE-X half-duplex */
+#define ADVERTISE_100HALF           0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_1000XPAUSE        0x0080  /* Try for 1000BASE-X pause    */
+#define ADVERTISE_100FULL           0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_1000XPSE_ASYM     0x0100  /* Try for 1000BASE-X asym pause */
+#define ADVERTISE_100BASE4          0x0200  /* Try for 100mbps 4k packets  */
+
+
+#define ADVERTISE_100_AND_10        (ADVERTISE_10FULL | ADVERTISE_100FULL | \
+					ADVERTISE_10HALF | ADVERTISE_100HALF)
+#define ADVERTISE_100               (ADVERTISE_100FULL | ADVERTISE_100HALF)
+#define ADVERTISE_10                (ADVERTISE_10FULL | ADVERTISE_10HALF)
+
+#define ADVERTISE_1000              0x0300
+
+
+#define IEEE_CONTROL_REG_OFFSET                    0
+#define IEEE_STATUS_REG_OFFSET                     1
+#define IEEE_AUTONEGO_ADVERTISE_REG                4
+#define IEEE_PARTNER_ABILITIES_1_REG_OFFSET        5
+#define IEEE_PARTNER_ABILITIES_2_REG_OFFSET        8
+#define IEEE_PARTNER_ABILITIES_3_REG_OFFSET        10
+#define IEEE_1000_ADVERTISE_REG_OFFSET             9
+#define IEEE_MMD_ACCESS_CONTROL_REG                13
+#define IEEE_MMD_ACCESS_ADDRESS_DATA_REG           14
+#define IEEE_COPPER_SPECIFIC_CONTROL_REG           16
+#define IEEE_SPECIFIC_STATUS_REG                   17
+#define IEEE_COPPER_SPECIFIC_STATUS_REG_2          19
+#define IEEE_EXT_PHY_SPECIFIC_CONTROL_REG          20
+#define IEEE_CONTROL_REG_MAC                       21
+#define IEEE_PAGE_ADDRESS_REGISTER                 22
+
+#define IEEE_CTRL_1GBPS_LINKSPEED_MASK             0x2040
+#define IEEE_CTRL_LINKSPEED_MASK                   0x0040
+#define IEEE_CTRL_LINKSPEED_1000M                  0x0040
+#define IEEE_CTRL_LINKSPEED_100M                   0x2000
+#define IEEE_CTRL_LINKSPEED_10M                    0x0000
+#define IEEE_CTRL_FULL_DUPLEX                      0x100
+#define IEEE_CTRL_RESET_MASK                       0x8000
+#define IEEE_CTRL_AUTONEGOTIATE_ENABLE             0x1000
+#define IEEE_STAT_AUTONEGOTIATE_CAPABLE            0x0008
+#define IEEE_STAT_AUTONEGOTIATE_COMPLETE           0x0020
+#define IEEE_STAT_AUTONEGOTIATE_RESTART            0x0200
+#define IEEE_STAT_LINK_STATUS                      0x0004
+#define IEEE_STAT_1GBPS_EXTENSIONS                 0x0100
+#define IEEE_AN1_ABILITY_MASK                      0x1FE0
+#define IEEE_AN3_ABILITY_MASK_1GBPS                0x0C00
+#define IEEE_AN1_ABILITY_MASK_100MBPS              0x0380
+#define IEEE_AN1_ABILITY_MASK_10MBPS               0x0060
+#define IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK         0x0030
+
+#define IEEE_SPEED_MASK                            0xC000
+#define IEEE_SPEED_1000                            0x8000
+#define IEEE_SPEED_100                             0x4000
+
+#define IEEE_ASYMMETRIC_PAUSE_MASK                 0x0800
+#define IEEE_PAUSE_MASK                            0x0400
+#define IEEE_AUTONEG_ERROR_MASK                    0x8000
+
+#define IEEE_MMD_ACCESS_CTRL_DEVAD_MASK            0x1F
+#define IEEE_MMD_ACCESS_CTRL_PIDEVAD_MASK          0x801F
+#define IEEE_MMD_ACCESS_CTRL_NOPIDEVAD_MASK        0x401F
+
+#endif /* __XEMAC_IEEE_REGS_H_ */
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif.c b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif.c
new file mode 100644
index 0000000..1bf3abb
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (C) 2010 - 2021 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <xparameters.h>
+#include "lwipopts.h"
+#include "xlwipconfig.h"
+#include "lwip/opt.h"
+#include "lwip/def.h"
+#include "lwip/mem.h"
+#include "lwip/pbuf.h"
+#include "lwip/sys.h"
+#include "lwip/stats.h"
+#include "lwip/igmp.h"
+
+#include "netif/etharp.h"
+#include "netif/xemacpsif.h"
+#include "netif/xadapter.h"
+#include "netif/xpqueue.h"
+#include "xparameters.h"
+#include "xscugic.h"
+#include "xemacps.h"
+
+#if LWIP_IPV6
+#include "lwip/ethip6.h"
+#endif
+
+
+/* Define those to better describe your network interface. */
+#define IFNAME0 't'
+#define IFNAME1 'e'
+
+#if LWIP_IGMP
+static err_t xemacpsif_mac_filter_update (struct netif *netif,
+							ip_addr_t *group, u8_t action);
+
+static u8_t xemacps_mcast_entry_mask = 0;
+#endif
+
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+static err_t xemacpsif_mld6_mac_filter_update (struct netif *netif,
+							ip_addr_t *group, u8_t action);
+
+static u8_t xemacps_mld6_mcast_entry_mask;
+#endif
+
+XEmacPs_Config *mac_config;
+struct netif *NetIf;
+
+#if !NO_SYS
+#if defined(__arm__) && !defined(ARMR5)
+int32_t lExpireCounter = 0;
+#define RESETRXTIMEOUT 10
+#endif
+#endif
+
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+extern volatile u32_t notifyinfo[4*XLWIP_CONFIG_N_TX_DESC];
+#endif
+
+/*
+ * this function is always called with interrupts off
+ * this function also assumes that there are available BD's
+ */
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+static err_t _unbuffered_low_level_output(xemacpsif_s *xemacpsif,
+		struct pbuf *p, u32_t block_till_tx_complete, u32_t *to_block_index )
+#else
+static err_t _unbuffered_low_level_output(xemacpsif_s *xemacpsif,
+													struct pbuf *p)
+#endif
+{
+	XStatus status = 0;
+	err_t err = ERR_MEM;
+
+#if ETH_PAD_SIZE
+	pbuf_header(p, -ETH_PAD_SIZE);	/* drop the padding word */
+#endif
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	if (block_till_tx_complete == 1) {
+		status = emacps_sgsend(xemacpsif, p, 1, to_block_index);
+	} else {
+		status = emacps_sgsend(xemacpsif, p, 0, to_block_index);
+	}
+#else
+	status = emacps_sgsend(xemacpsif, p);
+#endif
+	if (status != XST_SUCCESS) {
+#if LINK_STATS
+		lwip_stats.link.drop++;
+#endif
+	} else {
+		err = ERR_OK;
+	}
+
+#if ETH_PAD_SIZE
+	pbuf_header(p, ETH_PAD_SIZE);	/* reclaim the padding word */
+#endif
+
+#if LINK_STATS
+	lwip_stats.link.xmit++;
+#endif /* LINK_STATS */
+
+	return err;
+
+}
+
+/*
+ * low_level_output():
+ *
+ * Should do the actual transmission of the packet. The packet is
+ * contained in the pbuf that is passed to the function. This pbuf
+ * might be chained.
+ *
+ */
+
+static err_t low_level_output(struct netif *netif, struct pbuf *p)
+{
+    err_t err = ERR_MEM;
+    s32_t freecnt;
+    XEmacPs_BdRing *txring;
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	u32_t notfifyblocksleepcntr;
+	u32_t to_block_index;
+#endif
+
+	SYS_ARCH_DECL_PROTECT(lev);
+	struct xemac_s *xemac = (struct xemac_s *)(netif->state);
+	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
+
+	SYS_ARCH_PROTECT(lev);
+	/* check if space is available to send */
+    freecnt = is_tx_space_available(xemacpsif);
+    if (freecnt <= 5) {
+	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
+		process_sent_bds(xemacpsif, txring);
+	}
+
+    if (is_tx_space_available(xemacpsif)) {
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+		if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
+			err = _unbuffered_low_level_output(xemacpsif, p, 1, &to_block_index);
+		} else {
+			err = _unbuffered_low_level_output(xemacpsif, p, 0, &to_block_index);
+		}
+#else
+		err = _unbuffered_low_level_output(xemacpsif, p);
+#endif
+	} else {
+#if LINK_STATS
+		lwip_stats.link.drop++;
+#endif
+		printf("pack dropped, no space\r\n");
+		SYS_ARCH_UNPROTECT(lev);
+		goto return_pack_dropped;
+	}
+	SYS_ARCH_UNPROTECT(lev);
+
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	if (netif_is_opt_block_tx_set(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET)) {
+		/* Wait for approx 1 second before timing out */
+		notfifyblocksleepcntr = 900000;
+		while(notifyinfo[to_block_index] == 1) {
+			usleep(1);
+			notfifyblocksleepcntr--;
+			if (notfifyblocksleepcntr <= 0) {
+				err = ERR_TIMEOUT;
+				break;
+			}
+		}
+	}
+	netif_clear_opt_block_tx(netif, NETIF_ENABLE_BLOCKING_TX_FOR_PACKET);
+#endif
+return_pack_dropped:
+	return err;
+}
+
+/*
+ * low_level_input():
+ *
+ * Should allocate a pbuf and transfer the bytes of the incoming
+ * packet from the interface into the pbuf.
+ *
+ */
+static struct pbuf * low_level_input(struct netif *netif)
+{
+	struct xemac_s *xemac = (struct xemac_s *)(netif->state);
+	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
+	struct pbuf *p;
+
+	/* see if there is data to process */
+	if (pq_qlength(xemacpsif->recv_q) == 0)
+		return NULL;
+
+	/* return one packet from receive q */
+	p = (struct pbuf *)pq_dequeue(xemacpsif->recv_q);
+	return p;
+}
+
+/*
+ * xemacpsif_output():
+ *
+ * This function is called by the TCP/IP stack when an IP packet
+ * should be sent. It calls the function called low_level_output() to
+ * do the actual transmission of the packet.
+ *
+ */
+
+static err_t xemacpsif_output(struct netif *netif, struct pbuf *p,
+		const ip_addr_t *ipaddr)
+{
+	/* resolve hardware address, then send (or queue) packet */
+	return etharp_output(netif, p, ipaddr);
+}
+
+/*
+ * xemacpsif_input():
+ *
+ * This function should be called when a packet is ready to be read
+ * from the interface. It uses the function low_level_input() that
+ * should handle the actual reception of bytes from the network
+ * interface.
+ *
+ * Returns the number of packets read (max 1 packet on success,
+ * 0 if there are no packets)
+ *
+ */
+
+s32_t xemacpsif_input(struct netif *netif)
+{
+	struct eth_hdr *ethhdr;
+	struct pbuf *p;
+	SYS_ARCH_DECL_PROTECT(lev);
+
+#if !NO_SYS
+	while (1)
+#endif
+	{
+		/* move received packet into a new pbuf */
+		SYS_ARCH_PROTECT(lev);
+		p = low_level_input(netif);
+		SYS_ARCH_UNPROTECT(lev);
+
+		/* no packet could be read, silently ignore this */
+		if (p == NULL) {
+			return 0;
+		}
+
+		/* points to packet payload, which starts with an Ethernet header */
+		ethhdr = p->payload;
+
+	#if LINK_STATS
+		lwip_stats.link.recv++;
+	#endif /* LINK_STATS */
+
+		switch (htons(ethhdr->type)) {
+			/* IP or ARP packet? */
+			case ETHTYPE_IP:
+			case ETHTYPE_ARP:
+	#if LWIP_IPV6
+			/*IPv6 Packet?*/
+			case ETHTYPE_IPV6:
+	#endif
+	#if PPPOE_SUPPORT
+				/* PPPoE packet? */
+			case ETHTYPE_PPPOEDISC:
+			case ETHTYPE_PPPOE:
+	#endif /* PPPOE_SUPPORT */
+				/* full packet send to tcpip_thread to process */
+				if (netif->input(p, netif) != ERR_OK) {
+					LWIP_DEBUGF(NETIF_DEBUG, ("xemacpsif_input: IP input error\r\n"));
+					pbuf_free(p);
+					p = NULL;
+				}
+				break;
+
+			default:
+				pbuf_free(p);
+				p = NULL;
+				break;
+		}
+	}
+
+	return 1;
+}
+
+#if !NO_SYS
+#if defined(__arm__) && !defined(ARMR5)
+void vTimerCallback( TimerHandle_t pxTimer )
+{
+	/* Do something if the pxTimer parameter is NULL */
+	configASSERT(pxTimer);
+
+	lExpireCounter++;
+	/* If the timer has expired 100 times then reset RX */
+	if(lExpireCounter >= RESETRXTIMEOUT) {
+		lExpireCounter = 0;
+		xemacpsif_resetrx_on_no_rxdata(NetIf);
+	}
+}
+#endif
+#endif
+
+static err_t low_level_init(struct netif *netif)
+{
+	UINTPTR mac_address = (UINTPTR)(netif->state);
+	struct xemac_s *xemac;
+	xemacpsif_s *xemacpsif;
+	u32 dmacrreg;
+
+	s32_t status = XST_SUCCESS;
+
+	NetIf = netif;
+
+	xemacpsif = mem_malloc(sizeof *xemacpsif);
+	if (xemacpsif == NULL) {
+		LWIP_DEBUGF(NETIF_DEBUG, ("xemacpsif_init: out of memory\r\n"));
+		return ERR_MEM;
+	}
+
+	xemac = mem_malloc(sizeof *xemac);
+	if (xemac == NULL) {
+		LWIP_DEBUGF(NETIF_DEBUG, ("xemacpsif_init: out of memory\r\n"));
+		return ERR_MEM;
+	}
+
+	xemac->state = (void *)xemacpsif;
+	xemac->topology_index = xtopology_find_index(mac_address);
+	xemac->type = xemac_type_emacps;
+
+	xemacpsif->send_q = NULL;
+	xemacpsif->recv_q = pq_create_queue();
+	if (!xemacpsif->recv_q)
+		return ERR_MEM;
+
+	/* maximum transfer unit */
+#ifdef ZYNQMP_USE_JUMBO
+	netif->mtu = XEMACPS_MTU_JUMBO - XEMACPS_HDR_SIZE;
+#else
+	netif->mtu = XEMACPS_MTU - XEMACPS_HDR_SIZE;
+#endif
+
+#if LWIP_IGMP
+	netif->igmp_mac_filter = xemacpsif_mac_filter_update;
+#endif
+
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+ netif->mld_mac_filter = xemacpsif_mld6_mac_filter_update;
+#endif
+
+	netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP |
+											NETIF_FLAG_LINK_UP;
+
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+	netif->flags |= NETIF_FLAG_MLD6;
+#endif
+
+#if LWIP_IGMP
+	netif->flags |= NETIF_FLAG_IGMP;
+#endif
+
+#if !NO_SYS
+	sys_sem_new(&xemac->sem_rx_data_available, 0);
+#endif
+	/* obtain config of this emac */
+	mac_config = (XEmacPs_Config *)xemacps_lookup_config((unsigned)(UINTPTR)netif->state);
+
+#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+	/* Request device to indicate that this library is using it */
+	if (mac_config->BaseAddress == VERSAL_EMACPS_0_BASEADDR) {
+		Xil_Smc(PM_REQUEST_DEVICE_SMC_FID, DEV_GEM_0, 1, 0, 100, 1, 0, 0);
+	}
+	if (mac_config->BaseAddress == VERSAL_EMACPS_0_BASEADDR) {
+		Xil_Smc(PM_REQUEST_DEVICE_SMC_FID, DEV_GEM_1, 1, 0, 100, 1, 0, 0);
+	}
+#endif
+
+	status = XEmacPs_CfgInitialize(&xemacpsif->emacps, mac_config,
+						mac_config->BaseAddress);
+	if (status != XST_SUCCESS) {
+		xil_printf("In %s:EmacPs Configuration Failed....\r\n", __func__);
+	}
+
+	/* initialize the mac */
+	init_emacps(xemacpsif, netif);
+
+	dmacrreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
+														XEMACPS_DMACR_OFFSET);
+	dmacrreg = dmacrreg | (0x00000010);
+	XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
+											XEMACPS_DMACR_OFFSET, dmacrreg);
+#if !NO_SYS
+#if defined(__arm__) && !defined(ARMR5)
+	/* Freertos tick is 10ms by default; set period to the same */
+	xemac->xTimer = xTimerCreate("Timer", 10, pdTRUE, ( void * ) 1, vTimerCallback);
+	if (xemac->xTimer == NULL) {
+		xil_printf("In %s:Timer creation failed....\r\n", __func__);
+	} else {
+		if(xTimerStart(xemac->xTimer, 0) != pdPASS) {
+			xil_printf("In %s:Timer start failed....\r\n", __func__);
+		}
+	}
+#endif
+#endif
+	setup_isr(xemac);
+	init_dma(xemac);
+	start_emacps(xemacpsif);
+
+	/* replace the state in netif (currently the emac baseaddress)
+	 * with the mac instance pointer.
+	 */
+	netif->state = (void *)xemac;
+
+	return ERR_OK;
+}
+
+void HandleEmacPsError(struct xemac_s *xemac)
+{
+	xemacpsif_s   *xemacpsif;
+	s32_t status = XST_SUCCESS;
+	u32 dmacrreg;
+
+	SYS_ARCH_DECL_PROTECT(lev);
+	SYS_ARCH_PROTECT(lev);
+
+	xemacpsif = (xemacpsif_s *)(xemac->state);
+	free_txrx_pbufs(xemacpsif);
+	status = XEmacPs_CfgInitialize(&xemacpsif->emacps, mac_config,
+						mac_config->BaseAddress);
+	if (status != XST_SUCCESS) {
+		xil_printf("In %s:EmacPs Configuration Failed....\r\n", __func__);
+	}
+	/* initialize the mac */
+	init_emacps_on_error(xemacpsif, NetIf);
+	dmacrreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
+														XEMACPS_DMACR_OFFSET);
+	dmacrreg = dmacrreg | (0x01000000);
+	XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
+											XEMACPS_DMACR_OFFSET, dmacrreg);
+	setup_isr(xemac);
+	init_dma(xemac);
+	start_emacps(xemacpsif);
+
+	SYS_ARCH_UNPROTECT(lev);
+}
+
+void HandleTxErrors(struct xemac_s *xemac)
+{
+	xemacpsif_s   *xemacpsif;
+	u32 netctrlreg;
+
+	SYS_ARCH_DECL_PROTECT(lev);
+	SYS_ARCH_PROTECT(lev);
+	xemacpsif = (xemacpsif_s *)(xemac->state);
+	netctrlreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
+												XEMACPS_NWCTRL_OFFSET);
+    netctrlreg = netctrlreg & (~XEMACPS_NWCTRL_TXEN_MASK);
+	XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
+									XEMACPS_NWCTRL_OFFSET, netctrlreg);
+	free_onlytx_pbufs(xemacpsif);
+
+	clean_dma_txdescs(xemac);
+	netctrlreg = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
+													XEMACPS_NWCTRL_OFFSET);
+	netctrlreg = netctrlreg | (XEMACPS_NWCTRL_TXEN_MASK);
+	XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
+										XEMACPS_NWCTRL_OFFSET, netctrlreg);
+	SYS_ARCH_UNPROTECT(lev);
+}
+
+#if LWIP_IPV6 && LWIP_IPV6_MLD
+static u8_t xemacpsif_ip6_addr_ismulticast(ip6_addr_t* ip_addr)
+{
+	if(ip6_addr_ismulticast_linklocal(ip_addr)||
+           ip6_addr_ismulticast_iflocal(ip_addr)   ||
+           ip6_addr_ismulticast_adminlocal(ip_addr)||
+           ip6_addr_ismulticast_sitelocal(ip_addr) ||
+           ip6_addr_ismulticast_orglocal(ip_addr)  ||
+           ip6_addr_ismulticast_global(ip_addr)) {
+	/*Return TRUE if IPv6 is Multicast type*/
+	return TRUE;
+	} else {
+	return FALSE;
+	}
+}
+
+static void xemacpsif_mld6_mac_hash_update (struct netif *netif, u8_t *ip_addr,
+		u8_t action)
+{
+	u8_t multicast_mac_addr[6];
+	struct xemac_s *xemac = (struct xemac_s *) (netif->state);
+	xemacpsif_s *xemacpsif = (xemacpsif_s *) (xemac->state);
+	XEmacPs_BdRing *txring;
+	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
+
+	multicast_mac_addr[0] = LL_IP6_MULTICAST_ADDR_0;
+	multicast_mac_addr[1] = LL_IP6_MULTICAST_ADDR_1;
+	multicast_mac_addr[2] = ip_addr[12];
+	multicast_mac_addr[3] = ip_addr[13];
+	multicast_mac_addr[4] = ip_addr[14];
+	multicast_mac_addr[5] = ip_addr[15];
+
+	/* Wait till all sent packets are acknowledged from HW */
+	while(txring->HwCnt);
+
+	SYS_ARCH_DECL_PROTECT(lev);
+
+	SYS_ARCH_PROTECT(lev);
+
+	/* Stop Ethernet */
+	XEmacPs_Stop(&xemacpsif->emacps);
+
+	if (action == NETIF_ADD_MAC_FILTER) {
+		/* Set Mulitcast mac address in hash table */
+		XEmacPs_SetHash(&xemacpsif->emacps, multicast_mac_addr);
+
+	} else if (action == NETIF_DEL_MAC_FILTER) {
+		/* Remove Mulitcast mac address in hash table */
+		XEmacPs_DeleteHash(&xemacpsif->emacps, multicast_mac_addr);
+	}
+
+	/* Reset DMA */
+	reset_dma(xemac);
+
+	/* Start Ethernet */
+	XEmacPs_Start(&xemacpsif->emacps);
+
+	SYS_ARCH_UNPROTECT(lev);
+}
+
+static err_t xemacpsif_mld6_mac_filter_update (struct netif *netif, ip_addr_t *group,
+		u8_t action)
+{
+	u8_t temp_mask;
+	unsigned int i;
+	u8_t * ip_addr = (u8_t *) group;
+
+	if(!(xemacpsif_ip6_addr_ismulticast((ip6_addr_t*) ip_addr))) {
+		LWIP_DEBUGF(NETIF_DEBUG,
+                                ("%s: The requested MAC address is not a multicast address.\r\n", __func__));								 LWIP_DEBUGF(NETIF_DEBUG,
+		                ("Multicast address add operation failure !!\r\n"));
+                        return ERR_ARG;
+	}
+	if (action == NETIF_ADD_MAC_FILTER) {
+		for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
+			temp_mask = (0x01) << i;
+			if ((xemacps_mld6_mcast_entry_mask & temp_mask) == temp_mask) {
+				continue;
+			}
+			xemacps_mld6_mcast_entry_mask |= temp_mask;
+
+			/* Update mac address in hash table */
+			xemacpsif_mld6_mac_hash_update(netif, ip_addr, action);
+
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("%s: Multicast MAC address successfully added.\r\n", __func__));
+
+			return ERR_OK;
+		}
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("%s: No multicast address registers left.\r\n", __func__));
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("Multicast MAC address add operation failure !!\r\n"));
+		return ERR_MEM;
+	} else if (action == NETIF_DEL_MAC_FILTER) {
+		for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
+			temp_mask = (0x01) << i;
+			if ((xemacps_mld6_mcast_entry_mask & temp_mask) != temp_mask) {
+				continue;
+			}
+			xemacps_mld6_mcast_entry_mask &= (~temp_mask);
+
+			/* Update mac address in hash table */
+			xemacpsif_mld6_mac_hash_update(netif, ip_addr, action);
+
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("%s: Multicast MAC address successfully removed.\r\n", __func__));
+
+			return ERR_OK;
+		}
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("%s: No multicast address registers present with\r\n", __func__));
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("the requested Multicast MAC address.\r\n"));
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("Multicast MAC address removal failure!!.\r\n"));
+		return ERR_MEM;
+	}
+	return ERR_ARG;
+}
+#endif
+
+#if LWIP_IGMP
+static void xemacpsif_mac_hash_update (struct netif *netif, u8_t *ip_addr,
+		u8_t action)
+{
+	u8_t multicast_mac_addr[6];
+	struct xemac_s *xemac = (struct xemac_s *) (netif->state);
+	xemacpsif_s *xemacpsif = (xemacpsif_s *) (xemac->state);
+	XEmacPs_BdRing *txring;
+	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
+
+	multicast_mac_addr[0] = 0x01;
+	multicast_mac_addr[1] = 0x00;
+	multicast_mac_addr[2] = 0x5E;
+	multicast_mac_addr[3] = ip_addr[1] & 0x7F;
+	multicast_mac_addr[4] = ip_addr[2];
+	multicast_mac_addr[5] = ip_addr[3];
+
+	/* Wait till all sent packets are acknowledged from HW */
+	while(txring->HwCnt);
+
+	SYS_ARCH_DECL_PROTECT(lev);
+
+	SYS_ARCH_PROTECT(lev);
+
+	/* Stop Ethernet */
+	XEmacPs_Stop(&xemacpsif->emacps);
+
+	if (action == IGMP_ADD_MAC_FILTER) {
+		/* Set Multicast MAC address in hash table */
+		XEmacPs_SetHash(&xemacpsif->emacps, multicast_mac_addr);
+
+	} else if (action == IGMP_DEL_MAC_FILTER) {
+		/* Remove Multicast MAC address from hash table */
+		XEmacPs_DeleteHash(&xemacpsif->emacps, multicast_mac_addr);
+	}
+
+	/* Reset DMA */
+	reset_dma(xemac);
+
+	/* Start Ethernet */
+	XEmacPs_Start(&xemacpsif->emacps);
+
+	SYS_ARCH_UNPROTECT(lev);
+}
+
+static err_t xemacpsif_mac_filter_update (struct netif *netif, ip_addr_t *group,
+		u8_t action)
+{
+	u8_t temp_mask;
+	unsigned int i;
+	u8_t * ip_addr = (u8_t *) group;
+
+	if ((ip_addr[0] < 224) || (ip_addr[0] > 239)) {
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("%s: The requested MAC address is not a multicast address.\r\n", __func__));
+		LWIP_DEBUGF(NETIF_DEBUG,
+				("Multicast address add operation failure !!\r\n"));
+
+		return ERR_ARG;
+	}
+
+	if (action == IGMP_ADD_MAC_FILTER) {
+
+		for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
+			temp_mask = (0x01) << i;
+			if ((xemacps_mcast_entry_mask & temp_mask) == temp_mask) {
+				continue;
+			}
+			xemacps_mcast_entry_mask |= temp_mask;
+
+			/* Update mac address in hash table */
+			xemacpsif_mac_hash_update(netif, ip_addr, action);
+
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("%s: Multicast MAC address successfully added.\r\n", __func__));
+
+			return ERR_OK;
+		}
+		if (i == XEMACPS_MAX_MAC_ADDR) {
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("%s: No multicast address registers left.\r\n", __func__));
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("Multicast MAC address add operation failure !!\r\n"));
+
+			return ERR_MEM;
+		}
+	} else if (action == IGMP_DEL_MAC_FILTER) {
+		for (i = 0; i < XEMACPS_MAX_MAC_ADDR; i++) {
+			temp_mask = (0x01) << i;
+			if ((xemacps_mcast_entry_mask & temp_mask) != temp_mask) {
+				continue;
+			}
+			xemacps_mcast_entry_mask &= (~temp_mask);
+
+			/* Update mac address in hash table */
+			xemacpsif_mac_hash_update(netif, ip_addr, action);
+
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("%s: Multicast MAC address successfully removed.\r\n", __func__));
+
+			return ERR_OK;
+		}
+		if (i == XEMACPS_MAX_MAC_ADDR) {
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("%s: No multicast address registers present with\r\n", __func__));
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("the requested Multicast MAC address.\r\n"));
+			LWIP_DEBUGF(NETIF_DEBUG,
+					("Multicast MAC address removal failure!!.\r\n"));
+
+			return ERR_MEM;
+		}
+	}
+	return ERR_OK;
+}
+#endif
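+
+#if 0 /* Illustrative sketch only -- not part of the imported driver. */
+/*
+ * The static IGMP filter callback above is intended to be registered on the
+ * netif during interface bring-up (typically via netif_set_igmp_mac_filter()
+ * in low_level_init()). A minimal sketch of how an application then joins an
+ * IPv4 multicast group, assuming LWIP_IGMP is enabled; the group address and
+ * the function name are example values only.
+ */
+#include "lwip/igmp.h"
+
+void example_join_group(struct netif *netif)
+{
+	ip4_addr_t group;
+
+	IP4_ADDR(&group, 239, 0, 0, 1);
+	/* lwIP invokes the registered igmp_mac_filter callback
+	 * (xemacpsif_mac_filter_update) with IGMP_ADD_MAC_FILTER. */
+	igmp_joingroup_netif(netif, &group);
+}
+#endif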
+
+/*
+ * xemacpsif_init():
+ *
+ * Should be called at the beginning of the program to set up the
+ * network interface. It calls the function low_level_init() to do the
+ * actual setup of the hardware.
+ *
+ */
+
+err_t xemacpsif_init(struct netif *netif)
+{
+#if LWIP_SNMP
+	/* ifType ethernetCsmacd(6) @see RFC1213 */
+	netif->link_type = 6;
+	/* your link speed here (0 = unknown) */
+	netif->link_speed = 0;
+	netif->ts = 0;
+	netif->ifinoctets = 0;
+	netif->ifinucastpkts = 0;
+	netif->ifinnucastpkts = 0;
+	netif->ifindiscards = 0;
+	netif->ifoutoctets = 0;
+	netif->ifoutucastpkts = 0;
+	netif->ifoutnucastpkts = 0;
+	netif->ifoutdiscards = 0;
+#endif
+
+	netif->name[0] = IFNAME0;
+	netif->name[1] = IFNAME1;
+	netif->output = xemacpsif_output;
+	netif->linkoutput = low_level_output;
+#if LWIP_IPV6
+	netif->output_ip6 = ethip6_output;
+#endif
+
+	low_level_init(netif);
+	return ERR_OK;
+}
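+
+#if 0 /* Illustrative sketch only -- not part of the imported driver. */
+/*
+ * xemacpsif_init() is a standard lwIP netif init callback, so it can be
+ * handed to netif_add(). In this port the xadapter layer normally performs
+ * this step; the addresses, the function name and the meaning of the state
+ * argument below are assumptions for this example only (in the Xilinx port
+ * the state passed in is expected to be the MAC base address, which
+ * low_level_init() then replaces with its own struct xemac_s). tcpip_input
+ * assumes the threaded (NO_SYS == 0) configuration.
+ */
+#include "lwip/netif.h"
+#include "lwip/tcpip.h"
+
+static struct netif example_netif;
+
+void example_add_interface(void *state_for_low_level_init)
+{
+	ip4_addr_t ipaddr, netmask, gw;
+
+	IP4_ADDR(&ipaddr, 192, 168, 1, 10);
+	IP4_ADDR(&netmask, 255, 255, 255, 0);
+	IP4_ADDR(&gw, 192, 168, 1, 1);
+
+	netif_add(&example_netif, &ipaddr, &netmask, &gw,
+			state_for_low_level_init, xemacpsif_init, tcpip_input);
+	netif_set_default(&example_netif);
+	netif_set_up(&example_netif);
+}
+#endif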
+
+/*
+ * xemacpsif_resetrx_on_no_rxdata():
+ *
+ * Should be called by the user at regular intervals, typically
+ * from a timer (every 100 milliseconds). This provides a SW workaround
+ * for the HW bug (SI #692601). Please refer to the header comment of
+ * resetrx_on_no_rxdata() in xemacpsif_dma.c for more details on this
+ * silicon issue. A usage sketch follows the function below.
+ *
+ */
+
+void xemacpsif_resetrx_on_no_rxdata(struct netif *netif)
+{
+	struct xemac_s *xemac = (struct xemac_s *)(netif->state);
+	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
+
+	resetrx_on_no_rxdata(xemacpsif);
+}
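+
+#if 0 /* Illustrative sketch only -- not part of the imported driver. */
+/*
+ * One way to invoke xemacpsif_resetrx_on_no_rxdata() every 100 ms is through
+ * the lwIP sys_timeout() service. The callback name and the way the netif
+ * pointer is obtained are assumptions for this example only.
+ */
+#include "lwip/timeouts.h"
+
+static void rxreset_timer_cb(void *arg)
+{
+	struct netif *netif = (struct netif *)arg;
+
+	xemacpsif_resetrx_on_no_rxdata(netif);
+	/* Re-arm the timer for the next 100 ms interval. */
+	sys_timeout(100, rxreset_timer_cb, netif);
+}
+
+/* Arm once after the netif is up:
+ *   sys_timeout(100, rxreset_timer_cb, &my_netif);
+ */
+#endif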
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_dma.c b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_dma.c
new file mode 100644
index 0000000..2da3566
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_dma.c
@@ -0,0 +1,930 @@
+/*
+ * Copyright (C) 2010 - 2021 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "lwipopts.h"
+#include "lwip/stats.h"
+#include "lwip/sys.h"
+#include "lwip/inet_chksum.h"
+
+#include "netif/xadapter.h"
+#include "netif/xemacpsif.h"
+#include "xstatus.h"
+
+#include "xlwipconfig.h"
+#include "xparameters.h"
+#include "xparameters_ps.h"
+#include "xil_exception.h"
+#include "xil_mmu.h"
+#if defined (ARMR5)
+#include "xreg_cortexr5.h"
+#endif
+#ifdef CONFIG_XTRACE
+#include "xtrace.h"
+#endif
+#if !NO_SYS
+#include "FreeRTOS.h"
+#include "semphr.h"
+#include "timers.h"
+#endif
+
+
+#define INTC_BASE_ADDR		XPAR_SCUGIC_0_CPU_BASEADDR
+#define INTC_DIST_BASE_ADDR	XPAR_SCUGIC_0_DIST_BASEADDR
+
+/* Byte alignment of BDs */
+#define BD_ALIGNMENT (XEMACPS_DMABD_MINIMUM_ALIGNMENT*2)
+
+/* A max of 4 different ethernet interfaces are supported */
+static UINTPTR tx_pbufs_storage[4*XLWIP_CONFIG_N_TX_DESC];
+static UINTPTR rx_pbufs_storage[4*XLWIP_CONFIG_N_RX_DESC];
+
+static s32_t emac_intr_num;
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+volatile u32_t notifyinfo[4*XLWIP_CONFIG_N_TX_DESC];
+#endif
+
+/******************************************************************************
+ * Each BD is 8 bytes in size and the BDs (BD chain) need to be placed at an
+ * uncached memory location. If they are not placed at uncached locations,
+ * the user needs to flush or invalidate the cache for each BD/packet.
+ * However, a flush or invalidate happens over a cache line which can
+ * span multiple BDs. This means a flush or invalidate of one BD can actually
+ * flush/invalidate multiple BDs adjacent to the targeted BD. Assuming that
+ * both the user and the hardware update the BD fields, this operation can
+ * potentially overwrite the updates done by the hardware or by the user.
+ * To avoid this, it is always safe to place the BD chains for the Rx and Tx
+ * side at an uncached memory location.
+ *
+ * The Xilinx standalone BSP for Cortex-A9 implements only primary page tables.
+ * Each table entry corresponds to 1 MB of the address map. This means that if
+ * a memory region has to be made uncached, the minimum granularity is 1 MB.
+ *
+ * The implementation below allocates a 1 MB u8 array aligned to 1 MB.
+ * This ensures that the array is placed at a 1 MB aligned address
+ * (e.g. 0x1200000) and occupies 1 MB of memory. The init_dma function then
+ * changes the attributes of this 1 MB region to make it uncached (strongly
+ * ordered). This increases the bss section of the program significantly and
+ * can waste memory, since the BDs will hardly occupy a few KB and the rest
+ * of the 1 MB will be unused.
+ *
+ * If a program uses other peripherals that have DMAs/bus masters and need
+ * uncached memory, they may also end up following the same approach, which
+ * aggravates the memory wastage. To avoid this, the user can create a new
+ * 1 MB section in the linker script and reserve it for use cases that need
+ * uncached memory. The application can then implement its own allocation
+ * logic for uncached memory from this 1 MB region. For such a case, this
+ * file needs to be changed so that appropriately allocated uncached memory
+ * is used instead (see the illustrative sketch after the bd_space definition
+ * below).
+ *
+ * The present implementation allocates 1 MB of uncached memory and reserves
+ * 64 KB of it for each BD chain. 64 KB corresponds to 8192 BDs per chain,
+ * which is more than enough for any application. Assuming that both emac0
+ * and emac1 are present, 256 KB of memory is used for BDs and the remaining
+ * 768 KB is unused.
+ *********************************************************************************/
+
+#if defined __aarch64__
+u8_t bd_space[0x200000] __attribute__ ((aligned (0x200000)));
+#else
+u8_t bd_space[0x100000] __attribute__ ((aligned (0x100000)));
+#endif
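+
+#if 0 /* Illustrative sketch only -- not part of the imported driver. */
+/*
+ * As suggested in the comment above, the 1 MB (2 MB on aarch64) bd_space
+ * array could instead be placed in a dedicated uncached section defined in
+ * the linker script. The section name ".uncached_bd", the memory region name
+ * "ps7_ddr_0" and the linker-script fragment below are assumptions for this
+ * example only.
+ *
+ * Linker script fragment (lscript.ld):
+ *
+ *   .uncached_bd (NOLOAD) : ALIGN(0x100000) {
+ *       *(.uncached_bd)
+ *   } > ps7_ddr_0
+ *
+ * The array is then placed there with a section attribute; init_dma() would
+ * still mark the region uncached via Xil_SetTlbAttributes():
+ */
+u8_t bd_space[0x100000]
+	__attribute__ ((aligned (0x100000), section (".uncached_bd")));
+#endif
+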
+static volatile u32_t bd_space_index = 0;
+static volatile u32_t bd_space_attr_set = 0;
+
+#if !NO_SYS
+long xInsideISR = 0;
+#endif
+
+#define XEMACPS_BD_TO_INDEX(ringptr, bdptr)				\
+	(((UINTPTR)bdptr - (UINTPTR)(ringptr)->BaseBdAddr) / (ringptr)->Separation)
+
+
+s32_t is_tx_space_available(xemacpsif_s *emac)
+{
+	XEmacPs_BdRing *txring;
+	s32_t freecnt = 0;
+
+	txring = &(XEmacPs_GetTxRing(&emac->emacps));
+
+	/* tx space is available as long as there are valid BD's */
+	freecnt = XEmacPs_BdRingGetFreeCnt(txring);
+	return freecnt;
+}
+
+
+static inline
+u32_t get_base_index_txpbufsstorage (xemacpsif_s *xemacpsif)
+{
+	u32_t index;
+#ifdef XPAR_XEMACPS_0_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
+		index = 0;
+	}
+#endif
+#ifdef XPAR_XEMACPS_1_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
+		index = XLWIP_CONFIG_N_TX_DESC;
+	}
+#endif
+#ifdef XPAR_XEMACPS_2_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
+		index = 2 * XLWIP_CONFIG_N_TX_DESC;
+	}
+#endif
+#ifdef XPAR_XEMACPS_3_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
+		index = 3 * XLWIP_CONFIG_N_TX_DESC;
+	}
+#endif
+	return index;
+}
+
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+static inline
+u32_t get_base_index_tasknotifyinfo (xemacpsif_s *xemacpsif)
+{
+	u32_t index;
+#ifdef XPAR_XEMACPS_0_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
+		index = 0;
+	}
+#endif
+#ifdef XPAR_XEMACPS_1_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
+		index = XLWIP_CONFIG_N_TX_DESC;
+	}
+#endif
+#ifdef XPAR_XEMACPS_2_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
+		index = 2 * XLWIP_CONFIG_N_TX_DESC;
+	}
+#endif
+#ifdef XPAR_XEMACPS_3_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
+		index = 3 * XLWIP_CONFIG_N_TX_DESC;
+	}
+#endif
+	return index;
+}
+#endif
+
+static inline
+u32_t get_base_index_rxpbufsstorage (xemacpsif_s *xemacpsif)
+{
+	u32_t index;
+#ifdef XPAR_XEMACPS_0_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
+		index = 0;
+	}
+#endif
+#ifdef XPAR_XEMACPS_1_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_1_BASEADDR) {
+		index = XLWIP_CONFIG_N_RX_DESC;
+	}
+#endif
+#ifdef XPAR_XEMACPS_2_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_2_BASEADDR) {
+		index = 2 * XLWIP_CONFIG_N_RX_DESC;
+	}
+#endif
+#ifdef XPAR_XEMACPS_3_BASEADDR
+	if (xemacpsif->emacps.Config.BaseAddress == XPAR_XEMACPS_3_BASEADDR) {
+		index = 3 * XLWIP_CONFIG_N_RX_DESC;
+	}
+#endif
+	return index;
+}
+
+void process_sent_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *txring)
+{
+	XEmacPs_Bd *txbdset;
+	XEmacPs_Bd *curbdpntr;
+	s32_t n_bds;
+	XStatus status;
+	s32_t n_pbufs_freed = 0;
+	u32_t bdindex;
+	struct pbuf *p;
+	u32 *temp;
+	u32_t index;
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	u32_t tx_task_notifier_index;
+#endif
+
+	index = get_base_index_txpbufsstorage (xemacpsif);
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	tx_task_notifier_index = get_base_index_tasknotifyinfo (xemacpsif);
+#endif
+
+	while (1) {
+		/* obtain processed BD's */
+		n_bds = XEmacPs_BdRingFromHwTx(txring,
+								XLWIP_CONFIG_N_TX_DESC, &txbdset);
+		if (n_bds == 0)  {
+			return;
+		}
+		/* free the processed BD's */
+		n_pbufs_freed = n_bds;
+		curbdpntr = txbdset;
+		while (n_pbufs_freed > 0) {
+			bdindex = XEMACPS_BD_TO_INDEX(txring, curbdpntr);
+			temp = (u32 *)curbdpntr;
+			*temp = 0;
+			temp++;
+			if (bdindex == (XLWIP_CONFIG_N_TX_DESC - 1)) {
+				*temp = 0xC0000000;
+			} else {
+				*temp = 0x80000000;
+			}
+			dsb();
+			p = (struct pbuf *)tx_pbufs_storage[index + bdindex];
+			if (p != NULL) {
+				pbuf_free(p);
+			}
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+			notifyinfo[tx_task_notifier_index + bdindex] = 0;
+#endif
+			tx_pbufs_storage[index + bdindex] = 0;
+			curbdpntr = XEmacPs_BdRingNext(txring, curbdpntr);
+			n_pbufs_freed--;
+			dsb();
+		}
+
+		status = XEmacPs_BdRingFree(txring, n_bds, txbdset);
+		if (status != XST_SUCCESS) {
+			LWIP_DEBUGF(NETIF_DEBUG, ("Failure while freeing in Tx Done ISR\r\n"));
+		}
+	}
+	return;
+}
+
+void emacps_send_handler(void *arg)
+{
+	struct xemac_s *xemac;
+	xemacpsif_s   *xemacpsif;
+	XEmacPs_BdRing *txringptr;
+	u32_t regval;
+#if !NO_SYS
+	xInsideISR++;
+#endif
+	xemac = (struct xemac_s *)(arg);
+	xemacpsif = (xemacpsif_s *)(xemac->state);
+	txringptr = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
+	regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_TXSR_OFFSET);
+	XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,XEMACPS_TXSR_OFFSET, regval);
+
+	/* If Transmit done interrupt is asserted, process completed BD's */
+	process_sent_bds(xemacpsif, txringptr);
+#if !NO_SYS
+	xInsideISR--;
+#endif
+}
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p,
+					u32_t block_till_tx_complete, u32_t *to_block_index)
+#else
+XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
+#endif
+{
+	struct pbuf *q;
+	s32_t n_pbufs;
+	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
+	XEmacPs_Bd *temp_txbd;
+	XStatus status;
+	XEmacPs_BdRing *txring;
+	u32_t bdindex = 0;
+	u32_t index;
+	u32_t max_fr_size;
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	u32_t tx_task_notifier_index;
+#endif
+
+	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));
+
+	index = get_base_index_txpbufsstorage (xemacpsif);
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	tx_task_notifier_index = get_base_index_tasknotifyinfo (xemacpsif);
+#endif
+
+	/* first count the number of pbufs */
+	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
+		n_pbufs++;
+
+	/* obtain as many BDs as there are pbufs */
+	status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
+	if (status != XST_SUCCESS) {
+		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
+		return XST_FAILURE;
+	}
+
+	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
+		bdindex = XEMACPS_BD_TO_INDEX(txring, txbd);
+		if (tx_pbufs_storage[index + bdindex] != 0) {
+			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
+			return XST_FAILURE;
+		}
+
+		/* Send the data from the pbuf to the interface, one pbuf at a
+		   time. The size of the data in each pbuf is kept in the ->len
+		   variable. */
+		if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
+			Xil_DCacheFlushRange((UINTPTR)q->payload, (UINTPTR)q->len);
+		}
+
+		XEmacPs_BdSetAddressTx(txbd, (UINTPTR)q->payload);
+
+#ifdef ZYNQMP_USE_JUMBO
+		max_fr_size = MAX_FRAME_SIZE_JUMBO - 18;
+#else
+		max_fr_size = XEMACPS_MAX_FRAME_SIZE - 18;
+#endif
+		if (q->len > max_fr_size)
+			XEmacPs_BdSetLength(txbd, max_fr_size & 0x3FFF);
+		else
+			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);
+
+		tx_pbufs_storage[index + bdindex] = (UINTPTR)q;
+
+		pbuf_ref(q);
+		last_txbd = txbd;
+		XEmacPs_BdClearLast(txbd);
+		txbd = XEmacPs_BdRingNext(txring, txbd);
+	}
+#if LWIP_UDP_OPT_BLOCK_TX_TILL_COMPLETE
+	if (block_till_tx_complete == 1) {
+		notifyinfo[tx_task_notifier_index + bdindex] = 1;
+		*to_block_index = tx_task_notifier_index + bdindex;
+	}
+#endif
+	XEmacPs_BdSetLast(last_txbd);
+	/* For fragmented packets, remember the 1st BD allocated for the 1st
+	   packet fragment. The used bit for this BD should be cleared at the end
+	   after clearing out used bits for the other fragments. For packets
+	   without fragments, just remember the allocated BD. */
+	temp_txbd = txbdset;
+	txbd = txbdset;
+	txbd = XEmacPs_BdRingNext(txring, txbd);
+	q = p->next;
+	for(; q != NULL; q = q->next) {
+		XEmacPs_BdClearTxUsed(txbd);
+		txbd = XEmacPs_BdRingNext(txring, txbd);
+	}
+	XEmacPs_BdClearTxUsed(temp_txbd);
+	dsb();
+
+	status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
+	if (status != XST_SUCCESS) {
+		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
+		return XST_FAILURE;
+	}
+	/* Start transmit */
+	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
+	XEMACPS_NWCTRL_OFFSET,
+	(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
+	XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));
+	return status;
+}
+
+void setup_rx_bds(xemacpsif_s *xemacpsif, XEmacPs_BdRing *rxring)
+{
+	XEmacPs_Bd *rxbd;
+	XStatus status;
+	struct pbuf *p;
+	u32_t freebds;
+	u32_t bdindex;
+	u32 *temp;
+	u32_t index;
+
+	index = get_base_index_rxpbufsstorage (xemacpsif);
+
+	freebds = XEmacPs_BdRingGetFreeCnt (rxring);
+	while (freebds > 0) {
+		freebds--;
+#ifdef ZYNQMP_USE_JUMBO
+		p = pbuf_alloc(PBUF_RAW, MAX_FRAME_SIZE_JUMBO, PBUF_POOL);
+#else
+		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
+#endif
+		if (!p) {
+#if LINK_STATS
+			lwip_stats.link.memerr++;
+			lwip_stats.link.drop++;
+#endif
+			printf("unable to alloc pbuf in recv_handler\r\n");
+			return;
+		}
+		status = XEmacPs_BdRingAlloc(rxring, 1, &rxbd);
+		if (status != XST_SUCCESS) {
+			LWIP_DEBUGF(NETIF_DEBUG, ("setup_rx_bds: Error allocating RxBD\r\n"));
+			pbuf_free(p);
+			return;
+		}
+		status = XEmacPs_BdRingToHw(rxring, 1, rxbd);
+		if (status != XST_SUCCESS) {
+			LWIP_DEBUGF(NETIF_DEBUG, ("Error committing RxBD to hardware: "));
+			if (status == XST_DMA_SG_LIST_ERROR) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("XST_DMA_SG_LIST_ERROR: this function was called out of sequence with XEmacPs_BdRingAlloc()\r\n"));
+			}
+			else {
+				LWIP_DEBUGF(NETIF_DEBUG, ("set of BDs was rejected because the first BD did not have its start-of-packet bit set, or the last BD did not have its end-of-packet bit set, or any one of the BD set has 0 as length value\r\n"));
+			}
+
+			pbuf_free(p);
+			XEmacPs_BdRingUnAlloc(rxring, 1, rxbd);
+			return;
+		}
+#ifdef ZYNQMP_USE_JUMBO
+		if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
+			Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)MAX_FRAME_SIZE_JUMBO);
+		}
+#else
+		if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
+			Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)XEMACPS_MAX_FRAME_SIZE);
+		}
+#endif
+		bdindex = XEMACPS_BD_TO_INDEX(rxring, rxbd);
+		temp = (u32 *)rxbd;
+		temp++;
+		/* Status field should be cleared first to avoid drops */
+		*temp = 0;
+		dsb();
+
+		/* Set high address when required */
+#ifdef __aarch64__
+		XEmacPs_BdWrite(rxbd, XEMACPS_BD_ADDR_HI_OFFSET,
+			(((UINTPTR)p->payload) & ULONG64_HI_MASK) >> 32U);
+#endif
+		/* Set address field; add WRAP bit on last descriptor  */
+		if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
+			XEmacPs_BdWrite(rxbd, XEMACPS_BD_ADDR_OFFSET, ((UINTPTR)p->payload | XEMACPS_RXBUF_WRAP_MASK));
+		} else {
+			XEmacPs_BdWrite(rxbd, XEMACPS_BD_ADDR_OFFSET, (UINTPTR)p->payload);
+		}
+
+		rx_pbufs_storage[index + bdindex] = (UINTPTR)p;
+	}
+}
+
+void emacps_recv_handler(void *arg)
+{
+	struct pbuf *p;
+	XEmacPs_Bd *rxbdset, *curbdptr;
+	struct xemac_s *xemac;
+	xemacpsif_s *xemacpsif;
+	XEmacPs_BdRing *rxring;
+	volatile s32_t bd_processed;
+	s32_t rx_bytes, k;
+	u32_t bdindex;
+	u32_t regval;
+	u32_t index;
+	u32_t gigeversion;
+
+	xemac = (struct xemac_s *)(arg);
+	xemacpsif = (xemacpsif_s *)(xemac->state);
+	rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps);
+
+#if !NO_SYS
+	xInsideISR++;
+#endif
+
+	gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
+	index = get_base_index_rxpbufsstorage (xemacpsif);
+	/*
+	 * If the Reception done interrupt is asserted, call the RX callback
+	 * function to handle the processed BDs and then raise the corresponding
+	 * flag.
+	 */
+	regval = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET);
+	XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXSR_OFFSET, regval);
+	if (gigeversion <= 2) {
+			resetrx_on_no_rxdata(xemacpsif);
+	}
+
+	while(1) {
+
+		bd_processed = XEmacPs_BdRingFromHwRx(rxring, XLWIP_CONFIG_N_RX_DESC, &rxbdset);
+		if (bd_processed <= 0) {
+			break;
+		}
+
+		for (k = 0, curbdptr=rxbdset; k < bd_processed; k++) {
+
+			bdindex = XEMACPS_BD_TO_INDEX(rxring, curbdptr);
+			p = (struct pbuf *)rx_pbufs_storage[index + bdindex];
+
+			/*
+			 * Adjust the buffer size to the actual number of bytes received.
+			 */
+#ifdef ZYNQMP_USE_JUMBO
+			rx_bytes = XEmacPs_GetRxFrameSize(&xemacpsif->emacps, curbdptr);
+#else
+			rx_bytes = XEmacPs_BdGetLength(curbdptr);
+#endif
+			pbuf_realloc(p, rx_bytes);
+
+			/* Invalidate RX frame before queuing to handle
+			 * L1 cache prefetch conditions on any architecture.
+			 */
+			if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
+				Xil_DCacheInvalidateRange((UINTPTR)p->payload, rx_bytes);
+			}
+
+			/* store it in the receive queue,
+			 * where it'll be processed by a different handler
+			 */
+			if (pq_enqueue(xemacpsif->recv_q, (void*)p) < 0) {
+#if LINK_STATS
+				lwip_stats.link.memerr++;
+				lwip_stats.link.drop++;
+#endif
+				pbuf_free(p);
+			}
+			curbdptr = XEmacPs_BdRingNext( rxring, curbdptr);
+		}
+		/* free up the BD's */
+		XEmacPs_BdRingFree(rxring, bd_processed, rxbdset);
+		setup_rx_bds(xemacpsif, rxring);
+	}
+#if !NO_SYS
+	sys_sem_signal(&xemac->sem_rx_data_available);
+	xInsideISR--;
+#endif
+
+	return;
+}
+
+void clean_dma_txdescs(struct xemac_s *xemac)
+{
+	XEmacPs_Bd bdtemplate;
+	XEmacPs_BdRing *txringptr;
+	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
+
+	txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
+
+	XEmacPs_BdClear(&bdtemplate);
+	XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
+
+	/*
+	 * Create the TxBD ring
+	 */
+	XEmacPs_BdRingCreate(txringptr, (UINTPTR) xemacpsif->tx_bdspace,
+			(UINTPTR) xemacpsif->tx_bdspace, BD_ALIGNMENT,
+				 XLWIP_CONFIG_N_TX_DESC);
+	XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
+}
+
+XStatus init_dma(struct xemac_s *xemac)
+{
+	XEmacPs_Bd bdtemplate;
+	XEmacPs_BdRing *rxringptr, *txringptr;
+	XEmacPs_Bd *rxbd;
+	struct pbuf *p;
+	XStatus status;
+	s32_t i;
+	u32_t bdindex;
+	volatile UINTPTR tempaddress;
+	u32_t index;
+	u32_t gigeversion;
+	XEmacPs_Bd *bdtxterminate = NULL;
+	XEmacPs_Bd *bdrxterminate = NULL;
+	u32 *temp;
+
+	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
+	struct xtopology_t *xtopologyp = &xtopology[xemac->topology_index];
+
+	index = get_base_index_rxpbufsstorage (xemacpsif);
+	gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
+	/*
+	 * The BDs need to be allocated in uncached memory. Hence the address
+	 * range allocated for bd_space (1 MB, or 2 MB on aarch64) is made
+	 * uncached by setting appropriate attributes in the translation table.
+	 * bd_space is aligned to its size, which ensures a reserved uncached
+	 * area used only for BDs.
+	 */
+	if (bd_space_attr_set == 0) {
+#if defined (ARMR5)
+	Xil_SetTlbAttributes((s32_t)bd_space, STRONG_ORDERD_SHARED | PRIV_RW_USER_RW); // addr, attr
+#else
+#if defined __aarch64__
+	Xil_SetTlbAttributes((u64)bd_space, NORM_NONCACHE | INNER_SHAREABLE);
+#else
+	Xil_SetTlbAttributes((s32_t)bd_space, DEVICE_MEMORY); // addr, attr
+#endif
+#endif
+		bd_space_attr_set = 1;
+	}
+
+	rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
+	txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
+	LWIP_DEBUGF(NETIF_DEBUG, ("rxringptr: 0x%08x\r\n", rxringptr));
+	LWIP_DEBUGF(NETIF_DEBUG, ("txringptr: 0x%08x\r\n", txringptr));
+
+	/* Allocate 64k for Rx and Tx bds each to take care of extreme cases */
+	tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
+	xemacpsif->rx_bdspace = (void *)tempaddress;
+	bd_space_index += 0x10000;
+	tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
+	xemacpsif->tx_bdspace = (void *)tempaddress;
+	bd_space_index += 0x10000;
+	if (gigeversion > 2) {
+		tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
+		bdrxterminate = (XEmacPs_Bd *)tempaddress;
+		bd_space_index += 0x10000;
+		tempaddress = (UINTPTR)&(bd_space[bd_space_index]);
+		bdtxterminate = (XEmacPs_Bd *)tempaddress;
+		bd_space_index += 0x10000;
+	}
+
+	LWIP_DEBUGF(NETIF_DEBUG, ("rx_bdspace: %p \r\n", xemacpsif->rx_bdspace));
+	LWIP_DEBUGF(NETIF_DEBUG, ("tx_bdspace: %p \r\n", xemacpsif->tx_bdspace));
+
+	if (!xemacpsif->rx_bdspace || !xemacpsif->tx_bdspace) {
+		xil_printf("%s@%d: Error: Unable to allocate memory for TX/RX buffer descriptors",
+				__FILE__, __LINE__);
+		return ERR_IF;
+	}
+
+	/*
+	 * Setup RxBD space.
+	 *
+	 * Setup a BD template for the Rx channel. This template will be copied to
+	 * every RxBD. We will not have to explicitly set these again.
+	 */
+	XEmacPs_BdClear(&bdtemplate);
+
+	/*
+	 * Create the RxBD ring
+	 */
+
+	status = XEmacPs_BdRingCreate(rxringptr, (UINTPTR) xemacpsif->rx_bdspace,
+				(UINTPTR) xemacpsif->rx_bdspace, BD_ALIGNMENT,
+				     XLWIP_CONFIG_N_RX_DESC);
+
+	if (status != XST_SUCCESS) {
+		LWIP_DEBUGF(NETIF_DEBUG, ("Error setting up RxBD space\r\n"));
+		return ERR_IF;
+	}
+
+	status = XEmacPs_BdRingClone(rxringptr, &bdtemplate, XEMACPS_RECV);
+	if (status != XST_SUCCESS) {
+		LWIP_DEBUGF(NETIF_DEBUG, ("Error initializing RxBD space\r\n"));
+		return ERR_IF;
+	}
+
+	XEmacPs_BdClear(&bdtemplate);
+	XEmacPs_BdSetStatus(&bdtemplate, XEMACPS_TXBUF_USED_MASK);
+	/*
+	 * Create the TxBD ring
+	 */
+	status = XEmacPs_BdRingCreate(txringptr, (UINTPTR) xemacpsif->tx_bdspace,
+				(UINTPTR) xemacpsif->tx_bdspace, BD_ALIGNMENT,
+				     XLWIP_CONFIG_N_TX_DESC);
+
+	if (status != XST_SUCCESS) {
+		return ERR_IF;
+	}
+
+	/* We reuse the bd template, as the same one will work for both rx and tx. */
+	status = XEmacPs_BdRingClone(txringptr, &bdtemplate, XEMACPS_SEND);
+	if (status != XST_SUCCESS) {
+		return ERR_IF;
+	}
+
+	/*
+	 * Allocate RX descriptors, 1 RxBD at a time.
+	 */
+	for (i = 0; i < XLWIP_CONFIG_N_RX_DESC; i++) {
+#ifdef ZYNQMP_USE_JUMBO
+		p = pbuf_alloc(PBUF_RAW, MAX_FRAME_SIZE_JUMBO, PBUF_POOL);
+#else
+		p = pbuf_alloc(PBUF_RAW, XEMACPS_MAX_FRAME_SIZE, PBUF_POOL);
+#endif
+		if (!p) {
+#if LINK_STATS
+			lwip_stats.link.memerr++;
+			lwip_stats.link.drop++;
+#endif
+			printf("unable to alloc pbuf in init_dma\r\n");
+			return ERR_IF;
+		}
+		status = XEmacPs_BdRingAlloc(rxringptr, 1, &rxbd);
+		if (status != XST_SUCCESS) {
+			LWIP_DEBUGF(NETIF_DEBUG, ("init_dma: Error allocating RxBD\r\n"));
+			pbuf_free(p);
+			return ERR_IF;
+		}
+		/* Enqueue to HW */
+		status = XEmacPs_BdRingToHw(rxringptr, 1, rxbd);
+		if (status != XST_SUCCESS) {
+			LWIP_DEBUGF(NETIF_DEBUG, ("Error: committing RxBD to HW\r\n"));
+			pbuf_free(p);
+			XEmacPs_BdRingUnAlloc(rxringptr, 1, rxbd);
+			return ERR_IF;
+		}
+
+		bdindex = XEMACPS_BD_TO_INDEX(rxringptr, rxbd);
+		temp = (u32 *)rxbd;
+		*temp = 0;
+		if (bdindex == (XLWIP_CONFIG_N_RX_DESC - 1)) {
+			*temp = 0x00000002;
+		}
+		temp++;
+		*temp = 0;
+		dsb();
+#ifdef ZYNQMP_USE_JUMBO
+		if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
+			Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)MAX_FRAME_SIZE_JUMBO);
+		}
+#else
+		if (xemacpsif->emacps.Config.IsCacheCoherent == 0) {
+			Xil_DCacheInvalidateRange((UINTPTR)p->payload, (UINTPTR)XEMACPS_MAX_FRAME_SIZE);
+		}
+#endif
+		XEmacPs_BdSetAddressRx(rxbd, (UINTPTR)p->payload);
+
+		rx_pbufs_storage[index + bdindex] = (UINTPTR)p;
+	}
+	XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.RxBdRing.BaseBdAddr, 0, XEMACPS_RECV);
+	if (gigeversion > 2) {
+		XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, 1, XEMACPS_SEND);
+	}else {
+		XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, 0, XEMACPS_SEND);
+	}
+	if (gigeversion > 2)
+	{
+		/*
+		 * This version of GEM supports priority queuing and the current
+		 * driver uses tx priority queue 1 and the normal rx queue for
+		 * packet transmit and receive. The code below ensures that the
+		 * other queue pointers are parked in a known state to prevent
+		 * the controller from malfunctioning by fetching descriptors
+		 * from these queues.
+		 */
+		XEmacPs_BdClear(bdrxterminate);
+		XEmacPs_BdSetAddressRx(bdrxterminate, (XEMACPS_RXBUF_NEW_MASK |
+						XEMACPS_RXBUF_WRAP_MASK));
+		XEmacPs_Out32((xemacpsif->emacps.Config.BaseAddress + XEMACPS_RXQ1BASE_OFFSET),
+				   (UINTPTR)bdrxterminate);
+		XEmacPs_BdClear(bdtxterminate);
+		XEmacPs_BdSetStatus(bdtxterminate, (XEMACPS_TXBUF_USED_MASK |
+						XEMACPS_TXBUF_WRAP_MASK));
+		XEmacPs_Out32((xemacpsif->emacps.Config.BaseAddress + XEMACPS_TXQBASE_OFFSET),
+				   (UINTPTR)bdtxterminate);
+	}
+#if !NO_SYS
+	xPortInstallInterruptHandler(xtopologyp->scugic_emac_intr,
+						( Xil_InterruptHandler ) XEmacPs_IntrHandler,
+						(void *)&xemacpsif->emacps);
+#else
+	/*
+	 * Connect the device driver handler that will be called when an
+	 * interrupt for the device occurs, the handler defined above performs
+	 * the specific interrupt processing for the device.
+	 */
+	XScuGic_RegisterHandler(INTC_BASE_ADDR, xtopologyp->scugic_emac_intr,
+				(Xil_ExceptionHandler)XEmacPs_IntrHandler,
+						(void *)&xemacpsif->emacps);
+#endif
+	/*
+	 * Enable the interrupt for emacps.
+	 */
+	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, (u32) xtopologyp->scugic_emac_intr);
+	emac_intr_num = (u32) xtopologyp->scugic_emac_intr;
+	return 0;
+}
+
+/*
+ * resetrx_on_no_rxdata():
+ *
+ * This function is called at regular intervals through the API
+ * xemacpsif_resetrx_on_no_rxdata, which is invoked by the user.
+ * The EmacPs has a HW bug (SI #692601) on the Rx path under heavy Rx traffic:
+ * there are times when the Rx path becomes unresponsive. The workaround is to
+ * check the Rx path for traffic by reading the stats registers regularly. If
+ * the stats register does not increment for some time (indicating no Rx
+ * traffic), the function resets the Rx data path.
+ *
+ */
+
+void resetrx_on_no_rxdata(xemacpsif_s *xemacpsif)
+{
+	u32_t regctrl;
+	u32_t tempcntr;
+	u32_t gigeversion;
+
+	gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
+	if (gigeversion == 2) {
+		tempcntr = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_RXCNT_OFFSET);
+		if ((!tempcntr) && (!(xemacpsif->last_rx_frms_cntr))) {
+			regctrl = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+			regctrl &= (~XEMACPS_NWCTRL_RXEN_MASK);
+			XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET, regctrl);
+			regctrl = XEmacPs_ReadReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_NWCTRL_OFFSET);
+			regctrl |= (XEMACPS_NWCTRL_RXEN_MASK);
+			XEmacPs_WriteReg(xemacpsif->emacps.Config.BaseAddress, XEMACPS_NWCTRL_OFFSET, regctrl);
+		}
+		xemacpsif->last_rx_frms_cntr = tempcntr;
+	}
+}
+
+void free_txrx_pbufs(xemacpsif_s *xemacpsif)
+{
+	s32_t index;
+	s32_t index1;
+	struct pbuf *p;
+
+	index1 = get_base_index_txpbufsstorage (xemacpsif);
+
+	for (index = index1; index < (index1 + XLWIP_CONFIG_N_TX_DESC); index++) {
+		if (tx_pbufs_storage[index] != 0) {
+			p = (struct pbuf *)tx_pbufs_storage[index];
+			pbuf_free(p);
+			tx_pbufs_storage[index] = 0;
+		}
+	}
+
+	index1 = get_base_index_rxpbufsstorage(xemacpsif);
+	for (index = index1; index < (index1 + XLWIP_CONFIG_N_RX_DESC); index++) {
+		p = (struct pbuf *)rx_pbufs_storage[index];
+		pbuf_free(p);
+
+	}
+}
+
+void free_onlytx_pbufs(xemacpsif_s *xemacpsif)
+{
+	s32_t index;
+	s32_t index1;
+	struct pbuf *p;
+
+	index1 = get_base_index_txpbufsstorage (xemacpsif);
+	for (index = index1; index < (index1 + XLWIP_CONFIG_N_TX_DESC); index++) {
+		if (tx_pbufs_storage[index] != 0) {
+			p = (struct pbuf *)tx_pbufs_storage[index];
+			pbuf_free(p);
+			tx_pbufs_storage[index] = 0;
+		}
+	}
+}
+
+/* reset Tx and Rx DMA pointers after XEmacPs_Stop */
+void reset_dma(struct xemac_s *xemac)
+{
+	u8 txqueuenum;
+	u32_t gigeversion;
+	xemacpsif_s *xemacpsif = (xemacpsif_s *)(xemac->state);
+	XEmacPs_BdRing *txringptr = &XEmacPs_GetTxRing(&xemacpsif->emacps);
+	XEmacPs_BdRing *rxringptr = &XEmacPs_GetRxRing(&xemacpsif->emacps);
+
+	XEmacPs_BdRingPtrReset(txringptr, xemacpsif->tx_bdspace);
+	XEmacPs_BdRingPtrReset(rxringptr, xemacpsif->rx_bdspace);
+
+	gigeversion = ((Xil_In32(xemacpsif->emacps.Config.BaseAddress + 0xFC)) >> 16) & 0xFFF;
+	if (gigeversion > 2) {
+		txqueuenum = 1;
+	} else {
+		txqueuenum = 0;
+	}
+
+	XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.RxBdRing.BaseBdAddr, 0, XEMACPS_RECV);
+	XEmacPs_SetQueuePtr(&(xemacpsif->emacps), xemacpsif->emacps.TxBdRing.BaseBdAddr, txqueuenum, XEMACPS_SEND);
+}
+
+void emac_disable_intr(void)
+{
+	XScuGic_DisableIntr(INTC_DIST_BASE_ADDR, emac_intr_num);
+}
+
+void emac_enable_intr(void)
+{
+	XScuGic_EnableIntr(INTC_DIST_BASE_ADDR, emac_intr_num);
+}
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_hw.c b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_hw.c
new file mode 100644
index 0000000..a1fdeda
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_hw.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2010 - 2021 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include "netif/xemacpsif.h"
+#include "lwipopts.h"
+
+#if XPAR_GIGE_PCS_PMA_1000BASEX_CORE_PRESENT == 1 || \
+	XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT == 1
+#define PCM_PMA_CORE_PRESENT
+#else
+#undef PCM_PMA_CORE_PRESENT
+#endif
+
+u32_t link_speed = 100;
+extern XEmacPs_Config XEmacPs_ConfigTable[];
+extern u32_t phymapemac0[32];
+extern u32_t phymapemac1[32];
+extern u32_t phyaddrforemac;
+extern enum ethernet_link_status eth_link_status;
+
+#if !NO_SYS
+extern long xInsideISR;
+#endif
+
+XEmacPs_Config *xemacps_lookup_config(unsigned mac_base)
+{
+	XEmacPs_Config *cfgptr = NULL;
+	s32_t i;
+
+	for (i = 0; i < XPAR_XEMACPS_NUM_INSTANCES; i++) {
+		if (XEmacPs_ConfigTable[i].BaseAddress == mac_base) {
+			cfgptr = &XEmacPs_ConfigTable[i];
+			break;
+		}
+	}
+
+	return (cfgptr);
+}
+
+void init_emacps(xemacpsif_s *xemacps, struct netif *netif)
+{
+	XEmacPs *xemacpsp;
+	s32_t status = XST_SUCCESS;
+	u32_t i;
+	u32_t phyfoundforemac0 = FALSE;
+	u32_t phyfoundforemac1 = FALSE;
+
+	xemacpsp = &xemacps->emacps;
+
+#ifdef ZYNQMP_USE_JUMBO
+	XEmacPs_SetOptions(xemacpsp, XEMACPS_JUMBO_ENABLE_OPTION);
+#endif
+
+#ifdef LWIP_IGMP
+	XEmacPs_SetOptions(xemacpsp, XEMACPS_MULTICAST_OPTION);
+#endif
+
+	/* set mac address */
+	status = XEmacPs_SetMacAddress(xemacpsp, (void*)(netif->hwaddr), 1);
+	if (status != XST_SUCCESS) {
+		xil_printf("In %s:Emac Mac Address set failed...\r\n",__func__);
+	}
+
+	XEmacPs_SetMdioDivisor(xemacpsp, MDC_DIV_224);
+
+/*  Please refer to file header comments for the file xemacpsif_physpeed.c
+ *  to know more about the PHY programming sequence.
+ *  For PCS PMA core, phy_setup_emacps is called with the predefined PHY address
+ *  exposed through xparameters.h
+ *  For RGMII case, assuming multiple PHYs can be present on the MDIO bus,
+ *  detect_phy is called to get the addresses of the PHY present on
+ *  a particular MDIO bus (emac0 or emac1). This address map is populated
+ *  in phymapemac0 or phymapemac1.
+ *  phy_setup_emacps is then called for each PHY present on the MDIO bus.
+ */
+#ifdef PCM_PMA_CORE_PRESENT
+#ifdef  XPAR_GIGE_PCS_PMA_1000BASEX_CORE_PRESENT
+	link_speed = phy_setup_emacps(xemacpsp, XPAR_PCSPMA_1000BASEX_PHYADDR);
+#elif XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT
+	link_speed = phy_setup_emacps(xemacpsp, XPAR_PCSPMA_SGMII_PHYADDR);
+#endif
+#else
+	detect_phy(xemacpsp);
+	for (i = 31; i > 0; i--) {
+		if (xemacpsp->Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
+			if (phymapemac0[i] == TRUE) {
+				link_speed = phy_setup_emacps(xemacpsp, i);
+				phyfoundforemac0 = TRUE;
+				phyaddrforemac = i;
+			}
+		} else {
+			if (phymapemac1[i] == TRUE) {
+				link_speed = phy_setup_emacps(xemacpsp, i);
+				phyfoundforemac1 = TRUE;
+				phyaddrforemac = i;
+			}
+		}
+	}
+	/* If no PHY was detected, use broadcast PHY address of 0 */
+	if (xemacpsp->Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR) {
+		if (phyfoundforemac0 == FALSE)
+			link_speed = phy_setup_emacps(xemacpsp, 0);
+	} else {
+		if (phyfoundforemac1 == FALSE)
+			link_speed = phy_setup_emacps(xemacpsp, 0);
+	}
+#endif
+
+	if (link_speed == XST_FAILURE) {
+		eth_link_status = ETH_LINK_DOWN;
+		xil_printf("Phy setup failure %s \n\r",__func__);
+		return;
+	} else {
+		eth_link_status = ETH_LINK_UP;
+	}
+
+	XEmacPs_SetOperatingSpeed(xemacpsp, link_speed);
+	/* Setting the operating speed of the MAC needs a delay. */
+	{
+		volatile s32_t wait;
+		for (wait=0; wait < 20000; wait++);
+	}
+}
+
+void init_emacps_on_error (xemacpsif_s *xemacps, struct netif *netif)
+{
+	XEmacPs *xemacpsp;
+	s32_t status = XST_SUCCESS;
+
+	xemacpsp = &xemacps->emacps;
+
+	/* set mac address */
+	status = XEmacPs_SetMacAddress(xemacpsp, (void*)(netif->hwaddr), 1);
+	if (status != XST_SUCCESS) {
+		xil_printf("In %s:Emac Mac Address set failed...\r\n",__func__);
+	}
+
+	XEmacPs_SetOperatingSpeed(xemacpsp, link_speed);
+
+	/* Setting the operating speed of the MAC needs a delay. */
+	{
+		volatile s32_t wait;
+		for (wait=0; wait < 20000; wait++);
+	}
+}
+
+void setup_isr (struct xemac_s *xemac)
+{
+	xemacpsif_s   *xemacpsif;
+
+	xemacpsif = (xemacpsif_s *)(xemac->state);
+	/*
+	 * Setup callbacks
+	 */
+	XEmacPs_SetHandler(&xemacpsif->emacps, XEMACPS_HANDLER_DMASEND,
+				     (void *) emacps_send_handler,
+				     (void *) xemac);
+
+	XEmacPs_SetHandler(&xemacpsif->emacps, XEMACPS_HANDLER_DMARECV,
+				    (void *) emacps_recv_handler,
+				    (void *) xemac);
+
+	XEmacPs_SetHandler(&xemacpsif->emacps, XEMACPS_HANDLER_ERROR,
+				    (void *) emacps_error_handler,
+				    (void *) xemac);
+}
+
+void start_emacps (xemacpsif_s *xemacps)
+{
+	/* start the emacps */
+	XEmacPs_Start(&xemacps->emacps);
+}
+
+void restart_emacps_transmitter (xemacpsif_s *xemacps) {
+	u32_t Reg;
+	Reg = XEmacPs_ReadReg(xemacps->emacps.Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+	Reg = Reg & (~XEMACPS_NWCTRL_TXEN_MASK);
+	XEmacPs_WriteReg(xemacps->emacps.Config.BaseAddress,
+										XEMACPS_NWCTRL_OFFSET, Reg);
+
+	Reg = XEmacPs_ReadReg(xemacps->emacps.Config.BaseAddress,
+						XEMACPS_NWCTRL_OFFSET);
+	Reg = Reg | (XEMACPS_NWCTRL_TXEN_MASK);
+	XEmacPs_WriteReg(xemacps->emacps.Config.BaseAddress,
+										XEMACPS_NWCTRL_OFFSET, Reg);
+}
+
+void emacps_error_handler(void *arg,u8 Direction, u32 ErrorWord)
+{
+	struct xemac_s *xemac;
+	xemacpsif_s   *xemacpsif;
+	XEmacPs_BdRing *rxring;
+	XEmacPs_BdRing *txring;
+#if !NO_SYS
+	xInsideISR++;
+#endif
+
+	xemac = (struct xemac_s *)(arg);
+	xemacpsif = (xemacpsif_s *)(xemac->state);
+	rxring = &XEmacPs_GetRxRing(&xemacpsif->emacps);
+	txring = &XEmacPs_GetTxRing(&xemacpsif->emacps);
+
+	if (ErrorWord != 0) {
+		switch (Direction) {
+			case XEMACPS_RECV:
+			if (ErrorWord & XEMACPS_RXSR_HRESPNOK_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Receive DMA error\r\n"));
+				HandleEmacPsError(xemac);
+			}
+			if (ErrorWord & XEMACPS_RXSR_RXOVR_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Receive over run\r\n"));
+				emacps_recv_handler(arg);
+				setup_rx_bds(xemacpsif, rxring);
+			}
+			if (ErrorWord & XEMACPS_RXSR_BUFFNA_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Receive buffer not available\r\n"));
+				emacps_recv_handler(arg);
+				setup_rx_bds(xemacpsif, rxring);
+			}
+			break;
+			case XEMACPS_SEND:
+			if (ErrorWord & XEMACPS_TXSR_HRESPNOK_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Transmit DMA error\r\n"));
+				HandleEmacPsError(xemac);
+			}
+			if (ErrorWord & XEMACPS_TXSR_URUN_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Transmit under run\r\n"));
+				HandleTxErrors(xemac);
+			}
+			if (ErrorWord & XEMACPS_TXSR_BUFEXH_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Transmit buffer exhausted\r\n"));
+				HandleTxErrors(xemac);
+			}
+			if (ErrorWord & XEMACPS_TXSR_RXOVR_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Transmit retries exceeded limit\r\n"));
+				HandleTxErrors(xemac);
+			}
+			if (ErrorWord & XEMACPS_TXSR_FRAMERX_MASK) {
+				LWIP_DEBUGF(NETIF_DEBUG, ("Transmit collision\r\n"));
+				process_sent_bds(xemacpsif, txring);
+			}
+			break;
+		}
+	}
+#if !NO_SYS
+	xInsideISR--;
+#endif
+}
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_physpeed.c b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_physpeed.c
new file mode 100644
index 0000000..069020a
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xemacpsif_physpeed.c
@@ -0,0 +1,1037 @@
+/*
+ * Copyright (C) 2010 - 2019 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+/*****************************************************************************
+* This file xemacpsif_physpeed.c implements functionalities to:
+* - Detect the available PHYs connected to a MAC
+* - Negotiate speed
+* - Configure speed
+* - Configure the SLCR registers for the negotiated speed
+*
+* In a typical use case, users of the APIs implemented in this file need to
+* do the following.
+* - Call the API detect_phy. It probes for the available PHYs connected to a MAC.
+*   The MACs can be Emac0 (XPAR_XEMACPS_0_BASEADDR, 0xE000B000) or Emac1
+*   (XPAR_XEMACPS_1_BASEADDR, 0xE000C000). It populates an array to notify
+*   about the detected PHYs. The array phymapemac0 is used for Emac0 and
+*   phymapemac1 is for Emac1.
+* - The users need to parse the corresponding array, phymapemac0 or phymapemac1,
+*   to know the available PHYs for a MAC. The users then need to call
+*   phy_setup_emacps to set up the PHYs for proper speed setting. The API
+*   phy_setup_emacps should be called with the PHY address for which the speed
+*   needs to be negotiated or configured. In a specific use case, if 2 PHYs are
+*   connected to Emac0 with addresses of 7 and 11, then users get these address
+*   details from phymapemac0 (after calling detect_phy) and then call
+*   phy_setup_emacps twice, once with an address of 7 and once with 11.
+* - Points to note: The MAC can operate at only one speed. If a MAC is connected
+*   to multiple PHYs, then all PHYs must negotiate and be configured for the
+*   same speed.
+* - This file implements static functions to set proper SLCR clocks. As stated
+*   above, all PHYs connected to a MAC must operate at the same speed and the
+*   SLCR clock will be set up accordingly.
+*
+* This file implements the following PHY types.
+* - The standard RGMII.
+* - It provides support for the GMII to RGMII converter Xilinx IP. This Xilinx
+*   IP sits on the MDIO bus with a predefined PHY address. This IP exposes a
+*   register that needs to be programmed with the negotiated speed.
+*   For example, in a typical design, the Emac0 or Emac1 exposes GMII interface.
+*   The user can then use the Xilinx IP that converts GMII to RGMII.
+*   The external PHY (most typically Marvell 88E1116R) negotiates for speed
+*   with the remote PHY. The implementation in this file then programs the
+*   Xilinx IP with this negotiated speed. The Xilinx IP has a predefined PHY
+*   address exposed through xparameters.h.
+* - The SGMII and 1000 BaseX PHY interfaces.
+*   If the PHY interface is SGMII or 1000 BaseX a separate "get_IEEE_phy_speed"
+*   is used which is different from standard RGMII "get_IEEE_phy_speed".
+*   The 1000 BaseX always operates at 1000 Mbps. The SGMII interface can
+*   negotiate speed accordingly.
+*   For SGMII or 1000 BaseX interfaces, detect_phy should not be called.
+*   The PHY addresses for these interfaces are fixed at design time.
+*
+* Point to note:
+* A MAC cannot be connected to PHYs where there is a mix of SGMII, 1000 BaseX
+* and GMII/MII/RGMII.
+* In typical multiple-PHY designs, it is expected that the connected PHYs
+* will be RGMII or GMII.
+*
+* The users can choose not to negotiate speed from the lwIP settings GUI.
+* If they opt to choose a particular PHY speed, then the PHY will be hard
+* coded to operate only at the corresponding speed. It will not advertise
+* any other speeds. It is the user's responsibility to ensure that the remote
+* PHY supports the speed programmed through the lwIP GUI.
+*
+* The following combination of MDIO/PHY are supported:
+* - Multiple PHYs connected to the MDIO bus of a MAC. If Emac0 MDIO is connected
+*   to single/multiple PHYs, it is supported. Similarly Emac1 MDIO connected to
+*   single/multiple PHYs is supported.
+* - A design where both the interfaces are present and are connected to their own
+*   MDIO bus is supported.
+*
+* The following MDIO/PHY setup is not supported:
+* - A design has both the MACs present. MDIO bus is available only for one MAC
+*   (Emac0 or Emac1). This MDIO bus has multiple PHYs available for both the
+*   MACs. The negotiated speed for PHYs sitting on the MDIO bus of one MAC will
+*   not be seen by the other MAC and hence the speed/SLCR settings of the
+*   other MAC cannot be programmed. Hence this kind of design will not work
+*   with this implementation.
+*
+********************************************************************************/
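+
+#if 0 /* Illustrative sketch only -- not part of the imported driver. */
+/*
+ * Typical RGMII/GMII call sequence described above: probe the MDIO bus, then
+ * set up every PHY that was found. In this port, init_emacps() in
+ * xemacpsif_hw.c performs essentially this sequence; the snippet below is a
+ * simplified sketch for a single Emac0 instance (the function name is an
+ * example, prototypes are assumed visible and error handling is omitted).
+ */
+void example_phy_bringup(XEmacPs *xemacpsp)
+{
+	u32_t phy_addr;
+	u32_t speed = 0;
+
+	detect_phy(xemacpsp);	/* fills phymapemac0[] for Emac0 */
+	for (phy_addr = 31; phy_addr > 0; phy_addr--) {
+		if (phymapemac0[phy_addr] == TRUE) {
+			speed = phy_setup_emacps(xemacpsp, phy_addr);
+		}
+	}
+	XEmacPs_SetOperatingSpeed(xemacpsp, speed);
+}
+#endif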
+
+#include "netif/xemacpsif.h"
+#include "lwipopts.h"
+#include "xparameters_ps.h"
+#include "xparameters.h"
+#include "xemac_ieee_reg.h"
+
+#if defined (__aarch64__)
+#include "bspconfig.h"
+#include "xil_smc.h"
+#endif
+
+#define PHY_DETECT_REG  						1
+#define PHY_IDENTIFIER_1_REG					2
+#define PHY_IDENTIFIER_2_REG					3
+#define PHY_DETECT_MASK 					0x1808
+#define PHY_MARVELL_IDENTIFIER				0x0141
+#define PHY_TI_IDENTIFIER					0x2000
+#define PHY_REALTEK_IDENTIFIER				0x001c
+#define PHY_XILINX_PCS_PMA_ID1			0x0174
+#define PHY_XILINX_PCS_PMA_ID2			0x0C00
+
+#define XEMACPS_GMII2RGMII_SPEED1000_FD		0x140
+#define XEMACPS_GMII2RGMII_SPEED100_FD		0x2100
+#define XEMACPS_GMII2RGMII_SPEED10_FD		0x100
+#define XEMACPS_GMII2RGMII_REG_NUM			0x10
+
+#define PHY_REGCR		0x0D
+#define PHY_ADDAR		0x0E
+#define PHY_RGMIIDCTL	0x86
+#define PHY_RGMIICTL	0x32
+#define PHY_STS			0x11
+#define PHY_TI_CR		0x10
+#define PHY_TI_CFG4		0x31
+
+#define PHY_REGCR_ADDR	0x001F
+#define PHY_REGCR_DATA	0x401F
+#define PHY_TI_CRVAL	0x5048
+#define PHY_TI_CFG4RESVDBIT7	0x80
+
+/* Frequency setting */
+#define SLCR_LOCK_ADDR			(XPS_SYS_CTRL_BASEADDR + 0x4)
+#define SLCR_UNLOCK_ADDR		(XPS_SYS_CTRL_BASEADDR + 0x8)
+#define SLCR_GEM0_CLK_CTRL_ADDR	(XPS_SYS_CTRL_BASEADDR + 0x140)
+#define SLCR_GEM1_CLK_CTRL_ADDR	(XPS_SYS_CTRL_BASEADDR + 0x144)
+#define SLCR_GEM_SRCSEL_EMIO	0x40
+#define SLCR_LOCK_KEY_VALUE 	0x767B
+#define SLCR_UNLOCK_KEY_VALUE	0xDF0D
+#define SLCR_ADDR_GEM_RST_CTRL	(XPS_SYS_CTRL_BASEADDR + 0x214)
+#define EMACPS_SLCR_DIV_MASK	0xFC0FC0FF
+
+#if XPAR_GIGE_PCS_PMA_1000BASEX_CORE_PRESENT == 1 || \
+	XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT == 1
+#define PCM_PMA_CORE_PRESENT
+#else
+#undef PCM_PMA_CORE_PRESENT
+#endif
+
+#ifdef PCM_PMA_CORE_PRESENT
+#define IEEE_CTRL_RESET                         0x9140
+#define IEEE_CTRL_ISOLATE_DISABLE               0xFBFF
+#endif
+
+u32_t phymapemac0[32];
+u32_t phymapemac1[32];
+
+#if defined (PCM_PMA_CORE_PRESENT) || defined (CONFIG_LINKSPEED_AUTODETECT)
+static u32_t get_IEEE_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr);
+#endif
+static void SetUpSLCRDivisors(UINTPTR mac_baseaddr, s32_t speed);
+#if defined (CONFIG_LINKSPEED1000) || defined (CONFIG_LINKSPEED100) \
+	|| defined (CONFIG_LINKSPEED10)
+static u32_t configure_IEEE_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr, u32_t speed);
+#endif
+
+#ifdef PCM_PMA_CORE_PRESENT
+u32_t phy_setup_emacps (XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u32_t link_speed;
+	u16_t regval;
+	u16_t phy_id;
+
+	if(phy_addr == 0) {
+		for (phy_addr = 31; phy_addr > 0; phy_addr--) {
+			XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_IDENTIFIER_1_REG,
+					&phy_id);
+
+			if (phy_id == PHY_XILINX_PCS_PMA_ID1) {
+				XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_IDENTIFIER_2_REG,
+						&phy_id);
+				if (phy_id == PHY_XILINX_PCS_PMA_ID2) {
+					/* Found a valid PHY address */
+					LWIP_DEBUGF(NETIF_DEBUG, ("XEmacPs detect_phy: PHY detected at address %d.\r\n",
+							phy_addr));
+					break;
+				}
+			}
+		}
+	}
+
+	link_speed = get_IEEE_phy_speed(xemacpsp, phy_addr);
+	if (link_speed == 1000)
+		SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,1000);
+	else if (link_speed == 100)
+		SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,100);
+	else
+		SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,10);
+
+	xil_printf("link speed for phy address %d: %d\r\n", phy_addr, link_speed);
+	return link_speed;
+}
+
+static u32_t get_IEEE_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u16_t temp;
+	u16_t control;
+	u16_t status;
+	u16_t partner_capabilities;
+
+	xil_printf("Start PHY autonegotiation \r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
+	control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
+	control &= IEEE_CTRL_ISOLATE_DISABLE;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
+
+	xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
+		sleep(1);
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET,
+																&status);
+	}
+	xil_printf("autonegotiation complete \r\n");
+
+#if XPAR_GIGE_PCS_PMA_1000BASEX_CORE_PRESENT == 1
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 1);
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
+	if ((temp & 0x0020) == 0x0020) {
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
+		return 1000;
+	}
+	else {
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
+		xil_printf("Link error, temp = %x\r\n", temp);
+		return 0;
+	}
+#elif XPAR_GIGE_PCS_PMA_SGMII_CORE_PRESENT == 1
+	xil_printf("Waiting for Link to be up; Polling for SGMII core Reg \r\n");
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
+	while(!(temp & 0x8000)) {
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_PARTNER_ABILITIES_1_REG_OFFSET, &temp);
+	}
+	if((temp & 0x0C00) == 0x0800) {
+		return 1000;
+	}
+	else if((temp & 0x0C00) == 0x0400) {
+		return 100;
+	}
+	else if((temp & 0x0C00) == 0x0000) {
+		return 10;
+	} else {
+		xil_printf("get_IEEE_phy_speed(): Invalid speed bit value, Defaulting to Speed = 10 Mbps\r\n");
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &temp);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, 0x0100);
+		return 10;
+	}
+#endif
+
+}
+
+#else /*PCM_PMA_CORE_PRESENT not defined, GMII/RGMII case*/
+void detect_phy(XEmacPs *xemacpsp)
+{
+	u16_t phy_reg;
+	u32_t phy_addr;
+	u32_t emacnum;
+
+	if (xemacpsp->Config.BaseAddress == XPAR_XEMACPS_0_BASEADDR)
+		emacnum = 0;
+	else
+		emacnum = 1;
+	for (phy_addr = 31; phy_addr > 0; phy_addr--) {
+		XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_DETECT_REG,
+							&phy_reg);
+
+		if ((phy_reg != 0xFFFF) &&
+			((phy_reg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
+			/* Found a valid PHY address */
+			LWIP_DEBUGF(NETIF_DEBUG, ("XEmacPs detect_phy: PHY detected at address %d.\r\n",
+																	phy_addr));
+			if (emacnum == 0)
+				phymapemac0[phy_addr] = TRUE;
+			else
+				phymapemac1[phy_addr] = TRUE;
+
+			XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_IDENTIFIER_1_REG,
+							&phy_reg);
+			if ((phy_reg != PHY_MARVELL_IDENTIFIER) &&
+				(phy_reg != PHY_TI_IDENTIFIER) &&
+				(phy_reg != PHY_REALTEK_IDENTIFIER)) {
+				xil_printf("WARNING: Not a Marvell or TI or Realtek Ethernet PHY. Please verify the initialization sequence\r\n");
+			}
+		}
+	}
+}
+
+u32_t phy_setup_emacps (XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u32_t link_speed;
+	u32_t conv_present = 0;
+	u32_t convspeeddupsetting = 0;
+	u32_t convphyaddr = 0;
+
+#ifdef XPAR_GMII2RGMIICON_0N_ETH0_ADDR
+	convphyaddr = XPAR_GMII2RGMIICON_0N_ETH0_ADDR;
+	conv_present = 1;
+#endif
+#ifdef XPAR_GMII2RGMIICON_0N_ETH1_ADDR
+	convphyaddr = XPAR_GMII2RGMIICON_0N_ETH1_ADDR;
+	conv_present = 1;
+#endif
+
+#ifdef  CONFIG_LINKSPEED_AUTODETECT
+	link_speed = get_IEEE_phy_speed(xemacpsp, phy_addr);
+	if (link_speed == 1000) {
+		SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,1000);
+		convspeeddupsetting = XEMACPS_GMII2RGMII_SPEED1000_FD;
+	} else if (link_speed == 100) {
+		SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,100);
+		convspeeddupsetting = XEMACPS_GMII2RGMII_SPEED100_FD;
+	} else if (link_speed != XST_FAILURE){
+		SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,10);
+		convspeeddupsetting = XEMACPS_GMII2RGMII_SPEED10_FD;
+	} else {
+		xil_printf("Phy setup error \r\n");
+		return XST_FAILURE;
+	}
+#elif	defined(CONFIG_LINKSPEED1000)
+	SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,1000);
+	link_speed = 1000;
+	configure_IEEE_phy_speed(xemacpsp, phy_addr, link_speed);
+	convspeeddupsetting = XEMACPS_GMII2RGMII_SPEED1000_FD;
+	sleep(1);
+#elif	defined(CONFIG_LINKSPEED100)
+	SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,100);
+	link_speed = 100;
+	configure_IEEE_phy_speed(xemacpsp, phy_addr, link_speed);
+	convspeeddupsetting = XEMACPS_GMII2RGMII_SPEED100_FD;
+	sleep(1);
+#elif	defined(CONFIG_LINKSPEED10)
+	SetUpSLCRDivisors(xemacpsp->Config.BaseAddress,10);
+	link_speed = 10;
+	configure_IEEE_phy_speed(xemacpsp, phy_addr, link_speed);
+	convspeeddupsetting = XEMACPS_GMII2RGMII_SPEED10_FD;
+	sleep(1);
+#endif
+	if (conv_present) {
+		XEmacPs_PhyWrite(xemacpsp, convphyaddr,
+		XEMACPS_GMII2RGMII_REG_NUM, convspeeddupsetting);
+	}
+
+	xil_printf("link speed for phy address %d: %d\r\n", phy_addr, link_speed);
+	return link_speed;
+}
+
+#if defined CONFIG_LINKSPEED_AUTODETECT
+static u32_t get_TI_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u16_t control;
+	u16_t status;
+	u16_t status_speed;
+	u32_t timeout_counter = 0;
+	u32_t phyregtemp;
+	u32_t RetStatus;
+
+	xil_printf("Start PHY autonegotiation \r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, 0x1F, (u16_t *)&phyregtemp);
+	phyregtemp |= 0x4000;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, 0x1F, phyregtemp);
+	RetStatus = XEmacPs_PhyRead(xemacpsp, phy_addr, 0x1F, (u16_t *)&phyregtemp);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error during sw reset \n\r");
+		return XST_FAILURE;
+	}
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, 0, (u16_t *)&phyregtemp);
+	phyregtemp |= 0x8000;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, 0, phyregtemp);
+
+	/*
+	 * Delay
+	 */
+	sleep(1);
+
+	RetStatus = XEmacPs_PhyRead(xemacpsp, phy_addr, 0, (u16_t *)&phyregtemp);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error during reset \n\r");
+		return XST_FAILURE;
+	}
+
+	/* FIFO depth */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_TI_CR, PHY_TI_CRVAL);
+	RetStatus = XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_TI_CR, (u16_t *)&phyregtemp);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error writing to 0x10 \n\r");
+		return XST_FAILURE;
+	}
+
+	/* TX/RX tuning */
+	/* Write to PHY_RGMIIDCTL */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_ADDR);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, PHY_RGMIIDCTL);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_DATA);
+	RetStatus = XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, 0xA8);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error in tuning");
+		return XST_FAILURE;
+	}
+
+	/* Read PHY_RGMIIDCTL */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_ADDR);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, PHY_RGMIIDCTL);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_DATA);
+	RetStatus = XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_ADDAR, (u16_t *)&phyregtemp);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error in tuning");
+		return XST_FAILURE;
+	}
+
+	/* Write PHY_RGMIICTL */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_ADDR);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, PHY_RGMIICTL);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_DATA);
+	RetStatus = XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, 0xD3);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error in tuning");
+		return XST_FAILURE;
+	}
+
+	/* Read PHY_RGMIICTL */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_ADDR);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, PHY_RGMIICTL);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_DATA);
+	RetStatus = XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_ADDAR, (u16_t *)&phyregtemp);
+	if (RetStatus != XST_SUCCESS) {
+		xil_printf("Error in tuning");
+		return XST_FAILURE;
+	}
+
+	/* SW workaround for unstable link when RX_CTRL is not STRAP MODE 3 or 4 */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_ADDR);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, PHY_TI_CFG4);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_DATA);
+	RetStatus = XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_ADDAR, (u16_t *)&phyregtemp);
+	phyregtemp &= ~(PHY_TI_CFG4RESVDBIT7);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_ADDR);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, PHY_TI_CFG4);
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_REGCR, PHY_REGCR_DATA);
+	RetStatus = XEmacPs_PhyWrite(xemacpsp, phy_addr, PHY_ADDAR, phyregtemp);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
+	control |= IEEE_ASYMMETRIC_PAUSE_MASK;
+	control |= IEEE_PAUSE_MASK;
+	control |= ADVERTISE_100;
+	control |= ADVERTISE_10;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
+					&control);
+	control |= ADVERTISE_1000;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
+					control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
+	control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
+
+	while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
+		sleep(1);
+		timeout_counter++;
+
+		if (timeout_counter == 30) {
+			xil_printf("Auto negotiation error \r\n");
+			return XST_FAILURE;
+		}
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	}
+	xil_printf("autonegotiation complete \r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_STS, &status_speed);
+	if ((status_speed & 0xC000) == 0x8000) {
+		return 1000;
+	} else if ((status_speed & 0xC000) == 0x4000) {
+		return 100;
+	} else {
+		return 10;
+	}
+
+	return XST_SUCCESS;
+}
+
+static u32_t get_Marvell_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u16_t temp;
+	u16_t control;
+	u16_t status;
+	u16_t status_speed;
+	u32_t timeout_counter = 0;
+	u32_t temp_speed;
+
+	xil_printf("Start PHY autonegotiation \r\n");
+
+	XEmacPs_PhyWrite(xemacpsp,phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 2);
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_MAC, &control);
+	control |= IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_MAC, control);
+
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
+	control |= IEEE_ASYMMETRIC_PAUSE_MASK;
+	control |= IEEE_PAUSE_MASK;
+	control |= ADVERTISE_100;
+	control |= ADVERTISE_10;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
+					&control);
+	control |= ADVERTISE_1000;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
+					control);
+
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_COPPER_SPECIFIC_CONTROL_REG,
+																&control);
+	control |= (7 << 12);	/* max number of gigabit attempts */
+	control |= (1 << 11);	/* enable downshift */
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_COPPER_SPECIFIC_CONTROL_REG,
+																control);
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
+	control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control |= IEEE_CTRL_RESET_MASK;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
+
+	while (1) {
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+		if (control & IEEE_CTRL_RESET_MASK)
+			continue;
+		else
+			break;
+	}
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
+
+	while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
+		sleep(1);
+		XEmacPs_PhyRead(xemacpsp, phy_addr,
+						IEEE_COPPER_SPECIFIC_STATUS_REG_2,  &temp);
+		timeout_counter++;
+
+		if (timeout_counter == 30) {
+			xil_printf("Auto negotiation error \r\n");
+			return XST_FAILURE;
+		}
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	}
+	xil_printf("autonegotiation complete \r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr,IEEE_SPECIFIC_STATUS_REG,
+					&status_speed);
+	if (status_speed & 0x400) {
+		temp_speed = status_speed & IEEE_SPEED_MASK;
+
+		if (temp_speed == IEEE_SPEED_1000)
+			return 1000;
+		else if(temp_speed == IEEE_SPEED_100)
+			return 100;
+		else
+			return 10;
+	}
+
+	return XST_SUCCESS;
+}
+
+static u32_t get_Realtek_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u16_t control;
+	u16_t status;
+	u16_t status_speed;
+	u32_t timeout_counter = 0;
+	u32_t temp_speed;
+
+	xil_printf("Start PHY autonegotiation \r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &control);
+	control |= IEEE_ASYMMETRIC_PAUSE_MASK;
+	control |= IEEE_PAUSE_MASK;
+	control |= ADVERTISE_100;
+	control |= ADVERTISE_10;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
+					&control);
+	control |= ADVERTISE_1000;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET,
+					control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control |= IEEE_CTRL_AUTONEGOTIATE_ENABLE;
+	control |= IEEE_STAT_AUTONEGOTIATE_RESTART;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control |= IEEE_CTRL_RESET_MASK;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, control);
+
+	while (1) {
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+		if (control & IEEE_CTRL_RESET_MASK)
+			continue;
+		else
+			break;
+	}
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+
+	xil_printf("Waiting for PHY to complete autonegotiation.\r\n");
+
+	while ( !(status & IEEE_STAT_AUTONEGOTIATE_COMPLETE) ) {
+		sleep(1);
+		timeout_counter++;
+
+		if (timeout_counter == 30) {
+			xil_printf("Auto negotiation error \r\n");
+			return XST_FAILURE;
+		}
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_STATUS_REG_OFFSET, &status);
+	}
+	xil_printf("autonegotiation complete \r\n");
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr,IEEE_SPECIFIC_STATUS_REG,
+					&status_speed);
+	if (status_speed & 0x400) {
+		temp_speed = status_speed & IEEE_SPEED_MASK;
+
+		if (temp_speed == IEEE_SPEED_1000)
+			return 1000;
+		else if(temp_speed == IEEE_SPEED_100)
+			return 100;
+		else
+			return 10;
+	}
+
+	return XST_FAILURE;
+}
+
+static u32_t get_IEEE_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr)
+{
+	u16_t phy_identity;
+	u32_t RetStatus;
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, PHY_IDENTIFIER_1_REG,
+					&phy_identity);
+	if (phy_identity == PHY_TI_IDENTIFIER) {
+		RetStatus = get_TI_phy_speed(xemacpsp, phy_addr);
+	} else if (phy_identity == PHY_REALTEK_IDENTIFIER) {
+		RetStatus = get_Realtek_phy_speed(xemacpsp, phy_addr);
+	} else {
+		RetStatus = get_Marvell_phy_speed(xemacpsp, phy_addr);
+	}
+
+	return RetStatus;
+}
+#endif
+
+#if defined (CONFIG_LINKSPEED1000) || defined (CONFIG_LINKSPEED100) \
+	|| defined (CONFIG_LINKSPEED10)
+static u32_t configure_IEEE_phy_speed(XEmacPs *xemacpsp, u32_t phy_addr, u32_t speed)
+{
+	u16_t control;
+	u16_t autonereg;
+
+	XEmacPs_PhyWrite(xemacpsp,phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 2);
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_MAC, &control);
+	control |= IEEE_RGMII_TXRX_CLOCK_DELAYED_MASK;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_MAC, control);
+
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_PAGE_ADDRESS_REGISTER, 0);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+	autonereg |= IEEE_ASYMMETRIC_PAUSE_MASK;
+	autonereg |= IEEE_PAUSE_MASK;
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+
+	XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET, &control);
+	control &= ~IEEE_CTRL_LINKSPEED_1000M;
+	control &= ~IEEE_CTRL_LINKSPEED_100M;
+	control &= ~IEEE_CTRL_LINKSPEED_10M;
+
+	if (speed == 1000) {
+		control |= IEEE_CTRL_LINKSPEED_1000M;
+
+		/* Don't advertise PHY speed of 100 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+		autonereg &= (~ADVERTISE_100);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+
+		/* Don't advertise PHY speed of 10 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+		autonereg &= (~ADVERTISE_10);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+
+		/* Advertise PHY speed of 1000 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, &autonereg);
+		autonereg |= ADVERTISE_1000;
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, autonereg);
+	}
+
+	else if (speed == 100) {
+		control |= IEEE_CTRL_LINKSPEED_100M;
+
+		/* Don't advertise PHY speed of 1000 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, &autonereg);
+		autonereg &= (~ADVERTISE_1000);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, autonereg);
+
+		/* Don't advertise PHY speed of 10 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+		autonereg &= (~ADVERTISE_10);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+
+		/* Advertise PHY speed of 100 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+		autonereg |= ADVERTISE_100;
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+	}
+
+	else if (speed == 10) {
+		control |= IEEE_CTRL_LINKSPEED_10M;
+
+		/* Don't advertise PHY speed of 1000 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, &autonereg);
+		autonereg &= (~ADVERTISE_1000);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_1000_ADVERTISE_REG_OFFSET, autonereg);
+
+		/* Don't advertise PHY speed of 100 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+		autonereg &= (~ADVERTISE_100);
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+
+		/* Advertise PHY speed of 10 Mbps */
+		XEmacPs_PhyRead(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, &autonereg);
+		autonereg |= ADVERTISE_10;
+		XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_AUTONEGO_ADVERTISE_REG, autonereg);
+	}
+
+	XEmacPs_PhyWrite(xemacpsp, phy_addr, IEEE_CONTROL_REG_OFFSET,
+											control | IEEE_CTRL_RESET_MASK);
+	{
+		volatile s32_t wait;
+		for (wait=0; wait < 100000; wait++);
+	}
+	return 0;
+}
+#endif
+#endif /*PCM_PMA_CORE_PRESENT*/
+
+static void SetUpSLCRDivisors(UINTPTR mac_baseaddr, s32_t speed)
+{
+	volatile UINTPTR slcrBaseAddress;
+	u32_t SlcrDiv0 = 0;
+	u32_t SlcrDiv1 = 0;
+	u32_t SlcrTxClkCntrl;
+	u32_t gigeversion;
+	volatile UINTPTR CrlApbBaseAddr;
+	u32_t CrlApbDiv0 = 0;
+	u32_t CrlApbDiv1 = 0;
+	u32_t CrlApbGemCtrl;
+#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+	u32_t ClkId;
+#endif
+
+	gigeversion = ((Xil_In32(mac_baseaddr + 0xFC)) >> 16) & 0xFFF;
+	if (gigeversion == 2) {
+
+		Xil_Out32(SLCR_UNLOCK_ADDR, SLCR_UNLOCK_KEY_VALUE);
+
+		if (mac_baseaddr == ZYNQ_EMACPS_0_BASEADDR) {
+			slcrBaseAddress = SLCR_GEM0_CLK_CTRL_ADDR;
+		} else {
+			slcrBaseAddress = SLCR_GEM1_CLK_CTRL_ADDR;
+		}
+
+		if(Xil_In32(slcrBaseAddress) &
+			SLCR_GEM_SRCSEL_EMIO) {
+				return;
+		}
+
+		if (speed == 1000) {
+			if (mac_baseaddr == ZYNQ_EMACPS_0_BASEADDR) {
+#ifdef XPAR_PS7_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0
+				SlcrDiv0 = XPAR_PS7_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0;
+				SlcrDiv1 = XPAR_PS7_ETHERNET_0_ENET_SLCR_1000MBPS_DIV1;
+#endif
+			} else {
+#ifdef XPAR_PS7_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0
+				SlcrDiv0 = XPAR_PS7_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0;
+				SlcrDiv1 = XPAR_PS7_ETHERNET_1_ENET_SLCR_1000MBPS_DIV1;
+#endif
+			}
+		} else if (speed == 100) {
+			if (mac_baseaddr == ZYNQ_EMACPS_0_BASEADDR) {
+#ifdef XPAR_PS7_ETHERNET_0_ENET_SLCR_100MBPS_DIV0
+				SlcrDiv0 = XPAR_PS7_ETHERNET_0_ENET_SLCR_100MBPS_DIV0;
+				SlcrDiv1 = XPAR_PS7_ETHERNET_0_ENET_SLCR_100MBPS_DIV1;
+#endif
+			} else {
+#ifdef XPAR_PS7_ETHERNET_1_ENET_SLCR_100MBPS_DIV0
+				SlcrDiv0 = XPAR_PS7_ETHERNET_1_ENET_SLCR_100MBPS_DIV0;
+				SlcrDiv1 = XPAR_PS7_ETHERNET_1_ENET_SLCR_100MBPS_DIV1;
+#endif
+			}
+		} else {
+			if (mac_baseaddr == ZYNQ_EMACPS_0_BASEADDR) {
+#ifdef XPAR_PS7_ETHERNET_0_ENET_SLCR_10MBPS_DIV0
+				SlcrDiv0 = XPAR_PS7_ETHERNET_0_ENET_SLCR_10MBPS_DIV0;
+				SlcrDiv1 = XPAR_PS7_ETHERNET_0_ENET_SLCR_10MBPS_DIV1;
+#endif
+			} else {
+#ifdef XPAR_PS7_ETHERNET_1_ENET_SLCR_10MBPS_DIV0
+				SlcrDiv0 = XPAR_PS7_ETHERNET_1_ENET_SLCR_10MBPS_DIV0;
+				SlcrDiv1 = XPAR_PS7_ETHERNET_1_ENET_SLCR_10MBPS_DIV1;
+#endif
+			}
+		}
+
+		if (SlcrDiv0 != 0 && SlcrDiv1 != 0) {
+			SlcrTxClkCntrl = Xil_In32(slcrBaseAddress);
+			SlcrTxClkCntrl &= EMACPS_SLCR_DIV_MASK;
+			SlcrTxClkCntrl |= (SlcrDiv1 << 20);
+			SlcrTxClkCntrl |= (SlcrDiv0 << 8);
+			Xil_Out32(slcrBaseAddress, SlcrTxClkCntrl);
+			Xil_Out32(SLCR_LOCK_ADDR, SLCR_LOCK_KEY_VALUE);
+		} else {
+			xil_printf("Clock Divisors incorrect - Please check\r\n");
+		}
+	} else if (gigeversion == GEM_VERSION_ZYNQMP) {
+		/* Setup divisors in CRL_APB for Zynq Ultrascale+ MPSoC */
+		if (mac_baseaddr == ZYNQMP_EMACPS_0_BASEADDR) {
+			CrlApbBaseAddr = CRL_APB_GEM0_REF_CTRL;
+		} else if (mac_baseaddr == ZYNQMP_EMACPS_1_BASEADDR) {
+			CrlApbBaseAddr = CRL_APB_GEM1_REF_CTRL;
+		} else if (mac_baseaddr == ZYNQMP_EMACPS_2_BASEADDR) {
+			CrlApbBaseAddr = CRL_APB_GEM2_REF_CTRL;
+		} else if (mac_baseaddr == ZYNQMP_EMACPS_3_BASEADDR) {
+			CrlApbBaseAddr = CRL_APB_GEM3_REF_CTRL;
+		}
+
+		if (speed == 1000) {
+			if (mac_baseaddr == ZYNQMP_EMACPS_0_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_0_ENET_SLCR_1000MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_1_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_1_ENET_SLCR_1000MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_2_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_2_ENET_SLCR_1000MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_2_ENET_SLCR_1000MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_2_ENET_SLCR_1000MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_3_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_3_ENET_SLCR_1000MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_3_ENET_SLCR_1000MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_3_ENET_SLCR_1000MBPS_DIV1;
+#endif
+			}
+		} else if (speed == 100) {
+			if (mac_baseaddr == ZYNQMP_EMACPS_0_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_0_ENET_SLCR_100MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_0_ENET_SLCR_100MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_0_ENET_SLCR_100MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_1_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_1_ENET_SLCR_100MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_1_ENET_SLCR_100MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_1_ENET_SLCR_100MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_2_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_2_ENET_SLCR_100MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_2_ENET_SLCR_100MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_2_ENET_SLCR_100MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_3_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_3_ENET_SLCR_100MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_3_ENET_SLCR_100MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_3_ENET_SLCR_100MBPS_DIV1;
+#endif
+			}
+		} else {
+			if (mac_baseaddr == ZYNQMP_EMACPS_0_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_0_ENET_SLCR_10MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_0_ENET_SLCR_10MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_0_ENET_SLCR_10MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_1_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_1_ENET_SLCR_10MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_1_ENET_SLCR_10MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_1_ENET_SLCR_10MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_2_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_2_ENET_SLCR_10MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_2_ENET_SLCR_10MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_2_ENET_SLCR_10MBPS_DIV1;
+#endif
+			} else if (mac_baseaddr == ZYNQMP_EMACPS_3_BASEADDR) {
+#ifdef XPAR_PSU_ETHERNET_3_ENET_SLCR_10MBPS_DIV0
+				CrlApbDiv0 = XPAR_PSU_ETHERNET_3_ENET_SLCR_10MBPS_DIV0;
+				CrlApbDiv1 = XPAR_PSU_ETHERNET_3_ENET_SLCR_10MBPS_DIV1;
+#endif
+			}
+		}
+
+		if (CrlApbDiv0 != 0 && CrlApbDiv1 != 0) {
+		#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+			XSmc_OutVar RegRead;
+			RegRead = Xil_Smc(MMIO_READ_SMC_FID, (u64)(CrlApbBaseAddr),
+								0, 0, 0, 0, 0, 0);
+			CrlApbGemCtrl = RegRead.Arg0 >> 32;
+		#else
+			CrlApbGemCtrl = Xil_In32(CrlApbBaseAddr);
+        #endif
+			CrlApbGemCtrl &= ~CRL_APB_GEM_DIV0_MASK;
+			CrlApbGemCtrl |= CrlApbDiv0 << CRL_APB_GEM_DIV0_SHIFT;
+			CrlApbGemCtrl &= ~CRL_APB_GEM_DIV1_MASK;
+			CrlApbGemCtrl |= CrlApbDiv1 << CRL_APB_GEM_DIV1_SHIFT;
+		#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+			Xil_Smc(MMIO_WRITE_SMC_FID, (u64)(CrlApbBaseAddr) | ((u64)(0xFFFFFFFF) << 32),
+				(u64)CrlApbGemCtrl, 0, 0, 0, 0, 0);
+			do {
+			RegRead = Xil_Smc(MMIO_READ_SMC_FID, (u64)(CrlApbBaseAddr),
+				0, 0, 0, 0, 0, 0);
+			} while((RegRead.Arg0 >> 32) != CrlApbGemCtrl);
+		#else
+			Xil_Out32(CrlApbBaseAddr, CrlApbGemCtrl);
+        #endif
+		} else {
+			xil_printf("Clock Divisors incorrect - Please check\r\n");
+		}
+	} else if (gigeversion == GEM_VERSION_VERSAL) {
+		/* Setup divisors in CRL for Versal */
+		if (mac_baseaddr == VERSAL_EMACPS_0_BASEADDR) {
+			CrlApbBaseAddr = VERSAL_CRL_GEM0_REF_CTRL;
+#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+			ClkId = CLK_GEM0_REF;
+#endif
+		} else if (mac_baseaddr == VERSAL_EMACPS_1_BASEADDR) {
+			CrlApbBaseAddr = VERSAL_CRL_GEM1_REF_CTRL;
+#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+			ClkId = CLK_GEM1_REF;
+#endif
+		}
+
+		if (speed == 1000) {
+			if (mac_baseaddr == VERSAL_EMACPS_0_BASEADDR) {
+#ifdef XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0
+				CrlApbDiv0 = XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_0_ENET_SLCR_1000MBPS_DIV0;
+#endif
+			} else if (mac_baseaddr == VERSAL_EMACPS_1_BASEADDR) {
+#ifdef XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0
+				CrlApbDiv0 = XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_1_ENET_SLCR_1000MBPS_DIV0;
+#endif
+			}
+		} else if (speed == 100) {
+			if (mac_baseaddr == VERSAL_EMACPS_0_BASEADDR) {
+#ifdef XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_0_ENET_SLCR_100MBPS_DIV0
+				CrlApbDiv0 = XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_0_ENET_SLCR_100MBPS_DIV0;
+#endif
+			} else if (mac_baseaddr == VERSAL_EMACPS_1_BASEADDR) {
+#ifdef XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_1_ENET_SLCR_100MBPS_DIV0
+				CrlApbDiv0 = XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_1_ENET_SLCR_100MBPS_DIV0;
+#endif
+			}
+		} else {
+			if (mac_baseaddr == VERSAL_EMACPS_0_BASEADDR) {
+#ifdef XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_0_ENET_SLCR_10MBPS_DIV0
+				CrlApbDiv0 = XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_0_ENET_SLCR_10MBPS_DIV0;
+#endif
+			} else if (mac_baseaddr == VERSAL_EMACPS_1_BASEADDR) {
+#ifdef XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_1_ENET_SLCR_10MBPS_DIV0
+				CrlApbDiv0 = XPAR_VERSAL_CIPS_0_PSPMC_0_PSV_ETHERNET_1_ENET_SLCR_10MBPS_DIV0;
+#endif
+			}
+		}
+
+		if (CrlApbDiv0 != 0) {
+#if defined (__aarch64__) && (EL1_NONSECURE == 1)
+			Xil_Smc(PM_SET_DIVIDER_SMC_FID, (((u64)CrlApbDiv0 << 32) | ClkId), 0, 0, 0, 0, 0, 0);
+#else
+			CrlApbGemCtrl = Xil_In32(CrlApbBaseAddr);
+			CrlApbGemCtrl &= ~VERSAL_CRL_GEM_DIV_MASK;
+			CrlApbGemCtrl |= CrlApbDiv0 << VERSAL_CRL_APB_GEM_DIV_SHIFT;
+
+			Xil_Out32(CrlApbBaseAddr, CrlApbGemCtrl);
+#endif
+		} else {
+			xil_printf("Clock Divisors incorrect - Please check\r\n");
+		}
+	}
+
+	return;
+}
diff --git a/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xpqueue.c b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xpqueue.c
new file mode 100644
index 0000000..b5a773e
--- /dev/null
+++ b/embeddedsw/ThirdParty/sw_services/lwip211/src/contrib/ports/xilinx/netif/xpqueue.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2007 - 2019 Xilinx, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * This file is part of the lwIP TCP/IP stack.
+ *
+ */
+
+#include <stdlib.h>
+
+#include "netif/xpqueue.h"
+#include "xil_printf.h"
+
+#define NUM_QUEUES	2
+
+pq_queue_t pq_queue[NUM_QUEUES];
+
+pq_queue_t *
+pq_create_queue()
+{
+	static int i;
+	pq_queue_t *q = NULL;
+
+	if (i >= NUM_QUEUES) {
+		xil_printf("ERR: Max Queues allocated\n\r");
+		return q;
+	}
+
+	q = &pq_queue[i++];
+
+	if (!q)
+		return q;
+
+	q->head = q->tail = q->len = 0;
+
+	return q;
+}
+
+int
+pq_enqueue(pq_queue_t *q, void *p)
+{
+	if (q->len == PQ_QUEUE_SIZE)
+		return -1;
+
+	q->data[q->head] = p;
+	q->head = (q->head + 1)%PQ_QUEUE_SIZE;
+	q->len++;
+
+	return 0;
+}
+
+void*
+pq_dequeue(pq_queue_t *q)
+{
+	int ptail;
+
+	if (q->len == 0)
+		return NULL;
+
+	ptail = q->tail;
+	q->tail = (q->tail + 1)%PQ_QUEUE_SIZE;
+	q->len--;
+
+	return q->data[ptail];
+}
+
+int
+pq_qlength(pq_queue_t *q)
+{
+	return q->len;
+}
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.c b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.c
new file mode 100644
index 0000000..88a35cb
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.c
@@ -0,0 +1,487 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xemacps.c
+* @addtogroup emacps_v3_16
+* @{
+*
+* The XEmacPs driver. Functions in this file are the minimum required functions
+* for this driver. See xemacps.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a wsy  01/10/10 First release
+* 2.1  srt  07/15/14 Add support for Zynq Ultrascale Mp GEM specification and
+*		      64-bit changes.
+* 3.00 kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+* 3.0  hk   02/20/15 Added support for jumbo frames. Increase AHB burst.
+*                    Disable extended mode. Perform all 64 bit changes under
+*                    check for arch64.
+* 3.1  hk   08/10/15 Update upper 32 bit tx and rx queue ptr registers
+* 3.5  hk   08/14/17 Update cache coherency information of the interface in
+*                    its config structure.
+* 3.8  hk   09/17/18 Cleanup stale comments.
+* 3.8  mus  11/05/18 Support 64 bit DMA addresses for Microblaze-X platform.
+* 3.10 hk   05/16/19 Clear status registers properly in reset
+* 3.11 sd   02/14/20 Add clock support
+*
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xemacps.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+void XEmacPs_StubHandler(void);	/* Default handler routine */
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+* Initialize a specific XEmacPs instance/driver. The initialization entails:
+* - Initialize fields of the XEmacPs instance structure
+* - Reset hardware and apply default options
+* - Configure the DMA channels
+*
+* The PHY is set up independently of the device. Use the MII or whatever other
+* interface may be present for setup.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param CfgPtr is the device configuration structure containing required
+*        hardware build data.
+* @param EffectiveAddress is the base address of the device. If address
+*        translation is not utilized, this parameter can be passed in using
+*        CfgPtr->BaseAddress to specify the physical base address.
+*
+* @return
+* - XST_SUCCESS if initialization was successful
+*
+******************************************************************************/
+LONG XEmacPs_CfgInitialize(XEmacPs *InstancePtr, XEmacPs_Config * CfgPtr,
+			   UINTPTR EffectiveAddress)
+{
+	/* Verify arguments */
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(CfgPtr != NULL);
+
+	/* Set device base address and ID */
+	InstancePtr->Config.DeviceId = CfgPtr->DeviceId;
+	InstancePtr->Config.BaseAddress = EffectiveAddress;
+	InstancePtr->Config.IsCacheCoherent = CfgPtr->IsCacheCoherent;
+#if defined  (XCLOCKING)
+	InstancePtr->Config.RefClk = CfgPtr->RefClk;
+#endif
+
+	InstancePtr->Config.S1GDiv0 = CfgPtr->S1GDiv0;
+	InstancePtr->Config.S1GDiv1 = CfgPtr->S1GDiv1;
+	InstancePtr->Config.S100MDiv0 = CfgPtr->S100MDiv0;
+	InstancePtr->Config.S100MDiv1 = CfgPtr->S100MDiv1;
+	InstancePtr->Config.S10MDiv0 = CfgPtr->S10MDiv0;
+	InstancePtr->Config.S10MDiv1 = CfgPtr->S10MDiv1;
+
+	/* Set callbacks to an initial stub routine */
+	InstancePtr->SendHandler = ((XEmacPs_Handler)((void*)XEmacPs_StubHandler));
+	InstancePtr->RecvHandler = ((XEmacPs_Handler)(void*)XEmacPs_StubHandler);
+	InstancePtr->ErrorHandler = ((XEmacPs_ErrHandler)(void*)XEmacPs_StubHandler);
+
+	/* Reset the hardware and set default options */
+	InstancePtr->IsReady = XIL_COMPONENT_IS_READY;
+	XEmacPs_Reset(InstancePtr);
+
+	return (LONG)(XST_SUCCESS);
+}
+
+
+/*****************************************************************************/
+/**
+* Start the Ethernet controller as follows:
+*   - Enable transmitter if XEMACPS_TRANSMITTER_ENABLE_OPTION is set
+*   - Enable receiver if XEMACPS_RECEIVER_ENABLE_OPTION is set
+*   - Start the SG DMA send and receive channels and enable the device
+*     interrupt
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @return N/A
+*
+* @note
+* Hardware is configured with scatter-gather DMA; the driver expects to start
+* the scatter-gather channels and expects that the user has previously set up
+* the buffer descriptor lists.
+*
+* This function makes use of internal resources that are shared between the
+* Start, Stop, and Set/ClearOptions functions. So if one task might be setting
+* device options while another is trying to start the device, the user is
+* required to provide protection of this shared data (typically using a
+* semaphore).
+*
+* This function must not be preempted by an interrupt that may service the
+* device.
+*
+******************************************************************************/
+void XEmacPs_Start(XEmacPs *InstancePtr)
+{
+	u32 Reg;
+
+	/* Assert bad arguments and conditions */
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+#if defined  (XCLOCKING)
+	if (InstancePtr->IsStarted != (u32)XIL_COMPONENT_IS_STARTED) {
+		Xil_ClockEnable(InstancePtr->Config.RefClk);
+	}
+#endif
+
+	/* Start DMA */
+	/* When starting the DMA channels, both transmit and receive sides
+	 * need an initialized BD list.
+	 */
+	if (InstancePtr->Version == 2) {
+		Xil_AssertVoid(InstancePtr->RxBdRing.BaseBdAddr != 0);
+		Xil_AssertVoid(InstancePtr->TxBdRing.BaseBdAddr != 0);
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_RXQBASE_OFFSET,
+			   InstancePtr->RxBdRing.BaseBdAddr);
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_TXQBASE_OFFSET,
+			   InstancePtr->TxBdRing.BaseBdAddr);
+	}
+
+	/* Clear any existing interrupt status */
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_ISR_OFFSET,
+			   XEMACPS_IXR_ALL_MASK);
+
+	/* Enable transmitter if not already enabled */
+	if ((InstancePtr->Options & (u32)XEMACPS_TRANSMITTER_ENABLE_OPTION)!=0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+		if ((!(Reg & XEMACPS_NWCTRL_TXEN_MASK))==TRUE) {
+			XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+					   XEMACPS_NWCTRL_OFFSET,
+				   Reg | (u32)XEMACPS_NWCTRL_TXEN_MASK);
+		}
+	}
+
+	/* Enable receiver if not already enabled */
+	if ((InstancePtr->Options & XEMACPS_RECEIVER_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+		if ((!(Reg & XEMACPS_NWCTRL_RXEN_MASK))==TRUE) {
+			XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+					   XEMACPS_NWCTRL_OFFSET,
+				   Reg | (u32)XEMACPS_NWCTRL_RXEN_MASK);
+		}
+	}
+
+        /* Enable TX and RX interrupts */
+        XEmacPs_IntEnable(InstancePtr, (XEMACPS_IXR_TX_ERR_MASK |
+	XEMACPS_IXR_RX_ERR_MASK | (u32)XEMACPS_IXR_FRAMERX_MASK |
+	(u32)XEMACPS_IXR_TXCOMPL_MASK));
+
+	/* Enable TX Q1 Interrupts */
+	if (InstancePtr->Version > 2)
+		XEmacPs_IntQ1Enable(InstancePtr, XEMACPS_INTQ1_IXR_ALL_MASK);
+
+	/* Mark as started */
+	InstancePtr->IsStarted = XIL_COMPONENT_IS_STARTED;
+
+	return;
+}
+
+
+/*****************************************************************************/
+/**
+* Gracefully stop the Ethernet MAC as follows:
+*   - Disable all interrupts from this device
+*   - Stop DMA channels
+*   - Disable the transmitter and receiver
+*
+* Device options currently in effect are not changed.
+*
+* This function will disable all interrupts. Default interrupts settings that
+* had been enabled will be restored when XEmacPs_Start() is called.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+* @note
+* This function makes use of internal resources that are shared between the
+* Start, Stop, SetOptions, and ClearOptions functions. So if one task might be
+* setting device options while another is trying to start the device, the user
+* is required to provide protection of this shared data (typically using a
+* semaphore).
+*
+* Stopping the DMA channels causes this function to block until the DMA
+* operation is complete.
+*
+******************************************************************************/
+void XEmacPs_Stop(XEmacPs *InstancePtr)
+{
+	u32 Reg;
+
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Disable all interrupts */
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_IDR_OFFSET,
+			   XEMACPS_IXR_ALL_MASK);
+
+	/* Disable the receiver & transmitter */
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWCTRL_OFFSET);
+	Reg &= (u32)(~XEMACPS_NWCTRL_RXEN_MASK);
+	Reg &= (u32)(~XEMACPS_NWCTRL_TXEN_MASK);
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_NWCTRL_OFFSET, Reg);
+
+	/* Mark as stopped */
+	InstancePtr->IsStarted = 0U;
+#if defined  (XCLOCKING)
+	Xil_ClockDisable(InstancePtr->Config.RefClk);
+#endif
+}
+
+
+/*****************************************************************************/
+/**
+* Perform a graceful reset of the Ethernet MAC. Resets the DMA channels, the
+* transmitter, and the receiver.
+*
+* Steps to reset
+* - Stops transmit and receive channels
+* - Stops DMA
+* - Configure transmit and receive buffer size to default
+* - Clear transmit and receive status register and counters
+* - Clear all interrupt sources
+* - Clear the PHY address (if any was previously detected)
+* - Clear MAC addresses (1-4) as well as Type IDs and hash value
+*
+* All options are placed in their default state. Any frames in the
+* descriptor lists will remain in the lists. The side effect of doing
+* this is that, after a reset and a subsequent restart of the device, frames
+* that were in the lists before the reset may be transmitted or received.
+*
+* The upper layer software is responsible for re-configuring (if necessary)
+* and restarting the MAC after the reset. Note also that driver statistics
+* are not cleared on reset. It is up to the upper layer software to clear the
+* statistics if needed.
+*
+* When a reset is required, the driver notifies the upper layer software of
+* this need through the ErrorHandler callback and specific status codes.
+* The upper layer software is responsible for calling this Reset function
+* and then re-configuring the device.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+*
+******************************************************************************/
+void XEmacPs_Reset(XEmacPs *InstancePtr)
+{
+	u32 Reg;
+	u8 i;
+	s8 EmacPs_zero_MAC[6] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
+
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Stop the device and reset hardware */
+	XEmacPs_Stop(InstancePtr);
+	InstancePtr->Options = XEMACPS_DEFAULT_OPTIONS;
+
+	InstancePtr->Version = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress, 0xFC);
+
+	InstancePtr->Version = (InstancePtr->Version >> 16) & 0xFFF;
+
+	InstancePtr->MaxMtuSize = XEMACPS_MTU;
+	InstancePtr->MaxFrameSize = XEMACPS_MTU + XEMACPS_HDR_SIZE +
+					XEMACPS_TRL_SIZE;
+	InstancePtr->MaxVlanFrameSize = InstancePtr->MaxFrameSize +
+					XEMACPS_HDR_VLAN_SIZE;
+	InstancePtr->RxBufMask = XEMACPS_RXBUF_LEN_MASK;
+
+	/* Setup hardware with default values */
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_NWCTRL_OFFSET,
+			(XEMACPS_NWCTRL_STATCLR_MASK |
+			XEMACPS_NWCTRL_MDEN_MASK) &
+			(u32)(~XEMACPS_NWCTRL_LOOPEN_MASK));
+
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_NWCFG_OFFSET);
+	Reg &= XEMACPS_NWCFG_MDCCLKDIV_MASK;
+
+	Reg = Reg | (u32)XEMACPS_NWCFG_100_MASK |
+			(u32)XEMACPS_NWCFG_FDEN_MASK |
+			(u32)XEMACPS_NWCFG_UCASTHASHEN_MASK;
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCFG_OFFSET, Reg);
+	if (InstancePtr->Version > 2) {
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_NWCFG_OFFSET,
+			(XEmacPs_ReadReg(InstancePtr->Config.BaseAddress, XEMACPS_NWCFG_OFFSET) |
+				XEMACPS_NWCFG_DWIDTH_64_MASK));
+	}
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_DMACR_OFFSET,
+			(((((u32)XEMACPS_RX_BUF_SIZE / (u32)XEMACPS_RX_BUF_UNIT) +
+				(((((u32)XEMACPS_RX_BUF_SIZE %
+				(u32)XEMACPS_RX_BUF_UNIT))!=(u32)0) ? 1U : 0U)) <<
+				(u32)(XEMACPS_DMACR_RXBUF_SHIFT)) &
+				(u32)(XEMACPS_DMACR_RXBUF_MASK)) |
+				(u32)XEMACPS_DMACR_RXSIZE_MASK |
+				(u32)XEMACPS_DMACR_TXSIZE_MASK);
+
+
+	if (InstancePtr->Version > 2) {
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_DMACR_OFFSET,
+			(XEmacPs_ReadReg(InstancePtr->Config.BaseAddress, XEMACPS_DMACR_OFFSET) |
+#if defined(__aarch64__) || defined(__arch64__)
+			(u32)XEMACPS_DMACR_ADDR_WIDTH_64 |
+#endif
+			(u32)XEMACPS_DMACR_INCR16_AHB_BURST));
+	}
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_TXSR_OFFSET, XEMACPS_SR_ALL_MASK);
+
+	XEmacPs_SetQueuePtr(InstancePtr, 0, 0x00U, (u16)XEMACPS_SEND);
+	if (InstancePtr->Version > 2)
+		XEmacPs_SetQueuePtr(InstancePtr, 0, 0x01U, (u16)XEMACPS_SEND);
+	XEmacPs_SetQueuePtr(InstancePtr, 0, 0x00U, (u16)XEMACPS_RECV);
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_RXSR_OFFSET, XEMACPS_SR_ALL_MASK);
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_IDR_OFFSET,
+			   XEMACPS_IXR_ALL_MASK);
+
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_ISR_OFFSET);
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_ISR_OFFSET,
+			   Reg);
+
+	XEmacPs_ClearHash(InstancePtr);
+
+	for (i = 1U; i < 5U; i++) {
+		(void)XEmacPs_SetMacAddress(InstancePtr, EmacPs_zero_MAC, i);
+		(void)XEmacPs_SetTypeIdCheck(InstancePtr, 0x00000000U, i);
+	}
+
+	/* clear all counters */
+	for (i = 0U; i < (u8)((XEMACPS_LAST_OFFSET - XEMACPS_OCTTXL_OFFSET) / 4U);
+	     i++) {
+		(void)XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+                                   XEMACPS_OCTTXL_OFFSET + (u32)(((u32)i) * ((u32)4)));
+	}
+
+	/* Disable the receiver */
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWCTRL_OFFSET);
+	Reg &= (u32)(~XEMACPS_NWCTRL_RXEN_MASK);
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_NWCTRL_OFFSET, Reg);
+
+	/* Sync default options with hardware but leave receiver and
+         * transmitter disabled. They get enabled with XEmacPs_Start() if
+	 * XEMACPS_TRANSMITTER_ENABLE_OPTION and
+         * XEMACPS_RECEIVER_ENABLE_OPTION are set.
+	 */
+	(void)XEmacPs_SetOptions(InstancePtr, InstancePtr->Options &
+			    ~((u32)XEMACPS_TRANSMITTER_ENABLE_OPTION |
+			      (u32)XEMACPS_RECEIVER_ENABLE_OPTION));
+
+	(void)XEmacPs_ClearOptions(InstancePtr, ~InstancePtr->Options);
+}
+
+
+/******************************************************************************/
+/**
+ * This is a stub for the asynchronous callbacks. The stub is here in case the
+ * upper layer forgot to set the handler(s). On initialization, all handlers are
+ * set to this callback. It is considered an error for this handler to be
+ * invoked.
+ *
+ ******************************************************************************/
+void XEmacPs_StubHandler(void)
+{
+	Xil_AssertVoidAlways();
+}
+
+/*****************************************************************************/
+/**
+* This function sets the start address of the transmit/receive buffer queue.
+*
+* @param	InstancePtr is a pointer to the instance to be worked on.
+* @param	QPtr is the address of the Queue to be written
+* @param	QueueNum is the Buffer Queue Index
+* @param	Direction indicates Transmit/Receive
+*
+* @note
+* The buffer queue address has to be set before starting the transfer, so
+* this function has to be called prior to XEmacPs_Start().
+*
+******************************************************************************/
+void XEmacPs_SetQueuePtr(XEmacPs *InstancePtr, UINTPTR QPtr, u8 QueueNum,
+			 u16 Direction)
+{
+	/* Assert bad arguments and conditions */
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+        /* If already started, then there is nothing to do */
+        if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+                return;
+        }
+
+	if (QueueNum == 0x00U) {
+		if (Direction == XEMACPS_SEND) {
+			XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_TXQBASE_OFFSET,
+				(QPtr & ULONG64_LO_MASK));
+		} else {
+			XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_RXQBASE_OFFSET,
+				(QPtr & ULONG64_LO_MASK));
+		}
+	}
+	 else {
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_TXQ1BASE_OFFSET,
+			(QPtr & ULONG64_LO_MASK));
+	}
+#ifdef __aarch64__
+	if (Direction == XEMACPS_SEND) {
+		/* Set the MSB of TX Queue start address */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_MSBBUF_TXQBASE_OFFSET,
+				(u32)((QPtr & ULONG64_HI_MASK) >> 32U));
+	} else {
+		/* Set the MSB of RX Queue start address */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_MSBBUF_RXQBASE_OFFSET,
+				(u32)((QPtr & ULONG64_HI_MASK) >> 32U));
+	}
+#endif
+}
+/** @} */
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.h b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.h
new file mode 100644
index 0000000..ac3e7a7
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps.h
@@ -0,0 +1,843 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/****************************************************************************/
+/**
+ *
+ * @file xemacps.h
+* @addtogroup emacps_v3_16
+* @{
+* @details
+ *
+ * The Xilinx Embedded Processor Block Ethernet driver.
+ *
+ * For a full description of XEMACPS features, please see the hardware spec.
+ * This driver supports the following features:
+ *   - Memory mapped access to host interface registers
+ *   - Statistics counter registers for RMON/MIB
+ *   - API for interrupt driven frame transfers for hardware configured DMA
+ *   - Virtual memory support
+ *   - Unicast, broadcast, and multicast receive address filtering
+ *   - Full and half duplex operation
+ *   - Automatic PAD & FCS insertion and stripping
+ *   - Flow control
+ *   - Support up to four 48bit addresses
+ *   - Address checking for four specific 48bit addresses
+ *   - VLAN frame support
+ *   - Pause frame support
+ *   - Large frame support up to 1536 bytes
+ *   - Checksum offload
+ *
+ * <b>Driver Description</b>
+ *
+ * The device driver enables higher layer software (e.g., an application) to
+ * communicate with the XEmacPs. The driver handles transmission and reception
+ * of Ethernet frames, as well as configuration and control. No pre- or
+ * post-processing of frame data is performed. The driver does not validate
+ * the contents of an incoming frame beyond the checks already performed in
+ * hardware.
+ * A single device driver can support multiple devices even when those devices
+ * have significantly different configurations.
+ *
+ * <b>Initialization & Configuration</b>
+ *
+ * The XEmacPs_Config structure is used by the driver to configure itself.
+ * This configuration structure is typically created by the tool-chain based
+ * on hardware build properties.
+ *
+ * The driver instance can be initialized using:
+ *
+ *   - XEmacPs_CfgInitialize(InstancePtr, CfgPtr, EffectiveAddress):  Uses a
+ *     configuration structure provided by the caller. If running in a system
+ *     with address translation, the provided virtual memory base address
+ *     replaces the physical address present in the configuration structure.
+ *
+ * The device currently supports DMA only; no FIFO mode is
+ * supported. The driver expects to start the DMA channels and expects that
+ * the user has set up the buffer descriptor lists.
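+ *
+ * A minimal initialization sketch, assuming CfgPtr points at the
+ * tool-generated XEmacPs_Config entry for the device and that no address
+ * translation is in use (the name EmacPsConfig below is hypothetical):
+ *
+ * @code
+ * extern XEmacPs_Config EmacPsConfig;   /* assumption: BSP-generated entry */
+ * XEmacPs Mac;                          /* driver instance                 */
+ * XEmacPs_Config *CfgPtr = &EmacPsConfig;
+ *
+ * /* Initialize the instance with the physical base address */
+ * if (XEmacPs_CfgInitialize(&Mac, CfgPtr, CfgPtr->BaseAddress) != XST_SUCCESS) {
+ *     /* handle the initialization error */
+ * }
+ * @endcode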
+ *
+ * <b>Interrupts and Asynchronous Callbacks</b>
+ *
+ * The driver has no dependencies on the interrupt controller. When an
+ * interrupt occurs, the handler will perform a small amount of
+ * housekeeping work, determine the source of the interrupt, and call the
+ * appropriate callback function. All callbacks are registered by the user
+ * level application.
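+ *
+ * A hedged sketch of callback registration using XEmacPs_SetHandler(); the
+ * handler-type constants named below are assumed to be the DMA send/receive
+ * handler IDs declared by this driver, and Mac is an initialized instance:
+ *
+ * @code
+ * /* Application callbacks invoked from the driver's interrupt handler */
+ * static void TxDone(void *CallBackRef) { /* release transmitted buffers */ }
+ * static void RxDone(void *CallBackRef) { /* hand received frames upward */ }
+ *
+ * XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_DMASEND, (void *)TxDone, &Mac);
+ * XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_DMARECV, (void *)RxDone, &Mac);
+ * @endcode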
+ *
+ * <b>Virtual Memory</b>
+ *
+ * All virtual to physical memory mappings must occur prior to accessing the
+ * driver API.
+ *
+ * For DMA transactions, user buffers supplied to the driver must be in terms
+ * of their physical address.
+ *
+ * <b>DMA</b>
+ *
+ * The DMA engine uses buffer descriptors (BDs) to describe Ethernet frames.
+ * These BDs are typically chained together into a list the hardware follows
+ * when transferring data in and out of the packet buffers. Each BD describes
+ * a memory region containing either a full or partial Ethernet packet.
+ *
+ * Interrupt coalescing is not supported from this built-in DMA engine.
+ *
+ * This API requires the user to understand how the DMA operates. The
+ * following paragraphs provide some explanation, but the user is encouraged
+ * to read documentation in xemacps_bdring.h as well as study example code
+ * that accompanies this driver.
+ *
+ * The API is designed to get BDs to and from the DMA engine in the most
+ * efficient way possible. The first step is to establish a memory region
+ * to contain all BDs for a specific channel. This is done with
+ * XEmacPs_BdRingCreate(). This function sets up a BD ring that hardware will
+ * follow as BDs are processed. The ring will consist of a user-defined number
+ * of BDs which will all be partially initialized. For example, on the transmit
+ * channel, the driver will initialize all BDs so that they are configured
+ * for transmit. The more fields that can be permanently set up at
+ * initialization, the fewer accesses will be needed to each BD while
+ * the DMA engine is in operation, resulting in better throughput and CPU
+ * utilization. The best-case initialization would require the user to set
+ * only a frame buffer address and length prior to submitting the BD to the
+ * engine.
+ *
+ * BDs move through the engine with the help of functions
+ * XEmacPs_BdRingAlloc(), XEmacPs_BdRingToHw(), XEmacPs_BdRingFromHw(),
+ * and XEmacPs_BdRingFree().
+ * All these functions handle BDs that are in place. That is, there are no
+ * copies of BDs kept anywhere and any BD the user interacts with is an actual
+ * BD from the same ring hardware accesses.
+ *
+ * BDs in the ring go through a series of states as follows:
+ *   1. Idle. The driver controls BDs in this state.
+ *   2. The user has data to transfer. XEmacPs_BdRingAlloc() is called to
+ *      reserve BD(s). Once allocated, the user may setup the BD(s) with
+ *      frame buffer address, length, and other attributes. The user controls
+ *      BDs in this state.
+ *   3. The user submits BDs to the DMA engine with XEmacPs_BdRingToHw. BDs
+ *      in this state are either waiting to be processed by hardware, are in
+ *      process, or have been processed. The DMA engine controls BDs in this
+ *      state.
+ *   4. Processed BDs are retrieved with XEmacPs_BdRingFromHw() by the
+ *      user. Once retrieved, the user can examine each BD for the outcome of
+ *      the DMA transfer. The user controls BDs in this state. After examining
+ *      the BDs the user calls XEmacPs_BdRingFree() which places the BDs back
+ *      into state 1.
+ *
+ * Each of the four BD accessor functions operates on a set of BDs. A set is
+ * defined as a segment of the BD ring consisting of one or more BDs. The user
+ * views the set as a pointer to the first BD along with the number of BDs for
+ * that set. The set can be navigated by using the XEmacPs_BdNext() macro. The
+ * user must exercise extreme caution when changing BDs in a set, as there is
+ * nothing to prevent stepping XEmacPs_BdNext() past the end of the set and
+ * modifying a BD out of bounds.
+ *
+ * XEmacPs_BdRingAlloc() + XEmacPs_BdRingToHw(), as well as
+ * XEmacPs_BdRingFromHw() + XEmacPs_BdRingFree(), are designed to be used in
+ * tandem. The BD set retrieved with BdRingAlloc should be the same one
+ * provided to hardware with BdRingToHw; the same applies to BdRingFromHw and
+ * BdRingFree.
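+ *
+ * A simplified transmit-path sketch; it assumes the TX ring has already been
+ * created with XEmacPs_BdRingCreate(), that the frame buffer has been flushed
+ * from the data cache, and that the BD accessor macros and XEmacPs_Transmit()
+ * named below exist as in xemacps_bd.h and this header (treat the exact
+ * names as assumptions). Mac is an initialized instance:
+ *
+ * @code
+ * XEmacPs_BdRing *TxRing = &Mac.TxBdRing;   /* transmit BD ring             */
+ * XEmacPs_Bd *BdPtr;
+ * u8 FrameBuf[1536];                        /* frame data, already flushed  */
+ * u32 FrameLen = 60;                        /* example frame length (bytes) */
+ *
+ * if (XEmacPs_BdRingAlloc(TxRing, 1, &BdPtr) == XST_SUCCESS) {
+ *     XEmacPs_BdSetAddressTx(BdPtr, (UINTPTR)FrameBuf); /* frame address   */
+ *     XEmacPs_BdSetLength(BdPtr, FrameLen);             /* frame length    */
+ *     XEmacPs_BdSetLast(BdPtr);                         /* single-BD frame */
+ *     XEmacPs_BdClearTxUsed(BdPtr);                     /* hand BD to HW   */
+ *     XEmacPs_BdRingToHw(TxRing, 1, BdPtr);             /* submit to DMA   */
+ *     XEmacPs_Transmit(&Mac);                           /* start transmit  */
+ * }
+ * @endcode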
+ *
+ * <b>Alignment & Data Cache Restrictions</b>
+ *
+ * Due to the design of the hardware, all RX buffers and BDs need to be 4-byte
+ * aligned. Please reference xemacps_bd.h for cache-related macros.
+ *
+ * DMA Tx:
+ *
+ *   - If frame buffers exist in cached memory, then they must be flushed
+ *     prior to committing them to hardware.
+ *
+ * DMA Rx:
+ *
+ *   - If frame buffers exist in cached memory, then the cache must be
+ *     invalidated for the memory region containing the frame prior to data
+ *     access
+ *
+ * Both cache invalidate/flush are taken care of in driver code.
+ *
+ * <b>Buffer Copying</b>
+ *
+ * The driver is designed for a zero-copy buffer scheme. That is, the driver
+ * will not copy buffers. This avoids potential throughput bottlenecks within
+ * the driver. If byte copying is required, then the transfer will take longer
+ * to complete.
+ *
+ * <b>Checksum Offloading</b>
+ *
+ * The Embedded Processor Block Ethernet can be configured to perform IP, TCP
+ * and UDP checksum offloading in both receive and transmit directions.
+ *
+ * IP packets contain a 16-bit checksum field, which is the 16-bit 1s
+ * complement of the 1s complement sum of all 16-bit words in the header.
+ * TCP and UDP packets contain a 16-bit checksum field, which is the 16-bit
+ * 1s complement of the 1s complement sum of all 16-bit words in the header,
+ * the data and a conceptual pseudo header.
+ *
+ * To calculate these checksums in software requires each byte of the packet
+ * to be read. For TCP and UDP this can use a large amount of processing power.
+ * Offloading the checksum calculation to hardware can result in significant
+ * performance improvements.
+ *
+ * The transmit checksum offload is only available when the DMA is used in
+ * packet buffer mode. This is because the complete frame to be transmitted
+ * must be read into the packet buffer memory before the checksum can be
+ * calculated and written to the header at the beginning of the frame.
+ *
+ * For IP, TCP or UDP receive checksum offload to be useful, the operating
+ * system containing the protocol stack must be aware that this offload is
+ * available so that it can make use of the fact that the hardware has verified
+ * the checksum.
+ *
+ * When receive checksum offloading is enabled in the hardware, the IP header
+ * checksum is checked, provided the packet meets the following criteria:
+ *
+ * 1. If present, the VLAN header must be four octets long and the CFI bit
+ *    must not be set.
+ * 2. Encapsulation must be RFC 894 Ethernet Type Encoding or RFC 1042 SNAP
+ *    encoding.
+ * 3. IP v4 packet.
+ * 4. IP header is of a valid length.
+ * 5. Good IP header checksum.
+ * 6. No IP fragmentation.
+ * 7. TCP or UDP packet.
+ *
+ * When an IP, TCP or UDP frame is received, the receive buffer descriptor
+ * gives an indication if the hardware was able to verify the checksums.
+ * There is also an indication if the frame had SNAP encapsulation. These
+ * indication bits will replace the type ID match indication bits when the
+ * receive checksum offload is enabled.
+ *
+ * If any of the checksums are verified incorrect by the hardware, the packet
+ * is discarded and the appropriate statistics counter incremented.
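+ *
+ * Enabling both directions might look like the following sketch; the two
+ * option masks are assumed to be the checksum-offload options defined
+ * elsewhere in this header, and Mac is an initialized, stopped instance:
+ *
+ * @code
+ * /* Ask hardware to insert TX checksums and verify RX checksums */
+ * XEmacPs_SetOptions(&Mac, XEMACPS_TX_CHKSUM_ENABLE_OPTION |
+ *                          XEMACPS_RX_CHKSUM_ENABLE_OPTION);
+ * @endcode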
+ *
+ * <b>PHY Interfaces</b>
+ *
+ * RGMII 1.3 is the only interface supported.
+ *
+ * <b>Asserts</b>
+ *
+ * Asserts are used within all Xilinx drivers to enforce constraints on
+ * parameters. Asserts can be turned off on a system-wide basis by defining,
+ * at compile time, the NDEBUG identifier. By default, asserts are turned on
+ * and it is recommended that users leave asserts on during development. For
+ * deployment, use the -DNDEBUG compiler switch to remove assert code.
+ *
+ * @note
+ *
+ * Xilinx drivers are typically composed of two parts, one is the driver
+ * and the other is the adapter.  The driver is independent of OS and processor
+ * and is intended to be highly portable.  The adapter is OS-specific and
+ * facilitates communication between the driver and an OS.
+ * This driver is intended to be RTOS and processor independent. Any needs for
+ * dynamic memory management, threads or thread mutual exclusion, or cache
+ * control must be satisfied by the layer above this driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver   Who  Date     Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a wsy  01/10/10 First release
+ * 1.00a asa  11/21/11 The function XEmacPs_BdRingFromHwTx in file
+ *		       xemacps_bdring.c is modified. Earlier it was checking for
+ *		       "BdLimit"(passed argument) number of BDs for finding out
+ *		       which BDs are successfully processed. Now one more check
+ *		       is added. It looks for BDs till the current BD pointer
+ *		       reaches HwTail. By doing this processing time is saved.
+ * 1.00a asa  01/24/12 The function XEmacPs_BdRingFromHwTx in file
+ *		       xemacps_bdring.c is modified. Now start of packet is
+ *		       searched for returning the number of BDs processed.
+ * 1.02a asa  11/05/12 Added a new API for deleting an entry from the HASH
+ *		       registers. Added a new API to set the burst length.
+ *		       Added some new hash-defines.
+ * 1.03a asa  01/23/12 Fix for CR #692702 which updates error handling for
+ *		       Rx errors. Under heavy Rx traffic, there will be a large
+ *		       number of errors related to receive buffer not available.
+ *		       Because of a HW bug (SI #692601), under such heavy errors,
+ *		       the Rx data path can become unresponsive. To reduce the
+ *		       probabilities for hitting this HW bug, the SW writes to
+ *		       bit 18 to flush a packet from Rx DPRAM immediately. The
+ *		       changes for it are done in the function
+ *		       XEmacPs_IntrHandler.
+ * 1.05a asa  09/23/13 Cache operations on BDs are not required and hence
+ *		       removed. It is expected that all BDs are allocated
+ *		       from an uncached area.
+ * 1.06a asa  11/02/13 Changed the value for XEMACPS_RXBUF_LEN_MASK from 0x3fff
+ *				to 0x1fff. This fixes the CR#744902.
+ *			  Made changes in example file xemacps_example.h to fix compilation
+ *			  issues with iarcc compiler.
+ * 2.0   adk  10/12/13 Updated as per the New Tcl API's
+ * 2.1   adk  11/08/14 Fixed the CR#811288. Changes are made in the driver tcl file.
+ * 2.1   bss  09/08/14 Modified driver tcl to fix CR#820349 to export phy
+ *		       address in xparameters.h when GMII to RGMII converter
+ *		       is present in hw.
+ * 2.1   srt  07/15/14 Add support for Zynq Ultrascale Mp GEM specification and 64-bit
+ *		       changes.
+ * 2.2   adk  29/10/14 Fixed CR#827686 when PCS/PMA core is configured with
+ *                    1000BASE-X mode export proper values to the xparameters.h
+ *                    file. Changes are made in the driver tcl file.
+ * 3.0   adk  08/1/15  Don't include gem in peripheral test when gem is
+ *                    configured with PCS/PMA Core. Changes are made in the
+ *		       test app tcl(CR:827686).
+ * 3.0   kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+ * 3.0   hk   03/18/15 Added support for jumbo frames. Increase AHB burst.
+ *                     Disable extended mode. Perform all 64 bit changes under
+ *                     check for arch64.
+ *                     Remove "used bit set" from TX error interrupt masks.
+ * 3.1   hk   07/27/15 Do not call error handler with '0' error code when
+ *                     there is no error. CR# 869403
+ *            08/10/15 Update upper 32 bit tx and rx queue ptr registers.
+ * 3.2   hk   02/22/16 Added SGMII support for Zynq Ultrascale+ MPSoC.
+ * 3.4   ms   01/23/17 Modified xil_printf statement in main function for all
+ *                     examples to ensure that "Successfully ran" and "Failed"
+ *                     strings are available in all examples. This is a fix
+ *                     for CR-965028.
+ *       ms   03/17/17 Modified text file in examples folder for doxygen
+ *                     generation.
+ *       ms   04/05/17 Added tabspace for return statements in functions of
+ *                     xemacps_ieee1588_example.c for proper documentation
+ *                     while generating doxygen.
+ * 3.5   hk   08/14/17 Update cache coherency information of the interface in
+ *                     its config structure.
+ * 3.6   rb   09/08/17 HwCnt variable (in XEmacPs_BdRing structure) is
+ *		       changed to volatile.
+ *		       Add API XEmacPs_BdRingPtrReset() to reset pointers
+ * 3.8   hk   07/19/18 Fixed CPP, GCC and doxygen warnings - CR-1006327
+ *	 hk   09/17/18 Fix PTP interrupt masks and cleanup comments.
+ * 3.9   hk   01/23/19 Add RX watermark support
+ * 3.11  sd   02/14/20 Add clock support
+ * 3.13  nsk  12/14/20 Updated the tcl to not to use the instance names.
+ *
+ * </pre>
+ *
+ ****************************************************************************/
+
+#ifndef XEMACPS_H		/* prevent circular inclusions */
+#define XEMACPS_H		/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+#include "xstatus.h"
+#include "xemacps_hw.h"
+#include "xemacps_bd.h"
+#include "xemacps_bdring.h"
+#if defined  (XCLOCKING)
+#include "xil_clocking.h"
+#endif
+
+/************************** Constant Definitions ****************************/
+
+/*
+ * Device information
+ */
+#define XEMACPS_DEVICE_NAME     "xemacps"
+#define XEMACPS_DEVICE_DESC     "Xilinx PS 10/100/1000 MAC"
+
+
+/** @name Configuration options
+ *
+ * Device configuration options. See the XEmacPs_SetOptions(),
+ * XEmacPs_ClearOptions() and XEmacPs_GetOptions() for information on how to
+ * use options.
+ *
+ * The default state of the options are noted and are what the device and
+ * driver will be set to after calling XEmacPs_Reset() or
+ * XEmacPs_Initialize().
+ *
+ * @{
+ */
+
+#define XEMACPS_PROMISC_OPTION               0x00000001U
+/**< Accept all incoming packets.
+ *   This option defaults to disabled (cleared) */
+
+#define XEMACPS_FRAME1536_OPTION             0x00000002U
+/**< Support for frames larger than 1516 bytes for Tx & Rx.
+ *   This option defaults to disabled (cleared) */
+
+#define XEMACPS_VLAN_OPTION                  0x00000004U
+/**< VLAN Rx & Tx frame support.
+ *   This option defaults to disabled (cleared) */
+
+#define XEMACPS_FLOW_CONTROL_OPTION          0x00000010U
+/**< Enable recognition of flow control frames on Rx
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_FCS_STRIP_OPTION             0x00000020U
+/**< Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
+ *   stripped.
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_FCS_INSERT_OPTION            0x00000040U
+/**< Generate FCS field and add PAD automatically for outgoing frames.
+ *   This option defaults to disabled (cleared) */
+
+#define XEMACPS_LENTYPE_ERR_OPTION           0x00000080U
+/**< Enable Length/Type error checking for incoming frames. When this option is
+ *   set, the MAC will filter frames that have a mismatched type/length field
+ *   and if XEMACPS_REPORT_RXERR_OPTION is set, the user is notified when these
+ *   types of frames are encountered. When this option is cleared, the MAC will
+ *   allow these types of frames to be received.
+ *
+ *   This option defaults to disabled (cleared) */
+
+#define XEMACPS_TRANSMITTER_ENABLE_OPTION    0x00000100U
+/**< Enable the transmitter.
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_RECEIVER_ENABLE_OPTION       0x00000200U
+/**< Enable the receiver
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_BROADCAST_OPTION             0x00000400U
+/**< Allow reception of the broadcast address
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_MULTICAST_OPTION             0x00000800U
+/**< Allows reception of multicast addresses programmed into hash
+ *   This option defaults to disabled (clear) */
+
+#define XEMACPS_RX_CHKSUM_ENABLE_OPTION      0x00001000U
+/**< Enable the RX checksum offload
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_TX_CHKSUM_ENABLE_OPTION      0x00002000U
+/**< Enable the TX checksum offload
+ *   This option defaults to enabled (set) */
+
+#define XEMACPS_JUMBO_ENABLE_OPTION	0x00004000U
+#define XEMACPS_SGMII_ENABLE_OPTION	0x00008000U
+
+#define XEMACPS_DEFAULT_OPTIONS                     \
+    ((u32)XEMACPS_FLOW_CONTROL_OPTION |                  \
+     (u32)XEMACPS_FCS_INSERT_OPTION |                    \
+     (u32)XEMACPS_FCS_STRIP_OPTION |                     \
+     (u32)XEMACPS_BROADCAST_OPTION |                     \
+     (u32)XEMACPS_LENTYPE_ERR_OPTION |                   \
+     (u32)XEMACPS_TRANSMITTER_ENABLE_OPTION |            \
+     (u32)XEMACPS_RECEIVER_ENABLE_OPTION |               \
+     (u32)XEMACPS_RX_CHKSUM_ENABLE_OPTION |              \
+     (u32)XEMACPS_TX_CHKSUM_ENABLE_OPTION)
+
+/**< Default options set when device is initialized or reset */
+/*@}*/
+
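+/*
+ * Usage sketch (hypothetical instance name "Mac"; not part of the imported
+ * Xilinx sources): options are normally adjusted after initialization and
+ * while the device is stopped, e.g. to enable promiscuous reception and
+ * turn off transmit checksum offload.
+ *
+ * <pre>
+ *   (void)XEmacPs_SetOptions(&Mac, XEMACPS_PROMISC_OPTION);
+ *   (void)XEmacPs_ClearOptions(&Mac, XEMACPS_TX_CHKSUM_ENABLE_OPTION);
+ *   CurrentOptions = XEmacPs_GetOptions(&Mac);  // u32 bitmask of options
+ * </pre>
+ */
+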
+/** @name Callback identifiers
+ *
+ * These constants are used as parameters to XEmacPs_SetHandler()
+ * @{
+ */
+#define XEMACPS_HANDLER_DMASEND 1U
+#define XEMACPS_HANDLER_DMARECV 2U
+#define XEMACPS_HANDLER_ERROR   3U
+/*@}*/
+
+/* Constants to determine the configuration of the hardware device. They are
+ * used to allow the driver to verify it can operate with the hardware.
+ */
+#define XEMACPS_MDIO_DIV_DFT    MDC_DIV_32 /**< Default MDIO clock divisor */
+
+/* The next few constants help upper layers determine the size of memory
+ * pools used for Ethernet buffers and descriptor lists.
+ */
+#define XEMACPS_MAC_ADDR_SIZE   6U	/* size of a MAC address */
+
+#define XEMACPS_MTU             1500U	/* max MTU size of Ethernet frame */
+#define XEMACPS_MTU_JUMBO       10240U	/* max MTU size of jumbo frame */
+#define XEMACPS_HDR_SIZE        14U	/* size of Ethernet header */
+#define XEMACPS_HDR_VLAN_SIZE   18U	/* size of Ethernet header with VLAN */
+#define XEMACPS_TRL_SIZE        4U	/* size of Ethernet trailer (FCS) */
+#define XEMACPS_MAX_FRAME_SIZE       (XEMACPS_MTU + XEMACPS_HDR_SIZE + \
+        XEMACPS_TRL_SIZE)
+#define XEMACPS_MAX_VLAN_FRAME_SIZE  (XEMACPS_MTU + XEMACPS_HDR_SIZE + \
+        XEMACPS_HDR_VLAN_SIZE + XEMACPS_TRL_SIZE)
+#define XEMACPS_MAX_VLAN_FRAME_SIZE_JUMBO  (XEMACPS_MTU_JUMBO + XEMACPS_HDR_SIZE + \
+        XEMACPS_HDR_VLAN_SIZE + XEMACPS_TRL_SIZE)
+
+/* DMACR Burst length hash defines */
+
+#define XEMACPS_SINGLE_BURST	0x00000001
+#define XEMACPS_4BYTE_BURST		0x00000004
+#define XEMACPS_8BYTE_BURST		0x00000008
+#define XEMACPS_16BYTE_BURST	0x00000010
+
+
+/**************************** Type Definitions ******************************/
+/** @name Typedefs for callback functions
+ *
+ * These callbacks are invoked in interrupt context.
+ * @{
+ */
+/**
+ * Callback invoked when frame(s) have been sent or received in interrupt
+ * driven DMA mode. To set the send callback, invoke XEmacPs_SetHandler().
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ *
+ * @note
+ * See xemacps_hw.h for bitmasks definitions and the device hardware spec for
+ * further information on their meaning.
+ *
+ */
+typedef void (*XEmacPs_Handler) (void *CallBackRef);
+
+/**
+ * Callback when an asynchronous error occurs. To set this callback, invoke
+ * XEmacPs_SetHandler() with XEMACPS_HANDLER_ERROR in the HandlerType
+ * parameter.
+ *
+ * @param CallBackRef is user data assigned when the callback was set.
+ * @param Direction defines either receive or transmit error(s) has occurred.
+ * @param ErrorWord definition varies with Direction
+ *
+ */
+typedef void (*XEmacPs_ErrHandler) (void *CallBackRef, u8 Direction,
+				     u32 ErrorWord);
+
+/*@}*/
+
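+/*
+ * Minimal sketch of wiring up the callbacks (hypothetical handler and
+ * instance names; not part of the imported Xilinx sources). The callbacks
+ * run in interrupt context, so they should only post work for a task to
+ * pick up later.
+ *
+ * <pre>
+ *   static void TxDone(void *CallBackRef)   { (void)CallBackRef; }
+ *   static void RxDone(void *CallBackRef)   { (void)CallBackRef; }
+ *   static void MacError(void *CallBackRef, u8 Direction, u32 ErrorWord)
+ *   {
+ *       (void)CallBackRef; (void)Direction; (void)ErrorWord;
+ *   }
+ *
+ *   (void)XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_DMASEND, (void *)TxDone, &Mac);
+ *   (void)XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_DMARECV, (void *)RxDone, &Mac);
+ *   (void)XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_ERROR,   (void *)MacError, &Mac);
+ * </pre>
+ */
+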
+/**
+ * This typedef contains configuration information for a device.
+ */
+typedef struct {
+	u16 DeviceId;	/**< Unique ID  of device */
+	UINTPTR BaseAddress;/**< Physical base address of IPIF registers */
+	u8 IsCacheCoherent; /**< Applicable only to A53 in EL1 mode;
+				* indicates whether the interface is cache coherent */
+#if defined  (XCLOCKING)
+	u32 RefClk;	/**< Input clock */
+#endif
+	u16 S1GDiv0;	/**< 1Gbps Clock Divider 0 */
+	u8 S1GDiv1;	/**< 1Gbps Clock Divider 1 */
+	u16 S100MDiv0;	/**< 100Mbps Clock Divider 0 */
+	u8 S100MDiv1;	/**< 100Mbps Clock Divider 1 */
+	u16 S10MDiv0;	/**< 10Mbps Clock Divider 0 */
+	u8 S10MDiv1;	/**< 10Mbps Clock Divider 1 */
+} XEmacPs_Config;
+
+
+/**
+ * The XEmacPs driver instance data. The user is required to allocate a
+ * structure of this type for every XEmacPs device in the system. A pointer
+ * to a structure of this type is then passed to the driver API functions.
+ */
+typedef struct XEmacPs_Instance {
+	XEmacPs_Config Config;	/* Hardware configuration */
+	u32 IsStarted;		/* Device is currently started */
+	u32 IsReady;		/* Device is initialized and ready */
+	u32 Options;		/* Current options word */
+
+	XEmacPs_BdRing TxBdRing;	/* Transmit BD ring */
+	XEmacPs_BdRing RxBdRing;	/* Receive BD ring */
+
+	XEmacPs_Handler SendHandler;
+	XEmacPs_Handler RecvHandler;
+	void *SendRef;
+	void *RecvRef;
+
+	XEmacPs_ErrHandler ErrorHandler;
+	void *ErrorRef;
+	u32 Version;
+	u32 RxBufMask;
+	u32 MaxMtuSize;
+	u32 MaxFrameSize;
+	u32 MaxVlanFrameSize;
+
+} XEmacPs;
+
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/****************************************************************************/
+/**
+* Retrieve the Tx ring object. This object can be used in the various Ring
+* API functions.
+*
+* @param  InstancePtr is the DMA channel to operate on.
+*
+* @return TxBdRing attribute
+*
+* @note
+* C-style signature:
+*    XEmacPs_BdRing XEmacPs_GetTxRing(XEmacPs *InstancePtr)
+*
+*****************************************************************************/
+#define XEmacPs_GetTxRing(InstancePtr) ((InstancePtr)->TxBdRing)
+
+/****************************************************************************/
+/**
+* Retrieve the Rx ring object. This object can be used in the various Ring
+* API functions.
+*
+* @param  InstancePtr is the DMA channel to operate on.
+*
+* @return RxBdRing attribute
+*
+* @note
+* C-style signature:
+*    XEmacPs_BdRing XEmacPs_GetRxRing(XEmacPs *InstancePtr)
+*
+*****************************************************************************/
+#define XEmacPs_GetRxRing(InstancePtr) ((InstancePtr)->RxBdRing)
+
+/****************************************************************************/
+/**
+*
+* Enable interrupts specified in <i>Mask</i>. The corresponding interrupt for
+* each bit set to 1 in <i>Mask</i> will be enabled.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Mask contains a bit mask of interrupts to enable. The mask can
+*        be formed using a set of bitwise or'd values.
+*
+* @note
+* The state of the transmitter and receiver are not modified by this function.
+* C-style signature
+*     void XEmacPs_IntEnable(XEmacPs *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XEmacPs_IntEnable(InstancePtr, Mask)                            \
+	XEmacPs_WriteReg((InstancePtr)->Config.BaseAddress,             \
+		XEMACPS_IER_OFFSET,                                     \
+		((Mask) & XEMACPS_IXR_ALL_MASK));
+
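+/*
+ * Example (hypothetical instance name): unmask every interrupt source the
+ * driver knows about. A real application would usually pass a narrower mask
+ * built from the XEMACPS_IXR_* bits defined in xemacps_hw.h.
+ *
+ * <pre>
+ *   XEmacPs_IntEnable(&Mac, XEMACPS_IXR_ALL_MASK);
+ * </pre>
+ */
+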
+/****************************************************************************/
+/**
+*
+* Disable interrupts specified in <i>Mask</i>. The corresponding interrupt for
+* each bit set to 1 in <i>Mask</i> will be disabled.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Mask contains a bit mask of interrupts to disable. The mask can
+*        be formed using a set of bitwise or'd values.
+*
+* @note
+* The state of the transmitter and receiver are not modified by this function.
+* C-style signature
+*     void XEmacPs_IntDisable(XEmacPs *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XEmacPs_IntDisable(InstancePtr, Mask)                           \
+	XEmacPs_WriteReg((InstancePtr)->Config.BaseAddress,             \
+		XEMACPS_IDR_OFFSET,                                     \
+		((Mask) & XEMACPS_IXR_ALL_MASK));
+
+/****************************************************************************/
+/**
+*
+* Enable interrupts specified in <i>Mask</i>. The corresponding interrupt for
+* each bit set to 1 in <i>Mask</i> will be enabled.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Mask contains a bit mask of interrupts to enable. The mask can
+*        be formed using a set of bitwise or'd values.
+*
+* @note
+* The state of the transmitter and receiver are not modified by this function.
+* C-style signature
+*     void XEmacPs_IntQ1Enable(XEmacPs *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XEmacPs_IntQ1Enable(InstancePtr, Mask)                            \
+	XEmacPs_WriteReg((InstancePtr)->Config.BaseAddress,             \
+		XEMACPS_INTQ1_IER_OFFSET,                                \
+		((Mask) & XEMACPS_INTQ1_IXR_ALL_MASK));
+
+/****************************************************************************/
+/**
+*
+* Disable interrupts specified in <i>Mask</i>. The corresponding interrupt for
+* each bit set to 1 in <i>Mask</i> will be disabled.
+*
+* @param InstancePtr is a pointer to the instance to be worked on.
+* @param Mask contains a bit mask of interrupts to disable. The mask can
+*        be formed using a set of bitwise or'd values.
+*
+* @note
+* The state of the transmitter and receiver are not modified by this function.
+* C-style signature
+*     void XEmacPs_IntQ1Disable(XEmacPs *InstancePtr, u32 Mask)
+*
+*****************************************************************************/
+#define XEmacPs_IntQ1Disable(InstancePtr, Mask)                           \
+	XEmacPs_WriteReg((InstancePtr)->Config.BaseAddress,             \
+		XEMACPS_INTQ1_IDR_OFFSET,                               \
+		((Mask) & XEMACPS_INTQ1_IXR_ALL_MASK));
+
+/****************************************************************************/
+/**
+*
+* This macro triggers the transmit circuit to send data currently in TX buffer(s).
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+*
+* @return
+*
+* @note
+*
+* Signature: void XEmacPs_Transmit(XEmacPs *InstancePtr)
+*
+*****************************************************************************/
+#define XEmacPs_Transmit(InstancePtr)                              \
+        XEmacPs_WriteReg((InstancePtr)->Config.BaseAddress,          \
+        XEMACPS_NWCTRL_OFFSET,                                     \
+        (XEmacPs_ReadReg((InstancePtr)->Config.BaseAddress,          \
+        XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK))
+
+/****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with checksum offloading
+* on the receive channel
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with checksum offloading, or
+* FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XEmacPs_IsRxCsum(XEmacPs *InstancePtr)
+*
+*****************************************************************************/
+#define XEmacPs_IsRxCsum(InstancePtr)                                     \
+        ((XEmacPs_ReadReg((InstancePtr)->Config.BaseAddress,             \
+          XEMACPS_NWCFG_OFFSET) & XEMACPS_NWCFG_RXCHKSUMEN_MASK) != 0U     \
+          ? TRUE : FALSE)
+
+/****************************************************************************/
+/**
+*
+* This macro determines if the device is configured with checksum offloading
+* on the transmit channel
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+*
+* @return
+*
+* Boolean TRUE if the device is configured with checksum offloading, or
+* FALSE otherwise.
+*
+* @note
+*
+* Signature: u32 XEmacPs_IsTxCsum(XEmacPs *InstancePtr)
+*
+*****************************************************************************/
+#define XEmacPs_IsTxCsum(InstancePtr)                                     \
+        ((XEmacPs_ReadReg((InstancePtr)->Config.BaseAddress,              \
+          XEMACPS_DMACR_OFFSET) & XEMACPS_DMACR_TCPCKSUM_MASK) != 0U       \
+          ? TRUE : FALSE)
+
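+/*
+ * Example (hypothetical instance name): a protocol stack can use these
+ * queries to decide whether its own software checksum verification or
+ * generation can be skipped.
+ *
+ * <pre>
+ *   if (XEmacPs_IsRxCsum(&Mac) == TRUE) {
+ *       // hardware verifies IP/TCP/UDP checksums on receive
+ *   }
+ * </pre>
+ */
+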
+/************************** Function Prototypes *****************************/
+
+/****************************************************************************/
+/**
+*
+* This macro sets RX watermark register.
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+* @param High is the non-zero RX high watermark value. When SRAM fill level
+*	 is above this, a pause frame will be sent.
+* @param Low is the non-zero RX low watermark value. When SRAM fill level
+*	 is below this, a zero length pause frame will be sent IF the last
+*	 pause frame sent was non-zero.
+*
+* @return None
+*
+* @note
+*
+* Signature: void XEmacPs_SetRXWatermark(XEmacPs *InstancePtr, u16 High,
+* 					u16 Low)
+*
+*****************************************************************************/
+#define XEmacPs_SetRXWatermark(InstancePtr, High, Low)                     \
+        XEmacPs_WriteReg((InstancePtr)->Config.BaseAddress,                \
+        XEMACPS_RXWATERMARK_OFFSET,                                        \
+        (High & XEMACPS_RXWM_HIGH_MASK) |  \
+        ((Low << XEMACPS_RXWM_LOW_SHFT_MSK) & XEMACPS_RXWM_LOW_MASK))
+
+/****************************************************************************/
+/**
+*
+* This macro gets RX watermark register.
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+*
+* @return RX watermark register value
+*
+* @note
+*
+* Signature: u32 XEmacPs_GetRXWatermark(XEmacPs *InstancePtr)
+*
+*****************************************************************************/
+#define XEmacPs_GetRXWatermark(InstancePtr)                     \
+        XEmacPs_ReadReg((InstancePtr)->Config.BaseAddress,                \
+        XEMACPS_RXWATERMARK_OFFSET)
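+
+/*
+ * Watermark usage sketch (hypothetical values and instance name): program a
+ * high/low SRAM fill threshold pair and read the register back. The packing
+ * of the two fields follows the XEmacPs_SetRXWatermark() macro above.
+ *
+ * <pre>
+ *   XEmacPs_SetRXWatermark(&Mac, 0x40U, 0x10U);
+ *   Watermark = XEmacPs_GetRXWatermark(&Mac);
+ * </pre>
+ */
+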
+/*
+ * Initialization functions in xemacps.c
+ */
+LONG XEmacPs_CfgInitialize(XEmacPs *InstancePtr, XEmacPs_Config *CfgPtr,
+			   UINTPTR EffectiveAddress);
+void XEmacPs_Start(XEmacPs *InstancePtr);
+void XEmacPs_Stop(XEmacPs *InstancePtr);
+void XEmacPs_Reset(XEmacPs *InstancePtr);
+void XEmacPs_SetQueuePtr(XEmacPs *InstancePtr, UINTPTR QPtr, u8 QueueNum,
+			 u16 Direction);
+
+/*
+ * Lookup configuration in xemacps_sinit.c
+ */
+XEmacPs_Config *XEmacPs_LookupConfig(u16 DeviceId);
+
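+/*
+ * Typical bring-up sketch (hypothetical names such as EMACPS_DEVICE_ID and
+ * MacAddr; error checking omitted; not part of the imported Xilinx sources):
+ *
+ * <pre>
+ *   XEmacPs Mac;
+ *   XEmacPs_Config *Cfg = XEmacPs_LookupConfig(EMACPS_DEVICE_ID);
+ *
+ *   (void)XEmacPs_CfgInitialize(&Mac, Cfg, Cfg->BaseAddress);
+ *   (void)XEmacPs_SetMacAddress(&Mac, MacAddr, 1);
+ *   XEmacPs_Start(&Mac);
+ * </pre>
+ */
+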
+/*
+ * Interrupt-related functions in xemacps_intr.c
+ * Only DMA mode is supported; FIFO mode is not. This DMA does not support coalescing.
+ */
+LONG XEmacPs_SetHandler(XEmacPs *InstancePtr, u32 HandlerType,
+			void *FuncPointer, void *CallBackRef);
+void XEmacPs_IntrHandler(void *XEmacPsPtr);
+
+/*
+ * MAC configuration/control functions in xemacps_control.c
+ */
+LONG XEmacPs_SetOptions(XEmacPs *InstancePtr, u32 Options);
+LONG XEmacPs_ClearOptions(XEmacPs *InstancePtr, u32 Options);
+u32 XEmacPs_GetOptions(XEmacPs *InstancePtr);
+
+LONG XEmacPs_SetMacAddress(XEmacPs *InstancePtr, void *AddressPtr, u8 Index);
+LONG XEmacPs_DeleteHash(XEmacPs *InstancePtr, void *AddressPtr);
+void XEmacPs_GetMacAddress(XEmacPs *InstancePtr, void *AddressPtr, u8 Index);
+
+LONG XEmacPs_SetHash(XEmacPs *InstancePtr, void *AddressPtr);
+void XEmacPs_ClearHash(XEmacPs *InstancePtr);
+void XEmacPs_GetHash(XEmacPs *InstancePtr, void *AddressPtr);
+
+void XEmacPs_SetMdioDivisor(XEmacPs *InstancePtr,
+				XEmacPs_MdcDiv Divisor);
+void XEmacPs_SetOperatingSpeed(XEmacPs *InstancePtr, u16 Speed);
+u16 XEmacPs_GetOperatingSpeed(XEmacPs *InstancePtr);
+LONG XEmacPs_PhyRead(XEmacPs *InstancePtr, u32 PhyAddress,
+		     u32 RegisterNum, u16 *PhyDataPtr);
+LONG XEmacPs_PhyWrite(XEmacPs *InstancePtr, u32 PhyAddress,
+		      u32 RegisterNum, u16 PhyData);
+LONG XEmacPs_SetTypeIdCheck(XEmacPs *InstancePtr, u32 Id_Check, u8 Index);
+
+LONG XEmacPs_SendPausePacket(XEmacPs *InstancePtr);
+void XEmacPs_DMABLengthUpdate(XEmacPs *InstancePtr, s32 BLength);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+/** @} */
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bd.h b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bd.h
new file mode 100644
index 0000000..aff79ff
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bd.h
@@ -0,0 +1,762 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+ *
+ * @file xemacps_bd.h
+* @addtogroup emacps_v3_16
+* @{
+ *
+ * This header provides operations to manage buffer descriptors in support
+ * of scatter-gather DMA.
+ *
+ * The API exported by this header defines abstracted macros that allow the
+ * user to read/write specific BD fields.
+ *
+ * <b>Buffer Descriptors</b>
+ *
+ * A buffer descriptor (BD) defines a DMA transaction. The macros defined by
+ * this header file allow access to most fields within a BD to tailor a DMA
+ * transaction according to user and hardware requirements.  See the hardware
+ * IP DMA spec for more information on BD fields and how they affect transfers.
+ *
+ * The XEmacPs_Bd structure defines a BD. The organization of this structure
+ * is driven mainly by the hardware for use in scatter-gather DMA transfers.
+ *
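+ * As a rough illustration (hypothetical buffer and length names; not part of
+ * the imported sources), a transmit BD is typically prepared with the field
+ * accessors defined below before being handed to the DMA engine:
+ *
+ * <pre>
+ *   XEmacPs_BdClear(BdPtr);
+ *   XEmacPs_BdSetAddressTx(BdPtr, (UINTPTR)TxBuffer);
+ *   XEmacPs_BdSetLength(BdPtr, FrameLen);
+ *   XEmacPs_BdClearTxUsed(BdPtr);   // allow hardware to read the buffer
+ *   XEmacPs_BdSetLast(BdPtr);       // single-buffer frame
+ * </pre>
+ *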
+ * <b>Performance</b>
+ *
+ * Limiting I/O to BDs can improve overall performance of the DMA channel.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver   Who  Date     Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a wsy  01/10/10 First release
+ * 2.1   srt  07/15/14 Add support for Zynq Ultrascale MP GEM specification
+ *                     and 64-bit changes.
+ * 3.0   kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+ * 3.0   hk   02/20/15 Added support for jumbo frames.
+ *                     Disable extended mode. Perform all 64 bit changes under
+ *                     check for arch64.
+ * 3.2   hk   11/18/15 Change BD typedef and number of words.
+ * 3.8   hk   08/18/18 Remove duplicate definition of XEmacPs_BdSetLength
+ * 3.8   mus  11/05/18 Support 64 bit DMA addresses for Microblaze-X platform.
+ *
+ * </pre>
+ *
+ * ***************************************************************************
+ */
+
+#ifndef XEMACPS_BD_H		/* prevent circular inclusions */
+#define XEMACPS_BD_H		/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include <string.h>
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+#ifdef __aarch64__
+/* Minimum BD alignment */
+#define XEMACPS_DMABD_MINIMUM_ALIGNMENT  64U
+#define XEMACPS_BD_NUM_WORDS 4U
+#else
+/* Minimum BD alignment */
+#define XEMACPS_DMABD_MINIMUM_ALIGNMENT  4U
+#define XEMACPS_BD_NUM_WORDS 2U
+#endif
+
+/**
+ * The XEmacPs_Bd is the type for buffer descriptors (BDs).
+ */
+typedef u32 XEmacPs_Bd[XEMACPS_BD_NUM_WORDS];
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+ * Zero out BD fields
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @return Nothing
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdClear(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdClear(BdPtr)                                  \
+    memset((BdPtr), 0, sizeof(XEmacPs_Bd))
+
+/****************************************************************************/
+/**
+*
+* Read the given Buffer Descriptor word.
+*
+* @param    BaseAddress is the base address of the BD to read
+* @param    Offset is the word offset to be read
+*
+* @return   The 32-bit value of the field
+*
+* @note
+* C-style signature:
+*    u32 XEmacPs_BdRead(UINTPTR BaseAddress, UINTPTR Offset)
+*
+*****************************************************************************/
+#define XEmacPs_BdRead(BaseAddress, Offset)             \
+	(*(u32 *)((UINTPTR)((void*)(BaseAddress)) + (u32)(Offset)))
+
+/****************************************************************************/
+/**
+*
+* Write the given Buffer Descriptor word.
+*
+* @param    BaseAddress is the base address of the BD to write
+* @param    Offset is the word offset to be written
+* @param    Data is the 32-bit value to write to the field
+*
+* @return   None.
+*
+* @note
+* C-style signature:
+*    void XEmacPs_BdWrite(UINTPTR BaseAddress, UINTPTR Offset, UINTPTR Data)
+*
+*****************************************************************************/
+#define XEmacPs_BdWrite(BaseAddress, Offset, Data)              \
+    (*(u32 *)((UINTPTR)(void*)(BaseAddress) + (u32)(Offset)) = (u32)(Data))
+
+/*****************************************************************************/
+/**
+ * Set the BD's Address field (word 0).
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ * @param  Addr  is the value to write to the BD's address field.
+ *
+ * @note :
+ *
+ * C-style signature:
+ *    void XEmacPs_BdSetAddressTx(XEmacPs_Bd* BdPtr, UINTPTR Addr)
+ *
+ *****************************************************************************/
+#if defined(__aarch64__) || defined(__arch64__)
+#define XEmacPs_BdSetAddressTx(BdPtr, Addr)                        \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_OFFSET,		\
+			(u32)((Addr) & ULONG64_LO_MASK));		\
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_HI_OFFSET,		\
+	(u32)(((Addr) & ULONG64_HI_MASK) >> 32U));
+#else
+#define XEmacPs_BdSetAddressTx(BdPtr, Addr)                        \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_OFFSET, (u32)(Addr))
+#endif
+
+/*****************************************************************************/
+/**
+ * Set the BD's Address field (word 0).
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ * @param  Addr  is the value to write to the BD's address field.
+ *
+ * @note : Because some control bits are mixed into the receive BD's address
+ *         field, a read-modify-write is performed.
+ *
+ * C-style signature:
+ *    void XEmacPs_BdSetAddressRx(XEmacPs_Bd* BdPtr, UINTPTR Addr)
+ *
+ *****************************************************************************/
+#ifdef __aarch64__
+#define XEmacPs_BdSetAddressRx(BdPtr, Addr)                        \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_OFFSET,              \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) &           \
+	~XEMACPS_RXBUF_ADD_MASK) | ((u32)((Addr) & ULONG64_LO_MASK))));  \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_HI_OFFSET, 	\
+	(u32)(((Addr) & ULONG64_HI_MASK) >> 32U));
+#else
+#define XEmacPs_BdSetAddressRx(BdPtr, Addr)                        \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_OFFSET,              \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) &           \
+    ~XEMACPS_RXBUF_ADD_MASK) | (UINTPTR)(Addr)))
+#endif
+
+/*****************************************************************************/
+/**
+ * Set the BD's Status field (word 1).
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ * @param  Data  is the value to write to BD's status field.
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetStatus(XEmacPs_Bd* BdPtr, UINTPTR Data)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdSetStatus(BdPtr, Data)                           \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,              \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) | (Data))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD's Packet DMA transfer status word (word 1).
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @return Status word
+ *
+ * @note
+ * C-style signature:
+ *    u32 XEmacPs_BdGetStatus(XEmacPs_Bd* BdPtr)
+ *
+ * Due to BD bit layout differences between transmit and receive, caution is
+ * required when interpreting the returned status word.
+ *****************************************************************************/
+#define XEmacPs_BdGetStatus(BdPtr)                                 \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET)
+
+
+/*****************************************************************************/
+/**
+ * Get the address (bits 0..31) of the BD's buffer address (word 0)
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdGetBufAddr(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#if defined(__aarch64__) || defined(__arch64__)
+#define XEmacPs_BdGetBufAddr(BdPtr)                               \
+    (XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) |		  \
+	(XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_HI_OFFSET)) << 32U)
+#else
+#define XEmacPs_BdGetBufAddr(BdPtr)                               \
+    (XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET))
+#endif
+
+/*****************************************************************************/
+/**
+ * Set transfer length in bytes for the given BD. The length must be set each
+ * time a BD is submitted to hardware.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ * @param  LenBytes is the number of bytes to transfer.
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetLength(XEmacPs_Bd* BdPtr, u32 LenBytes)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdSetLength(BdPtr, LenBytes)                       \
+    XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,              \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    ~XEMACPS_TXBUF_LEN_MASK) | (LenBytes)))
+
+
+/*****************************************************************************/
+/**
+ * Retrieve the BD length field.
+ *
+ * For Tx channels, the returned value is the same as that written with
+ * XEmacPs_BdSetLength().
+ *
+ * For Rx channels, the returned value is the size of the received packet.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @return Length field processed by hardware or set by
+ *         XEmacPs_BdSetLength().
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdGetLength(XEmacPs_Bd* BdPtr)
+ *    XEMACPS_RXBUF_LEN_MASK is the same as XEMACPS_TXBUF_LEN_MASK.
+ *
+ *****************************************************************************/
+#define XEmacPs_BdGetLength(BdPtr)                                 \
+    (XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &            \
+    XEMACPS_RXBUF_LEN_MASK)
+
+/*****************************************************************************/
+/**
+ * Retrieve the RX frame size.
+ *
+ * The returned value is the size of the received packet.
+ * This API supports jumbo frame sizes if enabled.
+ *
+ * @param  InstancePtr is the pointer to XEmacps instance
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @return Length field processed by hardware or set by
+ *         XEmacPs_BdSetLength().
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_GetRxFrameSize(XEmacPs* InstancePtr, XEmacPs_Bd* BdPtr)
+ *    RxBufMask is dependent on whether jumbo is enabled or not.
+ *
+ *****************************************************************************/
+#define XEmacPs_GetRxFrameSize(InstancePtr, BdPtr)                   \
+    (XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &            \
+    (InstancePtr)->RxBufMask)
+
+/*****************************************************************************/
+/**
+ * Test whether the given BD has been marked as the last BD of a packet.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @return TRUE if BD represents the "Last" BD of a packet, FALSE otherwise
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsLast(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsLast(BdPtr)                                    \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_EOF_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Tell the DMA engine that the given transmit BD marks the end of the current
+ * packet to be processed.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetLast(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdSetLast(BdPtr)                                   \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) |             \
+    XEMACPS_TXBUF_LAST_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Tell the DMA engine that the current packet does not end with the given
+ * BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdClearLast(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdClearLast(BdPtr)                                 \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &             \
+    ~XEMACPS_TXBUF_LAST_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Set this bit to mark the last descriptor in the receive buffer descriptor
+ * list.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetRxWrap(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+/*#define XEmacPs_BdSetRxWrap(BdPtr)                                 \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) |             \
+    XEMACPS_RXBUF_WRAP_MASK))
+*/
+
+/*****************************************************************************/
+/**
+ * Determine the wrap bit of the receive BD which indicates end of the
+ * BD list.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    u8 XEmacPs_BdIsRxWrap(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxWrap(BdPtr)                                  \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) &           \
+    XEMACPS_RXBUF_WRAP_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Sets this bit to mark the last descriptor in the transmit buffer
+ * descriptor list.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetTxWrap(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+/*#define XEmacPs_BdSetTxWrap(BdPtr)                                 \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) |             \
+    XEMACPS_TXBUF_WRAP_MASK))
+*/
+
+/*****************************************************************************/
+/**
+ * Determine the wrap bit of the transmit BD which indicates end of the
+ * BD list.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    u8 XEmacPs_BdIsTxWrap(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsTxWrap(BdPtr)                                  \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_TXBUF_WRAP_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * This bit must be cleared to enable the MAC to write data to the receive
+ * buffer. Hardware sets this bit once it has successfully written a frame to
+ * memory. Once set, software has to clear the bit before the buffer can be
+ * used again. This macro clears the "new" bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdClearRxNew(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdClearRxNew(BdPtr)                                \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_ADDR_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) &             \
+    ~XEMACPS_RXBUF_NEW_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Determine the new bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxNew(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxNew(BdPtr)                                   \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_ADDR_OFFSET) &           \
+    XEMACPS_RXBUF_NEW_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Software sets this bit to prevent the hardware from reading the buffer.
+ * Hardware sets this bit for the first buffer of a frame once it has been
+ * successfully transmitted. This macro sets this bit in the transmit BD to
+ * avoid confusion.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetTxUsed(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdSetTxUsed(BdPtr)                                 \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) |             \
+    XEMACPS_TXBUF_USED_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Software clears this bit to enable the buffer to be read by the hardware.
+ * Hardware sets this bit for the first buffer of a frame once it has been
+ * successfully transmitted. This macro clears this bit of transmit BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdClearTxUsed(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdClearTxUsed(BdPtr)                               \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &             \
+    ~XEMACPS_TXBUF_USED_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Determine the used bit of the transmit BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsTxUsed(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsTxUsed(BdPtr)                                  \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_TXBUF_USED_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine if a frame fails to be transmitted due to too many retries.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsTxRetry(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsTxRetry(BdPtr)                                 \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_TXBUF_RETRY_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine if a frame failed to be transmitted because data could not be
+ * fetched in time or the buffers were exhausted.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsTxUrun(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsTxUrun(BdPtr)                                  \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_TXBUF_URUN_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine if a frame failed to be transmitted because the buffer was
+ * exhausted mid-frame.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsTxExh(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsTxExh(BdPtr)                                   \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_TXBUF_EXH_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Set this bit so that no CRC is appended to the current frame. This control
+ * bit must be set for the first buffer in a frame and will be ignored for
+ * the subsequent buffers of a frame.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * This bit must be clear when using the transmit checksum generation offload,
+ * otherwise checksum generation and substitution will not occur.
+ *
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdSetTxNoCRC(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdSetTxNoCRC(BdPtr)                                \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) |             \
+    XEMACPS_TXBUF_NOCRC_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Clear this bit so that a CRC is appended to the current frame. This control
+ * bit must be set for the first buffer in a frame and will be ignored for
+ * the subsequent buffers of a frame.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * This bit must be clear when using the transmit checksum generation offload,
+ * otherwise checksum generation and substitution will not occur.
+ *
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdClearTxNoCRC(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdClearTxNoCRC(BdPtr)                              \
+    (XEmacPs_BdWrite((BdPtr), XEMACPS_BD_STAT_OFFSET,             \
+    XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &             \
+    ~XEMACPS_TXBUF_NOCRC_MASK))
+
+
+/*****************************************************************************/
+/**
+ * Determine the broadcast bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxBcast(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxBcast(BdPtr)                                 \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_BCAST_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine the multicast hash bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxMultiHash(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxMultiHash(BdPtr)                             \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_MULTIHASH_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine the unicast hash bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxUniHash(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxUniHash(BdPtr)                               \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_UNIHASH_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine if the received frame is a VLAN Tagged frame.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxVlan(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxVlan(BdPtr)                                  \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_VLAN_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine if the received frame has Type ID of 8100h and null VLAN
+ * identifier (Priority tag).
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxPri(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxPri(BdPtr)                                   \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_PRI_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine if the Canonical Format Indicator (CFI) bit in the received
+ * frame's VLAN TCI field was set.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxCFI(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxCFI(BdPtr)                                   \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_CFI_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine the End Of Frame (EOF) bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxEOF(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxEOF(BdPtr)                                   \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_EOF_MASK)!=0U ? TRUE : FALSE)
+
+
+/*****************************************************************************/
+/**
+ * Determine the Start Of Frame (SOF) bit of the receive BD.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    UINTPTR XEmacPs_BdIsRxSOF(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+#define XEmacPs_BdIsRxSOF(BdPtr)                                   \
+    ((XEmacPs_BdRead((BdPtr), XEMACPS_BD_STAT_OFFSET) &           \
+    XEMACPS_RXBUF_SOF_MASK)!=0U ? TRUE : FALSE)
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+/** @} */
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.c b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.c
new file mode 100644
index 0000000..829f37c
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.c
@@ -0,0 +1,1076 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xemacps_bdring.c
+* @addtogroup emacps_v3_16
+* @{
+*
+* This file implements buffer descriptor ring related functions.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a wsy  01/10/10 First release
+* 1.00a asa  11/21/11 The function XEmacPs_BdRingFromHwTx is modified.
+*		      Earlier it used to search in "BdLimit" number of BDs to
+*		      know which BDs are processed. Now one more check is
+*		      added. It looks for BDs till the current BD pointer
+*		      reaches HwTail. By doing this processing time is saved.
+* 1.00a asa  01/24/12 The function XEmacPs_BdRingFromHwTx in file
+*		      xemacps_bdring.c is modified. Now start of packet is
+*		      searched for returning the number of BDs processed.
+* 1.05a asa  09/23/13 Cache operations on BDs are not required and hence
+*		      removed. It is expected that all BDs are allocated
+*		      from an uncached area. Fix for CR #663885.
+* 2.1   srt  07/15/14 Add support for Zynq Ultrascale Mp architecture.
+* 3.0   kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+* 3.6   rb   09/08/17 Add XEmacPs_BdRingPtrReset() API to reset BD ring
+* 		      pointers
+*
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xstatus.h"
+#include "xil_cache.h"
+#include "xemacps_hw.h"
+#include "xemacps_bd.h"
+#include "xemacps_bdring.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/****************************************************************************
+ * Compute the virtual address of a descriptor from its physical address
+ *
+ * @param BdPtr is the physical address of the BD
+ *
+ * @returns Virtual address of BdPtr
+ *
+ * @note Assume BdPtr is always a valid BD in the ring
+ ****************************************************************************/
+#define XEMACPS_PHYS_TO_VIRT(BdPtr) \
+    ((UINTPTR)(BdPtr) + (RingPtr->BaseBdAddr - RingPtr->PhysBaseAddr))
+
+/****************************************************************************
+ * Compute the physical address of a descriptor from its virtual address
+ *
+ * @param BdPtr is the virtual address of the BD
+ *
+ * @returns Physical address of BdPtr
+ *
+ * @note Assume BdPtr is always a valid BD in the ring
+ ****************************************************************************/
+#define XEMACPS_VIRT_TO_PHYS(BdPtr) \
+    ((UINTPTR)(BdPtr) - (RingPtr->BaseBdAddr - RingPtr->PhysBaseAddr))
+
+/****************************************************************************
+ * Move the BdPtr argument ahead an arbitrary number of BDs wrapping around
+ * to the beginning of the ring if needed.
+ *
+ * We know a wraparound should occur if the new BdPtr is greater than
+ * the high address in the ring OR if the new BdPtr crosses over the
+ * 0xFFFFFFFF to 0 boundary. The latter test is a valid one since we do not
+ * allow a BD space to span this boundary.
+ *
+ * @param RingPtr is the ring BdPtr appears in
+ * @param BdPtr on input is the starting BD position and on output is the
+ *        final BD position
+ * @param NumBd is the number of BD spaces to increment
+ *
+ ****************************************************************************/
+#define XEMACPS_RING_SEEKAHEAD(RingPtr, BdPtr, NumBd)                  \
+    {                                                                   \
+        UINTPTR Addr = (UINTPTR)(void *)(BdPtr);                        \
+                                                                        \
+        Addr += ((RingPtr)->Separation * (NumBd));                        \
+        if ((Addr > (RingPtr)->HighBdAddr) || ((UINTPTR)(void *)(BdPtr) > Addr))  \
+        {                                                               \
+            Addr -= (RingPtr)->Length;                                  \
+        }                                                               \
+                                                                        \
+        (BdPtr) = (XEmacPs_Bd*)(void *)Addr;                                     \
+    }
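+
+/****************************************************************************
+ * Worked example of the seek-ahead wrap above (hypothetical numbers; the
+ * real Separation is sizeof(XEmacPs_Bd)): with BaseBdAddr = 0x1000,
+ * Separation = 0x10 and 4 BDs, HighBdAddr = 0x1030 and Length = 0x40.
+ * Seeking 3 BDs ahead from BdPtr = 0x1020 first gives Addr = 0x1050, which
+ * is above HighBdAddr, so Length is subtracted and BdPtr ends up at 0x1010,
+ * i.e. the ring wraps from index 2 to index 1.
+ ****************************************************************************/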
+
+/****************************************************************************
+ * Move the BdPtr argument backwards an arbitrary number of BDs wrapping
+ * around to the end of the ring if needed.
+ *
+ * A wraparound must occur if the new BdPtr is less than
+ * the base address in the ring OR if the new BdPtr crosses over the
+ * 0xFFFFFFFF to 0 boundary. The latter test is a valid one since we do not
+ * allow a BD space to span this boundary.
+ *
+ * @param RingPtr is the ring BdPtr appears in
+ * @param BdPtr on input is the starting BD position and on output is the
+ *        final BD position
+ * @param NumBd is the number of BD spaces to decrement
+ *
+ ****************************************************************************/
+#define XEMACPS_RING_SEEKBACK(RingPtr, BdPtr, NumBd)                   \
+    {                                                                   \
+        UINTPTR Addr = (UINTPTR)(void *)(BdPtr);                                  \
+                                                                        \
+        Addr -= ((RingPtr)->Separation * (NumBd));                        \
+        if ((Addr < (RingPtr)->BaseBdAddr) || ((UINTPTR)(void*)(BdPtr) < Addr))  \
+        {                                                               \
+            Addr += (RingPtr)->Length;                                  \
+        }                                                               \
+                                                                        \
+        (BdPtr) = (XEmacPs_Bd*)(void*)Addr;                                     \
+    }
+
+
+/************************** Function Prototypes ******************************/
+
+static void XEmacPs_BdSetRxWrap(UINTPTR BdPtr);
+static void XEmacPs_BdSetTxWrap(UINTPTR BdPtr);
+
+/************************** Variable Definitions *****************************/
+
+/*****************************************************************************/
+/**
+ * Using a memory segment allocated by the caller, create and setup the BD list
+ * for the given DMA channel.
+ *
+ * @param RingPtr is the instance to be worked on.
+ * @param PhysAddr is the physical base address of user memory region.
+ * @param VirtAddr is the virtual base address of the user memory region. If
+ *        address translation is not being utilized, then VirtAddr should be
+ *        equivalent to PhysAddr.
+ * @param Alignment governs the byte alignment of individual BDs. This function
+ *        will enforce a minimum alignment of 4 bytes with no maximum as long
+ *        as it is specified as a power of 2.
+ * @param BdCount is the number of BDs to setup in the user memory region. It
+ *        is assumed the region is large enough to contain the BDs.
+ *
+ * @return
+ *
+ * - XST_SUCCESS if initialization was successful
+ * - XST_NO_FEATURE if the provided instance is a non DMA type
+ *   channel.
+ * - XST_INVALID_PARAM under any of the following conditions:
+ *   1) PhysAddr and/or VirtAddr are not aligned to the given Alignment
+ *      parameter.
+ *   2) Alignment parameter does not meet minimum requirements or is not a
+ *      power of 2 value.
+ *   3) BdCount is 0.
+ * - XST_DMA_SG_LIST_ERROR if the memory segment containing the list spans
+ *   over address 0x00000000 in virtual address space.
+ *
+ * @note
+ * Make sure to pass in an Alignment value that the supplied PhysAddr and
+ * VirtAddr actually satisfy.
+ *****************************************************************************/
+LONG XEmacPs_BdRingCreate(XEmacPs_BdRing * RingPtr, UINTPTR PhysAddr,
+			  UINTPTR VirtAddr, u32 Alignment, u32 BdCount)
+{
+	u32 i;
+	UINTPTR BdVirtAddr;
+	UINTPTR BdPhyAddr;
+	UINTPTR VirtAddrLoc = VirtAddr;
+
+	/* In case there is a failure prior to creating list, make sure the
+	 * following attributes are 0 to prevent calls to other functions
+	 * from doing anything.
+	 */
+	RingPtr->AllCnt = 0U;
+	RingPtr->FreeCnt = 0U;
+	RingPtr->HwCnt = 0U;
+	RingPtr->PreCnt = 0U;
+	RingPtr->PostCnt = 0U;
+
+	/* Make sure Alignment parameter meets minimum requirements */
+	if (Alignment < (u32)XEMACPS_DMABD_MINIMUM_ALIGNMENT) {
+		return (LONG)(XST_INVALID_PARAM);
+	}
+
+	/* Make sure Alignment is a power of 2 */
+	if (((Alignment - 0x00000001U) & Alignment)!=0x00000000U) {
+		return (LONG)(XST_INVALID_PARAM);
+	}
+
+	/* Make sure PhysAddr and VirtAddr are on same Alignment */
+	if (((PhysAddr % Alignment)!=(u32)0) || ((VirtAddrLoc % Alignment)!=(u32)0)) {
+		return (LONG)(XST_INVALID_PARAM);
+	}
+
+	/* Is BdCount reasonable? */
+	if (BdCount == 0x00000000U) {
+		return (LONG)(XST_INVALID_PARAM);
+	}
+
+	/* Figure out how many bytes will be between the start of adjacent BDs */
+	RingPtr->Separation = ((u32)sizeof(XEmacPs_Bd));
+
+	/* Must make sure the ring doesn't span address 0x00000000. If it does,
+	 * then the next/prev BD traversal macros will fail.
+	 */
+	if (VirtAddrLoc > ((VirtAddrLoc + (RingPtr->Separation * BdCount)) - (u32)1)) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	/* Initial ring setup:
+	 *  - Clear the entire space
+	 *  - Compute the address of the last BD; the BDs are contiguous, so
+	 *    no next-pointer chaining is required
+	 */
+	(void)memset((void *) VirtAddrLoc, 0, (RingPtr->Separation * BdCount));
+
+	BdVirtAddr = VirtAddrLoc;
+	BdPhyAddr = PhysAddr + RingPtr->Separation;
+	for (i = 1U; i < BdCount; i++) {
+		BdVirtAddr += RingPtr->Separation;
+		BdPhyAddr += RingPtr->Separation;
+	}
+
+	/* Setup and initialize pointers and counters */
+	RingPtr->RunState = (u32)(XST_DMA_SG_IS_STOPPED);
+	RingPtr->BaseBdAddr = VirtAddrLoc;
+	RingPtr->PhysBaseAddr = PhysAddr;
+	RingPtr->HighBdAddr = BdVirtAddr;
+	RingPtr->Length =
+		((RingPtr->HighBdAddr - RingPtr->BaseBdAddr) + RingPtr->Separation);
+	RingPtr->AllCnt = (u32)BdCount;
+	RingPtr->FreeCnt = (u32)BdCount;
+	RingPtr->FreeHead = (XEmacPs_Bd *)(void *)VirtAddrLoc;
+	RingPtr->PreHead = (XEmacPs_Bd *)VirtAddrLoc;
+	RingPtr->HwHead = (XEmacPs_Bd *)VirtAddrLoc;
+	RingPtr->HwTail = (XEmacPs_Bd *)VirtAddrLoc;
+	RingPtr->PostHead = (XEmacPs_Bd *)VirtAddrLoc;
+	RingPtr->BdaRestart = (XEmacPs_Bd *)(void *)PhysAddr;
+
+	return (LONG)(XST_SUCCESS);
+}
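+
+/*
+ * Minimal usage sketch for XEmacPs_BdRingCreate() (illustrative only; the
+ * instance and buffer names are hypothetical). It assumes a flat, uncached
+ * mapping so PhysAddr == VirtAddr, and the XEmacPs_GetTxRing() helper from
+ * xemacps.h:
+ *
+ * <pre>
+ *    #define NUM_TX_BD 32U
+ *    static XEmacPs_Bd TxBdSpace[NUM_TX_BD]
+ *        __attribute__((aligned(XEMACPS_DMABD_MINIMUM_ALIGNMENT)));
+ *
+ *    Status = XEmacPs_BdRingCreate(&XEmacPs_GetTxRing(&EmacPsInstance),
+ *                                  (UINTPTR)TxBdSpace, (UINTPTR)TxBdSpace,
+ *                                  XEMACPS_DMABD_MINIMUM_ALIGNMENT,
+ *                                  NUM_TX_BD);
+ * </pre>
+ */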
+
+
+/*****************************************************************************/
+/**
+ * Clone the given BD into every BD in the list. Every field of the source BD
+ * is replicated in every BD of the list.
+ *
+ * This function can be called only when all BDs are in the free group, such
+ * as immediately after initialization with XEmacPs_BdRingCreate().
+ * This prevents modification of BDs while they are in use by hardware or the
+ * user.
+ *
+ * @param RingPtr is the pointer of BD ring instance to be worked on.
+ * @param SrcBdPtr is the source BD template to be cloned into the list. This
+ *        BD will be modified.
+ * @param Direction is either XEMACPS_SEND or XEMACPS_RECV that indicates
+ *        which direction.
+ *
+ * @return
+ *   - XST_SUCCESS if the list was modified.
+ *   - XST_DMA_SG_NO_LIST if a list has not been created.
+ *   - XST_DMA_SG_LIST_ERROR if some of the BDs in this channel are under
+ *     hardware or user control.
+ *   - XST_DEVICE_IS_STARTED if the DMA channel has not been stopped.
+ *
+ *****************************************************************************/
+LONG XEmacPs_BdRingClone(XEmacPs_BdRing * RingPtr, XEmacPs_Bd * SrcBdPtr,
+			 u8 Direction)
+{
+	u32 i;
+	UINTPTR CurBd;
+
+	/* Can't do this function if there isn't a ring */
+	if (RingPtr->AllCnt == 0x00000000U) {
+		return (LONG)(XST_DMA_SG_NO_LIST);
+	}
+
+	/* Can't do this function with the channel running */
+	if (RingPtr->RunState == (u32)XST_DMA_SG_IS_STARTED) {
+		return (LONG)(XST_DEVICE_IS_STARTED);
+	}
+
+	/* Can't do this function with some of the BDs in use */
+	if (RingPtr->FreeCnt != RingPtr->AllCnt) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	if ((Direction != (u8)XEMACPS_SEND) && (Direction != (u8)XEMACPS_RECV)) {
+		return (LONG)(XST_INVALID_PARAM);
+	}
+
+	/* Starting from the top of the ring, overwrite every BD in the list
+	 * with the template
+	 */
+	CurBd = RingPtr->BaseBdAddr;
+	for (i = 0U; i < RingPtr->AllCnt; i++) {
+		memcpy((void *)CurBd, SrcBdPtr, sizeof(XEmacPs_Bd));
+	CurBd += RingPtr->Separation;
+	}
+
+	CurBd -= RingPtr->Separation;
+
+	if (Direction == XEMACPS_RECV) {
+		XEmacPs_BdSetRxWrap(CurBd);
+	}
+	else {
+		XEmacPs_BdSetTxWrap(CurBd);
+	}
+
+	return (LONG)(XST_SUCCESS);
+}
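+
+/*
+ * Illustrative sketch for XEmacPs_BdRingClone() (hypothetical instance name),
+ * assuming the XEmacPs_BdClear() helper from xemacps_bd.h and the
+ * XEmacPs_GetRxRing() helper from xemacps.h: clear a template BD and
+ * replicate it across a freshly created RX ring.
+ *
+ * <pre>
+ *    XEmacPs_Bd BdTemplate;
+ *
+ *    XEmacPs_BdClear(&BdTemplate);
+ *    Status = XEmacPs_BdRingClone(&XEmacPs_GetRxRing(&EmacPsInstance),
+ *                                 &BdTemplate, XEMACPS_RECV);
+ * </pre>
+ */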
+
+
+/*****************************************************************************/
+/**
+ * Reserve locations in the BD list. The set of returned BDs may be modified
+ * in preparation for future DMA transaction(s). Once the BDs are ready to be
+ * submitted to hardware, the user must call XEmacPs_BdRingToHw() in the same
+ * order in which they were allocated here. Example:
+ *
+ * <pre>
+ *        NumBd = 2,
+ *        Status = XEmacPs_BdRingAlloc(MyRingPtr, NumBd, &MyBdSet),
+ *
+ *        if (Status != XST_SUCCESS)
+ *        {
+ *            *Not enough BDs available for the request*
+ *        }
+ *
+ *        CurBd = MyBdSet,
+ *        for (i=0; i<NumBd; i++)
+ *        {
+ *            * Prepare CurBd *.....
+ *
+ *            * Onto next BD *
+ *            CurBd = XEmacPs_BdRingNext(MyRingPtr, CurBd),
+ *        }
+ *
+ *        * Give list to hardware *
+ *        Status = XEmacPs_BdRingToHw(MyRingPtr, NumBd, MyBdSet),
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be allocated and given to hardware in the correct sequence:
+ * <pre>
+ *        * Legal *
+ *        XEmacPs_BdRingAlloc(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingToHw(MyRingPtr, NumBd1, MySet1),
+ *
+ *        * Legal *
+ *        XEmacPs_BdRingAlloc(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingAlloc(MyRingPtr, NumBd2, &MySet2),
+ *        XEmacPs_BdRingToHw(MyRingPtr, NumBd1, MySet1),
+ *        XEmacPs_BdRingToHw(MyRingPtr, NumBd2, MySet2),
+ *
+ *        * Not legal *
+ *        XEmacPs_BdRingAlloc(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingAlloc(MyRingPtr, NumBd2, &MySet2),
+ *        XEmacPs_BdRingToHw(MyRingPtr, NumBd2, MySet2),
+ *        XEmacPs_BdRingToHw(MyRingPtr, NumBd1, MySet1),
+ * </pre>
+ *
+ * Use the API defined in xemacps_bd.h to modify individual BDs. Traversal
+ * of the BD set can be done using XEmacPs_BdRingNext() and
+ * XEmacPs_BdRingPrev().
+ *
+ * @param RingPtr is a pointer to the BD ring instance to be worked on.
+ * @param NumBd is the number of BDs to allocate
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ *        for modification.
+ *
+ * @return
+ *   - XST_SUCCESS if the requested number of BDs was returned in the BdSetPtr
+ *     parameter.
+ *   - XST_FAILURE if there were not enough free BDs to satisfy the request.
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ * @note Do not modify more BDs than the number requested with the NumBd
+ *       parameter. Doing so will lead to data corruption and system
+ *       instability.
+ *
+ *****************************************************************************/
+LONG XEmacPs_BdRingAlloc(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			 XEmacPs_Bd ** BdSetPtr)
+{
+	LONG Status;
+	/* Enough free BDs available for the request? */
+	if (RingPtr->FreeCnt < NumBd) {
+		Status = (LONG)(XST_FAILURE);
+	} else {
+	/* Set the return argument and move FreeHead forward */
+	*BdSetPtr = RingPtr->FreeHead;
+	XEMACPS_RING_SEEKAHEAD(RingPtr, RingPtr->FreeHead, NumBd);
+	RingPtr->FreeCnt -= NumBd;
+	RingPtr->PreCnt += NumBd;
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
+
+/*****************************************************************************/
+/**
+ * Fully or partially undo an XEmacPs_BdRingAlloc() operation. Use this
+ * function if all the BDs allocated by XEmacPs_BdRingAlloc() could not be
+ * transferred to hardware with XEmacPs_BdRingToHw().
+ *
+ * This function helps out in situations when an unrelated error occurs after
+ * BDs have been allocated but before they have been given to hardware.
+ * An example of this type of error would be an OS running out of resources.
+ *
+ * This function is not the same as XEmacPs_BdRingFree(). The Free function
+ * returns BDs to the free list after they have been processed by hardware,
+ * while UnAlloc returns them before they have been given to hardware.
+ *
+ * There are two scenarios where this function can be used. Full UnAlloc or
+ * Partial UnAlloc. A Full UnAlloc means all the BDs Alloc'd will be returned:
+ *
+ * <pre>
+ *    Status = XEmacPs_BdRingAlloc(MyRingPtr, 10, &BdPtr),
+ *        ...
+ *    if (Error)
+ *    {
+ *        Status = XEmacPs_BdRingUnAlloc(MyRingPtr, 10, &BdPtr),
+ *    }
+ * </pre>
+ *
+ * A partial UnAlloc means some of the BDs Alloc'd will be returned:
+ *
+ * <pre>
+ *    Status = XEmacPs_BdRingAlloc(MyRingPtr, 10, &BdPtr),
+ *    BdsLeft = 10,
+ *    CurBdPtr = BdPtr,
+ *
+ *    while (BdsLeft)
+ *    {
+ *       if (Error)
+ *       {
+ *          Status = XEmacPs_BdRingUnAlloc(MyRingPtr, BdsLeft, CurBdPtr),
+ *       }
+ *
+ *       CurBdPtr = XEmacPs_BdRingNext(MyRingPtr, CurBdPtr),
+ *       BdsLeft--,
+ *    }
+ * </pre>
+ *
+ * A partial UnAlloc must include the last BD in the list that was Alloc'd.
+ *
+ * @param RingPtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs to return to the free group
+ * @param BdSetPtr points to the first BD of the set being unallocated.
+ *
+ * @return
+ *   - XST_SUCCESS if the BDs were unallocated.
+ *   - XST_FAILURE if NumBd parameter was greater than the number of BDs in
+ *     the preprocessing state.
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+LONG XEmacPs_BdRingUnAlloc(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			   XEmacPs_Bd * BdSetPtr)
+{
+	LONG Status;
+	(void) BdSetPtr;
+	Xil_AssertNonvoid(RingPtr != NULL);
+	Xil_AssertNonvoid(BdSetPtr != NULL);
+
+	/* Enough BDs in the pre-work state for the request? */
+	if (RingPtr->PreCnt < NumBd) {
+		Status = (LONG)(XST_FAILURE);
+	} else {
+	/* Move FreeHead backward to return the BDs to the free group */
+		XEMACPS_RING_SEEKBACK(RingPtr, (RingPtr->FreeHead), NumBd);
+	RingPtr->FreeCnt += NumBd;
+	RingPtr->PreCnt -= NumBd;
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * Enqueue a set of BDs to hardware that were previously allocated by
+ * XEmacPs_BdRingAlloc(). Once this function returns, the argument BD set goes
+ * under hardware control. Any changes made to these BDs after this point will
+ * corrupt the BD list leading to data corruption and system instability.
+ *
+ * The set will be rejected if the last BD of the set does not mark the end of
+ * a packet (see XEmacPs_BdSetLast()).
+ *
+ * @param RingPtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs in the set.
+ * @param BdSetPtr is the first BD of the set to commit to hardware.
+ *
+ * @return
+ *   - XST_SUCCESS if the set of BDs was accepted and enqueued to hardware.
+ *   - XST_FAILURE if the set of BDs was rejected because the last BD of the set
+ *     did not have its "last" bit set.
+ *   - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
+ *     XEmacPs_BdRingAlloc().
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+LONG XEmacPs_BdRingToHw(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			XEmacPs_Bd * BdSetPtr)
+{
+	XEmacPs_Bd *CurBdPtr;
+	u32 i;
+	LONG Status;
+	/* if no bds to process, simply return. */
+	if (0U == NumBd){
+		Status = (LONG)(XST_SUCCESS);
+	} else {
+	/* Make sure we are in sync with XEmacPs_BdRingAlloc() */
+	if ((RingPtr->PreCnt < NumBd) || (RingPtr->PreHead != BdSetPtr)) {
+			Status = (LONG)(XST_DMA_SG_LIST_ERROR);
+		} else {
+	CurBdPtr = BdSetPtr;
+			for (i = 0U; i < NumBd; i++) {
+				CurBdPtr = (XEmacPs_Bd *)((void *)XEmacPs_BdRingNext(RingPtr, CurBdPtr));
+	}
+	/* Adjust ring pointers & counters */
+	XEMACPS_RING_SEEKAHEAD(RingPtr, RingPtr->PreHead, NumBd);
+	RingPtr->PreCnt -= NumBd;
+	RingPtr->HwTail = CurBdPtr;
+	RingPtr->HwCnt += NumBd;
+
+			Status = (LONG)(XST_SUCCESS);
+		}
+	}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * Returns a set of BD(s) that have been processed by hardware. The returned
+ * BDs may be examined to determine the outcome of the DMA transaction(s).
+ * Once the BDs have been examined, the user must call XEmacPs_BdRingFree()
+ * in the same order in which they were retrieved here. Example:
+ *
+ * <pre>
+ *        NumBd = XEmacPs_BdRingFromHwTx(MyRingPtr, MaxBd, &MyBdSet),
+ *        if (NumBd == 0)
+ *        {
+ *           * hardware has nothing ready for us yet*
+ *        }
+ *
+ *        CurBd = MyBdSet,
+ *        for (i=0; i<NumBd; i++)
+ *        {
+ *           * Examine CurBd for post processing *.....
+ *
+ *           * Onto next BD *
+ *           CurBd = XEmacPs_BdRingNext(MyRingPtr, CurBd),
+ *           }
+ *
+ *           XEmacPs_BdRingFree(MyRingPtr, NumBd, MyBdSet),  *Return list*
+ *        }
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be retrieved from hardware and freed in the correct sequence:
+ * <pre>
+ *        * Legal *
+ *        XEmacPs_BdRingFromHwTx(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd1, MySet1),
+ *
+ *        * Legal *
+ *        XEmacPs_BdRingFromHwTx(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingFromHwTx(MyRingPtr, NumBd2, &MySet2),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd1, MySet1),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd2, MySet2),
+ *
+ *        * Not legal *
+ *        XEmacPs_BdRingFromHwTx(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingFromHwTx(MyRingPtr, NumBd2, &MySet2),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd2, MySet2),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd1, MySet1),
+ * </pre>
+ *
+ * If hardware has only partially completed a packet spanning multiple BDs,
+ * then none of the BDs for that packet will be included in the results.
+ *
+ * @param RingPtr is a pointer to the instance to be worked on.
+ * @param BdLimit is the maximum number of BDs to return in the set.
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ *        for examination.
+ *
+ * @return
+ *   The number of BDs processed by hardware. A value of 0 indicates that no
+ *   data is available. No more than BdLimit BDs will be returned.
+ *
+ * @note Treat BDs returned by this function as read-only.
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+u32 XEmacPs_BdRingFromHwTx(XEmacPs_BdRing * RingPtr, u32 BdLimit,
+				 XEmacPs_Bd ** BdSetPtr)
+{
+	XEmacPs_Bd *CurBdPtr;
+	u32 BdStr = 0U;
+	u32 BdCount;
+	u32 BdPartialCount;
+	u32 Sop = 0U;
+	u32 Status;
+	u32 BdLimitLoc = BdLimit;
+	CurBdPtr = RingPtr->HwHead;
+	BdCount = 0U;
+	BdPartialCount = 0U;
+
+	/* If no BDs in work group, then there's nothing to search */
+	if (RingPtr->HwCnt == 0x00000000U) {
+		*BdSetPtr = NULL;
+		Status = 0U;
+	} else {
+
+		if (BdLimitLoc > RingPtr->HwCnt){
+			BdLimitLoc = RingPtr->HwCnt;
+	}
+	/* Starting at HwHead, keep moving forward in the list until:
+	 *  - A BD is encountered with its new/used bit set which means
+	 *    hardware has not completed processing of that BD.
+	 *  - RingPtr->HwTail is reached and RingPtr->HwCnt is reached.
+	 *  - The number of requested BDs has been processed
+	 */
+		while (BdCount < BdLimitLoc) {
+		/* Read the status */
+			if(CurBdPtr != NULL){
+		BdStr = XEmacPs_BdRead(CurBdPtr, XEMACPS_BD_STAT_OFFSET);
+			}
+
+			if ((Sop == 0x00000000U) && ((BdStr & XEMACPS_TXBUF_USED_MASK)!=0x00000000U)){
+				Sop = 1U;
+			}
+			if (Sop == 0x00000001U) {
+			BdCount++;
+			BdPartialCount++;
+		}
+
+		/* hardware has processed this BD so check the "last" bit.
+		 * If it is clear, then there are more BDs for the current
+		 * packet. Keep a count of these partial packet BDs.
+		 */
+			if ((Sop == 0x00000001U) && ((BdStr & XEMACPS_TXBUF_LAST_MASK)!=0x00000000U)) {
+				Sop = 0U;
+				BdPartialCount = 0U;
+		}
+
+		/* Move on to next BD in work group */
+		CurBdPtr = XEmacPs_BdRingNext(RingPtr, CurBdPtr);
+	}
+
+	/* Subtract off any partial packet BDs found */
+        BdCount -= BdPartialCount;
+
+	/* If BdCount is non-zero then BDs were found to return. Set return
+	 * parameters, update pointers and counters, return success
+	 */
+		if (BdCount > 0x00000000U) {
+		*BdSetPtr = RingPtr->HwHead;
+		RingPtr->HwCnt -= BdCount;
+		RingPtr->PostCnt += BdCount;
+		XEMACPS_RING_SEEKAHEAD(RingPtr, RingPtr->HwHead, BdCount);
+			Status = (BdCount);
+		} else {
+			*BdSetPtr = NULL;
+			Status = 0U;
+	}
+	}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * Returns a set of BD(s) that have been processed by hardware. The returned
+ * BDs may be examined to determine the outcome of the DMA transaction(s).
+ * Once the BDs have been examined, the user must call XEmacPs_BdRingFree()
+ * in the same order in which they were retrieved here. Example:
+ *
+ * <pre>
+ *        NumBd = XEmacPs_BdRingFromHwRx(MyRingPtr, MaxBd, &MyBdSet),
+ *
+ *        if (NumBd == 0)
+ *        {
+ *           *hardware has nothing ready for us yet*
+ *        }
+ *
+ *        CurBd = MyBdSet,
+ *        for (i=0; i<NumBd; i++)
+ *        {
+ *           * Examine CurBd for post processing *.....
+ *
+ *           * Onto next BD *
+ *           CurBd = XEmacPs_BdRingNext(MyRingPtr, CurBd),
+ *           }
+ *
+ *           XEmacPs_BdRingFree(MyRingPtr, NumBd, MyBdSet),  * Return list *
+ *        }
+ * </pre>
+ *
+ * A more advanced use of this function may allocate multiple sets of BDs.
+ * They must be retrieved from hardware and freed in the correct sequence:
+ * <pre>
+ *        * Legal *
+ *        XEmacPs_BdRingFromHwRx(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd1, MySet1),
+ *
+ *        * Legal *
+ *        XEmacPs_BdRingFromHwRx(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingFromHwRx(MyRingPtr, NumBd2, &MySet2),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd1, MySet1),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd2, MySet2),
+ *
+ *        * Not legal *
+ *        XEmacPs_BdRingFromHwRx(MyRingPtr, NumBd1, &MySet1),
+ *        XEmacPs_BdRingFromHwRx(MyRingPtr, NumBd2, &MySet2),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd2, MySet2),
+ *        XEmacPs_BdRingFree(MyRingPtr, NumBd1, MySet1),
+ * </pre>
+ *
+ * If hardware has only partially completed a packet spanning multiple BDs,
+ * then none of the BDs for that packet will be included in the results.
+ *
+ * @param RingPtr is a pointer to the instance to be worked on.
+ * @param BdLimit is the maximum number of BDs to return in the set.
+ * @param BdSetPtr is an output parameter, it points to the first BD available
+ *        for examination.
+ *
+ * @return
+ *   The number of BDs processed by hardware. A value of 0 indicates that no
+ *   data is available. No more than BdLimit BDs will be returned.
+ *
+ * @note Treat BDs returned by this function as read-only.
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+u32 XEmacPs_BdRingFromHwRx(XEmacPs_BdRing * RingPtr, u32 BdLimit,
+				 XEmacPs_Bd ** BdSetPtr)
+{
+	XEmacPs_Bd *CurBdPtr;
+	u32 BdStr = 0U;
+	u32 BdCount;
+	u32 BdPartialCount;
+	u32 Status;
+
+	CurBdPtr = RingPtr->HwHead;
+	BdCount = 0U;
+	BdPartialCount = 0U;
+
+	/* If no BDs in work group, then there's nothing to search */
+	if (RingPtr->HwCnt == 0x00000000U) {
+		*BdSetPtr = NULL;
+		Status = 0U;
+	} else {
+
+	/* Starting at HwHead, keep moving forward in the list until:
+	 *  - A BD is encountered with its new/used bit clear, which means
+	 *    hardware has not yet written a frame into that BD.
+	 *  - RingPtr->HwTail is reached and RingPtr->HwCnt is reached.
+	 *  - The number of requested BDs has been processed
+	 */
+	while (BdCount < BdLimit) {
+
+		/* Read the status */
+			if(CurBdPtr!=NULL){
+		BdStr = XEmacPs_BdRead(CurBdPtr, XEMACPS_BD_STAT_OFFSET);
+			}
+			if ((!(XEmacPs_BdIsRxNew(CurBdPtr)))==TRUE) {
+			break;
+		}
+
+		BdCount++;
+
+		/* hardware has processed this BD so check the "last" bit. If
+                 * it is clear, then there are more BDs for the current packet.
+                 * Keep a count of these partial packet BDs.
+		 */
+			if ((BdStr & XEMACPS_RXBUF_EOF_MASK)!=0x00000000U) {
+				BdPartialCount = 0U;
+			} else {
+			BdPartialCount++;
+		}
+
+		/* Move on to next BD in work group */
+		CurBdPtr = XEmacPs_BdRingNext(RingPtr, CurBdPtr);
+	}
+
+	/* Subtract off any partial packet BDs found */
+	BdCount -= BdPartialCount;
+
+	/* If BdCount is non-zero then BDs were found to return. Set return
+	 * parameters, update pointers and counters, return success
+	 */
+		if (BdCount > 0x00000000U) {
+		*BdSetPtr = RingPtr->HwHead;
+		RingPtr->HwCnt -= BdCount;
+		RingPtr->PostCnt += BdCount;
+		XEMACPS_RING_SEEKAHEAD(RingPtr, RingPtr->HwHead, BdCount);
+			Status = (BdCount);
+	}
+	else {
+		*BdSetPtr = NULL;
+			Status = 0U;
+	}
+}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * Frees a set of BDs that had been previously retrieved with
+ * XEmacPs_BdRingFromHwTx() or XEmacPs_BdRingFromHwRx().
+ *
+ * @param RingPtr is a pointer to the instance to be worked on.
+ * @param NumBd is the number of BDs to free.
+ * @param BdSetPtr is the head of a list of BDs returned by
+ * XEmacPs_BdRingFromHwTx() or XEmacPs_BdRingFromHwRx().
+ *
+ * @return
+ *   - XST_SUCCESS if the set of BDs was freed.
+ *   - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
+ *     XEmacPs_BdRingFromHwTx() or XEmacPs_BdRingFromHwRx().
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+LONG XEmacPs_BdRingFree(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			XEmacPs_Bd * BdSetPtr)
+{
+	LONG Status;
+	/* if no bds to process, simply return. */
+	if (0x00000000U == NumBd){
+		Status = (LONG)(XST_SUCCESS);
+	} else {
+	/* Make sure we are in sync with XEmacPs_BdRingFromHwTx()/FromHwRx() */
+	if ((RingPtr->PostCnt < NumBd) || (RingPtr->PostHead != BdSetPtr)) {
+			Status = (LONG)(XST_DMA_SG_LIST_ERROR);
+		} else {
+	/* Update pointers and counters */
+	RingPtr->FreeCnt += NumBd;
+	RingPtr->PostCnt -= NumBd;
+	XEMACPS_RING_SEEKAHEAD(RingPtr, RingPtr->PostHead, NumBd);
+			Status = (LONG)(XST_SUCCESS);
+		}
+	}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * Check the internal data structures of the BD ring for the provided channel.
+ * The following checks are made:
+ *
+ *   - Is the BD ring linked correctly in physical address space?
+ *   - Do the internal pointers point to BDs in the ring?
+ *   - Do the internal counters add up?
+ *
+ * The channel should be stopped prior to calling this function.
+ *
+ * @param RingPtr is a pointer to the instance to be worked on.
+ * @param Direction is either XEMACPS_SEND or XEMACPS_RECV that indicates
+ *        which direction.
+ *
+ * @return
+ *   - XST_SUCCESS if no problems were found with the BD ring.
+ *   - XST_DMA_SG_NO_LIST if the list has not been created.
+ *   - XST_IS_STARTED if the channel is not stopped.
+ *   - XST_DMA_SG_LIST_ERROR if a problem is found with the internal data
+ *     structures. If this value is returned, the channel should be reset to
+ *     avoid data corruption or system instability.
+ *
+ * @note This function should not be preempted by another XEmacPs_Bd function
+ *       call that modifies the BD space. It is the caller's responsibility to
+ *       provide a mutual exclusion mechanism.
+ *
+ *****************************************************************************/
+LONG XEmacPs_BdRingCheck(XEmacPs_BdRing * RingPtr, u8 Direction)
+{
+	UINTPTR AddrV, AddrP;
+	u32 i;
+
+	if ((Direction != (u8)XEMACPS_SEND) && (Direction != (u8)XEMACPS_RECV)) {
+		return (LONG)(XST_INVALID_PARAM);
+	}
+
+	/* Is the list created */
+	if (RingPtr->AllCnt == 0x00000000U) {
+		return (LONG)(XST_DMA_SG_NO_LIST);
+	}
+
+	/* Can't check if channel is running */
+	if (RingPtr->RunState == (u32)XST_DMA_SG_IS_STARTED) {
+		return (LONG)(XST_IS_STARTED);
+	}
+
+	/* RunState doesn't make sense */
+	if (RingPtr->RunState != (u32)XST_DMA_SG_IS_STOPPED) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	/* Verify internal pointers point to correct memory space */
+	AddrV = (UINTPTR) RingPtr->FreeHead;
+	if ((AddrV < RingPtr->BaseBdAddr) || (AddrV > RingPtr->HighBdAddr)) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	AddrV = (UINTPTR) RingPtr->PreHead;
+	if ((AddrV < RingPtr->BaseBdAddr) || (AddrV > RingPtr->HighBdAddr)) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	AddrV = (UINTPTR) RingPtr->HwHead;
+	if ((AddrV < RingPtr->BaseBdAddr) || (AddrV > RingPtr->HighBdAddr)) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	AddrV = (UINTPTR) RingPtr->HwTail;
+	if ((AddrV < RingPtr->BaseBdAddr) || (AddrV > RingPtr->HighBdAddr)) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	AddrV = (UINTPTR) RingPtr->PostHead;
+	if ((AddrV < RingPtr->BaseBdAddr) || (AddrV > RingPtr->HighBdAddr)) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	/* Verify internal counters add up */
+	if ((RingPtr->HwCnt + RingPtr->PreCnt + RingPtr->FreeCnt +
+	     RingPtr->PostCnt) != RingPtr->AllCnt) {
+		return (LONG)(XST_DMA_SG_LIST_ERROR);
+	}
+
+	/* Verify BDs are linked correctly */
+	AddrV = RingPtr->BaseBdAddr;
+	AddrP = RingPtr->PhysBaseAddr + RingPtr->Separation;
+
+	for (i = 1U; i < RingPtr->AllCnt; i++) {
+		/* Check BDA for this BD. It should point to next physical addr */
+		if (XEmacPs_BdRead(AddrV, XEMACPS_BD_ADDR_OFFSET) != AddrP) {
+			return (LONG)(XST_DMA_SG_LIST_ERROR);
+		}
+
+		/* Move on to next BD */
+		AddrV += RingPtr->Separation;
+		AddrP += RingPtr->Separation;
+	}
+
+	/* Last BD should have wrap bit set */
+	if (XEMACPS_SEND == Direction) {
+		if ((!XEmacPs_BdIsTxWrap(AddrV))==TRUE) {
+			return (LONG)(XST_DMA_SG_LIST_ERROR);
+		}
+	}
+	else {			/* XEMACPS_RECV */
+		if ((!XEmacPs_BdIsRxWrap(AddrV))==TRUE) {
+			return (LONG)(XST_DMA_SG_LIST_ERROR);
+		}
+	}
+
+	/* No problems found */
+	return (LONG)(XST_SUCCESS);
+}
+
+/*****************************************************************************/
+/**
+ * Set the wrap bit to mark the last descriptor in the receive buffer
+ * descriptor list.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetRxWrap(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+static void XEmacPs_BdSetRxWrap(UINTPTR BdPtr)
+{
+    u32 DataValueRx;
+	u32 *TempPtr;
+
+	BdPtr += (u32)(XEMACPS_BD_ADDR_OFFSET);
+	TempPtr = (u32 *)BdPtr;
+	if(TempPtr != NULL) {
+		DataValueRx = *TempPtr;
+		DataValueRx |= XEMACPS_RXBUF_WRAP_MASK;
+		*TempPtr = DataValueRx;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * Set the wrap bit to mark the last descriptor in the transmit buffer
+ * descriptor list.
+ *
+ * @param  BdPtr is the BD pointer to operate on
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdSetTxWrap(XEmacPs_Bd* BdPtr)
+ *
+ *****************************************************************************/
+static void XEmacPs_BdSetTxWrap(UINTPTR BdPtr)
+{
+    u32 DataValueTx;
+	u32 *TempPtr;
+
+	BdPtr += (u32)(XEMACPS_BD_STAT_OFFSET);
+	TempPtr = (u32 *)BdPtr;
+	if(TempPtr != NULL) {
+		DataValueTx = *TempPtr;
+		DataValueTx |= XEMACPS_TXBUF_WRAP_MASK;
+		*TempPtr = DataValueTx;
+	}
+}
+
+/*****************************************************************************/
+/**
+ * Reset BD ring head and tail pointers.
+ *
+ * @param RingPtr is the instance to be worked on.
+ * @param virtaddrloc is the virtual base address of the user memory region.
+ *
+ * @note
+ * Should be called after XEmacPs_Stop()
+ *
+ * @note
+ * C-style signature:
+ *    void XEmacPs_BdRingPtrReset(XEmacPs_BdRing * RingPtr, void *virtaddrloc)
+ *
+ *****************************************************************************/
+void XEmacPs_BdRingPtrReset(XEmacPs_BdRing * RingPtr, void *virtaddrloc)
+{
+	RingPtr->FreeHead = virtaddrloc;
+	RingPtr->PreHead = virtaddrloc;
+	RingPtr->HwHead = virtaddrloc;
+	RingPtr->HwTail = virtaddrloc;
+	RingPtr->PostHead = virtaddrloc;
+}
+
+/** @} */
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.h b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.h
new file mode 100644
index 0000000..f5653db
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_bdring.h
@@ -0,0 +1,215 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xemacps_bdring.h
+* @addtogroup emacps_v3_16
+* @{
+*
+* The Xilinx EmacPs buffer descriptor ring driver. This is part of the EmacPs
+* DMA functionality.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a wsy  01/10/10 First release
+* 2.1   srt  07/15/14 Add support for Zynq Ultrascale Mp architecture.
+* 3.0   kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+* 3.6   rb   09/08/17 HwCnt variable (in XEmacPs_BdRing structure) is
+*		      changed to volatile.
+*
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XEMACPS_BDRING_H	/* prevent circular inclusions */
+#define XEMACPS_BDRING_H	/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**************************** Type Definitions *******************************/
+
+/** This is an internal structure used to maintain the DMA list */
+typedef struct {
+	UINTPTR PhysBaseAddr;/**< Physical address of 1st BD in list */
+	UINTPTR BaseBdAddr;	 /**< Virtual address of 1st BD in list */
+	UINTPTR HighBdAddr;	 /**< Virtual address of last BD in the list */
+	u32 Length;	 /**< Total size of ring in bytes */
+	u32 RunState;	 /**< Flag to indicate DMA is started */
+	u32 Separation;	 /**< Number of bytes between the starting address
+                                  of adjacent BDs */
+	XEmacPs_Bd *FreeHead;
+			     /**< First BD in the free group */
+	XEmacPs_Bd *PreHead;/**< First BD in the pre-work group */
+	XEmacPs_Bd *HwHead; /**< First BD in the work group */
+	XEmacPs_Bd *HwTail; /**< Last BD in the work group */
+	XEmacPs_Bd *PostHead;
+			     /**< First BD in the post-work group */
+	XEmacPs_Bd *BdaRestart;
+			     /**< BDA to load when channel is started */
+
+	volatile u32 HwCnt;    /**< Number of BDs in work group */
+	u32 PreCnt;     /**< Number of BDs in pre-work group */
+	u32 FreeCnt;    /**< Number of allocatable BDs in the free group */
+	u32 PostCnt;    /**< Number of BDs in post-work group */
+	u32 AllCnt;     /**< Total Number of BDs for channel */
+} XEmacPs_BdRing;
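+
+/*
+ * Illustrative note on the groups above: BDs cycle through
+ * Free -> Pre (XEmacPs_BdRingAlloc) -> Hw (XEmacPs_BdRingToHw) ->
+ * Post (XEmacPs_BdRingFromHwTx/Rx) -> Free (XEmacPs_BdRingFree), so at any
+ * point the counters should satisfy:
+ *
+ *    FreeCnt + PreCnt + HwCnt + PostCnt == AllCnt
+ *
+ * which is exactly what XEmacPs_BdRingCheck() verifies.
+ */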
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/*****************************************************************************/
+/**
+* Use this macro at initialization time to determine how many BDs will fit
+* in a BD list within the given memory constraints.
+*
+* The results of this macro can be provided to XEmacPs_BdRingCreate().
+*
+* @param Alignment specifies what byte alignment the BDs must fall on and
+*        must be a power of 2 to get an accurate calculation (32, 64, 128,...)
+* @param Bytes is the number of bytes to be used to store BDs.
+*
+* @return Number of BDs that can fit in the given memory area
+*
+* @note
+* C-style signature:
+*    u32 XEmacPs_BdRingCntCalc(u32 Alignment, u32 Bytes)
+*
+******************************************************************************/
+#define XEmacPs_BdRingCntCalc(Alignment, Bytes)                    \
+    (u32)((Bytes) / (sizeof(XEmacPs_Bd)))
+
+/*****************************************************************************/
+/**
+* Use this macro at initialization time to determine how many bytes of memory
+* are required to contain a given number of BDs at a given alignment.
+*
+* @param Alignment specifies what byte alignment the BDs must fall on. This
+*        parameter must be a power of 2 to get an accurate calculation (32, 64,
+*        128,...)
+* @param NumBd is the number of BDs to calculate memory size requirements for
+*
+* @return The number of bytes of memory required to create a BD list with the
+*         given memory constraints.
+*
+* @note
+* C-style signature:
+*    u32 XEmacPs_BdRingMemCalc(u32 Alignment, u32 NumBd)
+*
+******************************************************************************/
+#define XEmacPs_BdRingMemCalc(Alignment, NumBd)                    \
+    (u32)(sizeof(XEmacPs_Bd) * (NumBd))
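+
+/*
+ * Illustrative example (hypothetical sizes): if sizeof(XEmacPs_Bd) is 8
+ * bytes, XEmacPs_BdRingCntCalc(XEMACPS_DMABD_MINIMUM_ALIGNMENT, 4096U)
+ * yields 512 BDs and XEmacPs_BdRingMemCalc(XEMACPS_DMABD_MINIMUM_ALIGNMENT,
+ * 512U) gives back the 4096 bytes needed to hold them. Note that both macros
+ * derive their result from sizeof(XEmacPs_Bd) and ignore the Alignment
+ * argument.
+ */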
+
+/****************************************************************************/
+/**
+* Return the total number of BDs allocated by this channel with
+* XEmacPs_BdRingCreate().
+*
+* @param  RingPtr is the DMA channel to operate on.
+*
+* @return The total number of BDs allocated for this channel.
+*
+* @note
+* C-style signature:
+*    u32 XEmacPs_BdRingGetCnt(XEmacPs_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XEmacPs_BdRingGetCnt(RingPtr) ((RingPtr)->AllCnt)
+
+/****************************************************************************/
+/**
+* Return the number of BDs allocatable with XEmacPs_BdRingAlloc() for pre-
+* processing.
+*
+* @param  RingPtr is the DMA channel to operate on.
+*
+* @return The number of BDs currently allocatable.
+*
+* @note
+* C-style signature:
+*    u32 XEmacPs_BdRingGetFreeCnt(XEmacPs_BdRing* RingPtr)
+*
+*****************************************************************************/
+#define XEmacPs_BdRingGetFreeCnt(RingPtr)   ((RingPtr)->FreeCnt)
+
+/****************************************************************************/
+/**
+* Return the next BD from BdPtr in a list.
+*
+* @param  RingPtr is the DMA channel to operate on.
+* @param  BdPtr is the BD to operate on.
+*
+* @return The next BD in the list relative to the BdPtr parameter.
+*
+* @note
+* C-style signature:
+*    XEmacPs_Bd *XEmacPs_BdRingNext(XEmacPs_BdRing* RingPtr,
+*                                      XEmacPs_Bd *BdPtr)
+*
+*****************************************************************************/
+#define XEmacPs_BdRingNext(RingPtr, BdPtr)                           \
+    (((UINTPTR)((void *)(BdPtr)) >= (RingPtr)->HighBdAddr) ?                     \
+    (XEmacPs_Bd*)((void*)(RingPtr)->BaseBdAddr) :                              \
+    (XEmacPs_Bd*)((UINTPTR)((void *)(BdPtr)) + (RingPtr)->Separation))
+
+/****************************************************************************/
+/**
+* Return the previous BD from BdPtr in the list.
+*
+* @param  RingPtr is the DMA channel to operate on.
+* @param  BdPtr is the BD to operate on
+*
+* @return The previous BD in the list relative to the BdPtr parameter.
+*
+* @note
+* C-style signature:
+*    XEmacPs_Bd *XEmacPs_BdRingPrev(XEmacPs_BdRing* RingPtr,
+*                                      XEmacPs_Bd *BdPtr)
+*
+*****************************************************************************/
+#define XEmacPs_BdRingPrev(RingPtr, BdPtr)                           \
+    (((UINTPTR)(BdPtr) <= (RingPtr)->BaseBdAddr) ?                     \
+    (XEmacPs_Bd*)(RingPtr)->HighBdAddr :                              \
+    (XEmacPs_Bd*)((UINTPTR)(BdPtr) - (RingPtr)->Separation))
+
+/************************** Function Prototypes ******************************/
+
+/*
+ * Scatter gather DMA related functions in xemacps_bdring.c
+ */
+LONG XEmacPs_BdRingCreate(XEmacPs_BdRing * RingPtr, UINTPTR PhysAddr,
+			  UINTPTR VirtAddr, u32 Alignment, u32 BdCount);
+LONG XEmacPs_BdRingClone(XEmacPs_BdRing * RingPtr, XEmacPs_Bd * SrcBdPtr,
+			 u8 Direction);
+LONG XEmacPs_BdRingAlloc(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			 XEmacPs_Bd ** BdSetPtr);
+LONG XEmacPs_BdRingUnAlloc(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			   XEmacPs_Bd * BdSetPtr);
+LONG XEmacPs_BdRingToHw(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			XEmacPs_Bd * BdSetPtr);
+LONG XEmacPs_BdRingFree(XEmacPs_BdRing * RingPtr, u32 NumBd,
+			XEmacPs_Bd * BdSetPtr);
+u32 XEmacPs_BdRingFromHwTx(XEmacPs_BdRing * RingPtr, u32 BdLimit,
+				 XEmacPs_Bd ** BdSetPtr);
+u32 XEmacPs_BdRingFromHwRx(XEmacPs_BdRing * RingPtr, u32 BdLimit,
+				 XEmacPs_Bd ** BdSetPtr);
+LONG XEmacPs_BdRingCheck(XEmacPs_BdRing * RingPtr, u8 Direction);
+
+void XEmacPs_BdRingPtrReset(XEmacPs_BdRing * RingPtr, void *virtaddrloc);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* end of protection macros */
+/** @} */
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_control.c b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_control.c
new file mode 100644
index 0000000..4c72f20
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_control.c
@@ -0,0 +1,1133 @@
+/******************************************************************************
+* Copyright (C) 2009 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+ *
+ * @file xemacps_control.c
+* @addtogroup emacps_v3_16
+* @{
+ *
+ * Functions in this file implement general purpose command and control related
+ * functionality. See xemacps.h for a detailed description of the driver.
+ *
+ * <pre>
+ * MODIFICATION HISTORY:
+ *
+ * Ver   Who  Date     Changes
+ * ----- ---- -------- -------------------------------------------------------
+ * 1.00a wsy  01/10/10 First release
+ * 1.02a asa  11/05/12 Added a new API for deleting an entry from the HASH
+ *					   register. Added a new API for setting the BURST length
+ *					   in DMACR register.
+ * 2.1   srt  07/15/14 Add support for Zynq Ultrascale Mp architecture.
+ * 3.0   kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+ * 3.0   hk   02/20/15 Added support for jumbo frames.
+ * 3.2   hk   02/22/16 Added SGMII support for Zynq Ultrascale+ MPSoC.
+ * </pre>
+ *****************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xemacps.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * Set the MAC address for this driver/device.  The address is a 48-bit value.
+ * The device must be stopped before calling this function.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is a pointer to a 6-byte MAC address.
+ * @param Index is the index (1-4) of the MAC address register to set.
+ *
+ * @return
+ * - XST_SUCCESS if the MAC address was set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ *****************************************************************************/
+LONG XEmacPs_SetMacAddress(XEmacPs *InstancePtr, void *AddressPtr, u8 Index)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *)(void *)AddressPtr;
+	u8 IndexLoc = Index;
+	LONG Status;
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(Aptr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+	Xil_AssertNonvoid((IndexLoc <= (u8)XEMACPS_MAX_MAC_ADDR) && (IndexLoc > 0x00U));
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STARTED);
+	}
+	else{
+	/* Index ranges from 1 to 4; for the offset calculation it is 0 to 3. */
+		IndexLoc--;
+
+	/* Set the MAC bits [31:0] in BOT */
+		MacAddr = *(Aptr);
+		MacAddr |= ((u32)(*(Aptr+1)) << 8U);
+		MacAddr |= ((u32)(*(Aptr+2)) << 16U);
+		MacAddr |= ((u32)(*(Aptr+3)) << 24U);
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				((u32)XEMACPS_LADDR1L_OFFSET + ((u32)IndexLoc * (u32)8)), MacAddr);
+
+	/* There are reserved bits in TOP so don't affect them */
+	MacAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					((u32)XEMACPS_LADDR1H_OFFSET + ((u32)IndexLoc * (u32)8)));
+
+		MacAddr &= (u32)(~XEMACPS_LADDR_MACH_MASK);
+
+	/* Set MAC bits [47:32] in TOP */
+		MacAddr |= (u32)(*(Aptr+4));
+		MacAddr |= (u32)(*(Aptr+5)) << 8U;
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				((u32)XEMACPS_LADDR1H_OFFSET + ((u32)IndexLoc * (u32)8)), MacAddr);
+
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
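+
+/*
+ * Minimal usage sketch for XEmacPs_SetMacAddress() (hypothetical address and
+ * instance name): program the first address-match register while the device
+ * is stopped.
+ *
+ * <pre>
+ *    u8 MacAddr[6] = { 0x00, 0x0a, 0x35, 0x01, 0x02, 0x03 };
+ *
+ *    Status = XEmacPs_SetMacAddress(&EmacPsInstance, MacAddr, 1);
+ * </pre>
+ */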
+
+
+/*****************************************************************************/
+/**
+ * Get the MAC address for this driver/device.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is an output parameter, and is a pointer to a buffer into
+ *        which the current MAC address will be copied.
+ * @param Index is the index (1-4) of the MAC address register to read.
+ *
+ *****************************************************************************/
+void XEmacPs_GetMacAddress(XEmacPs *InstancePtr, void *AddressPtr, u8 Index)
+{
+	u32 MacAddr;
+	u8 *Aptr = (u8 *)(void *)AddressPtr;
+	u8 IndexLoc = Index;
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(Aptr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+	Xil_AssertVoid((IndexLoc <= (u8)XEMACPS_MAX_MAC_ADDR) && (IndexLoc > 0x00U));
+
+	/* Index ranges from 1 to 4; for the offset calculation it is 0 to 3. */
+	IndexLoc--;
+
+	MacAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				    ((u32)XEMACPS_LADDR1L_OFFSET + ((u32)IndexLoc * (u32)8)));
+	*Aptr = (u8) MacAddr;
+	*(Aptr+1) = (u8) (MacAddr >> 8U);
+	*(Aptr+2) = (u8) (MacAddr >> 16U);
+	*(Aptr+3) = (u8) (MacAddr >> 24U);
+
+	/* Read MAC bits [47:32] in TOP */
+	MacAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				    ((u32)XEMACPS_LADDR1H_OFFSET + ((u32)IndexLoc * (u32)8)));
+	*(Aptr+4) = (u8) MacAddr;
+	*(Aptr+5) = (u8) (MacAddr >> 8U);
+}
+
+
+/*****************************************************************************/
+/**
+ * Set 48-bit MAC addresses in hash table.
+ * The device must be stopped before calling this function.
+ *
+ * The hash address register is 64 bits long and takes up two locations in
+ * the memory map. The least significant bits are stored in hash register
+ * bottom and the most significant bits in hash register top.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the network
+ * configuration register enable the reception of hash matched frames. The
+ * destination address is reduced to a 6 bit index into the 64 bit hash
+ * register using the following hash function. The hash function is an XOR
+ * of every sixth bit of the destination address.
+ *
+ * <pre>
+ * hash_index[05] = da[05]^da[11]^da[17]^da[23]^da[29]^da[35]^da[41]^da[47]
+ * hash_index[04] = da[04]^da[10]^da[16]^da[22]^da[28]^da[34]^da[40]^da[46]
+ * hash_index[03] = da[03]^da[09]^da[15]^da[21]^da[27]^da[33]^da[39]^da[45]
+ * hash_index[02] = da[02]^da[08]^da[14]^da[20]^da[26]^da[32]^da[38]^da[44]
+ * hash_index[01] = da[01]^da[07]^da[13]^da[19]^da[25]^da[31]^da[37]^da[43]
+ * hash_index[00] = da[00]^da[06]^da[12]^da[18]^da[24]^da[30]^da[36]^da[42]
+ * </pre>
+ *
+ * da[0] represents the least significant bit of the first byte received,
+ * that is, the multicast/unicast indicator, and da[47] represents the most
+ * significant bit of the last byte received.
+ *
+ * If the hash index points to a bit that is set in the hash register then
+ * the frame will be matched according to whether the frame is multicast
+ * or unicast.
+ *
+ * A multicast match will be signaled if the multicast hash enable bit is
+ * set, da[0] is logic 1 and the hash index points to a bit set in the hash
+ * register.
+ *
+ * A unicast match will be signaled if the unicast hash enable bit is set,
+ * da[0] is logic 0 and the hash index points to a bit set in the hash
+ * register.
+ *
+ * To receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the network
+ * configuration register.
+ *
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is a pointer to a 6-byte MAC address.
+ *
+ * @return
+ * - XST_SUCCESS if the HASH MAC address was set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ * - XST_INVALID_PARAM if the hash index computed from the MAC address passed
+ *   in is out of range
+ *
+ * @note
+ * Having Aptr be an unsigned type prevents the following operations from sign
+ * extending.
+ *****************************************************************************/
+LONG XEmacPs_SetHash(XEmacPs *InstancePtr, void *AddressPtr)
+{
+	u32 HashAddr;
+	u8 *Aptr = (u8 *)(void *)AddressPtr;
+	u8 Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8;
+	u32 Result;
+	LONG Status;
+
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(AddressPtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STARTED);
+	} else {
+		Temp1 = (*(Aptr+0)) & 0x3FU;
+		Temp2 = ((*(Aptr+0) >> 6U) & 0x03U) | ((*(Aptr+1) & 0x0FU) << 2U);
+
+		Temp3 = ((*(Aptr+1) >> 4U) & 0x0FU) | ((*(Aptr+2) & 0x3U) << 4U);
+		Temp4 = ((*(Aptr+2) >> 2U) & 0x3FU);
+		Temp5 =   (*(Aptr+3)) & 0x3FU;
+		Temp6 = ((*(Aptr+3) >> 6U) & 0x03U) | ((*(Aptr+4) & 0x0FU) << 2U);
+		Temp7 = ((*(Aptr+4) >> 4U) & 0x0FU) | ((*(Aptr+5) & 0x03U) << 4U);
+		Temp8 = ((*(Aptr+5) >> 2U) & 0x3FU);
+
+		Result = (u32)((u32)Temp1 ^ (u32)Temp2 ^ (u32)Temp3 ^ (u32)Temp4 ^
+				(u32)Temp5 ^ (u32)Temp6 ^ (u32)Temp7 ^ (u32)Temp8);
+
+		if (Result >= (u32)XEMACPS_MAX_HASH_BITS) {
+			Status = (LONG)(XST_INVALID_PARAM);
+		} else {
+
+			if (Result < (u32)32) {
+		HashAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_HASHL_OFFSET);
+				HashAddr |= (u32)(0x00000001U << Result);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_HASHL_OFFSET, HashAddr);
+	} else {
+		HashAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_HASHH_OFFSET);
+				HashAddr |= (u32)(0x00000001U << (u32)(Result - (u32)32));
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_HASHH_OFFSET, HashAddr);
+	}
+			Status = (LONG)(XST_SUCCESS);
+		}
+	}
+	return Status;
+}
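+
+/*
+ * Worked example of the hash computation above (illustrative, using the IPv4
+ * multicast MAC 01:00:5e:00:00:01 with byte 0 passed first): the six-bit
+ * fields come out as Temp1..Temp8 = 0x01, 0x00, 0x20, 0x17, 0x00, 0x00,
+ * 0x10, 0x00, so Result = 0x01 ^ 0x20 ^ 0x17 ^ 0x10 = 0x26 = 38. Since
+ * 38 >= 32, bit 6 of the hash-top register (XEMACPS_HASHH_OFFSET) is set.
+ */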
+
+/*****************************************************************************/
+/**
+ * Delete 48-bit MAC addresses in hash table.
+ * The device must be stopped before calling this function.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is a pointer to a 6-byte MAC address.
+ *
+ * @return
+ * - XST_SUCCESS if the HASH MAC address was deleted successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ * - XST_INVALID_PARAM if the hash index computed from the MAC address passed
+ *   in is out of range
+ *
+ * @note
+ * Having Aptr be an unsigned type prevents the following operations from sign
+ * extending.
+ *****************************************************************************/
+LONG XEmacPs_DeleteHash(XEmacPs *InstancePtr, void *AddressPtr)
+{
+	u32 HashAddr;
+	u8 *Aptr = (u8 *)(void *)AddressPtr;
+	u8 Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8;
+	u32 Result;
+	LONG Status;
+
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(Aptr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STARTED);
+	} else {
+		Temp1 = (*(Aptr+0)) & 0x3FU;
+		Temp2 = ((*(Aptr+0) >> 6U) & 0x03U) | ((*(Aptr+1) & 0x0FU) << 2U);
+		Temp3 = ((*(Aptr+1) >> 4U) & 0x0FU) | ((*(Aptr+2) & 0x03U) << 4U);
+		Temp4 = ((*(Aptr+2) >> 2U) & 0x3FU);
+		Temp5 =   (*(Aptr+3)) & 0x3FU;
+		Temp6 = ((*(Aptr+3) >> 6U) & 0x03U) | ((*(Aptr+4) & 0x0FU) << 2U);
+		Temp7 = ((*(Aptr+4) >> 4U) & 0x0FU) | ((*(Aptr+5) & 0x03U) << 4U);
+		Temp8 = ((*(Aptr+5) >> 2U) & 0x3FU);
+
+		Result = (u32)((u32)Temp1 ^ (u32)Temp2 ^ (u32)Temp3 ^ (u32)Temp4 ^
+					(u32)Temp5 ^ (u32)Temp6 ^ (u32)Temp7 ^ (u32)Temp8);
+
+		if (Result >= (u32)(XEMACPS_MAX_HASH_BITS)) {
+			Status =  (LONG)(XST_INVALID_PARAM);
+		} else {
+			if (Result < (u32)32) {
+		HashAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_HASHL_OFFSET);
+				HashAddr &= (u32)(~(0x00000001U << Result));
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_HASHL_OFFSET, HashAddr);
+	} else {
+		HashAddr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_HASHH_OFFSET);
+				HashAddr &= (u32)(~(0x00000001U << (u32)(Result - (u32)32)));
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_HASHH_OFFSET, HashAddr);
+	}
+			Status = (LONG)(XST_SUCCESS);
+		}
+	}
+	return Status;
+}
+/*****************************************************************************/
+/**
+ * Clear the hash registers. After this call, no frames will be hash matched.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ *****************************************************************************/
+void XEmacPs_ClearHash(XEmacPs *InstancePtr)
+{
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				    XEMACPS_HASHL_OFFSET, 0x0U);
+
+	/* write bits [63:32] in TOP */
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				    XEMACPS_HASHH_OFFSET, 0x0U);
+}
+
+
+/*****************************************************************************/
+/**
+ * Get the Hash address for this driver/device.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param AddressPtr is an output parameter, and is a pointer to a buffer into
+ *        which the current HASH MAC address will be copied.
+ *
+ *****************************************************************************/
+void XEmacPs_GetHash(XEmacPs *InstancePtr, void *AddressPtr)
+{
+	u32 *Aptr = (u32 *)(void *)AddressPtr;
+
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(AddressPtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	*(Aptr+0) = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				    XEMACPS_HASHL_OFFSET);
+
+	/* Read Hash bits [63:32] in TOP */
+	*(Aptr+1) = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				    XEMACPS_HASHH_OFFSET);
+}
+
+
+/*****************************************************************************/
+/**
+ * Set the Type ID match for this driver/device.  The register is a 32-bit
+ * value. The device must be stopped before calling this function.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Id_Check is the type ID to be configured.
+ * @param Index is the Type ID match register index (1-4).
+ *
+ * @return
+ * - XST_SUCCESS if the type ID was set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ *****************************************************************************/
+LONG XEmacPs_SetTypeIdCheck(XEmacPs *InstancePtr, u32 Id_Check, u8 Index)
+{
+	u8 IndexLoc = Index;
+	LONG Status;
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+	Xil_AssertNonvoid((IndexLoc <= (u8)XEMACPS_MAX_TYPE_ID) && (IndexLoc > 0x00U));
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STARTED);
+	} else {
+		/* Index ranges from 1 to 4; for offset calculation it is 0 to 3. */
+		IndexLoc--;
+
+		/* Set the ID bits in the MATCHx register */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				((u32)XEMACPS_MATCH1_OFFSET + ((u32)IndexLoc * (u32)4)),
+				Id_Check);
+
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
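+
+/*
+ * Usage sketch (illustrative; the instance name is assumed, not part of this
+ * driver): with an initialized and stopped instance, match slot 1 could be
+ * programmed with the IPv4 EtherType, for example:
+ *
+ *   Status = XEmacPs_SetTypeIdCheck(&EmacPsInstance, 0x0800U, 1U);
+ */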
+
+/*****************************************************************************/
+/**
+ * Set options for the driver/device. The driver should be stopped with
+ * XEmacPs_Stop() before changing options.
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Options are the options to set. Multiple options can be set by OR'ing
+ *        XEMACPS_*_OPTION constants together. Options not specified are not
+ *        affected.
+ *
+ * @return
+ * - XST_SUCCESS if the options were set successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ * @note
+ * See xemacps.h for a description of the available options.
+ *
+ *****************************************************************************/
+LONG XEmacPs_SetOptions(XEmacPs *InstancePtr, u32 Options)
+{
+	u32 Reg;		/* Generic register contents */
+	u32 RegNetCfg;		/* Reflects original contents of NET_CONFIG */
+	u32 RegNewNetCfg;	/* Reflects new contents of NET_CONFIG */
+	LONG Status;
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STARTED);
+	} else {
+
+	/* Many of these options will change the NET_CONFIG registers.
+	 * To reduce the amount of IO to the device, group these options here
+	 * and change them all at once.
+	 */
+
+	/* Grab current register contents */
+	RegNetCfg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				      XEMACPS_NWCFG_OFFSET);
+	RegNewNetCfg = RegNetCfg;
+
+	/* Enable reception of frames up to 1536 bytes instead of the
+	 * standard 1518-byte maximum.
+	 */
+		if ((Options & XEMACPS_FRAME1536_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= (XEMACPS_NWCFG_1536RXEN_MASK);
+	}
+
+	/* Accept only VLAN-tagged packets; untagged frames are discarded */
+		if ((Options & XEMACPS_VLAN_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_NVLANDISC_MASK;
+	}
+
+	/* Turn on FCS stripping on receive packets */
+		if ((Options & XEMACPS_FCS_STRIP_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_FCSREM_MASK;
+	}
+
+	/* Turn on length/type field checking on receive packets */
+		if ((Options & XEMACPS_LENTYPE_ERR_OPTION) != 0x00000000U) {
+			RegNewNetCfg |= XEMACPS_NWCFG_LENERRDSCRD_MASK;
+	}
+
+	/* Turn on flow control */
+		if ((Options & XEMACPS_FLOW_CONTROL_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_PAUSEEN_MASK;
+	}
+
+	/* Turn on promiscuous frame filtering (all frames are received) */
+		if ((Options & XEMACPS_PROMISC_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_COPYALLEN_MASK;
+	}
+
+	/* Allow broadcast address reception */
+		if ((Options & XEMACPS_BROADCAST_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_BCASTDI_MASK);
+	}
+
+	/* Allow multicast address filtering */
+		if ((Options & XEMACPS_MULTICAST_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_MCASTHASHEN_MASK;
+	}
+
+	/* enable RX checksum offload */
+		if ((Options & XEMACPS_RX_CHKSUM_ENABLE_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_RXCHKSUMEN_MASK;
+	}
+
+	/* Enable jumbo frames */
+	if (((Options & XEMACPS_JUMBO_ENABLE_OPTION) != 0x00000000U) &&
+		(InstancePtr->Version > 2)) {
+		RegNewNetCfg |= XEMACPS_NWCFG_JUMBO_MASK;
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_JUMBOMAXLEN_OFFSET, XEMACPS_RX_BUF_SIZE_JUMBO);
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				      XEMACPS_DMACR_OFFSET);
+		Reg &= ~XEMACPS_DMACR_RXBUF_MASK;
+		Reg |= (((((u32)XEMACPS_RX_BUF_SIZE_JUMBO / (u32)XEMACPS_RX_BUF_UNIT) +
+			(((((u32)XEMACPS_RX_BUF_SIZE_JUMBO %
+			(u32)XEMACPS_RX_BUF_UNIT))!=(u32)0) ? 1U : 0U)) <<
+			(u32)(XEMACPS_DMACR_RXBUF_SHIFT)) &
+			(u32)(XEMACPS_DMACR_RXBUF_MASK));
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_DMACR_OFFSET, Reg);
+		InstancePtr->MaxMtuSize = XEMACPS_MTU_JUMBO;
+		InstancePtr->MaxFrameSize = XEMACPS_MTU_JUMBO +
+					XEMACPS_HDR_SIZE + XEMACPS_TRL_SIZE;
+		InstancePtr->MaxVlanFrameSize = InstancePtr->MaxFrameSize +
+					XEMACPS_HDR_VLAN_SIZE;
+		InstancePtr->RxBufMask = XEMACPS_RXBUF_LEN_JUMBO_MASK;
+	}
+
+	if (((Options & XEMACPS_SGMII_ENABLE_OPTION) != 0x00000000U) &&
+		(InstancePtr->Version > 2)) {
+		RegNewNetCfg |= (XEMACPS_NWCFG_SGMIIEN_MASK |
+						XEMACPS_NWCFG_PCSSEL_MASK);
+	}
+
+	/* Write the NET_CONFIG register only if its contents actually
+	 * changed.
+	 */
+	if (RegNetCfg != RegNewNetCfg) {
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_NWCFG_OFFSET, RegNewNetCfg);
+	}
+
+	/* Enable TX checksum offload */
+		if ((Options & XEMACPS_TX_CHKSUM_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_DMACR_OFFSET);
+		Reg |= XEMACPS_DMACR_TCPCKSUM_MASK;
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+					 XEMACPS_DMACR_OFFSET, Reg);
+	}
+
+	/* Enable transmitter */
+		if ((Options & XEMACPS_TRANSMITTER_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+		Reg |= XEMACPS_NWCTRL_TXEN_MASK;
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_NWCTRL_OFFSET, Reg);
+	}
+
+	/* Enable receiver */
+		if ((Options & XEMACPS_RECEIVER_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+		Reg |= XEMACPS_NWCTRL_RXEN_MASK;
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_NWCTRL_OFFSET, Reg);
+	}
+
+	/* The remaining options not handled here are managed elsewhere in the
+	 * driver. No register modifications are needed at this time. Reflecting
+	 * the option in InstancePtr->Options is good enough for now.
+	 */
+
+	/* Set options word to its new value */
+	InstancePtr->Options |= Options;
+
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
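+
+/*
+ * Usage sketch (illustrative; EmacPsInstance is assumed to be an initialized,
+ * stopped XEmacPs instance): several options can be OR'ed into one call, and
+ * XEmacPs_ClearOptions() below takes the same masks to undo them, e.g.:
+ *
+ *   Status = XEmacPs_SetOptions(&EmacPsInstance,
+ *                               XEMACPS_PROMISC_OPTION |
+ *                               XEMACPS_RX_CHKSUM_ENABLE_OPTION);
+ *   if (Status != XST_SUCCESS) {
+ *       // handle XST_DEVICE_IS_STARTED
+ *   }
+ */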
+
+
+/*****************************************************************************/
+/**
+ * Clear options for the driver/device
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Options are the options to clear. Multiple options can be cleared by
+ *        OR'ing XEMACPS_*_OPTIONS constants together. Options not specified
+ *        are not affected.
+ *
+ * @return
+ * - XST_SUCCESS if the options were cleared successfully
+ * - XST_DEVICE_IS_STARTED if the device has not yet been stopped
+ *
+ * @note
+ * See xemacps.h for a description of the available options.
+ *
+ *****************************************************************************/
+LONG XEmacPs_ClearOptions(XEmacPs *InstancePtr, u32 Options)
+{
+	u32 Reg;		/* Generic */
+	u32 RegNetCfg;		/* Reflects original contents of NET_CONFIG */
+	u32 RegNewNetCfg;	/* Reflects new contents of NET_CONFIG */
+	LONG Status;
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Be sure device has been stopped */
+	if (InstancePtr->IsStarted == (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STARTED);
+	} else {
+
+	/* Many of these options will change the NET_CONFIG registers.
+	 * To reduce the amount of IO to the device, group these options here
+	 * and change them all at once.
+	 */
+
+	/* Grab current register contents */
+	RegNetCfg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				      XEMACPS_NWCFG_OFFSET);
+	RegNewNetCfg = RegNetCfg;
+
+	/* Disable reception of 1536-byte frames; the receiver reverts to the
+	 * standard maximum frame length. (Jumbo reception up to 10240 bytes is
+	 * controlled separately by XEMACPS_JUMBO_ENABLE_OPTION below.)
+	 */
+		if ((Options & XEMACPS_FRAME1536_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_1536RXEN_MASK);
+	}
+
+	/* Turn off VLAN packet only */
+		if ((Options & XEMACPS_VLAN_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_NVLANDISC_MASK);
+	}
+
+	/* Turn off FCS stripping on receive packets */
+		if ((Options & XEMACPS_FCS_STRIP_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_FCSREM_MASK);
+	}
+
+	/* Turn off length/type field checking on receive packets */
+		if ((Options & XEMACPS_LENTYPE_ERR_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_LENERRDSCRD_MASK);
+	}
+
+	/* Turn off flow control */
+		if ((Options & XEMACPS_FLOW_CONTROL_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_PAUSEEN_MASK);
+	}
+
+	/* Turn off promiscuous frame filtering (all frames are received) */
+		if ((Options & XEMACPS_PROMISC_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_COPYALLEN_MASK);
+	}
+
+	/* Disable broadcast frame reception */
+		if ((Options & XEMACPS_BROADCAST_OPTION) != 0x00000000U) {
+		RegNewNetCfg |= XEMACPS_NWCFG_BCASTDI_MASK;
+	}
+
+	/* Disallow multicast address filtering */
+		if ((Options & XEMACPS_MULTICAST_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_MCASTHASHEN_MASK);
+	}
+
+	/* Disable RX checksum offload */
+		if ((Options & XEMACPS_RX_CHKSUM_ENABLE_OPTION) != 0x00000000U) {
+			RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_RXCHKSUMEN_MASK);
+	}
+
+	/* Disable jumbo frames */
+	if (((Options & XEMACPS_JUMBO_ENABLE_OPTION) != 0x00000000U) &&
+		(InstancePtr->Version > 2)) {
+		RegNewNetCfg &= (u32)(~XEMACPS_NWCFG_JUMBO_MASK);
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				      XEMACPS_DMACR_OFFSET);
+		Reg &= ~XEMACPS_DMACR_RXBUF_MASK;
+		Reg |= (((((u32)XEMACPS_RX_BUF_SIZE / (u32)XEMACPS_RX_BUF_UNIT) +
+			(((((u32)XEMACPS_RX_BUF_SIZE %
+			(u32)XEMACPS_RX_BUF_UNIT))!=(u32)0) ? 1U : 0U)) <<
+			(u32)(XEMACPS_DMACR_RXBUF_SHIFT)) &
+			(u32)(XEMACPS_DMACR_RXBUF_MASK));
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_DMACR_OFFSET, Reg);
+		InstancePtr->MaxMtuSize = XEMACPS_MTU;
+		InstancePtr->MaxFrameSize = XEMACPS_MTU +
+					XEMACPS_HDR_SIZE + XEMACPS_TRL_SIZE;
+		InstancePtr->MaxVlanFrameSize = InstancePtr->MaxFrameSize +
+					XEMACPS_HDR_VLAN_SIZE;
+		InstancePtr->RxBufMask = XEMACPS_RXBUF_LEN_MASK;
+	}
+
+	if (((Options & XEMACPS_SGMII_ENABLE_OPTION) != 0x00000000U) &&
+		(InstancePtr->Version > 2)) {
+		RegNewNetCfg &= (u32)(~(XEMACPS_NWCFG_SGMIIEN_MASK |
+						XEMACPS_NWCFG_PCSSEL_MASK));
+	}
+
+	/* Write the NET_CONFIG register only if its contents actually
+	 * changed.
+	 */
+	if (RegNetCfg != RegNewNetCfg) {
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_NWCFG_OFFSET, RegNewNetCfg);
+	}
+
+	/* Disable TX checksum offload */
+		if ((Options & XEMACPS_TX_CHKSUM_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_DMACR_OFFSET);
+			Reg &= (u32)(~XEMACPS_DMACR_TCPCKSUM_MASK);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+					 XEMACPS_DMACR_OFFSET, Reg);
+	}
+
+	/* Disable transmitter */
+		if ((Options & XEMACPS_TRANSMITTER_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+			Reg &= (u32)(~XEMACPS_NWCTRL_TXEN_MASK);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_NWCTRL_OFFSET, Reg);
+	}
+
+	/* Disable receiver */
+		if ((Options & XEMACPS_RECEIVER_ENABLE_OPTION) != 0x00000000U) {
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET);
+			Reg &= (u32)(~XEMACPS_NWCTRL_RXEN_MASK);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_NWCTRL_OFFSET, Reg);
+	}
+
+	/* The remaining options not handled here are managed elsewhere in the
+	 * driver. No register modifications are needed at this time. Reflecting
+	 * option in InstancePtr->Options is good enough for now.
+	 */
+
+	/* Set options word to its new value */
+	InstancePtr->Options &= ~Options;
+
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * Get current option settings
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return
+ * A bitmask of XEMACPS_*_OPTION constants. Any bit set to 1 is to be interpreted
+ * as a set option.
+ *
+ * @note
+ * See xemacps.h for a description of the available options.
+ *
+ *****************************************************************************/
+u32 XEmacPs_GetOptions(XEmacPs *InstancePtr)
+{
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	return (InstancePtr->Options);
+}
+
+
+/*****************************************************************************/
+/**
+ * Send a pause packet
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ *
+ * @return
+ * - XST_SUCCESS if pause frame transmission was initiated
+ * - XST_DEVICE_IS_STOPPED if the device has not been started.
+ *
+ *****************************************************************************/
+LONG XEmacPs_SendPausePacket(XEmacPs *InstancePtr)
+{
+	u32 Reg;
+	LONG Status;
+
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* Make sure device is ready for this operation */
+	if (InstancePtr->IsStarted != (u32)XIL_COMPONENT_IS_STARTED) {
+		Status = (LONG)(XST_DEVICE_IS_STOPPED);
+	} else {
+		/* Send flow control frame */
+		Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWCTRL_OFFSET);
+		Reg |= XEMACPS_NWCTRL_PAUSETX_MASK;
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWCTRL_OFFSET, Reg);
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
+
+/*****************************************************************************/
+/**
+ * XEmacPs_GetOperatingSpeed gets the current operating link speed. This may
+ * be the value set by XEmacPs_SetOperatingSpeed() or a hardware default.
+ *
+ * @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+ *
+ * @return XEmacPs_GetOperatingSpeed returns the link speed in units of
+ *         megabits per second.
+ *
+ * @note
+ *
+ *****************************************************************************/
+u16 XEmacPs_GetOperatingSpeed(XEmacPs *InstancePtr)
+{
+	u32 Reg;
+	u16 Status;
+
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_NWCFG_OFFSET);
+
+	if ((Reg & XEMACPS_NWCFG_1000_MASK) != 0x00000000U) {
+		Status = (u16)(1000);
+	} else {
+		if ((Reg & XEMACPS_NWCFG_100_MASK) != 0x00000000U) {
+			Status = (u16)(100);
+		} else {
+			Status = (u16)(10);
+		}
+	}
+	return Status;
+}
+
+
+/*****************************************************************************/
+/**
+ * XEmacPs_SetOperatingSpeed sets the current operating link speed. For any
+ * traffic to be passed, this speed must match the current MII/GMII/SGMII/RGMII
+ * link speed.
+ *
+ * @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+ * @param Speed is the speed to set in units of Mbps. Valid values are 10, 100,
+ *        or 1000. XEmacPs_SetOperatingSpeed ignores invalid values.
+ *
+ * @note
+ *
+ *****************************************************************************/
+void XEmacPs_SetOperatingSpeed(XEmacPs *InstancePtr, u16 Speed)
+{
+	u32 Reg;
+
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+	Xil_AssertVoid((Speed == (u16)10) || (Speed == (u16)100) ||
+			(Speed == (u16)1000));
+
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_NWCFG_OFFSET);
+	Reg &= (u32)(~(XEMACPS_NWCFG_1000_MASK | XEMACPS_NWCFG_100_MASK));
+
+	switch (Speed) {
+	case (u16)10:
+		break;
+
+	case (u16)100:
+		Reg |= XEMACPS_NWCFG_100_MASK;
+		break;
+
+	case (u16)1000:
+		Reg |= XEMACPS_NWCFG_1000_MASK;
+		break;
+	}
+
+	/* Set register and return */
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			XEMACPS_NWCFG_OFFSET, Reg);
+}
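+
+/*
+ * Usage sketch (illustrative): after the PHY negotiates a link, the MAC must
+ * be told the same speed, e.g. for a 100 Mbps link:
+ *
+ *   XEmacPs_SetOperatingSpeed(&EmacPsInstance, 100U);
+ *   if (XEmacPs_GetOperatingSpeed(&EmacPsInstance) != 100U) {
+ *       // unexpected; the NWCFG update did not take effect
+ *   }
+ */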
+
+
+/*****************************************************************************/
+/**
+ * Set the MDIO clock divisor.
+ *
+ * Calculating the divisor:
+ *
+ * <pre>
+ *              f[HOSTCLK]
+ *   f[MDC] = -----------------
+ *            (1 + Divisor) * 2
+ * </pre>
+ *
+ * where f[HOSTCLK] is the host bus clock (pclk) frequency and f[MDC] is the
+ * resulting MDIO clock frequency to the PHY. Typically f[MDC] must not
+ * exceed 2.5 MHz, although some PHYs tolerate faster clocks and therefore
+ * faster access. The table below shows the divisor encodings used to
+ * generate MDC:
+ *
+ * <pre>
+ * 000 : divide pclk by   8 (pclk up to  20 MHz)
+ * 001 : divide pclk by  16 (pclk up to  40 MHz)
+ * 010 : divide pclk by  32 (pclk up to  80 MHz)
+ * 011 : divide pclk by  48 (pclk up to 120 MHz)
+ * 100 : divide pclk by  64 (pclk up to 160 MHz)
+ * 101 : divide pclk by  96 (pclk up to 240 MHz)
+ * 110 : divide pclk by 128 (pclk up to 320 MHz)
+ * 111 : divide pclk by 224 (pclk up to 540 MHz)
+ * </pre>
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param Divisor is the divisor to set. Range is 0b000 to 0b111.
+ *
+ *****************************************************************************/
+void XEmacPs_SetMdioDivisor(XEmacPs *InstancePtr, XEmacPs_MdcDiv Divisor)
+{
+	u32 Reg;
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+	Xil_AssertVoid(Divisor <= (XEmacPs_MdcDiv)0x7); /* only last three bits are valid */
+
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWCFG_OFFSET);
+	/* Clear the MDC clock divisor bits */
+	Reg &= (u32)(~XEMACPS_NWCFG_MDCCLKDIV_MASK);
+
+	Reg |= ((u32)Divisor << XEMACPS_NWCFG_MDC_SHIFT_MASK);
+
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+			   XEMACPS_NWCFG_OFFSET, Reg);
+}
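+
+/*
+ * Usage sketch (illustrative): pick the smallest divisor from the table above
+ * that keeps MDC at or below 2.5 MHz for the actual pclk. For example, with a
+ * pclk of roughly 111 MHz (a common Zynq-7000 setting), MDC_DIV_48 yields
+ * about 2.3 MHz:
+ *
+ *   XEmacPs_SetMdioDivisor(&EmacPsInstance, MDC_DIV_48);
+ */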
+
+
+/*****************************************************************************/
+/**
+* Read the current value of the PHY register indicated by the PhyAddress and
+* the RegisterNum parameters. The MAC provides the driver with the ability to
+* talk to a PHY that adheres to the Media Independent Interface (MII) as
+* defined in the IEEE 802.3 standard.
+*
+* Prior to PHY access with this function, the user should have set up the MDIO
+* clock with XEmacPs_SetMdioDivisor().
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+* @param PhyAddress is the address of the PHY to be read (supports multiple
+*        PHYs)
+* @param RegisterNum is the register number, 0-31, of the specific PHY register
+*        to read
+* @param PhyDataPtr is an output parameter, and points to a 16-bit buffer into
+*        which the current value of the register will be copied.
+*
+* @return
+*
+* - XST_SUCCESS if the PHY was read from successfully
+* - XST_EMAC_MII_BUSY if there is another PHY operation in progress
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+*
+* There is the possibility that this function will not return if the hardware
+* is broken (i.e., it never sets the status bit indicating that the read is
+* done). If this is of concern to the user, the user should provide a mechanism
+* suitable to their needs for recovery.
+*
+* For the duration of this function, all host interface reads and writes are
+* blocked to the current XEmacPs instance.
+*
+******************************************************************************/
+LONG XEmacPs_PhyRead(XEmacPs *InstancePtr, u32 PhyAddress,
+		     u32 RegisterNum, u16 *PhyDataPtr)
+{
+	u32 Mgtcr;
+	volatile u32 Ipisr;
+	u32 IpReadTemp;
+	LONG Status;
+
+	Xil_AssertNonvoid(InstancePtr != NULL);
+
+	/* Make sure no other PHY operation is currently in progress */
+	if ((!(XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWSR_OFFSET) &
+	      XEMACPS_NWSR_MDIOIDLE_MASK))==TRUE) {
+		Status = (LONG)(XST_EMAC_MII_BUSY);
+	} else {
+
+		/* Construct Mgtcr mask for the operation */
+		Mgtcr = XEMACPS_PHYMNTNC_OP_MASK | XEMACPS_PHYMNTNC_OP_R_MASK |
+				(PhyAddress << XEMACPS_PHYMNTNC_PHAD_SHFT_MSK) |
+				(RegisterNum << XEMACPS_PHYMNTNC_PREG_SHFT_MSK);
+
+		/* Write Mgtcr and wait for completion */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_PHYMNTNC_OFFSET, Mgtcr);
+
+		do {
+			Ipisr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWSR_OFFSET);
+			IpReadTemp = Ipisr;
+		} while ((IpReadTemp & XEMACPS_NWSR_MDIOIDLE_MASK) == 0x00000000U);
+
+		/* Read data */
+		*PhyDataPtr = (u16)XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_PHYMNTNC_OFFSET);
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
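+
+/*
+ * Usage sketch (illustrative; the PHY address 7 is an assumption, not part of
+ * this driver): read the IEEE-defined PHY identifier registers 2 and 3, e.g.:
+ *
+ *   u16 PhyIdHigh, PhyIdLow;
+ *
+ *   Status = XEmacPs_PhyRead(&EmacPsInstance, 7U, 2U, &PhyIdHigh);
+ *   if (Status == XST_SUCCESS) {
+ *       Status = XEmacPs_PhyRead(&EmacPsInstance, 7U, 3U, &PhyIdLow);
+ *   }
+ */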
+
+
+/*****************************************************************************/
+/**
+* Write data to the specified PHY register. The Ethernet driver does not
+* require the device to be stopped before writing to the PHY.  Although it is
+* probably a good idea to stop the device, it is the responsibility of the
+* application to deem this necessary. The MAC provides the driver with the
+* ability to talk to a PHY that adheres to the Media Independent Interface
+* (MII) as defined in the IEEE 802.3 standard.
+*
+* Prior to PHY access with this function, the user should have set up the MDIO
+* clock with XEmacPs_SetMdioDivisor().
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+* @param PhyAddress is the address of the PHY to be written (supports multiple
+*        PHYs)
+* @param RegisterNum is the register number, 0-31, of the specific PHY register
+*        to write
+* @param PhyData is the 16-bit value that will be written to the register
+*
+* @return
+*
+* - XST_SUCCESS if the PHY was written to successfully. Since there is no error
+*   status from the MAC on a write, the user should read the PHY to verify the
+*   write was successful.
+* - XST_EMAC_MII_BUSY if there is another PHY operation in progress
+*
+* @note
+*
+* This function is not thread-safe. The user must provide mutually exclusive
+* access to this function if there are to be multiple threads that can call it.
+*
+* There is the possibility that this function will not return if the hardware
+* is broken (i.e., it never sets the status bit indicating that the write is
+* done). If this is of concern to the user, the user should provide a mechanism
+* suitable to their needs for recovery.
+*
+* For the duration of this function, all host interface reads and writes are
+* blocked to the current XEmacPs instance.
+*
+******************************************************************************/
+LONG XEmacPs_PhyWrite(XEmacPs *InstancePtr, u32 PhyAddress,
+		      u32 RegisterNum, u16 PhyData)
+{
+	u32 Mgtcr;
+	volatile u32 Ipisr;
+	u32 IpWriteTemp;
+	LONG Status;
+
+	Xil_AssertNonvoid(InstancePtr != NULL);
+
+	/* Make sure no other PHY operation is currently in progress */
+	if ((!(XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_NWSR_OFFSET) &
+	      XEMACPS_NWSR_MDIOIDLE_MASK))==TRUE) {
+		Status = (LONG)(XST_EMAC_MII_BUSY);
+	} else {
+		/* Construct Mgtcr mask for the operation */
+		Mgtcr = XEMACPS_PHYMNTNC_OP_MASK | XEMACPS_PHYMNTNC_OP_W_MASK |
+				(PhyAddress << XEMACPS_PHYMNTNC_PHAD_SHFT_MSK) |
+				(RegisterNum << XEMACPS_PHYMNTNC_PREG_SHFT_MSK) | (u32)PhyData;
+
+		/* Write Mgtcr and wait for completion */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				XEMACPS_PHYMNTNC_OFFSET, Mgtcr);
+
+		do {
+			Ipisr = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWSR_OFFSET);
+			IpWriteTemp = Ipisr;
+		} while ((IpWriteTemp & XEMACPS_NWSR_MDIOIDLE_MASK) == 0x00000000U);
+
+		Status = (LONG)(XST_SUCCESS);
+	}
+	return Status;
+}
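+
+/*
+ * Usage sketch (illustrative; PhyAddr is assumed to hold the PHY address):
+ * restart auto-negotiation through the standard BMCR (PHY register 0) with a
+ * read-modify-write; per IEEE 802.3 clause 22, bit 12 (0x1000) enables
+ * auto-negotiation and bit 9 (0x0200) restarts it:
+ *
+ *   u16 Bmcr;
+ *
+ *   if (XEmacPs_PhyRead(&EmacPsInstance, PhyAddr, 0U, &Bmcr) == XST_SUCCESS) {
+ *       Bmcr |= (u16)(0x1000U | 0x0200U);
+ *       (void)XEmacPs_PhyWrite(&EmacPsInstance, PhyAddr, 0U, Bmcr);
+ *   }
+ */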
+
+/*****************************************************************************/
+/**
+* Update the DMA burst length in the DMACR register.
+*
+* @param InstancePtr is a pointer to the XEmacPs instance to be worked on.
+* @param BLength is the length in bytes for the DMA burst.
+*
+* @return None
+*
+******************************************************************************/
+void XEmacPs_DMABLengthUpdate(XEmacPs *InstancePtr, s32 BLength)
+{
+	u32 Reg;
+	u32 RegUpdateVal = 0;
+
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid((BLength == XEMACPS_SINGLE_BURST) ||
+					(BLength == XEMACPS_4BYTE_BURST) ||
+					(BLength == XEMACPS_8BYTE_BURST) ||
+					(BLength == XEMACPS_16BYTE_BURST));
+
+	switch (BLength) {
+		case XEMACPS_SINGLE_BURST:
+			RegUpdateVal = XEMACPS_DMACR_SINGLE_AHB_BURST;
+			break;
+
+		case XEMACPS_4BYTE_BURST:
+			RegUpdateVal = XEMACPS_DMACR_INCR4_AHB_BURST;
+			break;
+
+		case XEMACPS_8BYTE_BURST:
+			RegUpdateVal = XEMACPS_DMACR_INCR8_AHB_BURST;
+			break;
+
+		case XEMACPS_16BYTE_BURST:
+			RegUpdateVal = XEMACPS_DMACR_INCR16_AHB_BURST;
+			break;
+	}
+	Reg = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+						XEMACPS_DMACR_OFFSET);
+
+	Reg &= (u32)(~XEMACPS_DMACR_BLENGTH_MASK);
+	Reg |= RegUpdateVal;
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_DMACR_OFFSET,
+																	Reg);
+}
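+
+/*
+ * Usage sketch (illustrative): select the INCR16 AHB burst length for DMA
+ * transfers, for example:
+ *
+ *   XEmacPs_DMABLengthUpdate(&EmacPsInstance, XEMACPS_16BYTE_BURST);
+ */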
+/** @} */
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_hw.h b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_hw.h
new file mode 100644
index 0000000..701c8a9
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_hw.h
@@ -0,0 +1,646 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xemacps_hw.h
+* @addtogroup emacps_v3_16
+* @{
+*
+* This header file contains identifiers and low-level driver functions (or
+* macros) that can be used to access the PS Ethernet MAC (XEmacPs) device.
+* High-level driver functions are defined in xemacps.h.
+*
+* @note
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a wsy  01/10/10 First release.
+* 1.02a asa  11/05/12 Added hash defines for DMACR burst length configuration.
+* 1.05a kpc  28/06/13 Added XEmacPs_ResetHw function prototype
+* 1.06a asa  11/02/13 Changed the value for XEMACPS_RXBUF_LEN_MASK from 0x3fff
+*					  to 0x1fff. This fixes the CR#744902.
+* 2.1   srt  07/15/14 Add support for Zynq Ultrascale Mp GEM specification.
+* 3.0   kvn  12/16/14 Changed name of XEMACPS_NWCFG_LENGTHERRDSCRD_MASK to
+*					  XEMACPS_NWCFG_LENERRDSCRD_MASK as it exceeds 31 characters.
+* 3.0  kpc   1/23/15  Corrected the extended descriptor macro values.
+* 3.0  kvn   02/13/15 Modified code for MISRA-C:2012 compliance.
+* 3.0  hk   03/18/15 Added support for jumbo frames.
+*                    Remove "used bit set" from TX error interrupt masks.
+* 3.1  hk   08/10/15 Update upper 32 bit tx and rx queue ptr register offsets.
+* 3.2   hk   02/22/16 Added SGMII support for Zynq Ultrascale+ MPSoC.
+* 3.8  hk   09/17/18 Fix PTP interrupt masks.
+* 3.9  hk   01/23/19 Add RX watermark support
+* 3.10 hk   05/16/19 Clear status registers properly in reset
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XEMACPS_HW_H		/* prevent circular inclusions */
+#define XEMACPS_HW_H		/* by using protection macros */
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+#include "xil_io.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************** Constant Definitions *****************************/
+
+#define XEMACPS_MAX_MAC_ADDR     4U   /**< Maximum number of MAC addresses
+                                           supported */
+#define XEMACPS_MAX_TYPE_ID      4U   /**< Maximum number of type IDs supported */
+
+#ifdef __aarch64__
+#define XEMACPS_BD_ALIGNMENT     64U   /**< Minimum buffer descriptor alignment
+                                           on the local bus */
+#else
+
+#define XEMACPS_BD_ALIGNMENT     4U   /**< Minimum buffer descriptor alignment
+                                           on the local bus */
+#endif
+#define XEMACPS_RX_BUF_ALIGNMENT 4U   /**< Minimum buffer alignment when using
+                                           options that impose alignment
+                                           restrictions on the buffer data on
+                                           the local bus */
+
+/** @name Direction identifiers
+ *
+ *  These are used by several functions and callbacks that need
+ *  to specify whether an operation specifies a send or receive channel.
+ * @{
+ */
+#define XEMACPS_SEND        1U	      /**< send direction */
+#define XEMACPS_RECV        2U	      /**< receive direction */
+/*@}*/
+
+/**  @name MDC clock division
+ *  currently supporting 8, 16, 32, 48, 64, 96, 128, 224.
+ * @{
+ */
+typedef enum { MDC_DIV_8 = 0U, MDC_DIV_16, MDC_DIV_32, MDC_DIV_48,
+	MDC_DIV_64, MDC_DIV_96, MDC_DIV_128, MDC_DIV_224
+} XEmacPs_MdcDiv;
+
+/*@}*/
+
+#define XEMACPS_RX_BUF_SIZE 1536U /**< Specify the receive buffer size in
+                                       bytes, 64, 128, ... 10240 */
+#define XEMACPS_RX_BUF_SIZE_JUMBO 10240U
+
+#define XEMACPS_RX_BUF_UNIT   64U /**< Receive buffer length unit in bytes;
+                                       this granularity is fixed by HW */
+
+#define XEMACPS_MAX_RXBD     128U /**< Size of RX buffer descriptor queues */
+#define XEMACPS_MAX_TXBD     128U /**< Size of TX buffer descriptor queues */
+
+#define XEMACPS_MAX_HASH_BITS 64U /**< Maximum value for hash bits. 2**6 */
+
+/* Register offset definitions. Unless otherwise noted, register access is
+ * 32 bit. Names are self-explanatory.
+ */
+
+#define XEMACPS_NWCTRL_OFFSET        0x00000000U /**< Network Control reg */
+#define XEMACPS_NWCFG_OFFSET         0x00000004U /**< Network Config reg */
+#define XEMACPS_NWSR_OFFSET          0x00000008U /**< Network Status reg */
+
+#define XEMACPS_DMACR_OFFSET         0x00000010U /**< DMA Control reg */
+#define XEMACPS_TXSR_OFFSET          0x00000014U /**< TX Status reg */
+#define XEMACPS_RXQBASE_OFFSET       0x00000018U /**< RX Q Base address reg */
+#define XEMACPS_TXQBASE_OFFSET       0x0000001CU /**< TX Q Base address reg */
+#define XEMACPS_RXSR_OFFSET          0x00000020U /**< RX Status reg */
+
+#define XEMACPS_ISR_OFFSET           0x00000024U /**< Interrupt Status reg */
+#define XEMACPS_IER_OFFSET           0x00000028U /**< Interrupt Enable reg */
+#define XEMACPS_IDR_OFFSET           0x0000002CU /**< Interrupt Disable reg */
+#define XEMACPS_IMR_OFFSET           0x00000030U /**< Interrupt Mask reg */
+
+#define XEMACPS_PHYMNTNC_OFFSET      0x00000034U /**< PHY Maintenance reg */
+#define XEMACPS_RXPAUSE_OFFSET       0x00000038U /**< RX Pause Time reg */
+#define XEMACPS_TXPAUSE_OFFSET       0x0000003CU /**< TX Pause Time reg */
+
+#define XEMACPS_JUMBOMAXLEN_OFFSET   0x00000048U /**< Jumbo max length reg */
+
+#define XEMACPS_RXWATERMARK_OFFSET   0x0000007CU /**< RX watermark reg */
+
+#define XEMACPS_HASHL_OFFSET         0x00000080U /**< Hash Low address reg */
+#define XEMACPS_HASHH_OFFSET         0x00000084U /**< Hash High address reg */
+
+#define XEMACPS_LADDR1L_OFFSET       0x00000088U /**< Specific1 addr low reg */
+#define XEMACPS_LADDR1H_OFFSET       0x0000008CU /**< Specific1 addr high reg */
+#define XEMACPS_LADDR2L_OFFSET       0x00000090U /**< Specific2 addr low reg */
+#define XEMACPS_LADDR2H_OFFSET       0x00000094U /**< Specific2 addr high reg */
+#define XEMACPS_LADDR3L_OFFSET       0x00000098U /**< Specific3 addr low reg */
+#define XEMACPS_LADDR3H_OFFSET       0x0000009CU /**< Specific3 addr high reg */
+#define XEMACPS_LADDR4L_OFFSET       0x000000A0U /**< Specific4 addr low reg */
+#define XEMACPS_LADDR4H_OFFSET       0x000000A4U /**< Specific4 addr high reg */
+
+#define XEMACPS_MATCH1_OFFSET        0x000000A8U /**< Type ID1 Match reg */
+#define XEMACPS_MATCH2_OFFSET        0x000000ACU /**< Type ID2 Match reg */
+#define XEMACPS_MATCH3_OFFSET        0x000000B0U /**< Type ID3 Match reg */
+#define XEMACPS_MATCH4_OFFSET        0x000000B4U /**< Type ID4 Match reg */
+
+#define XEMACPS_STRETCH_OFFSET       0x000000BCU /**< IPG Stretch reg */
+
+#define XEMACPS_OCTTXL_OFFSET        0x00000100U /**< Octets transmitted Low
+                                                      reg */
+#define XEMACPS_OCTTXH_OFFSET        0x00000104U /**< Octets transmitted High
+                                                      reg */
+
+#define XEMACPS_TXCNT_OFFSET         0x00000108U /**< Error-free Frames
+                                                      transmitted counter */
+#define XEMACPS_TXBCCNT_OFFSET       0x0000010CU /**< Error-free Broadcast
+                                                      Frames counter*/
+#define XEMACPS_TXMCCNT_OFFSET       0x00000110U /**< Error-free Multicast
+                                                      Frame counter */
+#define XEMACPS_TXPAUSECNT_OFFSET    0x00000114U /**< Pause Frames Transmitted
+                                                      Counter */
+#define XEMACPS_TX64CNT_OFFSET       0x00000118U /**< Error-free 64 byte Frames
+                                                      Transmitted counter */
+#define XEMACPS_TX65CNT_OFFSET       0x0000011CU /**< Error-free 65-127 byte
+                                                      Frames Transmitted
+                                                      counter */
+#define XEMACPS_TX128CNT_OFFSET      0x00000120U /**< Error-free 128-255 byte
+                                                      Frames Transmitted
+                                                      counter*/
+#define XEMACPS_TX256CNT_OFFSET      0x00000124U /**< Error-free 256-511 byte
+                                                      Frames transmitted
+                                                      counter */
+#define XEMACPS_TX512CNT_OFFSET      0x00000128U /**< Error-free 512-1023 byte
+                                                      Frames transmitted
+                                                      counter */
+#define XEMACPS_TX1024CNT_OFFSET     0x0000012CU /**< Error-free 1024-1518 byte
+                                                      Frames transmitted
+                                                      counter */
+#define XEMACPS_TX1519CNT_OFFSET     0x00000130U /**< Error-free larger than
+                                                      1519 byte Frames
+                                                      transmitted counter */
+#define XEMACPS_TXURUNCNT_OFFSET     0x00000134U /**< TX under run error
+                                                      counter */
+
+#define XEMACPS_SNGLCOLLCNT_OFFSET   0x00000138U /**< Single Collision Frame
+                                                      Counter */
+#define XEMACPS_MULTICOLLCNT_OFFSET  0x0000013CU /**< Multiple Collision Frame
+                                                      Counter */
+#define XEMACPS_EXCESSCOLLCNT_OFFSET 0x00000140U /**< Excessive Collision Frame
+                                                      Counter */
+#define XEMACPS_LATECOLLCNT_OFFSET   0x00000144U /**< Late Collision Frame
+                                                      Counter */
+#define XEMACPS_TXDEFERCNT_OFFSET    0x00000148U /**< Deferred Transmission
+                                                      Frame Counter */
+#define XEMACPS_TXCSENSECNT_OFFSET   0x0000014CU /**< Transmit Carrier Sense
+                                                      Error Counter */
+
+#define XEMACPS_OCTRXL_OFFSET        0x00000150U /**< Octets Received register
+                                                      Low */
+#define XEMACPS_OCTRXH_OFFSET        0x00000154U /**< Octets Received register
+                                                      High */
+
+#define XEMACPS_RXCNT_OFFSET         0x00000158U /**< Error-free Frames
+                                                      Received Counter */
+#define XEMACPS_RXBROADCNT_OFFSET    0x0000015CU /**< Error-free Broadcast
+                                                      Frames Received Counter */
+#define XEMACPS_RXMULTICNT_OFFSET    0x00000160U /**< Error-free Multicast
+                                                      Frames Received Counter */
+#define XEMACPS_RXPAUSECNT_OFFSET    0x00000164U /**< Pause Frames
+                                                      Received Counter */
+#define XEMACPS_RX64CNT_OFFSET       0x00000168U /**< Error-free 64 byte Frames
+                                                      Received Counter */
+#define XEMACPS_RX65CNT_OFFSET       0x0000016CU /**< Error-free 65-127 byte
+                                                      Frames Received Counter */
+#define XEMACPS_RX128CNT_OFFSET      0x00000170U /**< Error-free 128-255 byte
+                                                      Frames Received Counter */
+#define XEMACPS_RX256CNT_OFFSET      0x00000174U /**< Error-free 256-512 byte
+                                                      Frames Received Counter */
+#define XEMACPS_RX512CNT_OFFSET      0x00000178U /**< Error-free 512-1023 byte
+                                                      Frames Received Counter */
+#define XEMACPS_RX1024CNT_OFFSET     0x0000017CU /**< Error-free 1024-1518 byte
+                                                      Frames Received Counter */
+#define XEMACPS_RX1519CNT_OFFSET     0x00000180U /**< Error-free 1519-max byte
+                                                      Frames Received Counter */
+#define XEMACPS_RXUNDRCNT_OFFSET     0x00000184U /**< Undersize Frames Received
+                                                      Counter */
+#define XEMACPS_RXOVRCNT_OFFSET      0x00000188U /**< Oversize Frames Received
+                                                      Counter */
+#define XEMACPS_RXJABCNT_OFFSET      0x0000018CU /**< Jabbers Received
+                                                      Counter */
+#define XEMACPS_RXFCSCNT_OFFSET      0x00000190U /**< Frame Check Sequence
+                                                      Error Counter */
+#define XEMACPS_RXLENGTHCNT_OFFSET   0x00000194U /**< Length Field Error
+                                                      Counter */
+#define XEMACPS_RXSYMBCNT_OFFSET     0x00000198U /**< Symbol Error Counter */
+#define XEMACPS_RXALIGNCNT_OFFSET    0x0000019CU /**< Alignment Error Counter */
+#define XEMACPS_RXRESERRCNT_OFFSET   0x000001A0U /**< Receive Resource Error
+                                                      Counter */
+#define XEMACPS_RXORCNT_OFFSET       0x000001A4U /**< Receive Overrun Counter */
+#define XEMACPS_RXIPCCNT_OFFSET      0x000001A8U /**< IP header Checksum Error
+                                                      Counter */
+#define XEMACPS_RXTCPCCNT_OFFSET     0x000001ACU /**< TCP Checksum Error
+                                                      Counter */
+#define XEMACPS_RXUDPCCNT_OFFSET     0x000001B0U /**< UDP Checksum Error
+                                                      Counter */
+#define XEMACPS_LAST_OFFSET          0x000001B4U /**< Last statistic counter
+						      offset, for clearing */
+
+#define XEMACPS_1588_SEC_OFFSET      0x000001D0U /**< 1588 second counter */
+#define XEMACPS_1588_NANOSEC_OFFSET  0x000001D4U /**< 1588 nanosecond counter */
+#define XEMACPS_1588_ADJ_OFFSET      0x000001D8U /**< 1588 nanosecond
+						      adjustment counter */
+#define XEMACPS_1588_INC_OFFSET      0x000001DCU /**< 1588 nanosecond
+						      increment counter */
+#define XEMACPS_PTP_TXSEC_OFFSET     0x000001E0U /**< 1588 PTP transmit second
+						      counter */
+#define XEMACPS_PTP_TXNANOSEC_OFFSET 0x000001E4U /**< 1588 PTP transmit
+						      nanosecond counter */
+#define XEMACPS_PTP_RXSEC_OFFSET     0x000001E8U /**< 1588 PTP receive second
+						      counter */
+#define XEMACPS_PTP_RXNANOSEC_OFFSET 0x000001ECU /**< 1588 PTP receive
+						      nanosecond counter */
+#define XEMACPS_PTPP_TXSEC_OFFSET    0x000001F0U /**< 1588 PTP peer transmit
+						      second counter */
+#define XEMACPS_PTPP_TXNANOSEC_OFFSET 0x000001F4U /**< 1588 PTP peer transmit
+						      nanosecond counter */
+#define XEMACPS_PTPP_RXSEC_OFFSET    0x000001F8U /**< 1588 PTP peer receive
+						      second counter */
+#define XEMACPS_PTPP_RXNANOSEC_OFFSET 0x000001FCU /**< 1588 PTP peer receive
+						      nanosecond counter */
+
+#define XEMACPS_INTQ1_STS_OFFSET     0x00000400U /**< Interrupt Q1 Status
+							reg */
+#define XEMACPS_TXQ1BASE_OFFSET	     0x00000440U /**< TX Q1 Base address
+							reg */
+#define XEMACPS_RXQ1BASE_OFFSET	     0x00000480U /**< RX Q1 Base address
+							reg */
+#define XEMACPS_MSBBUF_TXQBASE_OFFSET  0x000004C8U /**< MSB Buffer TX Q Base
+							reg */
+#define XEMACPS_MSBBUF_RXQBASE_OFFSET  0x000004D4U /**< MSB Buffer RX Q Base
+							reg */
+#define XEMACPS_INTQ1_IER_OFFSET     0x00000600U /**< Interrupt Q1 Enable
+							reg */
+#define XEMACPS_INTQ1_IDR_OFFSET     0x00000620U /**< Interrupt Q1 Disable
+							reg */
+#define XEMACPS_INTQ1_IMR_OFFSET     0x00000640U /**< Interrupt Q1 Mask
+							reg */
+
+/* Define some bit positions for registers. */
+
+/** @name network control register bit definitions
+ * @{
+ */
+#define XEMACPS_NWCTRL_FLUSH_DPRAM_MASK	0x00040000U /**< Flush a packet from
+							Rx SRAM */
+#define XEMACPS_NWCTRL_ZEROPAUSETX_MASK 0x00000800U /**< Transmit zero quantum
+                                                         pause frame */
+#define XEMACPS_NWCTRL_PAUSETX_MASK     0x00000800U /**< Transmit pause frame */
+#define XEMACPS_NWCTRL_HALTTX_MASK      0x00000400U /**< Halt transmission
+                                                         after current frame */
+#define XEMACPS_NWCTRL_STARTTX_MASK     0x00000200U /**< Start tx (tx_go) */
+
+#define XEMACPS_NWCTRL_STATWEN_MASK     0x00000080U /**< Enable writing to
+                                                         stat counters */
+#define XEMACPS_NWCTRL_STATINC_MASK     0x00000040U /**< Increment statistic
+                                                         registers */
+#define XEMACPS_NWCTRL_STATCLR_MASK     0x00000020U /**< Clear statistic
+                                                         registers */
+#define XEMACPS_NWCTRL_MDEN_MASK        0x00000010U /**< Enable MDIO port */
+#define XEMACPS_NWCTRL_TXEN_MASK        0x00000008U /**< Enable transmit */
+#define XEMACPS_NWCTRL_RXEN_MASK        0x00000004U /**< Enable receive */
+#define XEMACPS_NWCTRL_LOOPEN_MASK      0x00000002U /**< local loopback */
+/*@}*/
+
+/** @name network configuration register bit definitions
+ * @{
+ */
+#define XEMACPS_NWCFG_BADPREAMBEN_MASK 0x20000000U /**< disable rejection of
+                                                        non-standard preamble */
+#define XEMACPS_NWCFG_IPDSTRETCH_MASK  0x10000000U /**< enable transmit IPG */
+#define XEMACPS_NWCFG_SGMIIEN_MASK     0x08000000U /**< SGMII Enable */
+#define XEMACPS_NWCFG_FCSIGNORE_MASK   0x04000000U /**< disable rejection of
+                                                        FCS error */
+#define XEMACPS_NWCFG_HDRXEN_MASK      0x02000000U /**< RX half duplex */
+#define XEMACPS_NWCFG_RXCHKSUMEN_MASK  0x01000000U /**< enable RX checksum
+                                                        offload */
+#define XEMACPS_NWCFG_PAUSECOPYDI_MASK 0x00800000U /**< Do not copy pause
+                                                        Frames to memory */
+#define XEMACPS_NWCFG_DWIDTH_64_MASK   0x00200000U /**< 64 bit Data bus width */
+#define XEMACPS_NWCFG_MDC_SHIFT_MASK   18U	   /**< shift bits for MDC */
+#define XEMACPS_NWCFG_MDCCLKDIV_MASK   0x001C0000U /**< MDC Mask PCLK divisor */
+#define XEMACPS_NWCFG_FCSREM_MASK      0x00020000U /**< Discard FCS from
+                                                        received frames */
+#define XEMACPS_NWCFG_LENERRDSCRD_MASK 0x00010000U
+/**< RX length error discard */
+#define XEMACPS_NWCFG_RXOFFS_MASK      0x0000C000U /**< RX buffer offset */
+#define XEMACPS_NWCFG_PAUSEEN_MASK     0x00002000U /**< Enable pause RX */
+#define XEMACPS_NWCFG_RETRYTESTEN_MASK 0x00001000U /**< Retry test */
+#define XEMACPS_NWCFG_XTADDMACHEN_MASK 0x00000200U
+/**< External address match enable */
+#define XEMACPS_NWCFG_PCSSEL_MASK      0x00000800U /**< PCS Select */
+#define XEMACPS_NWCFG_1000_MASK        0x00000400U /**< 1000 Mbps */
+#define XEMACPS_NWCFG_1536RXEN_MASK    0x00000100U /**< Enable 1536 byte
+                                                        frames reception */
+#define XEMACPS_NWCFG_UCASTHASHEN_MASK 0x00000080U /**< Receive unicast hash
+                                                        frames */
+#define XEMACPS_NWCFG_MCASTHASHEN_MASK 0x00000040U /**< Receive multicast hash
+                                                        frames */
+#define XEMACPS_NWCFG_BCASTDI_MASK     0x00000020U /**< Do not receive
+                                                        broadcast frames */
+#define XEMACPS_NWCFG_COPYALLEN_MASK   0x00000010U /**< Copy all frames */
+#define XEMACPS_NWCFG_JUMBO_MASK       0x00000008U /**< Jumbo frames */
+#define XEMACPS_NWCFG_NVLANDISC_MASK   0x00000004U /**< Receive only VLAN
+                                                        frames */
+#define XEMACPS_NWCFG_FDEN_MASK        0x00000002U/**< full duplex */
+#define XEMACPS_NWCFG_100_MASK         0x00000001U /**< 100 Mbps */
+#define XEMACPS_NWCFG_RESET_MASK       0x00080000U/**< reset value */
+/*@}*/
+
+/** @name network status register bit definitions
+ * @{
+ */
+#define XEMACPS_NWSR_MDIOIDLE_MASK     0x00000004U /**< PHY management idle */
+#define XEMACPS_NWSR_MDIO_MASK         0x00000002U /**< Status of mdio_in */
+/*@}*/
+
+
+/** @name MAC address register word 1 mask
+ * @{
+ */
+#define XEMACPS_LADDR_MACH_MASK        0x0000FFFFU /**< Address bits [47:32];
+                                                      bits [31:0] are in BOTTOM */
+/*@}*/
+
+
+/** @name DMA control register bit definitions
+ * @{
+ */
+#define XEMACPS_DMACR_ADDR_WIDTH_64		0x40000000U /**< 64 bit address bus */
+#define XEMACPS_DMACR_TXEXTEND_MASK		0x20000000U /**< Tx Extended desc mode */
+#define XEMACPS_DMACR_RXEXTEND_MASK		0x10000000U /**< Rx Extended desc mode */
+#define XEMACPS_DMACR_RXBUF_MASK		0x00FF0000U /**< Mask bit for RX buffer
+													size */
+#define XEMACPS_DMACR_RXBUF_SHIFT 		16U	/**< Shift bit for RX buffer
+												size */
+#define XEMACPS_DMACR_TCPCKSUM_MASK		0x00000800U /**< enable/disable TX
+													    checksum offload */
+#define XEMACPS_DMACR_TXSIZE_MASK		0x00000400U /**< TX buffer memory size */
+#define XEMACPS_DMACR_RXSIZE_MASK		0x00000300U /**< RX buffer memory size */
+#define XEMACPS_DMACR_ENDIAN_MASK		0x00000080U /**< endian configuration */
+#define XEMACPS_DMACR_BLENGTH_MASK		0x0000001FU /**< buffer burst length */
+#define XEMACPS_DMACR_SINGLE_AHB_BURST	0x00000001U /**< single AHB bursts */
+#define XEMACPS_DMACR_INCR4_AHB_BURST	0x00000004U /**< 4 bytes AHB bursts */
+#define XEMACPS_DMACR_INCR8_AHB_BURST	0x00000008U /**< 8 bytes AHB bursts */
+#define XEMACPS_DMACR_INCR16_AHB_BURST	0x00000010U /**< 16 bytes AHB bursts */
+/*@}*/
+
+/** @name transmit status register bit definitions
+ * @{
+ */
+#define XEMACPS_TXSR_HRESPNOK_MASK    0x00000100U /**< Transmit hresp not OK */
+#define XEMACPS_TXSR_URUN_MASK        0x00000040U /**< Transmit underrun */
+#define XEMACPS_TXSR_TXCOMPL_MASK     0x00000020U /**< Transmit completed OK */
+#define XEMACPS_TXSR_BUFEXH_MASK      0x00000010U /**< Transmit buffs exhausted
+                                                       mid frame */
+#define XEMACPS_TXSR_TXGO_MASK        0x00000008U /**< Status of go flag */
+#define XEMACPS_TXSR_RXOVR_MASK       0x00000004U /**< Retry limit exceeded */
+#define XEMACPS_TXSR_FRAMERX_MASK     0x00000002U /**< Collision tx frame */
+#define XEMACPS_TXSR_USEDREAD_MASK    0x00000001U /**< TX buffer used bit set */
+
+#define XEMACPS_TXSR_ERROR_MASK      ((u32)XEMACPS_TXSR_HRESPNOK_MASK | \
+                                       (u32)XEMACPS_TXSR_URUN_MASK | \
+                                       (u32)XEMACPS_TXSR_BUFEXH_MASK | \
+                                       (u32)XEMACPS_TXSR_RXOVR_MASK | \
+                                       (u32)XEMACPS_TXSR_FRAMERX_MASK | \
+                                       (u32)XEMACPS_TXSR_USEDREAD_MASK)
+/*@}*/
+
+/**
+ * @name receive status register bit definitions
+ * @{
+ */
+#define XEMACPS_RXSR_HRESPNOK_MASK    0x00000008U /**< Receive hresp not OK */
+#define XEMACPS_RXSR_RXOVR_MASK       0x00000004U /**< Receive overrun */
+#define XEMACPS_RXSR_FRAMERX_MASK     0x00000002U /**< Frame received OK */
+#define XEMACPS_RXSR_BUFFNA_MASK      0x00000001U /**< RX buffer used bit set */
+
+#define XEMACPS_RXSR_ERROR_MASK      ((u32)XEMACPS_RXSR_HRESPNOK_MASK | \
+                                       (u32)XEMACPS_RXSR_RXOVR_MASK | \
+                                       (u32)XEMACPS_RXSR_BUFFNA_MASK)
+
+#define XEMACPS_SR_ALL_MASK	0xFFFFFFFFU /**< Mask for full register */
+
+/*@}*/
+
+/**
+ * @name Interrupt Q1 status register bit definitions
+ * @{
+ */
+#define XEMACPS_INTQ1SR_TXCOMPL_MASK	0x00000080U /**< Transmit completed OK */
+#define XEMACPS_INTQ1SR_TXERR_MASK	0x00000040U /**< Transmit AMBA Error */
+
+#define XEMACPS_INTQ1_IXR_ALL_MASK	((u32)XEMACPS_INTQ1SR_TXCOMPL_MASK | \
+					 (u32)XEMACPS_INTQ1SR_TXERR_MASK)
+
+/*@}*/
+
+/**
+ * @name interrupts bit definitions
+ * Bit definitions are the same in XEMACPS_ISR_OFFSET,
+ * XEMACPS_IER_OFFSET, XEMACPS_IDR_OFFSET, and XEMACPS_IMR_OFFSET
+ * @{
+ */
+#define XEMACPS_IXR_PTPPSTX_MASK	0x02000000U /**< PTP Pdelay_resp TXed */
+#define XEMACPS_IXR_PTPPDRTX_MASK	0x01000000U /**< PTP Pdelay_req TXed */
+#define XEMACPS_IXR_PTPPSRX_MASK	0x00800000U /**< PTP Pdelay_resp RXed */
+#define XEMACPS_IXR_PTPPDRRX_MASK	0x00400000U /**< PTP Pdelay_req RXed */
+
+#define XEMACPS_IXR_PTPSTX_MASK		0x00200000U /**< PTP Sync TXed */
+#define XEMACPS_IXR_PTPDRTX_MASK	0x00100000U /**< PTP Delay_req TXed */
+#define XEMACPS_IXR_PTPSRX_MASK		0x00080000U /**< PTP Sync RXed */
+#define XEMACPS_IXR_PTPDRRX_MASK	0x00040000U /**< PTP Delay_req RXed */
+
+#define XEMACPS_IXR_PAUSETX_MASK    0x00004000U	/**< Pause frame transmitted */
+#define XEMACPS_IXR_PAUSEZERO_MASK  0x00002000U	/**< Pause time has reached
+                                                     zero */
+#define XEMACPS_IXR_PAUSENZERO_MASK 0x00001000U	/**< Pause frame received */
+#define XEMACPS_IXR_HRESPNOK_MASK   0x00000800U	/**< hresp not ok */
+#define XEMACPS_IXR_RXOVR_MASK      0x00000400U	/**< Receive overrun occurred */
+#define XEMACPS_IXR_TXCOMPL_MASK    0x00000080U	/**< Frame transmitted ok */
+#define XEMACPS_IXR_TXEXH_MASK      0x00000040U	/**< Transmit err occurred or
+                                                     no buffers*/
+#define XEMACPS_IXR_RETRY_MASK      0x00000020U	/**< Retry limit exceeded */
+#define XEMACPS_IXR_URUN_MASK       0x00000010U	/**< Transmit underrun */
+#define XEMACPS_IXR_TXUSED_MASK     0x00000008U	/**< Tx buffer used bit read */
+#define XEMACPS_IXR_RXUSED_MASK     0x00000004U	/**< Rx buffer used bit read */
+#define XEMACPS_IXR_FRAMERX_MASK    0x00000002U	/**< Frame received ok */
+#define XEMACPS_IXR_MGMNT_MASK      0x00000001U	/**< PHY management complete */
+#define XEMACPS_IXR_ALL_MASK        0x00007FFFU	/**< Everything! */
+
+#define XEMACPS_IXR_TX_ERR_MASK    ((u32)XEMACPS_IXR_TXEXH_MASK |         \
+                                     (u32)XEMACPS_IXR_RETRY_MASK |         \
+                                     (u32)XEMACPS_IXR_URUN_MASK)
+
+
+#define XEMACPS_IXR_RX_ERR_MASK    ((u32)XEMACPS_IXR_HRESPNOK_MASK |      \
+                                     (u32)XEMACPS_IXR_RXUSED_MASK |        \
+                                     (u32)XEMACPS_IXR_RXOVR_MASK)
+
+/*@}*/
+
+/** @name PHY Maintenance bit definitions
+ * @{
+ */
+#define XEMACPS_PHYMNTNC_OP_MASK    0x40020000U	/**< operation mask bits */
+#define XEMACPS_PHYMNTNC_OP_R_MASK  0x20000000U	/**< read operation */
+#define XEMACPS_PHYMNTNC_OP_W_MASK  0x10000000U	/**< write operation */
+#define XEMACPS_PHYMNTNC_ADDR_MASK  0x0F800000U	/**< Address bits */
+#define XEMACPS_PHYMNTNC_REG_MASK   0x007C0000U	/**< register bits */
+#define XEMACPS_PHYMNTNC_DATA_MASK  0x00000FFFU	/**< data bits */
+#define XEMACPS_PHYMNTNC_PHAD_SHFT_MSK   23U	/**< Shift bits for PHYAD */
+#define XEMACPS_PHYMNTNC_PREG_SHFT_MSK   18U	/**< Shift bits for PHREG */
+/*@}*/
+
+/** @name RX watermark bit definitions
+ * @{
+ */
+#define XEMACPS_RXWM_HIGH_MASK		0x0000FFFFU	/**< RXWM high mask */
+#define XEMACPS_RXWM_LOW_MASK		0xFFFF0000U	/**< RXWM low mask */
+#define XEMACPS_RXWM_LOW_SHFT_MSK	16U	/**< Shift for RXWM low */
+/*@}*/
+
+/* Transmit buffer descriptor status words offset
+ * @{
+ */
+#define XEMACPS_BD_ADDR_OFFSET  0x00000000U /**< word 0/addr of BDs */
+#define XEMACPS_BD_STAT_OFFSET  0x00000004U /**< word 1/status of BDs */
+#define XEMACPS_BD_ADDR_HI_OFFSET  0x00000008U /**< word 2/addr of BDs */
+
+/*
+ * @}
+ */
+
+/* Transmit buffer descriptor status words bit positions.
+ * Transmit buffer descriptor consists of two 32-bit registers,
+ * the first - word0 contains a 32-bit address pointing to the location of
+ * the transmit data.
+ * The following register - word1, consists of various information to control
+ * the XEmacPs transmit process.  After transmit, this is updated with status
+ * information, whether the frame was transmitted OK or why it had failed.
+ * @{
+ */
+#define XEMACPS_TXBUF_USED_MASK  0x80000000U /**< Used bit. */
+#define XEMACPS_TXBUF_WRAP_MASK  0x40000000U /**< Wrap bit, last descriptor */
+#define XEMACPS_TXBUF_RETRY_MASK 0x20000000U /**< Retry limit exceeded */
+#define XEMACPS_TXBUF_URUN_MASK  0x10000000U /**< Transmit underrun occurred */
+#define XEMACPS_TXBUF_EXH_MASK   0x08000000U /**< Buffers exhausted */
+#define XEMACPS_TXBUF_TCP_MASK   0x04000000U /**< Late collision. */
+#define XEMACPS_TXBUF_NOCRC_MASK 0x00010000U /**< No CRC */
+#define XEMACPS_TXBUF_LAST_MASK  0x00008000U /**< Last buffer */
+#define XEMACPS_TXBUF_LEN_MASK   0x00003FFFU /**< Mask for length field */
+/*
+ * @}
+ */
+
+/* Receive buffer descriptor status words bit positions.
+ * Receive buffer descriptor consists of two 32-bit registers,
+ * the first - word0 contains a 32-bit word aligned address pointing to the
+ * address of the buffer. The lower two bits make up the wrap bit indicating
+ * the last descriptor and the ownership bit to indicate it has been used by
+ * the XEmacPs.
+ * The following register - word1, contains status information regarding why
+ * the frame was received (the filter match condition) as well as other
+ * useful info.
+ * @{
+ */
+#define XEMACPS_RXBUF_BCAST_MASK     0x80000000U /**< Broadcast frame */
+#define XEMACPS_RXBUF_MULTIHASH_MASK 0x40000000U /**< Multicast hashed frame */
+#define XEMACPS_RXBUF_UNIHASH_MASK   0x20000000U /**< Unicast hashed frame */
+#define XEMACPS_RXBUF_EXH_MASK       0x08000000U /**< buffer exhausted */
+#define XEMACPS_RXBUF_AMATCH_MASK    0x06000000U /**< Specific address
+                                                      matched */
+#define XEMACPS_RXBUF_IDFOUND_MASK   0x01000000U /**< Type ID matched */
+#define XEMACPS_RXBUF_IDMATCH_MASK   0x00C00000U /**< ID matched mask */
+#define XEMACPS_RXBUF_VLAN_MASK      0x00200000U /**< VLAN tagged */
+#define XEMACPS_RXBUF_PRI_MASK       0x00100000U /**< Priority tagged */
+#define XEMACPS_RXBUF_VPRI_MASK      0x000E0000U /**< Vlan priority */
+#define XEMACPS_RXBUF_CFI_MASK       0x00010000U /**< CFI frame */
+#define XEMACPS_RXBUF_EOF_MASK       0x00008000U /**< End of frame. */
+#define XEMACPS_RXBUF_SOF_MASK       0x00004000U /**< Start of frame. */
+#define XEMACPS_RXBUF_LEN_MASK       0x00001FFFU /**< Mask for length field */
+#define XEMACPS_RXBUF_LEN_JUMBO_MASK 0x00003FFFU /**< Mask for jumbo length */
+
+#define XEMACPS_RXBUF_WRAP_MASK      0x00000002U /**< Wrap bit, last BD */
+#define XEMACPS_RXBUF_NEW_MASK       0x00000001U /**< Used bit. */
+#define XEMACPS_RXBUF_ADD_MASK       0xFFFFFFFCU /**< Mask for address */
+/*
+ * @}
+ */
+
+/*
+ * Define appropriate I/O access method to memory mapped I/O or other
+ * interface if necessary.
+ */
+
+#define XEmacPs_In32  Xil_In32
+#define XEmacPs_Out32 Xil_Out32
+
+
+/****************************************************************************/
+/**
+*
+* Read the given register.
+*
+* @param    BaseAddress is the base address of the device
+* @param    RegOffset is the register offset to be read
+*
+* @return   The 32-bit value of the register
+*
+* @note
+* C-style signature:
+*    u32 XEmacPs_ReadReg(u32 BaseAddress, u32 RegOffset)
+*
+*****************************************************************************/
+#define XEmacPs_ReadReg(BaseAddress, RegOffset) \
+    XEmacPs_In32((BaseAddress) + (u32)(RegOffset))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given register.
+*
+* @param    BaseAddress is the base address of the device
+* @param    RegOffset is the register offset to be written
+* @param    Data is the 32-bit value to write to the register
+*
+* @return   None.
+*
+* @note
+* C-style signature:
+*    void XEmacPs_WriteReg(u32 BaseAddress, u32 RegOffset,
+*         u32 Data)
+*
+*****************************************************************************/
+#define XEmacPs_WriteReg(BaseAddress, RegOffset, Data) \
+    XEmacPs_Out32((BaseAddress) + (u32)(RegOffset), (u32)(Data))
+
+/************************** Function Prototypes *****************************/
+/*
+ * Perform reset operation to the emacps interface
+ */
+void XEmacPs_ResetHw(u32 BaseAddr);
+
+#ifdef __cplusplus
+  }
+#endif
+
+#endif /* end of protection macro */
+/** @} */
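
For orientation, XEmacPs_ReadReg() and XEmacPs_WriteReg() above are plain 32-bit
MMIO accessors. The following read-modify-write sketch is illustrative only and
is not part of the imported sources; the helper name and base address are
hypothetical, and XEMACPS_NWCTRL_OFFSET / XEMACPS_NWCTRL_FLUSH_DPRAM_MASK are
assumed to be defined earlier in this header (the interrupt handler below uses
them the same way).

    /* Illustrative sketch only -- not part of the imported Xilinx sources. */
    #include "xemacps_hw.h"

    static void flush_rx_dpram(u32 BaseAddress)   /* hypothetical helper */
    {
        /* Read the network control register, set the DPRAM flush bit,
         * and write the value back through the access macros above. */
        u32 Ctrl = XEmacPs_ReadReg(BaseAddress, XEMACPS_NWCTRL_OFFSET);
        Ctrl |= (u32)XEMACPS_NWCTRL_FLUSH_DPRAM_MASK;
        XEmacPs_WriteReg(BaseAddress, XEMACPS_NWCTRL_OFFSET, Ctrl);
    }
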
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_intr.c b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_intr.c
new file mode 100644
index 0000000..2dbdb0b
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/emacps/src/xemacps_intr.c
@@ -0,0 +1,242 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2020 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xemacps_intr.c
+* @addtogroup emacps_v3_16
+* @{
+*
+* Functions in this file implement general purpose interrupt processing related
+* functionality. See xemacps.h for a detailed description of the driver.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a wsy  01/10/10 First release
+* 1.03a asa  01/24/13 Fix for CR #692702 which updates error handling for
+*		      Rx errors. Under heavy Rx traffic, there will be a large
+*		      number of errors related to receive buffer not available.
+*		      Because of a HW bug (SI #692601), under such heavy errors,
+*		      the Rx data path can become unresponsive. To reduce the
+*		      probability of hitting this HW bug, the SW writes to
+*		      bit 18 to flush a packet from Rx DPRAM immediately. The
+*		      changes for it are done in the function
+*		      XEmacPs_IntrHandler.
+* 2.1   srt  07/15/14 Add support for Zynq Ultrascale Mp GEM specification
+*		       and 64-bit changes.
+* 3.0   kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+* 3.1   hk   07/27/15 Do not call error handler with '0' error code when
+*                     there is no error. CR# 869403
+* </pre>
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xemacps.h"
+
+/************************** Constant Definitions *****************************/
+
+
+/**************************** Type Definitions *******************************/
+
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+
+/************************** Variable Definitions *****************************/
+
+
+/*****************************************************************************/
+/**
+ * Install an asynchronous handler function for the given HandlerType:
+ *
+ * @param InstancePtr is a pointer to the instance to be worked on.
+ * @param HandlerType indicates the type of interrupt handler; one of
+ *        XEMACPS_HANDLER_DMASEND, XEMACPS_HANDLER_DMARECV and
+ *        XEMACPS_HANDLER_ERROR.
+ * @param FuncPointer is the pointer to the callback function
+ * @param CallBackRef is the upper layer callback reference passed back
+ *        when the callback function is invoked.
+ *
+ * @return
+ *
+ * None.
+ *
+ * @note
+ * There is no assert on the CallBackRef since the driver doesn't know what
+ * it is.
+ *
+ *****************************************************************************/
+LONG XEmacPs_SetHandler(XEmacPs *InstancePtr, u32 HandlerType,
+			void *FuncPointer, void *CallBackRef)
+{
+	LONG Status;
+	Xil_AssertNonvoid(InstancePtr != NULL);
+	Xil_AssertNonvoid(FuncPointer != NULL);
+	Xil_AssertNonvoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	switch (HandlerType) {
+	case XEMACPS_HANDLER_DMASEND:
+		Status = (LONG)(XST_SUCCESS);
+		InstancePtr->SendHandler = ((XEmacPs_Handler)(void *)FuncPointer);
+		InstancePtr->SendRef = CallBackRef;
+		break;
+	case XEMACPS_HANDLER_DMARECV:
+		Status = (LONG)(XST_SUCCESS);
+		InstancePtr->RecvHandler = ((XEmacPs_Handler)(void *)FuncPointer);
+		InstancePtr->RecvRef = CallBackRef;
+		break;
+	case XEMACPS_HANDLER_ERROR:
+		Status = (LONG)(XST_SUCCESS);
+		InstancePtr->ErrorHandler = ((XEmacPs_ErrHandler)(void *)FuncPointer);
+		InstancePtr->ErrorRef = CallBackRef;
+		break;
+	default:
+		Status = (LONG)(XST_INVALID_PARAM);
+		break;
+	}
+	return Status;
+}
+
+/*****************************************************************************/
+/**
+* Master interrupt handler for EMAC driver. This routine will query the
+* status of the device, bump statistics, and invoke user callbacks.
+*
+* This routine must be connected to an interrupt controller using OS/BSP
+* specific methods.
+*
+* @param XEmacPsPtr is a pointer to the XEMACPS instance that has caused the
+*        interrupt.
+*
+******************************************************************************/
+void XEmacPs_IntrHandler(void *XEmacPsPtr)
+{
+	u32 RegISR;
+	u32 RegSR;
+	u32 RegCtrl;
+	u32 RegQ1ISR = 0U;
+	XEmacPs *InstancePtr = (XEmacPs *) XEmacPsPtr;
+
+	Xil_AssertVoid(InstancePtr != NULL);
+	Xil_AssertVoid(InstancePtr->IsReady == (u32)XIL_COMPONENT_IS_READY);
+
+	/* This ISR will try to handle as many interrupts as it can in a single
+	 * call. However, in most of the places where the user's error handler
+         * is called, this ISR exits because it is expected that the user will
+         * reset the device in nearly all instances.
+	 */
+	RegISR = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_ISR_OFFSET);
+
+	/* Read Transmit Q1 ISR */
+
+	if (InstancePtr->Version > 2)
+		RegQ1ISR = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_INTQ1_STS_OFFSET);
+
+	/* Clear the interrupt status register */
+	XEmacPs_WriteReg(InstancePtr->Config.BaseAddress, XEMACPS_ISR_OFFSET,
+			   RegISR);
+
+	/* Receive complete interrupt */
+	if ((RegISR & XEMACPS_IXR_FRAMERX_MASK) != 0x00000000U) {
+		/* Clear RX status register RX complete indication but preserve
+		 * error bits if there are any */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_RXSR_OFFSET,
+				   ((u32)XEMACPS_RXSR_FRAMERX_MASK |
+				   (u32)XEMACPS_RXSR_BUFFNA_MASK));
+		InstancePtr->RecvHandler(InstancePtr->RecvRef);
+	}
+
+	/* Transmit Q1 complete interrupt */
+	if ((InstancePtr->Version > 2) &&
+			((RegQ1ISR & XEMACPS_INTQ1SR_TXCOMPL_MASK) != 0x00000000U)) {
+		/* Clear TX status register TX complete indication but preserve
+		 * error bits if there are any */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_INTQ1_STS_OFFSET,
+				   XEMACPS_INTQ1SR_TXCOMPL_MASK);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_TXSR_OFFSET,
+				   ((u32)XEMACPS_TXSR_TXCOMPL_MASK |
+				   (u32)XEMACPS_TXSR_USEDREAD_MASK));
+		InstancePtr->SendHandler(InstancePtr->SendRef);
+	}
+
+	/* Transmit complete interrupt */
+	if ((RegISR & XEMACPS_IXR_TXCOMPL_MASK) != 0x00000000U) {
+		/* Clear TX status register TX complete indication but preserve
+		 * error bits if there are any */
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_TXSR_OFFSET,
+				   ((u32)XEMACPS_TXSR_TXCOMPL_MASK |
+				   (u32)XEMACPS_TXSR_USEDREAD_MASK));
+		InstancePtr->SendHandler(InstancePtr->SendRef);
+	}
+
+	/* Receive error conditions interrupt */
+	if ((RegISR & XEMACPS_IXR_RX_ERR_MASK) != 0x00000000U) {
+		/* Clear RX status register */
+		RegSR = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					  XEMACPS_RXSR_OFFSET);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_RXSR_OFFSET, RegSR);
+
+		/* Fix for CR # 692702. Write to bit 18 of net_ctrl
+		 * register to flush a packet out of Rx SRAM upon
+		 * an error for receive buffer not available. */
+		if ((RegISR & XEMACPS_IXR_RXUSED_MASK) != 0x00000000U) {
+			RegCtrl =
+			XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+						XEMACPS_NWCTRL_OFFSET);
+			RegCtrl |= (u32)XEMACPS_NWCTRL_FLUSH_DPRAM_MASK;
+			XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+					XEMACPS_NWCTRL_OFFSET, RegCtrl);
+		}
+
+		if(RegSR != 0) {
+			InstancePtr->ErrorHandler(InstancePtr->ErrorRef,
+						XEMACPS_RECV, RegSR);
+		}
+	}
+
+        /* When XEMACPS_IXR_TXCOMPL_MASK is flagged, XEMACPS_IXR_TXUSED_MASK
+         * is asserted at the same time, so that bit must be checked to
+         * distinguish a real error condition.
+         */
+	/* Transmit Q1 error conditions interrupt */
+        if ((InstancePtr->Version > 2) &&
+			((RegQ1ISR & XEMACPS_INTQ1SR_TXERR_MASK) != 0x00000000U) &&
+            ((RegQ1ISR & XEMACPS_INTQ1SR_TXCOMPL_MASK) != 0x00000000U)) {
+			/* Clear Interrupt Q1 status register */
+			XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_INTQ1_STS_OFFSET, RegQ1ISR);
+			InstancePtr->ErrorHandler(InstancePtr->ErrorRef, XEMACPS_SEND,
+					  RegQ1ISR);
+	   }
+
+	/* Transmit error conditions interrupt */
+        if (((RegISR & XEMACPS_IXR_TX_ERR_MASK) != 0x00000000U) &&
+            (!(RegISR & XEMACPS_IXR_TXCOMPL_MASK) != 0x00000000U)) {
+		/* Clear TX status register */
+		RegSR = XEmacPs_ReadReg(InstancePtr->Config.BaseAddress,
+					  XEMACPS_TXSR_OFFSET);
+		XEmacPs_WriteReg(InstancePtr->Config.BaseAddress,
+				   XEMACPS_TXSR_OFFSET, RegSR);
+		InstancePtr->ErrorHandler(InstancePtr->ErrorRef, XEMACPS_SEND,
+					  RegSR);
+	}
+
+}
+/** @} */
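
The two entry points in this file are typically used together: XEmacPs_SetHandler()
installs the callbacks and XEmacPs_IntrHandler() is registered with the interrupt
controller. A minimal sketch follows; it is not part of the imported sources, the
instance and callback names are hypothetical, and the callback parameter lists are
inferred from the calls made inside XEmacPs_IntrHandler() above (the authoritative
typedefs XEmacPs_Handler and XEmacPs_ErrHandler live in xemacps.h).

    /* Illustrative sketch only -- not part of the imported Xilinx sources. */
    #include "xemacps.h"

    static XEmacPs Mac;                     /* hypothetical driver instance */

    static void MySend(void *CallBackRef)   { /* frame(s) transmitted */ }
    static void MyRecv(void *CallBackRef)   { /* frame(s) received */ }
    static void MyError(void *CallBackRef, u8 Direction, u32 ErrorWord)
    {
        /* Most error paths expect the caller to reset the device. */
    }

    static void install_handlers(void)
    {
        (void)XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_DMASEND, (void *)MySend, &Mac);
        (void)XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_DMARECV, (void *)MyRecv, &Mac);
        (void)XEmacPs_SetHandler(&Mac, XEMACPS_HANDLER_ERROR, (void *)MyError, &Mac);
        /* XEmacPs_IntrHandler(&Mac) must then be connected to the GEM
         * interrupt ID by OS/BSP-specific means. */
    }
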
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic.h b/embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic.h
new file mode 100644
index 0000000..28043f6
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic.h
@@ -0,0 +1,601 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xscugic.h
+* @addtogroup scugic_v4_7
+* @{
+* @details
+*
+* The generic interrupt controller driver component.
+*
+* The interrupt controller driver uses the idea of priority for the various
+* handlers. Priority is an integer within the range of 1 and 31 inclusive with
+* default of 1 being the highest priority interrupt source. The priorities
+* of the various sources can be dynamically altered as needed through
+* hardware configuration.
+*
+* The generic interrupt controller supports the following
+* features:
+*
+*   - specific individual interrupt enabling/disabling
+*   - specific individual interrupt acknowledging
+*   - attaching specific callback function to handle interrupt source
+*   - assigning desired priority to interrupt source if default is not
+*     acceptable.
+*
+* Details about connecting the interrupt handler of the driver are contained
+* in the source file specific to interrupt processing, xscugic_intr.c.
+*
+* This driver is intended to be RTOS and processor independent.  It works with
+* physical addresses only.  Any needs for dynamic memory management, threads
+* or thread mutual exclusion, virtual memory, or cache control must be
+* satisfied by the layer above this driver.
+*
+* <b>Interrupt Vector Tables</b>
+*
+* The device ID of the interrupt controller device is used by the driver as a
+* direct index into the configuration data table. The user should populate the
+* vector table with handlers and callbacks at run-time using the
+* XScuGic_Connect() and XScuGic_Disconnect() functions.
+*
+* Each vector table entry corresponds to a device that can generate an
+* interrupt. Each entry contains an interrupt handler function and an
+* argument to be passed to the handler when an interrupt occurs.  The
+* user must use XScuGic_Connect() when the interrupt handler takes an
+* argument other than the base address.
+*
+* <b>Nested Interrupts Processing</b>
+*
+* Nested interrupts are not supported by this driver.
+*
+* NOTE:
+* The generic interrupt controller is not a part of the snoop control unit,
+* despite what the prefix "scu" in the name of the driver might suggest.
+* It is an independent module in the APU.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- ---------------------------------------------------------
+* 1.00a drg  01/19/00 First release
+* 1.01a sdm  11/09/11 The XScuGic and XScuGic_Config structures have changed.
+*		      The HandlerTable (of type XScuGic_VectorTableEntry) is
+*		      moved to XScuGic_Config structure from XScuGic structure.
+*
+*		      The "Config" entry in XScuGic structure is made as
+*		      pointer for better efficiency.
+*
+*		      A new file named as xscugic_hw.c is now added. It is
+*		      to implement low level driver routines without using
+*		      any xscugic instance pointer. They are useful when the
+*		      user wants to use xscugic through device id or
+*		      base address. The driver routines provided are explained
+*		      below.
+*		      XScuGic_DeviceInitialize that takes device id as
+*		      argument and initializes the device (without calling
+*		      XScuGic_CfgInitialize).
+*		      XScuGic_DeviceInterruptHandler that takes device id
+*		      as argument and calls appropriate handlers from the
+*		      HandlerTable.
+*		      XScuGic_RegisterHandler that registers a new handler
+*		      by taking xscugic hardware base address as argument.
+*		      LookupConfigByBaseAddress is used to return the
+*		      corresponding config structure from XScuGic_ConfigTable
+*		      based on the scugic base address passed.
+* 1.02a sdm  12/20/11 Removed AckBeforeService from the XScuGic_Config
+*		      structure.
+* 1.03a srt  02/27/13 Moved Offset calculation macros from *.c and *_hw.c to
+*		      *_hw.h
+*		      Added APIs
+*			- XScuGic_SetPriTrigTypeByDistAddr()
+*			- XScuGic_GetPriTrigTypeByDistAddr()
+*		      (CR 702687)
+*			Added support to direct interrupts to the appropriate CPU. Earlier
+*			  interrupts were directed to CPU1 (hard coded). Now depending
+*			  upon the CPU selected by the user (xparameters.h), interrupts
+*			  will be directed to the relevant CPU. This fixes CR 699688.
+* 1.04a hk   05/04/13 Assigned EffectiveAddr to CpuBaseAddress in
+*			  XScuGic_CfgInitialize. Fix for CR#704400 to remove warnings.
+*			  Moved functions XScuGic_SetPriTrigTypeByDistAddr and
+*             XScuGic_GetPriTrigTypeByDistAddr to xscugic_hw.c.
+*			  This is fix for CR#705621.
+* 1.05a hk   06/26/13 Modified tcl to export external interrupts correctly to
+*                     xparameters.h. Fix for CR's 690505, 708928 & 719359.
+* 2.0   adk  12/10/13 Updated as per the New Tcl API's
+* 2.1   adk  25/04/14 Fixed the CR:789373 changes are made in the driver tcl file.
+* 3.00  kvn  02/13/15 Modified code for MISRA-C:2012 compliance.
+* 3.2   asa  02/29/16 Modified DistributorInit function for Zynq AMP case. The
+*			  distributor is left uninitialized for Zynq AMP. It is assumed
+*             that the distributor will be initialized by Linux master. However
+*             for the CortexR5 case, the earlier code is left unchanged where
+*             the interrupt processor target registers in the distributor are
+*             initialized with the corresponding CPU ID on which the application
+*             built over the scugic driver runs.
+*             These changes fix CR#937243.
+*
+* 3.4   asa  04/07/16 Created a new static function DoDistributorInit to simplify
+*            the flow and avoid code duplication. Changes are made for
+*            USE_AMP use case for R5. In a scenario (in R5 split mode) when
+*            one R5 is operating with A53 in open amp config and other
+*            R5 running baremetal app, the existing code
+*            had the potential to stop the whole AMP solution from working (if
+*            for some reason the R5 running the baremetal app tasked to
+*            initialize the Distributor hangs or crashes before initializing).
+*            Changes are made so that the R5 under AMP first checks if
+*            the distributor is enabled or not and if not, it does the
+*            standard Distributor initialization.
+*            This fixes the CR#952962.
+* 3.6   ms   01/23/17 Modified xil_printf statement in main function for all
+*                     examples to ensure that "Successfully ran" and "Failed"
+*                     strings are available in all examples. This is a fix
+*                     for CR-965028.
+*       kvn  02/17/17 Add support for changing GIC CPU master at run time.
+*       kvn  02/28/17 Make the CpuId as static variable and Added new
+*                     XScugiC_GetCpuId to access CpuId.
+*       ms   03/17/17 Added readme.txt file in examples folder for doxygen
+*                     generation.
+* 3.7   ms   04/11/17 Modified tcl file to add suffix U for all macro
+*                     definitions of scugic in xparameters.h
+* 3.8   mus  07/05/17 Updated scugic.tcl to add support for interrupts connected
+*                     through util_reduced_vector IP(OR gate)
+*       mus  07/05/17 Updated xdefine_zynq_canonical_xpars proc to initialize
+*                     the HandlerTable in XScuGic_ConfigTable to 0, it removes
+*                     the compilation warning in xscugic_g.c. Fix for CR#978736.
+*       mus  07/25/17 Updated xdefine_gic_params proc to export correct canonical
+*                     definitions for pl to ps interrupts.Fix for CR#980534
+* 3.9   mus  02/21/18 Added new API's XScuGic_UnmapAllInterruptsFromCpu and
+*                     XScuGic_InterruptUnmapFromCpu, These API's can be used
+*                     by applications to unmap specific/all interrupts from
+*                     target CPU.
+* 3.10  aru  08/23/18 Resolved MISRA-C:2012 compliance mandatory violations
+* 4.0   mus  11/22/18 Fixed bugs in software interrupt generation through
+*                      XScuGic_SoftwareIntr API
+* 4.1   asa  03/30/19 Made changes not to direct each interrupt to all
+*                     available CPUs by default. This was breaking AMP
+*                     behavior. Instead every time an interrupt enable
+*                     request is received, the interrupt was mapped to
+*                     the respective CPU. There were several other changes
+*                     made to implement this. This set of changes was to
+*                     fix CR-1024716.
+* 4.1   mus  06/19/19 Added API's XScuGic_MarkCoreAsleep and
+*                     XScuGic_MarkCoreAwake to mark processor core as
+*                     asleep or awake. Fix for CR#1027220.
+* 4.5   asa  03/07/21 Included a header file xil_spinlock.h to ensure that
+*                     GIC driver can use newly introduced spinlock
+*                     functionality.
+* 4.6	sk   08/05/21 Fix scugic misrac violations.
+* 4.7   dp   11/22/21 Added new API XScuGic_IsInitialized() to check and return
+*                     the GIC initialization status.
+*
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XSCUGIC_H /* prevent circular inclusions */
+#define XSCUGIC_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+#include "xstatus.h"
+#include "xil_io.h"
+#include "xscugic_hw.h"
+#include "xil_exception.h"
+#include "xil_spinlock.h"
+
+/************************** Constant Definitions *****************************/
+
+#define EFUSE_STATUS_OFFSET   0x10
+#define EFUSE_STATUS_CPU_MASK 0x80
+
+#if !defined (ARMR5) && !defined (__aarch64__) && !defined (ARMA53_32)
+#define ARMA9
+#endif
+
+#define XSCUGIC500_DCTLR_ARE_NS_ENABLE  0x20
+#define XSCUGIC500_DCTLR_ARE_S_ENABLE  0x10
+/**************************** Type Definitions *******************************/
+
+/* The following data type defines each entry in an interrupt vector table.
+ * The callback reference is the base address of the interrupting device
+ * for the low level driver and an instance pointer for the high level driver.
+ */
+typedef struct
+{
+	Xil_InterruptHandler Handler;
+	void *CallBackRef;
+} XScuGic_VectorTableEntry;
+
+/**
+ * This typedef contains configuration information for the device.
+ */
+typedef struct
+{
+	u16 DeviceId;		/**< Unique ID  of device */
+	u32 CpuBaseAddress;	/**< CPU Interface Register base address */
+	u32 DistBaseAddress;	/**< Distributor Register base address */
+	XScuGic_VectorTableEntry HandlerTable[XSCUGIC_MAX_NUM_INTR_INPUTS];/**<
+				 Vector table of interrupt handlers */
+} XScuGic_Config;
+
+/**
+ * The XScuGic driver instance data. The user is required to allocate a
+ * variable of this type for every intc device in the system. A pointer
+ * to a variable of this type is then passed to the driver API functions.
+ */
+typedef struct
+{
+	XScuGic_Config *Config;  /**< Configuration table entry */
+	u32 IsReady;		 /**< Device is initialized and ready */
+	u32 UnhandledInterrupts; /**< Intc Statistics */
+} XScuGic;
+
+/************************** Variable Definitions *****************************/
+
+extern XScuGic_Config XScuGic_ConfigTable[];	/**< Config table */
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/****************************************************************************/
+/**
+*
+* Write the given CPU Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be written
+* @param    Data is the 32-bit value to write to the register
+*
+* @return   None.
+*
+* @note
+* C-style signature:
+*    void XScuGic_CPUWriteReg(XScuGic *InstancePtr, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XScuGic_CPUWriteReg(InstancePtr, RegOffset, Data) \
+(XScuGic_WriteReg(((InstancePtr)->Config->CpuBaseAddress), (RegOffset), \
+					((u32)(Data))))
+
+/****************************************************************************/
+/**
+*
+* Read the given CPU Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be read
+*
+* @return   The 32-bit value of the register
+*
+* @note
+* C-style signature:
+*    u32 XScuGic_CPUReadReg(XScuGic *InstancePtr, u32 RegOffset)
+*
+*****************************************************************************/
+#define XScuGic_CPUReadReg(InstancePtr, RegOffset) \
+	(XScuGic_ReadReg(((InstancePtr)->Config->CpuBaseAddress), (RegOffset)))
+
+/****************************************************************************/
+/**
+*
+* Write the given Distributor Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be written
+* @param    Data is the 32-bit value to write to the register
+*
+* @return   None.
+*
+* @note
+* C-style signature:
+*    void XScuGic_DistWriteReg(XScuGic *InstancePtr, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XScuGic_DistWriteReg(InstancePtr, RegOffset, Data) \
+(XScuGic_WriteReg(((InstancePtr)->Config->DistBaseAddress), (RegOffset), \
+					((u32)(Data))))
+
+/****************************************************************************/
+/**
+*
+* Read the given Distributor Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be read
+*
+* @return   The 32-bit value of the register
+*
+* @note
+* C-style signature:
+*    u32 XScuGic_DistReadReg(XScuGic *InstancePtr, u32 RegOffset)
+*
+*****************************************************************************/
+#define XScuGic_DistReadReg(InstancePtr, RegOffset) \
+(XScuGic_ReadReg(((InstancePtr)->Config->DistBaseAddress), (RegOffset)))
+
+/****************************************************************************/
+/**
+*
+* Write the given ReDistributor Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be written
+* @param    Data is the 32-bit value to write to the register
+*
+* @return   None.
+*
+* @note
+* C-style signature:
+*    void XScuGic_ReDistWriteReg(XScuGic *InstancePtr, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XScuGic_ReDistWriteReg(InstancePtr, RegOffset, Data) \
+(XScuGic_WriteReg(((InstancePtr)->Config->DistBaseAddress)+ \
+				   XSCUGIC_RDIST_OFFSET, (RegOffset), ((u32)(Data))))
+
+/****************************************************************************/
+/**
+*
+* Read the given ReDistributor Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be read
+*
+* @return   The 32-bit value of the register
+*
+* @note
+* C-style signature:
+*    u32 XScuGic_ReDistReadReg(XScuGic *InstancePtr, u32 RegOffset)
+*
+*****************************************************************************/
+#define XScuGic_ReDistReadReg(InstancePtr, RegOffset) \
+(XScuGic_ReadReg((((InstancePtr)->Config->DistBaseAddress)+ \
+XSCUGIC_RDIST_OFFSET), (RegOffset)))
+
+/****************************************************************************/
+/**
+*
+* Write the given ReDistributor SGI PPI Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be written
+* @param    Data is the 32-bit value to write to the register
+*
+* @return   None.
+*
+* @note
+* C-style signature:
+*    void XScuGic_ReDistSGIPPIWriteReg(XScuGic *InstancePtr, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XScuGic_ReDistSGIPPIWriteReg(InstancePtr, RegOffset, Data) \
+(XScuGic_WriteReg(((InstancePtr)->Config->DistBaseAddress)+ \
+				   XSCUGIC_RDIST_SGI_PPI_OFFSET, (RegOffset), ((u32)(Data))))
+
+/****************************************************************************/
+/**
+*
+* Read the given ReDistributor SGI PPI Interface register
+*
+* @param    InstancePtr is a pointer to the instance to be worked on.
+* @param    RegOffset is the register offset to be read
+*
+* @return   The 32-bit value of the register
+*
+* @note
+* C-style signature:
+*    u32 XScuGic_ReDistSGIPPIReadReg(XScuGic *InstancePtr, u32 RegOffset)
+*
+*****************************************************************************/
+#define XScuGic_ReDistSGIPPIReadReg(InstancePtr, RegOffset) \
+(XScuGic_ReadReg((((InstancePtr)->Config->DistBaseAddress)+ \
+					XSCUGIC_RDIST_SGI_PPI_OFFSET), (RegOffset)))
+
+/****************************************************************************/
+/**
+* This function enables the system register interface for the GIC CPU Interface
+*
+* @param	None.
+*
+* @return	None.
+*
+* @note        None.
+*
+*****************************************************************************/
+#define XScuGic_Enable_SystemReg_CPU_Interface_EL3() mtcp(S3_6_C12_C12_5, 0xF);
+#define XScuGic_Enable_SystemReg_CPU_Interface_EL1() mtcp(S3_0_C12_C12_5, 0xF);
+/****************************************************************************/
+/**
+* This function enables Group0 interrupts
+*
+* @param	None.
+*
+* @return	None.
+*
+* @note        None.
+*
+*****************************************************************************/
+#define XScuGic_Enable_Group0_Interrupts() mtcp(S3_0_C12_C12_6,0x1);
+/****************************************************************************/
+/**
+* This function enables Group1 interrupts
+*
+* @param	None.
+*
+* @return	None.
+*
+* @note        None.
+*
+*****************************************************************************/
+#if defined (__aarch64__)
+#if EL1_NONSECURE
+#define XScuGic_Enable_Group1_Interrupts() \
+		mtcp (S3_0_C12_C12_7, 0x1 | mfcp(S3_0_C12_C12_7) );
+#else
+#define XScuGic_Enable_Group1_Interrupts() \
+		mtcp (S3_6_C12_C12_7, 0x1 | mfcp(S3_6_C12_C12_7) );
+#endif
+#endif
+/****************************************************************************/
+/**
+* This function writes to ICC_SGI0R_EL1
+*
+* @param	value to be written
+*
+* @return	None.
+*
+* @note     None.
+*
+*****************************************************************************/
+#define XScuGic_WriteICC_SGI0R_EL1(val) mtcp(S3_0_C12_C11_7,val)
+
+/****************************************************************************/
+/**
+* This function writes to ICC_SGI1R_EL1
+*
+* @param	value to be written
+*
+* @return	None.
+*
+* @note        None.
+*
+*****************************************************************************/
+#define XScuGic_WriteICC_SGI1R_EL1(val) mtcp(S3_0_C12_C11_5,val)
+
+/****************************************************************************/
+/**
+* This function reads ICC_SGI1R_EL1 register
+*
+* @param	None
+*
+* @return	Value of ICC_SGI1R_EL1 register
+*
+* @note        None.
+*
+*****************************************************************************/
+#define XScuGic_ReadICC_SGI1R_EL1() mfcp(S3_0_C12_C11_5)
+/****************************************************************************/
+/**
+* This function sets interrupt priority filter
+*
+* @param	val is the priority filter value to be written.
+*
+* @return	None.
+*
+* @note        None.
+*
+*****************************************************************************/
+#define XScuGic_set_priority_filter(val)  __asm__ __volatile__("msr  S3_0_C4_C6_0,%0"  : : "r" (val))
+/****************************************************************************/
+/**
+* This function returns interrupt id of highest priority pending interrupt
+*
+* @param	None.
+*
+* @return	Interrupt ID of the highest priority pending interrupt.
+*
+* @note        None.
+*
+*****************************************************************************/
+#if defined (__aarch64__)
+#if EL3
+#define XScuGic_get_IntID()  mfcp(S3_0_C12_C8_0)
+#else
+#define XScuGic_get_IntID()  mfcp(S3_0_C12_C12_0)
+#endif
+#endif
+/****************************************************************************/
+/**
+* This function acks the interrupt
+*
+* @param	val is the interrupt ID to be acknowledged.
+*
+* @return	None.
+*
+* @note        None.
+*
+*****************************************************************************/
+#if  defined (__aarch64__)
+#if EL3
+#define XScuGic_ack_Int(val)   mtcp(S3_0_C12_C8_1,val)
+#else
+#define XScuGic_ack_Int(val)   mtcp(S3_0_C12_C12_1,val)
+#endif
+#endif
+/****************************************************************************/
+/**
+* This macro returns bit position for the specific interrupt's trigger type
+* configuration within GICR_ICFGR0/GICR_ICFGR1 register
+*
+* @param	IntrId is the interrupt ID.
+*
+* @return	Bit position of the trigger type configuration for the interrupt.
+*
+* @note        None.
+*
+*****************************************************************************/
+#define XScuGic_Get_Rdist_Int_Trigger_Index(IntrId)  (((IntrId%16) & 0x1f) << 2) +1
+/************************** Function Prototypes ******************************/
+
+/*
+ * Required functions in xscugic.c
+ */
+
+s32  XScuGic_Connect(XScuGic *InstancePtr, u32 Int_Id,
+			Xil_InterruptHandler Handler, void *CallBackRef);
+void XScuGic_Disconnect(XScuGic *InstancePtr, u32 Int_Id);
+
+void XScuGic_Enable(XScuGic *InstancePtr, u32 Int_Id);
+void XScuGic_Disable(XScuGic *InstancePtr, u32 Int_Id);
+
+s32  XScuGic_CfgInitialize(XScuGic *InstancePtr, XScuGic_Config *ConfigPtr,
+							u32 EffectiveAddr);
+
+s32  XScuGic_SoftwareIntr(XScuGic *InstancePtr, u32 Int_Id, u32 Cpu_Identifier);
+
+void XScuGic_GetPriorityTriggerType(XScuGic *InstancePtr, u32 Int_Id,
+					u8 *Priority, u8 *Trigger);
+void XScuGic_SetPriorityTriggerType(XScuGic *InstancePtr, u32 Int_Id,
+					u8 Priority, u8 Trigger);
+void XScuGic_InterruptMaptoCpu(XScuGic *InstancePtr, u8 Cpu_Identifier, u32 Int_Id);
+void XScuGic_InterruptUnmapFromCpu(XScuGic *InstancePtr, u8 Cpu_Identifier, u32 Int_Id);
+void XScuGic_UnmapAllInterruptsFromCpu(XScuGic *InstancePtr, u8 Cpu_Identifier);
+void XScuGic_Stop(XScuGic *InstancePtr);
+void XScuGic_SetCpuID(u32 CpuCoreId);
+u32 XScuGic_GetCpuID(void);
+u8 XScuGic_IsInitialized(u32 DeviceId);
+/*
+ * Initialization functions in xscugic_sinit.c
+ */
+XScuGic_Config *XScuGic_LookupConfig(u16 DeviceId);
+XScuGic_Config *XScuGic_LookupConfigBaseAddr(UINTPTR BaseAddress);
+
+/*
+ * Interrupt functions in xscugic_intr.c
+ */
+void XScuGic_InterruptHandler(XScuGic *InstancePtr);
+
+/*
+ * Self-test functions in xscugic_selftest.c
+ */
+s32  XScuGic_SelfTest(XScuGic *InstancePtr);
+
+#if defined (GICv3)
+void XScuGic_MarkCoreAsleep(XScuGic *InstancePtr);
+void XScuGic_MarkCoreAwake(XScuGic *InstancePtr);
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+#endif            /* end of protection macro */
+/** @} */
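
A minimal bring-up sketch using only the prototypes declared above; it is not part
of the imported sources. The device ID, interrupt ID, and connected handler are
placeholders, and passing Cfg->CpuBaseAddress as the effective address follows
common Xilinx usage but is an assumption here.

    /* Illustrative sketch only -- not part of the imported Xilinx sources. */
    #include "xscugic.h"

    static XScuGic Gic;

    static void SomeDeviceHandler(void *CallBackRef) { /* device ISR body */ }

    static int gic_setup(void)
    {
        XScuGic_Config *Cfg = XScuGic_LookupConfig(0U);  /* placeholder device id */
        if (Cfg == NULL) {
            return -1;
        }
        if (XScuGic_CfgInitialize(&Gic, Cfg, Cfg->CpuBaseAddress) != XST_SUCCESS) {
            return -1;
        }
        /* Connect a handler for a placeholder interrupt ID and unmask it. */
        if (XScuGic_Connect(&Gic, 54U, SomeDeviceHandler, NULL) != XST_SUCCESS) {
            return -1;
        }
        XScuGic_Enable(&Gic, 54U);
        /* XScuGic_InterruptHandler(&Gic) must additionally be installed as the
         * CPU's IRQ exception handler by OS/BSP-specific means. */
        return 0;
    }
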
diff --git a/embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic_hw.h b/embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic_hw.h
new file mode 100644
index 0000000..57b388b
--- /dev/null
+++ b/embeddedsw/XilinxProcessorIPLib/drivers/scugic/src/xscugic_hw.h
@@ -0,0 +1,697 @@
+/******************************************************************************
+* Copyright (C) 2010 - 2022 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xscugic_hw.h
+* @addtogroup scugic_v4_7
+* @{
+*
+* This header file contains identifiers and HW access functions (or
+* macros) that can be used to access the device.  The user should refer to the
+* hardware device specification for more details of the device operation.
+* The driver functions/APIs are defined in xscugic.h.
+*
+* This GIC device has two parts, a distributor and CPU interface(s). Each part
+* has separate register definition sections.
+*
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------------
+* 1.00a drg  01/19/10 First release
+* 1.01a sdm  11/09/11 "xil_exception.h" added as include.
+*		      Macros XScuGic_EnableIntr and XScuGic_DisableIntr are
+*		      added to enable or disable interrupts based on
+*		      Distributor Register base address. Normally users use
+*		      XScuGic instance and call XScuGic_Enable or
+*		      XScuGic_Disable to enable/disable interrupts. These
+*		      new macros are provided when user does not want to
+*		      use an instance pointer but still wants to enable or
+*		      disable interrupts.
+*		      Function prototypes for functions (present in newly
+*		      added file xscugic_hw.c) are added.
+* 1.03a srt  02/27/13 Moved Offset calculation macros from *_hw.c (CR
+*		      702687).
+* 1.04a hk   05/04/13 Fix for CR#705621. Moved function prototypes
+*		      XScuGic_SetPriTrigTypeByDistAddr and
+*         	      XScuGic_GetPriTrigTypeByDistAddr here from xscugic.h
+* 3.0	pkp  12/09/14 changed XSCUGIC_MAX_NUM_INTR_INPUTS for
+*		      Zynq Ultrascale Mp
+* 3.0   kvn  02/13/14 Modified code for MISRA-C:2012 compliance.
+* 3.2	pkp  11/09/15 Corrected the interrupt processor target mask value
+*					  for CPU interface 2 i.e. XSCUGIC_SPI_CPU2_MASK
+* 3.9   mus  02/21/18 Added new API's XScuGic_InterruptUnmapFromCpuByDistAddr
+*					  and XScuGic_UnmapAllInterruptsFromCpuByDistAddr, These
+*					  API's can be used by applications to unmap specific/all
+*					  interrupts from target CPU. It fixes CR#992490.
+* 3.10  aru  08/23/18 Resolved MISRA-C:2012 compliance mandatory violations
+* 4.1   asa  03/30/19 Removed macros for XScuGic_EnableIntr, and
+*                     XScuGic_DisableIntr. These are now C functions. This
+*                     change was to fix CR-1024716.
+* 4.1   mus  06/12/19 Updated XSCUGIC_MAX_NUM_INTR_INPUTS for Versal.
+* 4.6	sk   06/07/21 Delete the commented macro code to fix the MISRA-C warning.
+* 4.6	sk   08/05/21 Fix Scugic Misrac violations.
+* 4.7	sk   12/10/21 Update XSCUGIC_SPI_INT_ID_START macro from signed to unsigned
+* 		      to fix misrac violation.
+* 4.7   mus  03/17/22 GICv3 coupled with A72 has different redistributor for
+*                     each core, and each redistributor has different address,
+*                     Updated #define for re-distributor address to have correct
+*                     value based on the cpu number. It fixes CR#1126156.
+*
+* </pre>
+*
+******************************************************************************/
+
+#ifndef XSCUGIC_HW_H /* prevent circular inclusions */
+#define XSCUGIC_HW_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+#include "xil_io.h"
+#include "xil_exception.h"
+#include "bspconfig.h"
+
+/************************** Constant Definitions *****************************/
+#if defined (versal) && !defined(ARMR5)
+#define GICv3
+#endif
+
+/*
+ * The maximum number of interrupts supported by the hardware.
+ */
+#ifdef PLATFORM_ZYNQ
+#define XSCUGIC_MAX_NUM_INTR_INPUTS    	95U /* Maximum number of interrupts defined by Zynq */
+#elif defined (versal)
+#define XSCUGIC_MAX_NUM_INTR_INPUTS    	192U
+#else
+#define XSCUGIC_MAX_NUM_INTR_INPUTS    	195U /* Maximum number of interrupts defined by Zynq Ultrascale Mp */
+#endif
+
+/*
+ * First Interrupt Id for SPI interrupts.
+ */
+#define XSCUGIC_SPI_INT_ID_START	0x20U
+/*
+ * The maximum priority value that can be used in the GIC.
+ */
+#define XSCUGIC_MAX_INTR_PRIO_VAL    	248U
+#define XSCUGIC_INTR_PRIO_MASK			0x000000F8U
+
+/** @name Distributor Interface Register Map
+ *
+ * Define the offsets from the base address for all Distributor registers of
+ * the interrupt controller, some registers may be reserved in the hardware
+ * device.
+ * @{
+ */
+#define XSCUGIC_DIST_EN_OFFSET		0x00000000U /**< Distributor Enable
+							Register */
+#define XSCUGIC_IC_TYPE_OFFSET		0x00000004U /**< Interrupt Controller
+							Type Register */
+#define XSCUGIC_DIST_IDENT_OFFSET	0x00000008U /**< Implementor ID
+							Register */
+#define XSCUGIC_SECURITY_OFFSET		0x00000080U /**< Interrupt Security
+							Register */
+#define XSCUGIC_ENABLE_SET_OFFSET	0x00000100U /**< Enable Set
+							Register */
+#define XSCUGIC_DISABLE_OFFSET		0x00000180U /**< Enable Clear Register */
+#define XSCUGIC_PENDING_SET_OFFSET	0x00000200U /**< Pending Set
+							Register */
+#define XSCUGIC_PENDING_CLR_OFFSET	0x00000280U /**< Pending Clear
+							Register */
+#define XSCUGIC_ACTIVE_OFFSET		0x00000300U /**< Active Status Register */
+#define XSCUGIC_PRIORITY_OFFSET		0x00000400U /**< Priority Level Register */
+#define XSCUGIC_SPI_TARGET_OFFSET	0x00000800U /**< SPI Target
+							Register 0x800-0x8FB */
+#define XSCUGIC_INT_CFG_OFFSET		0x00000C00U /**< Interrupt Configuration
+							Register 0xC00-0xCFC */
+#define XSCUGIC_PPI_STAT_OFFSET		0x00000D00U /**< PPI Status Register */
+#define XSCUGIC_SPI_STAT_OFFSET		0x00000D04U /**< SPI Status Register
+							0xd04-0xd7C */
+#define XSCUGIC_AHB_CONFIG_OFFSET	0x00000D80U /**< AHB Configuration
+							Register */
+#define XSCUGIC_SFI_TRIG_OFFSET		0x00000F00U /**< Software Triggered
+							Interrupt Register */
+#define XSCUGIC_PERPHID_OFFSET		0x00000FD0U /**< Peripheral ID Reg */
+#if defined (GICv3)
+#define XSCUGIC_PCELLID_OFFSET		0x0000FFF0U /**< Pcell ID Register */
+#else
+#define XSCUGIC_PCELLID_OFFSET		0x00000FF0U /**< Pcell ID Register */
+#endif
+/* @} */
+
+/** @name  Distributor Enable Register
+ * Controls whether the distributor responds to external interrupt inputs.
+ * @{
+ */
+#if defined (GICv3)
+#define XSCUGIC_EN_INT_MASK		0x00000003U /**< Interrupt In Enable */
+#else
+#define XSCUGIC_EN_INT_MASK		0x00000001U /**< Interrupt In Enable */
+#endif
+/* @} */
+
+/** @name  Interrupt Controller Type Register
+ * @{
+ */
+#define XSCUGIC_LSPI_MASK	0x0000F800U /**< Number of Lockable
+						Shared Peripheral
+						Interrupts*/
+#define XSCUGIC_DOMAIN_MASK	0x00000400U /**< Number of Security domains */
+#define XSCUGIC_CPU_NUM_MASK	0x000000E0U /**< Number of CPU Interfaces */
+#define XSCUGIC_NUM_INT_MASK	0x0000001FU /**< Number of Interrupt IDs */
+/* @} */
+
+/** @name  Implementor ID Register
+ * Implementor and revision information.
+ * @{
+ */
+#define XSCUGIC_REV_MASK	0x00FFF000U /**< Revision Number */
+#define XSCUGIC_IMPL_MASK	0x00000FFFU /**< Implementor */
+/* @} */
+
+/** @name  Interrupt Security Registers
+ * Each bit controls the security level of an interrupt, either secure or non
+ * secure. These registers can only be accessed using secure read and write.
+ * There are registers for each of the CPU interfaces at offset 0x080.  A
+ * register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 32 of these registers starting at location 0x084.
+ * @{
+ */
+#define XSCUGIC_INT_NS_MASK	0x00000001U /**< Each bit corresponds to an
+						INT_ID */
+/* @} */
+
+/** @name  Enable Set Register
+ * Each bit controls the enabling of an interrupt, a 0 is disabled, a 1 is
+ * enabled. Writing a 0 has no effect. Use the ENABLE_CLR register to set a
+ * bit to 0.
+ * There are registers for each of the CPU interfaces at offset 0x100. With up
+ * to 8 registers aliased to the same address. A register set for the SPI
+ * interrupts is available to all CPU interfaces.
+ * There are up to 32 of these registers starting at location 0x104.
+ * @{
+ */
+#define XSCUGIC_INT_EN_MASK	0x00000001U /**< Each bit corresponds to an
+						INT_ID */
+/* @} */
+
+/** @name  Enable Clear Register
+ * Each bit controls the disabling of an interrupt, a 0 is disabled, a 1 is
+ * enabled. Writing a 0 has no effect. Writing a 1 disables an interrupt and
+ * sets the corresponding bit to 0.
+ * There are registers for each of the CPU interfaces at offset 0x180. With up
+ * to 8 registers aliased to the same address.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 32 of these registers starting at location 0x184.
+ * @{
+ */
+#define XSCUGIC_INT_CLR_MASK	0x00000001U /**< Each bit corresponds to an
+						INT_ID */
+/* @} */
+
+/** @name  Pending Set Register
+ * Each bit controls the Pending or Active and Pending state of an interrupt, a
+ * 0 is not pending, a 1 is pending. Writing a 0 has no effect. Writing a 1 sets
+ * an interrupt to the pending state.
+ * There are registers for each of the CPU interfaces at offset 0x200. With up
+ * to 8 registers aliased to the same address.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 32 of these registers starting at location 0x204.
+ * @{
+ */
+#define XSCUGIC_PEND_SET_MASK	0x00000001U /**< Each bit corresponds to an
+						INT_ID */
+/* @} */
+
+/** @name  Pending Clear Register
+ * Each bit can clear the Pending or Active and Pending state of an interrupt, a
+ * 0 is not pending, a 1 is pending. Writing a 0 has no effect. Writing a 1
+ * clears the pending state of an interrupt.
+ * There are registers for each of the CPU interfaces at offset 0x280. With up
+ * to 8 registers aliased to the same address.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 32 of these registers starting at location 0x284.
+ * @{
+ */
+#define XSCUGIC_PEND_CLR_MASK	0x00000001U /**< Each bit corresponds to an
+						INT_ID */
+/* @} */
+
+/** @name  Active Status Register
+ * Each bit provides the Active status of an interrupt, a
+ * 0 is not Active, a 1 is Active. This is a read only register.
+ * There are registers for each of the CPU interfaces at offset 0x300. With up
+ * to 8 registers aliased to each address.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 32 of these registers starting at location 0x380.
+ * @{
+ */
+#define XSCUGIC_ACTIVE_MASK	0x00000001U /**< Each bit corresponds to an
+					      INT_ID */
+/* @} */
+
+/** @name  Priority Level Register
+ * Each byte in a Priority Level Register sets the priority level of an
+ * interrupt. Reading the register provides the priority level of an interrupt.
+ * There are registers for each of the CPU interfaces at offset 0x400 through
+ * 0x41C. With up to 8 registers aliased to each address.
+ * 0 is highest priority, 0xFF is lowest.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 255 of these registers starting at location 0x420.
+ * @{
+ */
+#define XSCUGIC_PRIORITY_MASK	0x000000FFU /**< Each Byte corresponds to an
+						INT_ID */
+#define XSCUGIC_PRIORITY_MAX	0x000000FFU /**< Highest value of a priority
+						actually the lowest priority*/
+/* @} */
+
+/** @name  SPI Target Register 0x800-0x8FB
+ * Each byte references a separate SPI and programs which of the up to 8 CPU
+ * interfaces are sent a Pending interrupt.
+ * There are registers for each of the CPU interfaces at offset 0x800 through
+ * 0x81C. With up to 8 registers aliased to each address.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 255 of these registers starting at location 0x820.
+ *
+ * This driver does not support multiple CPU interfaces. These are included
+ * for complete documentation.
+ * @{
+ */
+#define XSCUGIC_SPI_CPU7_MASK	0x00000080U /**< CPU 7 Mask*/
+#define XSCUGIC_SPI_CPU6_MASK	0x00000040U /**< CPU 6 Mask*/
+#define XSCUGIC_SPI_CPU5_MASK	0x00000020U /**< CPU 5 Mask*/
+#define XSCUGIC_SPI_CPU4_MASK	0x00000010U /**< CPU 4 Mask*/
+#define XSCUGIC_SPI_CPU3_MASK	0x00000008U /**< CPU 3 Mask*/
+#define XSCUGIC_SPI_CPU2_MASK	0x00000004U /**< CPU 2 Mask*/
+#define XSCUGIC_SPI_CPU1_MASK	0x00000002U /**< CPU 1 Mask*/
+#define XSCUGIC_SPI_CPU0_MASK	0x00000001U /**< CPU 0 Mask*/
+/* @} */
+
+/** @name  Interrupt Configuration Register 0xC00-0xCFC
+ * The interrupt configuration registers program an SFI to be active HIGH level
+ * sensitive or rising edge sensitive.
+ * Each bit pair describes the configuration for an INT_ID.
+ * SFI    Read Only    b10 always
+ * PPI    Read Only    depending on how the PPIs are configured.
+ *                    b01    Active HIGH level sensitive
+ *                    b11 Rising edge sensitive
+ * SPI                LSB is read only.
+ *                    b01    Active HIGH level sensitive
+ *                    b11 Rising edge sensitive
+ * There are registers for each of the CPU interfaces at offset 0xC00 through
+ * 0xC04. With up to 8 registers aliased to each address.
+ * A register set for the SPI interrupts is available to all CPU interfaces.
+ * There are up to 255 of these registers starting at location 0xC08.
+ * @{
+ */
+#define XSCUGIC_INT_CFG_MASK    0x00000003U    /**< Interrupt configuration field; one bit pair per INT_ID */
+/* @} */
+
+/** @name  PPI Status Register
+ * Enables an external AMBA master to access the status of the PPI inputs.
+ * A CPU can only read the status of its local PPI signals and cannot read the
+ * status for other CPUs.
+ * This register is aliased for each CPU interface.
+ * @{
+ */
+#define XSCUGIC_PPI_C15_MASK	0x00008000U    /**< PPI Status */
+#define XSCUGIC_PPI_C14_MASK	0x00004000U    /**< PPI Status */
+#define XSCUGIC_PPI_C13_MASK	0x00002000U    /**< PPI Status */
+#define XSCUGIC_PPI_C12_MASK	0x00001000U    /**< PPI Status */
+#define XSCUGIC_PPI_C11_MASK	0x00000800U    /**< PPI Status */
+#define XSCUGIC_PPI_C10_MASK	0x00000400U    /**< PPI Status */
+#define XSCUGIC_PPI_C09_MASK	0x00000200U    /**< PPI Status */
+#define XSCUGIC_PPI_C08_MASK	0x00000100U    /**< PPI Status */
+#define XSCUGIC_PPI_C07_MASK	0x00000080U    /**< PPI Status */
+#define XSCUGIC_PPI_C06_MASK	0x00000040U    /**< PPI Status */
+#define XSCUGIC_PPI_C05_MASK	0x00000020U    /**< PPI Status */
+#define XSCUGIC_PPI_C04_MASK	0x00000010U    /**< PPI Status */
+#define XSCUGIC_PPI_C03_MASK	0x00000008U    /**< PPI Status */
+#define XSCUGIC_PPI_C02_MASK	0x00000004U    /**< PPI Status */
+#define XSCUGIC_PPI_C01_MASK	0x00000002U    /**< PPI Status */
+#define XSCUGIC_PPI_C00_MASK	0x00000001U    /**< PPI Status */
+/* @} */
+
+/** @name  SPI Status Register 0xd04-0xd7C
+ * Enables an external AMBA master to access the status of the SPI inputs.
+ * There are up to 63 registers if the maximum number of SPI inputs are
+ * configured.
+ * @{
+ */
+#define XSCUGIC_SPI_N_MASK    0x00000001U    /**< Each bit corresponds to an SPI
+					     input */
+/* @} */
+
+/** @name  AHB Configuration Register
+ * Provides the status of the CFGBIGEND input signal and allows the endianness
+ * of the GIC to be set.
+ * @{
+ */
+#define XSCUGIC_AHB_END_MASK       0x00000004U    /**< 0-GIC uses little Endian,
+                                                  1-GIC uses Big Endian */
+#define XSCUGIC_AHB_ENDOVR_MASK    0x00000002U    /**< 0-Uses CFGBIGEND control,
+                                                  1-use the AHB_END bit */
+#define XSCUGIC_AHB_TIE_OFF_MASK   0x00000001U    /**< State of CFGBIGEND */
+
+/* @} */
+
+/** @name  Software Triggered Interrupt Register
+ * Controls issuing of software interrupts.
+ * @{
+ */
+#define XSCUGIC_SFI_SELFTRIG_MASK	0x02010000U
+#define XSCUGIC_SFI_TRIG_TRGFILT_MASK    0x03000000U    /**< Target List filter
+                                                            b00-Use the target List
+                                                            b01-All CPUs except requester
+                                                            b10-To Requester
+                                                            b11-reserved */
+#define XSCUGIC_SFI_TRIG_CPU_MASK	0x00FF0000U    /**< CPU Target list */
+#define XSCUGIC_SFI_TRIG_SATT_MASK	0x00008000U    /**< 0= Use a secure interrupt */
+#define XSCUGIC_SFI_TRIG_INTID_MASK	0x0000000FU    /**< Set to the INTID
+                                                        signaled to the CPU*/
+/* @} */
+
+/** @name CPU Interface Register Map
+ *
+ * Define the offsets from the base address for all CPU registers of the
+ * interrupt controller, some registers may be reserved in the hardware device.
+ * @{
+ */
+#define XSCUGIC_CONTROL_OFFSET		0x00000000U /**< CPU Interface Control
+							Register */
+#define XSCUGIC_CPU_PRIOR_OFFSET	0x00000004U /**< Priority Mask Reg */
+#define XSCUGIC_BIN_PT_OFFSET		0x00000008U /**< Binary Point Register */
+#define XSCUGIC_INT_ACK_OFFSET		0x0000000CU /**< Interrupt ACK Reg */
+#define XSCUGIC_EOI_OFFSET		0x00000010U /**< End of Interrupt Reg */
+#define XSCUGIC_RUN_PRIOR_OFFSET	0x00000014U /**< Running Priority Reg */
+#define XSCUGIC_HI_PEND_OFFSET		0x00000018U /**< Highest Pending Interrupt
+							Register */
+#define XSCUGIC_ALIAS_BIN_PT_OFFSET	0x0000001CU /**< Aliased non-Secure
+						        Binary Point Register */
+
+/**<  0x00000020 to 0x00000FBC are reserved and should not be read or written
+ * to. */
+/* @} */
+
+
+/** @name Control Register
+ * CPU Interface Control register definitions
+ * All bits are defined here although some are not available in the non-secure
+ * mode.
+ * @{
+ */
+#define XSCUGIC_CNTR_SBPR_MASK	0x00000010U    /**< Secure Binary Pointer,
+                                                 0=separate registers,
+                                                 1=both use bin_pt_s */
+#define XSCUGIC_CNTR_FIQEN_MASK	0x00000008U    /**< Use nFIQ_C for secure
+                                                  interrupts,
+                                                  0= use IRQ for both,
+                                                  1=Use FIQ for secure, IRQ for non*/
+#define XSCUGIC_CNTR_ACKCTL_MASK	0x00000004U    /**< Ack control for secure or non secure */
+#define XSCUGIC_CNTR_EN_NS_MASK		0x00000002U    /**< Non Secure enable */
+#define XSCUGIC_CNTR_EN_S_MASK		0x00000001U    /**< Secure enable, 0=Disabled, 1=Enabled */
+/* @} */
+
+
+/** @name Binary Point Register
+ * Binary Point register definitions
+ * @{
+ */
+
+#define XSCUGIC_BIN_PT_MASK	0x00000007U  /**< Binary point mask value
+						Value  Secure  Non-secure
+						b000    0xFE    0xFF
+						b001    0xFC    0xFE
+						b010    0xF8    0xFC
+						b011    0xF0    0xF8
+						b100    0xE0    0xF0
+						b101    0xC0    0xE0
+						b110    0x80    0xC0
+						b111    0x00    0x80
+						*/
+/*@}*/
+
+/** @name Interrupt Acknowledge Register
+ * Interrupt Acknowledge register definitions
+ * Identifies the current Pending interrupt, and the CPU ID for software
+ * interrupts.
+ */
+#define XSCUGIC_ACK_INTID_MASK		0x000003FFU /**< Interrupt ID */
+#define XSCUGIC_CPUID_MASK		0x00000C00U /**< CPU ID */
+/* @} */
+
+/** @name End of Interrupt Register
+ * End of Interrupt register definitions
+ * Allows the CPU to signal the GIC when it completes an interrupt service
+ * routine.
+ */
+#define XSCUGIC_EOI_INTID_MASK		0x000003FFU /**< Interrupt ID */
+
+/* @} */
+
+/** @name Running Priority Register
+ * Running Priority register definitions
+ * Identifies the interrupt priority level of the highest priority active
+ * interrupt.
+ */
+#define XSCUGIC_RUN_PRIORITY_MASK	0x000000FFU    /**< Interrupt Priority */
+/* @} */
+
+#if defined (GICv3)
+#define XSCUGIC_IROUTER_BASE_OFFSET 0x6000U
+#endif
+/*
+ * Highest Pending Interrupt register definitions
+ * Identifies the interrupt priority of the highest priority pending interrupt
+ */
+#define XSCUGIC_PEND_INTID_MASK		0x000003FFU /**< Pending Interrupt ID */
+/* @} */
+#if defined (GICv3)
+/** @name ReDistributor Interface Register Map
+ *
+ * @{
+ */
+#define XSCUGIC_RDIST_OFFSET              (0x80000U + (XPAR_CPU_ID * 0x20000))
+#define XSCUGIC_RDIST_BASE_ADDRESS        (XPAR_SCUGIC_0_DIST_BASEADDR + XSCUGIC_RDIST_OFFSET)
+#define XSCUGIC_RDIST_SGI_PPI_OFFSET      (0x90000U + (XPAR_CPU_ID * 0x20000))
+#define XSCUGIC_RDIST_SGI_PPI_BASE_ADDRESS    (XPAR_SCUGIC_0_DIST_BASEADDR + XSCUGIC_RDIST_SGI_PPI_OFFSET)
+#define XSCUGIC_RDIST_ISENABLE_OFFSET     0x100U
+#define XSCUGIC_RDIST_IPRIORITYR_OFFSET   0x400U
+#define XSCUGIC_RDIST_IGROUPR_OFFSET      0x80U
+#define XSCUGIC_RDIST_GRPMODR_OFFSET      0xD00U
+#define XSCUGIC_RDIST_INT_CONFIG_OFFSET   0xC00U
+#define XSCUGIC_RDIST_WAKER_OFFSET        0x14U
+#define XSCUGIC_SGIR_EL1_INITID_SHIFT    24U
+
+/*
+ * GICR_IGROUPR  register definitions
+ */
+#if EL3
+#define XSCUGIC_DEFAULT_SECURITY    0x0U
+#else
+#define XSCUGIC_DEFAULT_SECURITY    0xFFFFFFFFU
+#endif
+/*
+ * GICR_WAKER  register definitions
+ */
+#define XSCUGIC_RDIST_WAKER_LOW_POWER_STATE_MASK    0x7
+#endif
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/****************************************************************************/
+/**
+*
+* Read the Interrupt Configuration Register offset for an interrupt id.
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_INT_CFG_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_INT_CFG_OFFSET + (((InterruptID)/16U) * 4U))
+
+/****************************************************************************/
+/**
+*
+* Read the Interrupt Priority Register offset for an interrupt id.
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_PRIORITY_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_PRIORITY_OFFSET + (((InterruptID)/4U) * 4U))
+
+/****************************************************************************/
+/**
+*
+* Read the Interrupt Routing Register offset for an interrupt id.
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_IROUTER_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_IROUTER_BASE_OFFSET + (InterruptID * 8))
+
+/****************************************************************************/
+/**
+*
+* Read the SPI Target Register offset for an interrupt id.
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_SPI_TARGET_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_SPI_TARGET_OFFSET + (((InterruptID)/4U) * 4U))
+/****************************************************************************/
+/**
+*
+* Read the Interrupt Security Register offset for an interrupt id.
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_SECURITY_TARGET_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_SECURITY_OFFSET + (((InterruptID)/32U)*4U))
+
+/****************************************************************************/
+/**
+*
+* Read the Re-distributor Interrupt configuration register offset
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_RDIST_INT_CONFIG_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_RDIST_INT_CONFIG_OFFSET + ((InterruptID /16)*4))
+
+/****************************************************************************/
+/**
+*
+* Read the Re-distributor Interrupt Priority register offset
+*
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_RDIST_INT_PRIORITY_OFFSET_CALC(InterruptID) \
+	((u32)XSCUGIC_RDIST_IPRIORITYR_OFFSET + (InterruptID * 4))
+/****************************************************************************/
+/**
+*
+* Read the Interrupt Clear-Enable Register offset for an interrupt ID
+*
+* @param	Register is the register offset for the clear/enable bank.
+* @param	InterruptID is the interrupt number.
+*
+* @return	The 32-bit value of the offset
+*
+* @note
+*
+*****************************************************************************/
+#define XSCUGIC_EN_DIS_OFFSET_CALC(Register, InterruptID) \
+		((Register) + (((InterruptID)/32U) * 4U))
+
+/****************************************************************************/
+/**
+*
+* Read the given Intc register.
+*
+* @param	BaseAddress is the base address of the device.
+* @param	RegOffset is the register offset to be read
+*
+* @return	The 32-bit value of the register
+*
+* @note
+* C-style signature:
+*    u32 XScuGic_ReadReg(u32 BaseAddress, u32 RegOffset)
+*
+*****************************************************************************/
+#define XScuGic_ReadReg(BaseAddress, RegOffset) \
+	(Xil_In32((BaseAddress) + (RegOffset)))
+
+
+/****************************************************************************/
+/**
+*
+* Write the given Intc register.
+*
+* @param	BaseAddress is the base address of the device.
+* @param	RegOffset is the register offset to be written
+* @param	Data is the 32-bit value to write to the register
+*
+* @return	None.
+*
+* @note
+* C-style signature:
+*    void XScuGic_WriteReg(u32 BaseAddress, u32 RegOffset, u32 Data)
+*
+*****************************************************************************/
+#define XScuGic_WriteReg(BaseAddress, RegOffset, Data) \
+	(Xil_Out32(((BaseAddress) + (RegOffset)), ((u32)(Data))))
+
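+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): the offset-calculation macros above are typically combined with
+ * XScuGic_ReadReg/XScuGic_WriteReg for read-modify-write accesses to the
+ * distributor. DistBaseAddr, IntId and Priority below are assumed arguments;
+ * each GICD_IPRIORITYR word holds four 8-bit priority fields, hence the
+ * (IntId % 4U) * 8U shift that clears the old field and inserts the new one.
+ *
+ *   static void ExampleSetPriority(u32 DistBaseAddr, u32 IntId, u8 Priority)
+ *   {
+ *       u32 Offset = XSCUGIC_PRIORITY_OFFSET_CALC(IntId);
+ *       u32 Reg = XScuGic_ReadReg(DistBaseAddr, Offset);
+ *
+ *       Reg &= ~((u32)0xFFU << ((IntId % 4U) * 8U));
+ *       Reg |= ((u32)Priority << ((IntId % 4U) * 8U));
+ *       XScuGic_WriteReg(DistBaseAddr, Offset, Reg);
+ *   }
+ */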
+
+/************************** Function Prototypes ******************************/
+
+void XScuGic_DeviceInterruptHandler(void *DeviceId);
+s32  XScuGic_DeviceInitialize(u32 DeviceId);
+void XScuGic_RegisterHandler(u32 BaseAddress, s32 InterruptID,
+			     Xil_InterruptHandler IntrHandler, void *CallBackRef);
+void XScuGic_SetPriTrigTypeByDistAddr(u32 DistBaseAddress, u32 Int_Id,
+                                        u8 Priority, u8 Trigger);
+void XScuGic_GetPriTrigTypeByDistAddr(u32 DistBaseAddress, u32 Int_Id,
+					u8 *Priority, u8 *Trigger);
+void XScuGic_InterruptMapFromCpuByDistAddr(u32 DistBaseAddress,
+							u8 Cpu_Id, u32 Int_Id);
+void XScuGic_InterruptUnmapFromCpuByDistAddr(u32 DistBaseAddress,
+											u8 Cpu_Id, u32 Int_Id);
+void XScuGic_UnmapAllInterruptsFromCpuByDistAddr(u32 DistBaseAddress,
+												u8 Cpu_Id);
+void XScuGic_EnableIntr (u32 DistBaseAddress, u32 Int_Id);
+void XScuGic_DisableIntr (u32 DistBaseAddress, u32 Int_Id);
+/************************** Variable Definitions *****************************/
+#ifdef __cplusplus
+}
+#endif
+
+#endif            /* end of protection macro */
+/** @} */
diff --git a/embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_cache.h b/embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_cache.h
new file mode 100644
index 0000000..b878d05
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_cache.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_cache.h
+*
+* @addtogroup a53_64_cache_apis Cortex A53 64bit Processor Cache Functions
+*
+* Cache functions provide access to cache related operations such as flush
+* and invalidate for instruction and data caches. They give the option to
+* perform cache operations on a single cache line, a range of memory, or an
+* entire cache.
+*
+* @{
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- -----------------------------------------------
+* 5.00 	pkp  05/29/14 First release
+* </pre>
+*
+******************************************************************************/
+#ifndef XIL_CACHE_H
+#define XIL_CACHE_H
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *@cond nocomments
+ */
+
+/************************** Constant Definitions *****************************/
+#define L1_DATA_PREFETCH_CONTROL_MASK  0xE000
+#define L1_DATA_PREFETCH_CONTROL_SHIFT  13
+
+/**
+ *@endcond
+ */
+
+/***************** Macros (Inline Functions) Definitions *********************/
+#define Xil_DCacheFlushRange Xil_DCacheInvalidateRange
+
+/************************** Function Prototypes ******************************/
+void Xil_DCacheEnable(void);
+void Xil_DCacheDisable(void);
+void Xil_DCacheInvalidate(void);
+void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len);
+void Xil_DCacheInvalidateLine(INTPTR adr);
+void Xil_DCacheFlush(void);
+void Xil_DCacheFlushLine(INTPTR adr);
+
+void Xil_ICacheEnable(void);
+void Xil_ICacheDisable(void);
+void Xil_ICacheInvalidate(void);
+void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len);
+void Xil_ICacheInvalidateLine(INTPTR adr);
+void Xil_ConfigureL1Prefetch(u8 num);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+/**
+* @} End of "addtogroup a53_64_cache_apis".
+*/
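+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): the range operations declared above are typically wrapped around
+ * DMA transfers, e.g. in the emacps glue code. The buffer is invalidated
+ * before the device writes to it and again before the CPU reads it, so no
+ * stale lines mask the DMA data. RxBuf and RX_BUF_LEN are placeholder names;
+ * note that on this ARMv8 port Xil_DCacheFlushRange is aliased to
+ * Xil_DCacheInvalidateRange by the macro above.
+ *
+ *   static u8 RxBuf[RX_BUF_LEN] __attribute__((aligned(64)));
+ *
+ *   Xil_DCacheInvalidateRange((INTPTR)RxBuf, RX_BUF_LEN);
+ *   ... start the receive DMA and wait for completion ...
+ *   Xil_DCacheInvalidateRange((INTPTR)RxBuf, RX_BUF_LEN);
+ */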
diff --git a/embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_mmu.h b/embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_mmu.h
new file mode 100644
index 0000000..396a0ab
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/arm/ARMv8/64bit/xil_mmu.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+* @file xil_mmu.h
+*
+* @addtogroup a53_64_mmu_apis Cortex A53 64bit Processor MMU Handling
+*
+* The MMU functions equip users to modify the default memory attributes of the
+* MMU translation table as needed.
+*
+* @{
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who  Date     Changes
+* ----- ---- -------- ---------------------------------------------------
+* 5.00 	pkp  05/29/14 First release
+* </pre>
+*
+* @note
+*
+* None.
+*
+******************************************************************************/
+
+#ifndef XIL_MMU_H
+#define XIL_MMU_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/**************************** Type Definitions *******************************/
+
+/************************** Constant Definitions *****************************/
+
+/**
+ *@cond nocomments
+ */
+
+/* Memory type */
+#define NORM_NONCACHE 0x401UL 	/* Normal Non-cacheable*/
+#define STRONG_ORDERED 0x409UL	/* Strongly ordered (Device-nGnRnE)*/
+#define DEVICE_MEMORY 0x40DUL	/* Device memory (Device-nGnRE)*/
+#define RESERVED 0x0UL			/* reserved memory*/
+
+/* Normal write-through cacheable inner shareable*/
+#define NORM_WT_CACHE 0x711UL
+
+/* Normal write back cacheable inner-shareable */
+#define NORM_WB_CACHE 0x705UL
+
+/*
+ * shareability attribute only applicable to
+ * normal cacheable memory
+ */
+#define INNER_SHAREABLE (0x3 << 8)
+#define OUTER_SHAREABLE (0x2 << 8)
+#define NON_SHAREABLE	(~(0x3 << 8))
+
+/* Execution type */
+#define EXECUTE_NEVER ((0x1 << 53) | (0x1 << 54))
+
+/* Security type */
+#define NON_SECURE	(0x1 << 5)
+
+/************************** Variable Definitions *****************************/
+
+/************************** Function Prototypes ******************************/
+/**
+ *@endcond
+ */
+
+void Xil_SetTlbAttributes(UINTPTR Addr, u64 attrib);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* XIL_MMU_H */
+/**
+* @} End of "addtogroup a53_64_mmu_apis".
+*/
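+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): the attribute values above are passed to Xil_SetTlbAttributes()
+ * to change how a region is mapped. The addresses are placeholders only.
+ *
+ *   Xil_SetTlbAttributes(0x40000000UL, DEVICE_MEMORY);   peripheral window
+ *   Xil_SetTlbAttributes(0x10000000UL, NORM_NONCACHE);   uncached DMA buffer
+ */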
diff --git a/embeddedsw/lib/bsp/standalone/src/arm/common/xil_exception.h b/embeddedsw/lib/bsp/standalone/src/arm/common/xil_exception.h
new file mode 100644
index 0000000..144d842
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/arm/common/xil_exception.h
@@ -0,0 +1,408 @@
+/******************************************************************************
+* Copyright (c) 2015 - 2022 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_exception.h
+*
+* This header file contains ARM Cortex A53, A9, and R5 specific exception related APIs.
+* For exception related functions that can be used across all Xilinx supported
+* processors, please use xil_exception.h.
+*
+* @addtogroup arm_exception_apis ARM Processor Exception Handling
+* @{
+* ARM processor specific exception related APIs for Cortex A53, A9 and R5 can be
+* utilized for enabling/disabling IRQ, registering/removing handlers for
+* exceptions, or initializing the exception vector table with a null handler.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.2	pkp  	 28/05/15 First release
+* 6.0   mus      27/07/16 Consolidated file for a53,a9 and r5 processors
+* 6.7   mna      26/04/18 Add API Xil_GetExceptionRegisterHandler.
+* 6.7   asa      18/05/18 Update signature of API Xil_GetExceptionRegisterHandler.
+* 7.0   mus      01/03/19 Tweak Xil_ExceptionEnableMask and
+*                         Xil_ExceptionDisableMask macros to support legacy
+*                         examples for Cortexa72 EL3 exception level.
+* 7.3   mus      04/15/20 Added Xil_EnableNestedInterrupts and
+*                         Xil_DisableNestedInterrupts macros for ARMv8.
+*                         For Cortexa72, these macros would not be supported
+*                         at EL3, as Cortexa72 is using GIC-500 (GICv3), which
+*                         triggers only FIQ at EL3. Fix for CR#1062506
+* 7.6   mus      09/17/21 Updated flag checking to fix warning reported with
+*                         -Wundef compiler option CR#1110261
+* 7.7   mus      01/31/22 Few of the #defines in xil_exception.h are treated
+*                         in different ways based on the "versal" flag. In existing
+*                         flow, this flag is defined only in xparameters.h and
+*                         BSP compiler flags, it is not defined in application
+*                         compiler flags. So, including xil_exception.h in
+*                         application source file, without including
+*                         xparameters.h results  in incorrect behavior.
+*                         Including xparameters.h in xil_exception.h to avoid
+*                         such issues. It fixes CR#1120498.
+* 7.7	sk	 03/02/22 Define XExc_VectorTableEntry structure to fix
+* 			  misra_c_2012_rule_5_6 violation.
+* 7.7	sk	 03/02/22 Add XExc_VectorTable as extern to fix misra_c_2012_
+* 			  rule_8_4 violation.
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XIL_EXCEPTION_H /* prevent circular inclusions */
+#define XIL_EXCEPTION_H /* by using protection macros */
+
+/***************************** Include Files ********************************/
+
+#include "xil_types.h"
+#include "xpseudo_asm.h"
+#include "bspconfig.h"
+#include "xparameters.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************** Constant Definitions ****************************/
+
+#define XIL_EXCEPTION_FIQ	XREG_CPSR_FIQ_ENABLE
+#define XIL_EXCEPTION_IRQ	XREG_CPSR_IRQ_ENABLE
+#define XIL_EXCEPTION_ALL	(XREG_CPSR_FIQ_ENABLE | XREG_CPSR_IRQ_ENABLE)
+
+#define XIL_EXCEPTION_ID_FIRST			0U
+#if defined (__aarch64__)
+#define XIL_EXCEPTION_ID_SYNC_INT		1U
+#define XIL_EXCEPTION_ID_IRQ_INT		2U
+#define XIL_EXCEPTION_ID_FIQ_INT		3U
+#define XIL_EXCEPTION_ID_SERROR_ABORT_INT		4U
+#define XIL_EXCEPTION_ID_LAST			5U
+#else
+#define XIL_EXCEPTION_ID_RESET			0U
+#define XIL_EXCEPTION_ID_UNDEFINED_INT		1U
+#define XIL_EXCEPTION_ID_SWI_INT		2U
+#define XIL_EXCEPTION_ID_PREFETCH_ABORT_INT	3U
+#define XIL_EXCEPTION_ID_DATA_ABORT_INT		4U
+#define XIL_EXCEPTION_ID_IRQ_INT		5U
+#define XIL_EXCEPTION_ID_FIQ_INT		6U
+#define XIL_EXCEPTION_ID_LAST			6U
+#endif
+
+/*
+ * XIL_EXCEPTION_ID_INT is defined for all Xilinx processors.
+ */
+#if defined (versal) && !defined(ARMR5) && EL3
+#define XIL_EXCEPTION_ID_INT    XIL_EXCEPTION_ID_FIQ_INT
+#else
+#define XIL_EXCEPTION_ID_INT	XIL_EXCEPTION_ID_IRQ_INT
+#endif
+
+/**************************** Type Definitions ******************************/
+
+/**
+ * This typedef is the exception handler function.
+ */
+typedef void (*Xil_ExceptionHandler)(void *data);
+typedef void (*Xil_InterruptHandler)(void *data);
+
+typedef struct {
+        Xil_ExceptionHandler Handler;
+        void *Data;
+} XExc_VectorTableEntry;
+
+extern XExc_VectorTableEntry XExc_VectorTable[];
+
+/**
+*@endcond
+*/
+
+/***************** Macros (Inline Functions) Definitions ********************/
+
+/****************************************************************************/
+/**
+* @brief	Enable Exceptions.
+*
+* @param	Mask: Value for enabling the exceptions.
+*
+* @return	None.
+*
+* @note		If bit is 0, exception is enabled.
+*			C-Style signature: void Xil_ExceptionEnableMask(Mask)
+*
+******************************************************************************/
+#if defined (versal) && !defined(ARMR5) && EL3
+/*
+ * The Cortex-A72 processor in Versal is coupled with GIC-500, and GIC-500 supports
+ * only FIQ at EL3. Hence, this macro is tweaked to always enable FIQ,
+ * ignoring the argument passed by the user.
+ */
+#define Xil_ExceptionEnableMask(Mask)	\
+		mtcpsr(mfcpsr() & ~ ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
+#elif defined (__GNUC__) || defined (__ICCARM__)
+#define Xil_ExceptionEnableMask(Mask)	\
+		mtcpsr(mfcpsr() & ~ ((Mask) & XIL_EXCEPTION_ALL))
+#else
+#define Xil_ExceptionEnableMask(Mask)	\
+		{								\
+		  register u32 Reg __asm("cpsr"); \
+		  mtcpsr((Reg) & (~((Mask) & XIL_EXCEPTION_ALL))); \
+		}
+#endif
+/****************************************************************************/
+/**
+* @brief	Enable the IRQ exception.
+*
+* @return   None.
+*
+* @note     None.
+*
+******************************************************************************/
+#if defined (versal) && !defined(ARMR5) && EL3
+#define Xil_ExceptionEnable() \
+                Xil_ExceptionEnableMask(XIL_EXCEPTION_FIQ)
+#else
+#define Xil_ExceptionEnable() \
+		Xil_ExceptionEnableMask(XIL_EXCEPTION_IRQ)
+#endif
+
+/****************************************************************************/
+/**
+* @brief	Disable Exceptions.
+*
+* @param	Mask: Value for disabling the exceptions.
+*
+* @return	None.
+*
+* @note		If bit is 1, exception is disabled.
+*			C-Style signature: Xil_ExceptionDisableMask(Mask)
+*
+******************************************************************************/
+#if defined (versal) && !defined(ARMR5) && EL3
+/*
+ * The Cortex-A72 processor in Versal is coupled with GIC-500, and GIC-500 supports
+ * only FIQ at EL3. Hence, this macro is tweaked to always disable FIQ,
+ * ignoring the argument passed by the user.
+ */
+#define Xil_ExceptionDisableMask(Mask)	\
+		mtcpsr(mfcpsr() | ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
+#elif defined (__GNUC__) || defined (__ICCARM__)
+#define Xil_ExceptionDisableMask(Mask)	\
+		mtcpsr(mfcpsr() | ((Mask) & XIL_EXCEPTION_ALL))
+#else
+#define Xil_ExceptionDisableMask(Mask)	\
+		{									\
+		  register u32 Reg __asm("cpsr"); \
+		  mtcpsr((Reg) | ((Mask) & XIL_EXCEPTION_ALL)); \
+		}
+#endif
+/****************************************************************************/
+/**
+* Disable the IRQ exception.
+*
+* @return   None.
+*
+* @note     None.
+*
+******************************************************************************/
+#define Xil_ExceptionDisable() \
+		Xil_ExceptionDisableMask(XIL_EXCEPTION_IRQ)
+
+#if ( defined (PLATFORM_ZYNQMP) && defined (EL3) && (EL3==1) )
+/****************************************************************************/
+/**
+* @brief	Enable nested interrupts by clearing the I bit in DAIF. This
+*			macro is defined for the Cortex-A53 64 bit mode BSP configured to run
+*			at EL3. However, it is not defined for the Versal Cortex-A72 BSP
+*			configured to run at EL3, because Cortex-A72 is coupled
+*			with GIC-500 (GICv3) and it triggers only FIQ at EL3.
+*
+* @return   None.
+*
+* @note     This macro is supposed to be used from interrupt handlers. In the
+*			interrupt handler the interrupts are disabled by default (I bit
+*			is set as 1). To allow nesting of interrupts, this macro should be
+*			used. It clears the I bit. Once that bit is cleared and provided the
+*			preemption of interrupt conditions are met in the GIC, nesting of
+*			interrupts will start happening.
+*			Caution: This macro must be used with caution. Before calling this
+*			macro, the user must ensure that the source of the current IRQ
+*			is appropriately cleared. Otherwise, as soon as we clear the I
+*			bit, there can be an infinite loop of interrupts with an
+*			eventual crash (all the stack space getting consumed).
+******************************************************************************/
+#define Xil_EnableNestedInterrupts() \
+                __asm__ __volatile__ ("mrs    X1, ELR_EL3"); \
+                __asm__ __volatile__ ("mrs    X2, SPSR_EL3");  \
+                __asm__ __volatile__ ("stp    X1,X2, [sp,#-0x10]!"); \
+                __asm__ __volatile__ ("mrs    X1, DAIF");  \
+                __asm__ __volatile__ ("bic    X1,X1,#(0x1<<7)");  \
+                __asm__ __volatile__ ("msr    DAIF, X1");  \
+
+/****************************************************************************/
+/**
+* @brief	Disable the nested interrupts by setting the I bit in DAIF. This
+*			macro is defined for Cortex-A53 64 bit mode BSP configured to run
+*			at EL3.
+*
+* @return   None.
+*
+* @note     This macro is meant to be called in the interrupt service routines.
+*			This macro cannot be used independently. It can only be used when
+*			nesting of interrupts has been enabled by using the macro
+*			Xil_EnableNestedInterrupts(). In a typical flow, the user first
+*			calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
+*			point. The user then must call this macro before exiting the interrupt
+*			service routine. This macro puts the ARM back in IRQ mode and
+*			hence sets back the I bit.
+******************************************************************************/
+#define Xil_DisableNestedInterrupts() \
+                __asm__ __volatile__ ("ldp    X1,X2, [sp,#0x10]!"); \
+                __asm__ __volatile__ ("msr    ELR_EL3, X1"); \
+                __asm__ __volatile__ ("msr    SPSR_EL3, X2"); \
+                __asm__ __volatile__ ("mrs    X1, DAIF");  \
+                __asm__ __volatile__ ("orr    X1, X1, #(0x1<<7)"); \
+                __asm__ __volatile__ ("msr    DAIF, X1");  \
+
+#elif (defined (EL1_NONSECURE) && (EL1_NONSECURE==1))
+/****************************************************************************/
+/**
+* @brief	Enable nested interrupts by clearing the I bit in DAIF. This
+*			macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
+*			BSP configured to run at EL1 NON SECURE
+*
+* @return   None.
+*
+* @note     This macro is supposed to be used from interrupt handlers. In the
+*			interrupt handler the interrupts are disabled by default (I bit
+*			is set as 1). To allow nesting of interrupts, this macro should be
+*			used. It clears the I bit. Once that bit is cleared and provided the
+*			preemption of interrupt conditions are met in the GIC, nesting of
+*			interrupts will start happening.
+*			Caution: This macro must be used with caution. Before calling this
+*			macro, the user must ensure that the source of the current IRQ
+*			is appropriately cleared. Otherwise, as soon as we clear the I
+*			bit, there can be an infinite loop of interrupts with an
+*			eventual crash (all the stack space getting consumed).
+******************************************************************************/
+#define Xil_EnableNestedInterrupts() \
+                __asm__ __volatile__ ("mrs    X1, ELR_EL1"); \
+                __asm__ __volatile__ ("mrs    X2, SPSR_EL1");  \
+                __asm__ __volatile__ ("stp    X1,X2, [sp,#-0x10]!"); \
+                __asm__ __volatile__ ("mrs    X1, DAIF");  \
+                __asm__ __volatile__ ("bic    X1,X1,#(0x1<<7)");  \
+                __asm__ __volatile__ ("msr    DAIF, X1");  \
+
+/****************************************************************************/
+/**
+* @brief	Disable the nested interrupts by setting the I bit in DAIF. This
+*			macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
+*			BSP configured to run at EL1 NON SECURE
+*
+* @return   None.
+*
+* @note     This macro is meant to be called in the interrupt service routines.
+*			This macro cannot be used independently. It can only be used when
+*			nesting of interrupts has been enabled by using the macro
+*			Xil_EnableNestedInterrupts(). In a typical flow, the user first
+*			calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
+*			point. The user then must call this macro before exiting the interrupt
+*			service routine. This macro puts the ARM back in IRQ mode and
+*			hence sets back the I bit.
+******************************************************************************/
+#define Xil_DisableNestedInterrupts() \
+                __asm__ __volatile__ ("ldp    X1,X2, [sp,#0x10]!"); \
+                __asm__ __volatile__ ("msr    ELR_EL1, X1"); \
+                __asm__ __volatile__ ("msr    SPSR_EL1, X2"); \
+                __asm__ __volatile__ ("mrs    X1, DAIF");  \
+                __asm__ __volatile__ ("orr    X1, X1, #(0x1<<7)"); \
+                __asm__ __volatile__ ("msr    DAIF, X1");  \
+
+#elif (!defined (__aarch64__) && !defined (ARMA53_32))
+/****************************************************************************/
+/**
+* @brief	Enable nested interrupts by clearing the I and F bits in CPSR. This
+* 			API is defined for cortex-a9 and cortex-r5.
+*
+* @return   None.
+*
+* @note     This macro is supposed to be used from interrupt handlers. In the
+*			interrupt handler the interrupts are disabled by default (I and F
+*			are 1). To allow nesting of interrupts, this macro should be
+*			used. It clears the I and F bits by changing the ARM mode to
+*			system mode. Once these bits are cleared and provided the
+*			preemption of interrupt conditions are met in the GIC, nesting of
+*			interrupts will start happening.
+*			Caution: This macro must be used with caution. Before calling this
+*			macro, the user must ensure that the source of the current IRQ
+*			is appropriately cleared. Otherwise, as soon as we clear the I and
+*			F bits, there can be an infinite loop of interrupts with an
+*			eventual crash (all the stack space getting consumed).
+******************************************************************************/
+#define Xil_EnableNestedInterrupts() \
+		__asm__ __volatile__ ("stmfd   sp!, {lr}"); \
+		__asm__ __volatile__ ("mrs     lr, spsr");  \
+		__asm__ __volatile__ ("stmfd   sp!, {lr}"); \
+		__asm__ __volatile__ ("msr     cpsr_c, #0x1F"); \
+		__asm__ __volatile__ ("stmfd   sp!, {lr}");
+/****************************************************************************/
+/**
+* @brief	Disable the nested interrupts by setting the I and F bits. This API
+*			is defined for cortex-a9 and cortex-r5.
+*
+* @return   None.
+*
+* @note     This macro is meant to be called in the interrupt service routines.
+*			This macro cannot be used independently. It can only be used when
+*			nesting of interrupts has been enabled by using the macro
+*			Xil_EnableNestedInterrupts(). In a typical flow, the user first
+*			calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
+*			point. The user then must call this macro before exiting the interrupt
+*			service routine. This macro puts the ARM back in IRQ/FIQ mode and
+*			hence sets back the I and F bits.
+******************************************************************************/
+#define Xil_DisableNestedInterrupts() \
+		__asm__ __volatile__ ("ldmfd   sp!, {lr}");   \
+		__asm__ __volatile__ ("msr     cpsr_c, #0x92"); \
+		__asm__ __volatile__ ("ldmfd   sp!, {lr}"); \
+		__asm__ __volatile__ ("msr     spsr_cxsf, lr"); \
+		__asm__ __volatile__ ("ldmfd   sp!, {lr}"); \
+
+#endif
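+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): per the notes above, nesting is enabled only after the current
+ * interrupt source has been acknowledged and disabled again before leaving
+ * the handler. MyDeviceIsr and the device-specific steps are placeholders.
+ *
+ *   static void MyDeviceIsr(void *CallBackRef)
+ *   {
+ *       ... acknowledge/clear the interrupt source in the device ...
+ *       Xil_EnableNestedInterrupts();
+ *       ... longer processing that higher-priority IRQs may preempt ...
+ *       Xil_DisableNestedInterrupts();
+ *   }
+ */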
+/************************** Variable Definitions ****************************/
+
+/************************** Function Prototypes *****************************/
+
+extern void Xil_ExceptionRegisterHandler(u32 Exception_id,
+					 Xil_ExceptionHandler Handler,
+					 void *Data);
+
+extern void Xil_ExceptionRemoveHandler(u32 Exception_id);
+extern void Xil_GetExceptionRegisterHandler(u32 Exception_id,
+					Xil_ExceptionHandler *Handler, void **Data);
+
+extern void Xil_ExceptionInit(void);
+#if defined (__aarch64__)
+void Xil_SyncAbortHandler(void *CallBackRef);
+void Xil_SErrorAbortHandler(void *CallBackRef);
+#else
+extern void Xil_DataAbortHandler(void *CallBackRef);
+extern void Xil_PrefetchAbortHandler(void *CallBackRef);
+extern void Xil_UndefinedExceptionHandler(void *CallBackRef);
+#endif
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* XIL_EXCEPTION_H */
+/**
+* @} End of "addtogroup arm_exception_apis".
+*/
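+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): a typical setup sequence hooks the GIC driver's handler from
+ * xscugic_hw.h into the IRQ vector and then unmasks IRQs. DeviceId is a
+ * placeholder for the GIC instance identifier.
+ *
+ *   Xil_ExceptionInit();
+ *   Xil_ExceptionRegisterHandler(XIL_EXCEPTION_ID_INT,
+ *           (Xil_ExceptionHandler)XScuGic_DeviceInterruptHandler,
+ *           (void *)(UINTPTR)DeviceId);
+ *   Xil_ExceptionEnable();
+ */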
diff --git a/embeddedsw/lib/bsp/standalone/src/common/xil_assert.c b/embeddedsw/lib/bsp/standalone/src/common/xil_assert.c
new file mode 100644
index 0000000..b3dd7e9
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/common/xil_assert.c
@@ -0,0 +1,126 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_assert.c
+* @addtogroup common_assert_apis Assert APIs and Macros
+* @{
+*
+* This file contains basic assert related functions for Xilinx software IP.
+*
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm  07/14/09 Initial release
+* 6.0   kvn  05/31/16 Make Xil_AsserWait a global variable
+* </pre>
+*
+******************************************************************************/
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/**************************** Type Definitions *******************************/
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+/************************** Variable Definitions *****************************/
+
+/**
+ * @brief This variable allows testing to be done more easily with asserts. An
+ * assert sets this variable such that a driver can evaluate it
+ * to determine if an assert occurred.
+ */
+u32 Xil_AssertStatus;
+
+/**
+ * @brief This variable allows the assert functionality to be changed for testing
+ * such that it does not wait infinitely. Use the debugger to disable the
+ * waiting during testing of asserts.
+ */
+s32 Xil_AssertWait = 1;
+
+/* The callback function to be invoked when an assert is taken */
+static Xil_AssertCallback Xil_AssertCallbackRoutine = NULL;
+
+/************************** Function Prototypes ******************************/
+
+/*****************************************************************************/
+/**
+*
+* @brief    Implement assert. Currently, it calls a user-defined callback
+*           function if one has been set.  Then, it potentially enters an
+*           infinite loop depending on the value of the Xil_AssertWait
+*           variable.
+*
+* @param    File: filename of the source
+* @param    Line: linenumber within File
+*
+* @return   None.
+*
+* @note     None.
+*
+******************************************************************************/
+void Xil_Assert(const char8 *File, s32 Line)
+{
+	/* if the callback has been set then invoke it */
+	if (Xil_AssertCallbackRoutine != 0) {
+		(*Xil_AssertCallbackRoutine)(File, Line);
+	}
+
+	/* if specified, wait indefinitely such that the assert will show up
+	 * in testing
+	 */
+	while (Xil_AssertWait != 0) {
+	}
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Set up a callback function to be invoked when an assert occurs.
+*           If a callback is already installed, then it will be replaced.
+*
+* @param    Routine: callback to be invoked when an assert is taken
+*
+* @return   None.
+*
+* @note     This function has no effect if NDEBUG is set
+*
+******************************************************************************/
+void Xil_AssertSetCallback(Xil_AssertCallback Routine)
+{
+	Xil_AssertCallbackRoutine = Routine;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Null handler function. This follows the XInterruptHandler
+*           signature for interrupt handlers. It can be used to assign a null
+*           handler (a stub) to an interrupt controller vector table.
+*
+* @param    NullParameter: arbitrary void pointer and not used.
+*
+* @return   None.
+*
+* @note     None.
+*
+******************************************************************************/
+void XNullHandler(void *NullParameter)
+{
+	(void) NullParameter;
+}
+/**
+* @} End of "addtogroup common_assert_apis".
+*/
diff --git a/embeddedsw/lib/bsp/standalone/src/common/xil_assert.h b/embeddedsw/lib/bsp/standalone/src/common/xil_assert.h
new file mode 100644
index 0000000..e8b87b5
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/common/xil_assert.h
@@ -0,0 +1,176 @@
+/******************************************************************************
+* Copyright (c) 2009 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_assert.h
+*
+* @addtogroup common_assert_apis Assert APIs and Macros
+*
+* The xil_assert.h file contains assert related functions and macros.
+* Assert APIs/Macros specify that an application program satisfies certain
+* conditions at particular points in its execution. These functions can be
+* used by application programs to ensure that application code is satisfying
+* certain conditions.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm  07/14/09 First release
+* 6.0   kvn  05/31/16 Make Xil_AsserWait a global variable
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XIL_ASSERT_H	/* prevent circular inclusions */
+#define XIL_ASSERT_H	/* by using protection macros */
+
+#include "xil_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/***************************** Include Files *********************************/
+
+
+/************************** Constant Definitions *****************************/
+
+#define XIL_ASSERT_NONE     0U
+#define XIL_ASSERT_OCCURRED 1U
+#define XNULL NULL
+
+extern u32 Xil_AssertStatus;
+extern s32 Xil_AssertWait;
+extern void Xil_Assert(const char8 *File, s32 Line);
+/**
+ *@endcond
+ */
+void XNullHandler(void *NullParameter);
+
+/**
+ * This data type defines a callback to be invoked when an
+ * assert occurs. The callback is invoked only when asserts are enabled
+ */
+typedef void (*Xil_AssertCallback) (const char8 *File, s32 Line);
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+#ifndef NDEBUG
+
+/*****************************************************************************/
+/**
+* @brief    This assert macro is to be used for void functions. This in
+*           conjunction with the Xil_AssertWait boolean can be used to
+*           accommodate tests so that asserts which fail allow execution to
+*           continue.
+*
+* @param    Expression: expression to be evaluated. If it evaluates to
+*           false, the assert occurs.
+*
+* @return   Returns void unless the Xil_AssertWait variable is true, in which
+*           case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertVoid(Expression)                \
+{                                                  \
+    if (Expression) {                              \
+        Xil_AssertStatus = XIL_ASSERT_NONE;       \
+    } else {                                       \
+        Xil_Assert(__FILE__, __LINE__);            \
+        Xil_AssertStatus = XIL_ASSERT_OCCURRED;   \
+        return;                                    \
+    }                                              \
+}
+
+/*****************************************************************************/
+/**
+* @brief    This assert macro is to be used for functions that do return a
+*           value. This in conjunction with the Xil_AssertWait boolean can be
+*           used to accommodate tests so that asserts which fail allow execution
+*           to continue.
+*
+* @param    Expression: expression to be evaluated. If it evaluates to false,
+*           the assert occurs.
+*
+* @return   Returns 0 unless the Xil_AssertWait variable is true, in which
+* 	        case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertNonvoid(Expression)             \
+{                                                  \
+    if (Expression) {                              \
+        Xil_AssertStatus = XIL_ASSERT_NONE;       \
+    } else {                                       \
+        Xil_Assert(__FILE__, __LINE__);            \
+        Xil_AssertStatus = XIL_ASSERT_OCCURRED;   \
+        return 0;                                  \
+    }                                              \
+}
+
+/*****************************************************************************/
+/**
+* @brief     Always assert. This assert macro is to be used for void functions.
+*            Use for instances where an assert should always occur.
+*
+* @return    Returns void unless the Xil_AssertWait variable is true, in which
+*	         case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertVoidAlways()                   \
+{                                                  \
+   Xil_Assert(__FILE__, __LINE__);                 \
+   Xil_AssertStatus = XIL_ASSERT_OCCURRED;        \
+   return;                                         \
+}
+
+/*****************************************************************************/
+/**
+* @brief   Always assert. This assert macro is to be used for functions that
+*          do return a value. Use for instances where an assert should always
+*          occur.
+*
+* @return Returns void unless the Xil_AssertWait variable is true, in which
+*	      case no return is made and an infinite loop is entered.
+*
+******************************************************************************/
+#define Xil_AssertNonvoidAlways()                \
+{                                                  \
+   Xil_Assert(__FILE__, __LINE__);                 \
+   Xil_AssertStatus = XIL_ASSERT_OCCURRED;        \
+   return 0;                                       \
+}
+
+
+#else
+
+#define Xil_AssertVoid(Expression)
+#define Xil_AssertVoidAlways()
+#define Xil_AssertNonvoid(Expression)
+#define Xil_AssertNonvoidAlways()
+
+#endif
+
+/************************** Function Prototypes ******************************/
+
+void Xil_AssertSetCallback(Xil_AssertCallback Routine);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* end of protection macro */
+/**
+* @} End of "addtogroup common_assert_apis".
+*/
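+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): driver-style use of the assert macros. MyDriver is a placeholder
+ * type; with NDEBUG undefined, a failing check calls Xil_Assert() and either
+ * loops (Xil_AssertWait != 0) or records XIL_ASSERT_OCCURRED and returns 0.
+ *
+ *   u32 MyDriver_GetStatus(const MyDriver *InstancePtr)
+ *   {
+ *       Xil_AssertNonvoid(InstancePtr != NULL);
+ *       Xil_AssertNonvoid(InstancePtr->IsReady == XIL_COMPONENT_IS_READY);
+ *       return InstancePtr->Status;
+ *   }
+ */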
diff --git a/embeddedsw/lib/bsp/standalone/src/common/xil_io.h b/embeddedsw/lib/bsp/standalone/src/common/xil_io.h
new file mode 100644
index 0000000..853ef6b
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/common/xil_io.h
@@ -0,0 +1,412 @@
+/******************************************************************************
+* Copyright (c) 2014 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_io.h
+*
+* @addtogroup common_io_interfacing_apis Register IO interfacing APIs
+*
+* The xil_io.h file contains the interface for the general I/O component, which
+* encapsulates the Input/Output functions for the processors that do not
+* require any special I/O handling.
+*
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who      Date     Changes
+* ----- -------- -------- -----------------------------------------------
+* 5.00 	pkp  	 05/29/14 First release
+* 6.00  mus      08/19/16 Remove checking of __LITTLE_ENDIAN__ flag for
+*                         ARM processors
+* 7.20  har      01/03/20 Added Xil_SecureOut32 for avoiding blindwrite for
+*                         CR-1049218
+* 7.30  kpt      09/21/20 Moved Xil_EndianSwap16 and Xil_EndianSwap32 to
+*                         xil_io.h and made them as static inline
+*       am       10/13/20 Changed the return type of Xil_SecureOut32 function
+*                         from u32 to int
+* 7.50  dp       02/12/21 Fix compilation error in Xil_EndianSwap32() that occur
+*                         when -Werror=conversion compiler flag is enabled
+* 7.5   mus      05/17/21 Update the functions with comments. It fixes CR#1067739.
+*
+* </pre>
+******************************************************************************/
+
+#ifndef XIL_IO_H           /* prevent circular inclusions */
+#define XIL_IO_H           /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_printf.h"
+#include "xstatus.h"
+
+#if defined (__MICROBLAZE__)
+#include "mb_interface.h"
+#else
+#include "xpseudo_asm.h"
+#endif
+
+/************************** Function Prototypes ******************************/
+#ifdef ENABLE_SAFETY
+extern u32 XStl_RegUpdate(u32 RegAddr, u32 RegVal);
+#endif
+
+/***************** Macros (Inline Functions) Definitions *********************/
+#if defined __GNUC__
+#if defined (__MICROBLAZE__)
+#  define INST_SYNC		mbar(0)
+#  define DATA_SYNC		mbar(1)
+# else
+#  define SYNCHRONIZE_IO	dmb()
+#  define INST_SYNC		isb()
+#  define DATA_SYNC		dsb()
+# endif
+#else
+# define SYNCHRONIZE_IO
+# define INST_SYNC
+# define DATA_SYNC
+#endif
+
+#if defined (__GNUC__) || defined (__ICCARM__) || defined (__MICROBLAZE__)
+#define INLINE inline
+#else
+#define INLINE __inline
+#endif
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an input operation for a memory location by reading
+*           from the specified address and returning the 8 bit Value read from
+*            that address.
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 8 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u8 Xil_In8(UINTPTR Addr)
+{
+	return *(volatile u8 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an input operation for a memory location by reading from
+*           the specified address and returning the 16 bit Value read from that
+*           address.
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 16 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u16 Xil_In16(UINTPTR Addr)
+{
+	return *(volatile u16 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an input operation for a memory location by
+*           reading from the specified address and returning the 32 bit Value
+*           read  from that address.
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 32 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u32 Xil_In32(UINTPTR Addr)
+{
+	return *(volatile u32 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief     Performs an input operation for a memory location by reading
+*            the 64 bit Value from the specified address.
+*
+*
+* @param	Addr: contains the address to perform the input operation
+*
+* @return	The 64 bit Value read from the specified input address.
+*
+******************************************************************************/
+static INLINE u64 Xil_In64(UINTPTR Addr)
+{
+	return *(volatile u64 *) Addr;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by
+*           writing the 8 bit Value to the specified address.
+*
+* @param	Addr: contains the address to perform the output operation
+* @param	Value: contains the 8 bit Value to be written at the specified
+*           address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out8(UINTPTR Addr, u8 Value)
+{
+	/* write 8 bit value to specified address */
+	volatile u8 *LocalAddr = (volatile u8 *)Addr;
+	*LocalAddr = Value;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by writing the
+*            16 bit Value to the specified address.
+*
+* @param	Addr contains the address to perform the output operation
+* @param	Value contains the Value to be written at the specified address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out16(UINTPTR Addr, u16 Value)
+{
+	/* write 16 bit value to specified address */
+	volatile u16 *LocalAddr = (volatile u16 *)Addr;
+	*LocalAddr = Value;
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by writing the
+*           32 bit Value to the specified address.
+*
+* @param	Addr contains the address to perform the output operation
+* @param	Value contains the 32 bit Value to be written at the specified
+*           address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out32(UINTPTR Addr, u32 Value)
+{
+	/* write 32 bit value to specified address */
+#ifndef ENABLE_SAFETY
+	volatile u32 *LocalAddr = (volatile u32 *)Addr;
+	*LocalAddr = Value;
+#else
+	XStl_RegUpdate(Addr, Value);
+#endif
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Performs an output operation for a memory location by writing the
+*           64 bit Value to the specified address.
+*
+* @param	Addr contains the address to perform the output operation
+* @param	Value contains 64 bit Value to be written at the specified address.
+*
+* @return	None.
+*
+******************************************************************************/
+static INLINE void Xil_Out64(UINTPTR Addr, u64 Value)
+{
+	/* write 64 bit value to specified address */
+	volatile u64 *LocalAddr = (volatile u64 *)Addr;
+	*LocalAddr = Value;
+}
+
+/*****************************************************************************/
+/**
+ *
+ * @brief	Performs an output operation for a memory location by writing the
+ *       	32 bit Value to the specified address and then reading it
+ *       	back to verify the value written to the register.
+ *
+ * @param	Addr contains the address to perform the output operation
+ * @param	Value contains 32 bit Value to be written at the specified address
+ *
+ * @return	Returns Status
+ *        	- XST_SUCCESS on success
+ *        	- XST_FAILURE on failure
+ *
+ *****************************************************************************/
+static INLINE int Xil_SecureOut32(UINTPTR Addr, u32 Value)
+{
+	int Status = XST_FAILURE;
+	u32 ReadReg;
+	u32 ReadRegTemp;
+
+	/* writing 32 bit value to specified address */
+	Xil_Out32(Addr, Value);
+
+	/* verify value written to specified address with multiple reads */
+	ReadReg = Xil_In32(Addr);
+	ReadRegTemp = Xil_In32(Addr);
+
+	if( (ReadReg == Value) && (ReadRegTemp == Value) ) {
+		Status = XST_SUCCESS;
+	}
+
+	return Status;
+}
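+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): callers check the read-back verification result against the
+ * xstatus.h codes. CTRL_REG_ADDR is a placeholder register address.
+ *
+ *   if (Xil_SecureOut32(CTRL_REG_ADDR, 0x1U) != XST_SUCCESS) {
+ *       ... handle the failed write verification, e.g. retry or report ...
+ *   }
+ */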
+
+/*****************************************************************************/
+/**
+*
+* @brief    Perform a 16-bit endian conversion.
+*
+* @param	Data: 16 bit value to be converted
+*
+* @return	16 bit Data with converted endianness
+*
+******************************************************************************/
+static INLINE __attribute__((always_inline)) u16 Xil_EndianSwap16(u16 Data)
+{
+	return (u16) (((Data & 0xFF00U) >> 8U) | ((Data & 0x00FFU) << 8U));
+}
+
+/*****************************************************************************/
+/**
+*
+* @brief    Perform a 32-bit endian conversion.
+*
+* @param	Data: 32 bit value to be converted
+*
+* @return	32 bit data with converted endianness
+*
+******************************************************************************/
+static INLINE __attribute__((always_inline)) u32 Xil_EndianSwap32(u32 Data)
+{
+	u16 LoWord;
+	u16 HiWord;
+
+	/* get each of the half words from the 32 bit word */
+
+	LoWord = (u16) (Data & 0x0000FFFFU);
+	HiWord = (u16) ((Data & 0xFFFF0000U) >> 16U);
+
+	/* byte swap each of the 16 bit half words */
+
+	LoWord = (u16)(((LoWord & 0xFF00U) >> 8U) | ((LoWord & 0x00FFU) << 8U));
+	HiWord = (u16)(((HiWord & 0xFF00U) >> 8U) | ((HiWord & 0x00FFU) << 8U));
+
+	/* swap the half words before returning the value */
+
+	return ((((u32)LoWord) << (u32)16U) | (u32)HiWord);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+# define Xil_In16LE	Xil_In16
+# define Xil_In32LE	Xil_In32
+# define Xil_Out16LE	Xil_Out16
+# define Xil_Out32LE	Xil_Out32
+# define Xil_Htons	Xil_EndianSwap16
+# define Xil_Htonl	Xil_EndianSwap32
+# define Xil_Ntohs	Xil_EndianSwap16
+# define Xil_Ntohl	Xil_EndianSwap32
+# else
+# define Xil_In16BE	Xil_In16
+# define Xil_In32BE	Xil_In32
+# define Xil_Out16BE	Xil_Out16
+# define Xil_Out32BE	Xil_Out32
+# define Xil_Htons(Data) (Data)
+# define Xil_Htonl(Data) (Data)
+# define Xil_Ntohs(Data) (Data)
+# define Xil_Ntohl(Data) (Data)
+#endif
+#else
+# define Xil_In16LE	Xil_In16
+# define Xil_In32LE	Xil_In32
+# define Xil_Out16LE	Xil_Out16
+# define Xil_Out32LE	Xil_Out32
+# define Xil_Htons	Xil_EndianSwap16
+# define Xil_Htonl	Xil_EndianSwap32
+# define Xil_Ntohs	Xil_EndianSwap16
+# define Xil_Ntohl	Xil_EndianSwap32
+#endif
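+/*
+ * Editor's illustration (hedged sketch, not part of the imported Xilinx
+ * sources): on little-endian ARM the host/network aliases above resolve to
+ * the byte-swap helpers, e.g. when filling protocol headers by hand.
+ * NetOrderPort is a placeholder variable.
+ *
+ *   u32 IpNetOrder = Xil_Htonl(0xC0A80001U);    192.168.0.1, host to network
+ *   u16 PortHost   = Xil_Ntohs(NetOrderPort);   network back to host order
+ */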
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE u16 Xil_In16BE(UINTPTR Addr)
+#else
+static INLINE u16 Xil_In16LE(UINTPTR Addr)
+#endif
+#else
+static INLINE u16 Xil_In16BE(UINTPTR Addr)
+#endif
+{
+	u16 value = Xil_In16(Addr);
+	return Xil_EndianSwap16(value);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE u32 Xil_In32BE(UINTPTR Addr)
+#else
+static INLINE u32 Xil_In32LE(UINTPTR Addr)
+#endif
+#else
+static INLINE u32 Xil_In32BE(UINTPTR Addr)
+#endif
+{
+	u32 value = Xil_In32(Addr);
+	return Xil_EndianSwap32(value);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE void Xil_Out16BE(UINTPTR Addr, u16 Value)
+#else
+static INLINE void Xil_Out16LE(UINTPTR Addr, u16 Value)
+#endif
+#else
+static INLINE void Xil_Out16BE(UINTPTR Addr, u16 Value)
+#endif
+{
+	Value = Xil_EndianSwap16(Value);
+	Xil_Out16(Addr, Value);
+}
+
+#if defined (__MICROBLAZE__)
+#ifdef __LITTLE_ENDIAN__
+static INLINE void Xil_Out32BE(UINTPTR Addr, u32 Value)
+#else
+static INLINE void Xil_Out32LE(UINTPTR Addr, u32 Value)
+#endif
+#else
+static INLINE void Xil_Out32BE(UINTPTR Addr, u32 Value)
+#endif
+{
+	Value = Xil_EndianSwap32(Value);
+	Xil_Out32(Addr, Value);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+/**
+* @} End of "addtogroup common_io_interfacing_apis".
+*/
diff --git a/embeddedsw/lib/bsp/standalone/src/common/xil_types.h b/embeddedsw/lib/bsp/standalone/src/common/xil_types.h
new file mode 100644
index 0000000..1d18bfb
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/common/xil_types.h
@@ -0,0 +1,203 @@
+/******************************************************************************
+* Copyright (c) 2010 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xil_types.h
+*
+* @addtogroup common_types Basic Data types for Xilinx® Software IP
+*
+* The xil_types.h file contains basic types for Xilinx software IP. These data types
+* are applicable for all processors supported by Xilinx.
+* @{
+* <pre>
+* MODIFICATION HISTORY:
+*
+* Ver   Who    Date   Changes
+* ----- ---- -------- -------------------------------------------------------
+* 1.00a hbm  07/14/09 First release
+* 3.03a sdm  05/30/11 Added Xuint64 typedef and XUINT64_MSW/XUINT64_LSW macros
+* 5.00 	pkp  05/29/14 Made changes for 64 bit architecture
+*	srt  07/14/14 Use standard definitions from stdint.h and stddef.h
+*		      Define LONG and ULONG datatypes and mask values
+* 7.00  mus  01/07/19 Add cpp extern macro
+* 7.1   aru  08/19/19 Shift the value in UPPER_32_BITS only if it
+*                     is 64-bit processor
+* </pre>
+*
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+#ifndef XIL_TYPES_H	/* prevent circular inclusions */
+#define XIL_TYPES_H	/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stddef.h>
+
+/************************** Constant Definitions *****************************/
+
+#ifndef TRUE
+#  define TRUE		1U
+#endif
+
+#ifndef FALSE
+#  define FALSE		0U
+#endif
+
+#ifndef NULL
+#define NULL		0U
+#endif
+
+#define XIL_COMPONENT_IS_READY     0x11111111U  /**< In device drivers, this macro will be
+                                                 assigned to the "IsReady" member of the driver
+                                                 instance to indicate that the driver
+                                                 instance is initialized and ready to use. */
+#define XIL_COMPONENT_IS_STARTED   0x22222222U  /**< In device drivers, this macro will be assigned to
+                                                 the "IsStarted" member of the driver instance
+                                                 to indicate that the driver instance is
+                                                 started and it can be enabled. */
+
+/* @name New types
+ * New simple types.
+ * @{
+ */
+#ifndef __KERNEL__
+#ifndef XBASIC_TYPES_H
+/*
+ * guarded against xbasic_types.h.
+ */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+/** @}*/
+#define __XUINT64__
+typedef struct
+{
+	u32 Upper;
+	u32 Lower;
+} Xuint64;
+
+
+/*****************************************************************************/
+/**
+* @brief    Return the most significant half of the 64 bit data type.
+*
+* @param    x is the 64 bit word.
+*
+* @return   The upper 32 bits of the 64 bit word.
+*
+******************************************************************************/
+#define XUINT64_MSW(x) ((x).Upper)
+
+/*****************************************************************************/
+/**
+* @brief    Return the least significant half of the 64 bit data type.
+*
+* @param    x is the 64 bit word.
+*
+* @return   The lower 32 bits of the 64 bit word.
+*
+******************************************************************************/
+#define XUINT64_LSW(x) ((x).Lower)
+
+#endif /* XBASIC_TYPES_H */
+
+/*
+ * xbasic_types.h does not typedef s* or u64
+ */
+/** @{ */
+typedef char char8;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+typedef int sint32;
+
+typedef intptr_t INTPTR;
+typedef uintptr_t UINTPTR;
+typedef ptrdiff_t PTRDIFF;
+/** @}*/
+#if !defined(LONG) || !defined(ULONG)
+typedef long LONG;
+typedef unsigned long ULONG;
+#endif
+
+#define ULONG64_HI_MASK	0xFFFFFFFF00000000U
+#define ULONG64_LO_MASK	~ULONG64_HI_MASK
+
+#else
+#include <linux/types.h>
+#endif
+
+/** @{ */
+/**
+ * This data type defines an interrupt handler for a device.
+ * The argument points to the instance of the component
+ */
+typedef void (*XInterruptHandler) (void *InstancePtr);
+
+/**
+ * This data type defines an exception handler for a processor.
+ * The argument points to the instance of the component
+ */
+typedef void (*XExceptionHandler) (void *InstancePtr);
+
+/**
+ * @brief  Returns 32-63 bits of a number.
+ * @param  n : Number being accessed.
+ * @return Bits 32-63 of number.
+ *
+ * @note    A basic shift-right of a 64- or 32-bit quantity.
+ *          Use this to suppress the "right shift count >= width of type"
+ *          warning when that quantity is 32-bits.
+ */
+#if defined (__aarch64__) || defined (__arch64__)
+#define UPPER_32_BITS(n) ((u32)(((n) >> 16) >> 16))
+#else
+#define UPPER_32_BITS(n) 0U
+#endif
+/**
+ * @brief  Returns 0-31 bits of a number
+ * @param  n : Number being accessed.
+ * @return Bits 0-31 of number
+ */
+#define LOWER_32_BITS(n) ((u32)(n))
+
+
+
+
+/************************** Constant Definitions *****************************/
+
+#ifndef TRUE
+#define TRUE		1U
+#endif
+
+#ifndef FALSE
+#define FALSE		0U
+#endif
+
+#ifndef NULL
+#define NULL		0U
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* end of protection macro */
+/**
+ *@endcond
+ */
+/**
+* @} End of "addtogroup common_types".
+*/
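
One common use of LOWER_32_BITS/UPPER_32_BITS defined above is splitting a 64-bit bus address across a pair of 32-bit descriptor fields; on 32-bit builds the upper half simply evaluates to 0U. A short, illustrative sketch under that assumption (ExampleDesc and example_set_addr are hypothetical, not part of the imported sources):

#include "xil_types.h"

/* Hypothetical descriptor layout that stores a 64-bit buffer address as two
 * 32-bit halves. */
typedef struct {
	u32 AddrLow;	/* bits 0-31 of the buffer address  */
	u32 AddrHigh;	/* bits 32-63 of the buffer address */
} ExampleDesc;

static void example_set_addr(ExampleDesc *DescPtr, UINTPTR BufAddr)
{
	DescPtr->AddrLow  = LOWER_32_BITS(BufAddr);
	/* On a 32-bit build UPPER_32_BITS() evaluates to 0U, as defined above. */
	DescPtr->AddrHigh = UPPER_32_BITS(BufAddr);
}
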
diff --git a/embeddedsw/lib/bsp/standalone/src/common/xstatus.h b/embeddedsw/lib/bsp/standalone/src/common/xstatus.h
new file mode 100644
index 0000000..1e9e6fb
--- /dev/null
+++ b/embeddedsw/lib/bsp/standalone/src/common/xstatus.h
@@ -0,0 +1,522 @@
+/******************************************************************************
+* Copyright (c) 2002 - 2021 Xilinx, Inc.  All rights reserved.
+* SPDX-License-Identifier: MIT
+******************************************************************************/
+
+/*****************************************************************************/
+/**
+*
+* @file xstatus.h
+*
+* @addtogroup common_status_codes Xilinx software status codes
+*
+* The xstatus.h file contains the Xilinx software status codes. These codes are
+* used throughout the Xilinx device drivers.
+*
+* @{
+******************************************************************************/
+
+/**
+ *@cond nocomments
+ */
+
+#ifndef XSTATUS_H		/* prevent circular inclusions */
+#define XSTATUS_H		/* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************** Include Files *********************************/
+
+#include "xil_types.h"
+#include "xil_assert.h"
+
+/************************** Constant Definitions *****************************/
+
+/*********************** Common statuses 0 - 500 *****************************/
+/**
+@name Common Status Codes for All Device Drivers
+@{
+*/
+#define XST_SUCCESS                     0L
+#define XST_FAILURE                     1L
+#define XST_DEVICE_NOT_FOUND            2L
+#define XST_DEVICE_BLOCK_NOT_FOUND      3L
+#define XST_INVALID_VERSION             4L
+#define XST_DEVICE_IS_STARTED           5L
+#define XST_DEVICE_IS_STOPPED           6L
+#define XST_FIFO_ERROR                  7L	/*!< An error occurred during an
+						   operation with a FIFO such as
+						   an underrun or overrun, this
+						   error requires the device to
+						   be reset */
+#define XST_RESET_ERROR                 8L	/*!< An error occurred which requires
+						   the device to be reset */
+#define XST_DMA_ERROR                   9L	/*!< A DMA error occurred, this error
+						   typically requires the device
+						   using the DMA to be reset */
+#define XST_NOT_POLLED                  10L	/*!< The device is not configured for
+						   polled mode operation */
+#define XST_FIFO_NO_ROOM                11L	/*!< A FIFO did not have room to put
+						   the specified data into */
+#define XST_BUFFER_TOO_SMALL            12L	/*!< The buffer is not large enough
+						   to hold the expected data */
+#define XST_NO_DATA                     13L	/*!< There was no data available */
+#define XST_REGISTER_ERROR              14L	/*!< A register did not contain the
+						   expected value */
+#define XST_INVALID_PARAM               15L	/*!< An invalid parameter was passed
+						   into the function */
+#define XST_NOT_SGDMA                   16L	/*!< The device is not configured for
+						   scatter-gather DMA operation */
+#define XST_LOOPBACK_ERROR              17L	/*!< A loopback test failed */
+#define XST_NO_CALLBACK                 18L	/*!< A callback has not yet been
+						   registered */
+#define XST_NO_FEATURE                  19L	/*!< Device is not configured with
+						   the requested feature */
+#define XST_NOT_INTERRUPT               20L	/*!< Device is not configured for
+						   interrupt mode operation */
+#define XST_DEVICE_BUSY                 21L	/*!< Device is busy */
+#define XST_ERROR_COUNT_MAX             22L	/*!< The error counters of a device
+						   have maxed out */
+#define XST_IS_STARTED                  23L	/*!< Used when part of device is
+						   already started i.e.
+						   sub channel */
+#define XST_IS_STOPPED                  24L	/*!< Used when part of device is
+						   already stopped i.e.
+						   sub channel */
+#define XST_DATA_LOST                   26L	/*!< Driver defined error */
+#define XST_RECV_ERROR                  27L	/*!< Generic receive error */
+#define XST_SEND_ERROR                  28L	/*!< Generic transmit error */
+#define XST_NOT_ENABLED                 29L	/*!< A requested service is not
+						   available because it has not
+						   been enabled */
+#define XST_NO_ACCESS			30L	/*!< Generic access error */
+#define XST_TIMEOUT                     31L	/*!< Event timeout occurred */
+
+/** @} */
+/***************** Utility Component statuses 401 - 500  *********************/
+/**
+@name Utility Component Status Codes 401 - 500
+@{
+*/
+#define XST_MEMTEST_FAILED              401L	/*!< Memory test failed */
+
+/** @} */
+/***************** Common Components statuses 501 - 1000 *********************/
+/**
+@name Packet Fifo Status Codes 501 - 510
+@{
+*/
+/********************* Packet Fifo statuses 501 - 510 ************************/
+
+#define XST_PFIFO_LACK_OF_DATA          501L	/*!< Not enough data in FIFO   */
+#define XST_PFIFO_NO_ROOM               502L	/*!< Not enough room in FIFO   */
+#define XST_PFIFO_BAD_REG_VALUE         503L	/*!< Self test, a register value
+						   was invalid after reset */
+#define XST_PFIFO_ERROR                 504L	/*!< Generic packet FIFO error */
+#define XST_PFIFO_DEADLOCK              505L	/*!< Packet FIFO is reporting
+						 * empty and full simultaneously
+						 */
+/** @} */
+/**
+@name DMA Status Codes 511 - 530
+@{
+*/
+/************************** DMA statuses 511 - 530 ***************************/
+
+#define XST_DMA_TRANSFER_ERROR          511L	/*!< Self test, DMA transfer
+						   failed */
+#define XST_DMA_RESET_REGISTER_ERROR    512L	/*!< Self test, a register value
+						   was invalid after reset */
+#define XST_DMA_SG_LIST_EMPTY           513L	/*!< Scatter gather list contains
+						   no buffer descriptors ready
+						   to be processed */
+#define XST_DMA_SG_IS_STARTED           514L	/*!< Scatter gather not stopped */
+#define XST_DMA_SG_IS_STOPPED           515L	/*!< Scatter gather not running */
+#define XST_DMA_SG_LIST_FULL            517L	/*!< All the buffer descriptors of
+						   the scatter gather list are
+						   being used */
+#define XST_DMA_SG_BD_LOCKED            518L	/*!< The scatter gather buffer
+						   descriptor which is to be
+						   copied over in the scatter
+						   list is locked */
+#define XST_DMA_SG_NOTHING_TO_COMMIT    519L	/*!< No buffer descriptors have been
+						   put into the scatter gather
+						   list to be committed */
+#define XST_DMA_SG_COUNT_EXCEEDED       521L	/*!< The packet count threshold
+						   specified was larger than the
+						   total # of buffer descriptors
+						   in the scatter gather list */
+#define XST_DMA_SG_LIST_EXISTS          522L	/*!< The scatter gather list has
+						   already been created */
+#define XST_DMA_SG_NO_LIST              523L	/*!< No scatter gather list has
+						   been created */
+#define XST_DMA_SG_BD_NOT_COMMITTED     524L	/*!< The buffer descriptor which was
+						   being started was not committed
+						   to the list */
+#define XST_DMA_SG_NO_DATA              525L	/*!< The buffer descriptor to start
+						   has already been used by the
+						   hardware so it can't be reused
+						 */
+#define XST_DMA_SG_LIST_ERROR           526L	/*!< General purpose list access
+						   error */
+#define XST_DMA_BD_ERROR                527L	/*!< General buffer descriptor
+						   error */
+/** @} */
+/**
+@name IPIF Status Codes 531 - 550
+@{
+*/
+/************************** IPIF statuses 531 - 550 ***************************/
+
+#define XST_IPIF_REG_WIDTH_ERROR        531L	/*!< An invalid register width
+						   was passed into the function */
+#define XST_IPIF_RESET_REGISTER_ERROR   532L	/*!< The value of a register at
+						   reset was not valid */
+#define XST_IPIF_DEVICE_STATUS_ERROR    533L	/*!< A write to the device interrupt
+						   status register did not read
+						   back correctly */
+#define XST_IPIF_DEVICE_ACK_ERROR       534L	/*!< The device interrupt status
+						   register did not reset when
+						   acked */
+#define XST_IPIF_DEVICE_ENABLE_ERROR    535L	/*!< The device interrupt enable
+						   register was not updated when
+						   other registers changed */
+#define XST_IPIF_IP_STATUS_ERROR        536L	/*!< A write to the IP interrupt
+						   status register did not read
+						   back correctly */
+#define XST_IPIF_IP_ACK_ERROR           537L	/*!< The IP interrupt status register
+						   did not reset when acked */
+#define XST_IPIF_IP_ENABLE_ERROR        538L	/*!< IP interrupt enable register was
+						   not updated correctly when other
+						   registers changed */
+#define XST_IPIF_DEVICE_PENDING_ERROR   539L	/*!< The device interrupt pending
+						   register did not indicate the
+						   expected value */
+#define XST_IPIF_DEVICE_ID_ERROR        540L	/*!< The device interrupt ID register
+						   did not indicate the expected
+						   value */
+#define XST_IPIF_ERROR                  541L	/*!< Generic ipif error */
+/** @} */
+
+/****************** Device specific statuses 1001 - 4095 *********************/
+/**
+@name Ethernet Status Codes 1001 - 1050
+@{
+*/
+/********************* Ethernet statuses 1001 - 1050 *************************/
+
+#define XST_EMAC_MEMORY_SIZE_ERROR  1001L	/*!< Memory space is not big enough
+						 * to hold the minimum number of
+						 * buffers or descriptors */
+#define XST_EMAC_MEMORY_ALLOC_ERROR 1002L	/*!< Memory allocation failed */
+#define XST_EMAC_MII_READ_ERROR     1003L	/*!< MII read error */
+#define XST_EMAC_MII_BUSY           1004L	/*!< An MII operation is in progress */
+#define XST_EMAC_OUT_OF_BUFFERS     1005L	/*!< Driver is out of buffers */
+#define XST_EMAC_PARSE_ERROR        1006L	/*!< Invalid driver init string */
+#define XST_EMAC_COLLISION_ERROR    1007L	/*!< Excess deferral or late
+						 * collision on polled send */
+/** @} */
+/**
+@name UART Status Codes 1051 - 1075
+@{
+*/
+/*********************** UART statuses 1051 - 1075 ***************************/
+#define XST_UART
+
+#define XST_UART_INIT_ERROR         1051L
+#define XST_UART_START_ERROR        1052L
+#define XST_UART_CONFIG_ERROR       1053L
+#define XST_UART_TEST_FAIL          1054L
+#define XST_UART_BAUD_ERROR         1055L
+#define XST_UART_BAUD_RANGE         1056L
+
+/** @} */
+/**
+@name IIC Status Codes 1076 - 1100
+@{
+*/
+/************************ IIC statuses 1076 - 1100 ***************************/
+
+#define XST_IIC_SELFTEST_FAILED         1076	/*!< self test failed            */
+#define XST_IIC_BUS_BUSY                1077	/*!< bus found busy              */
+#define XST_IIC_GENERAL_CALL_ADDRESS    1078	/*!< mastersend attempted with   */
+					     /* general call address        */
+#define XST_IIC_STAND_REG_RESET_ERROR   1079	/*!< A non parameterizable reg   */
+					     /* value after reset not valid */
+#define XST_IIC_TX_FIFO_REG_RESET_ERROR 1080	/*!< Tx fifo included in design  */
+					     /* value after reset not valid */
+#define XST_IIC_RX_FIFO_REG_RESET_ERROR 1081	/*!< Rx fifo included in design  */
+					     /* value after reset not valid */
+#define XST_IIC_TBA_REG_RESET_ERROR     1082	/*!< 10 bit addr incl in design  */
+					     /* value after reset not valid */
+#define XST_IIC_CR_READBACK_ERROR       1083	/*!< Read of the control register */
+					     /* didn't return value written */
+#define XST_IIC_DTR_READBACK_ERROR      1084	/*!< Read of the data Tx reg     */
+					     /* didn't return value written */
+#define XST_IIC_DRR_READBACK_ERROR      1085	/*!< Read of the data Receive reg */
+					     /* didn't return value written */
+#define XST_IIC_ADR_READBACK_ERROR      1086	/*!< Read of the data Tx reg     */
+					     /* didn't return value written */
+#define XST_IIC_TBA_READBACK_ERROR      1087	/*!< Read of the 10 bit addr reg */
+					     /* didn't return written value */
+#define XST_IIC_NOT_SLAVE               1088	/*!< The device isn't a slave    */
+#define XST_IIC_ARB_LOST                1089	/*!< Arbitration lost for master */
+/** @} */
+/**
+@name ATMC Status Codes 1101 - 1125
+@{
+*/
+/*********************** ATMC statuses 1101 - 1125 ***************************/
+
+#define XST_ATMC_ERROR_COUNT_MAX    1101L	/*!< the error counters in the ATM
+						   controller hit the max value
+						   which requires the statistics
+						   to be cleared */
+/** @} */
+/**
+@name Flash Status Codes 1126 - 1150
+@{
+*/
+/*********************** Flash statuses 1126 - 1150 **************************/
+
+#define XST_FLASH_BUSY                1126L	/*!< Flash is erasing or programming
+						 */
+#define XST_FLASH_READY               1127L	/*!< Flash is ready for commands */
+#define XST_FLASH_ERROR               1128L	/*!< Flash had detected an internal
+						   error. Use XFlash_DeviceControl
+						   to retrieve device specific codes
+						 */
+#define XST_FLASH_ERASE_SUSPENDED     1129L	/*!< Flash is in suspended erase state
+						 */
+#define XST_FLASH_WRITE_SUSPENDED     1130L	/*!< Flash is in suspended write state
+						 */
+#define XST_FLASH_PART_NOT_SUPPORTED  1131L	/*!< Flash type not supported by
+						   driver */
+#define XST_FLASH_NOT_SUPPORTED       1132L	/*!< Operation not supported */
+#define XST_FLASH_TOO_MANY_REGIONS    1133L	/*!< Too many erase regions */
+#define XST_FLASH_TIMEOUT_ERROR       1134L	/*!< Programming or erase operation
+						   aborted due to a timeout */
+#define XST_FLASH_ADDRESS_ERROR       1135L	/*!< Accessed flash outside its
+						   addressable range */
+#define XST_FLASH_ALIGNMENT_ERROR     1136L	/*!< Write alignment error */
+#define XST_FLASH_BLOCKING_CALL_ERROR 1137L	/*!< Couldn't return immediately from
+						   write/erase function with
+						   XFL_NON_BLOCKING_WRITE/ERASE
+						   option cleared */
+#define XST_FLASH_CFI_QUERY_ERROR     1138L	/*!< Failed to query the device */
+/** @} */
+/**
+@name SPI Status Codes 1151 - 1175
+@{
+*/
+/*********************** SPI statuses 1151 - 1175 ****************************/
+
+#define XST_SPI_MODE_FAULT          1151	/*!< master was selected as slave */
+#define XST_SPI_TRANSFER_DONE       1152	/*!< data transfer is complete */
+#define XST_SPI_TRANSMIT_UNDERRUN   1153	/*!< slave underruns transmit register */
+#define XST_SPI_RECEIVE_OVERRUN     1154	/*!< device overruns receive register */
+#define XST_SPI_NO_SLAVE            1155	/*!< no slave has been selected yet */
+#define XST_SPI_TOO_MANY_SLAVES     1156	/*!< more than one slave is being
+						 * selected */
+#define XST_SPI_NOT_MASTER          1157	/*!< operation is valid only as master */
+#define XST_SPI_SLAVE_ONLY          1158	/*!< device is configured as slave-only
+						 */
+#define XST_SPI_SLAVE_MODE_FAULT    1159	/*!< slave was selected while disabled */
+#define XST_SPI_SLAVE_MODE          1160	/*!< device has been addressed as slave */
+#define XST_SPI_RECEIVE_NOT_EMPTY   1161	/*!< device received data in slave mode */
+
+#define XST_SPI_COMMAND_ERROR       1162	/*!< unrecognised command - qspi only */
+#define XST_SPI_POLL_DONE           1163        /*!< controller completed polling the
+						   device for status */
+/** @} */
+/**
+@name OPB Arbiter Status Codes 1176 - 1200
+@{
+*/
+/********************** OPB Arbiter statuses 1176 - 1200 *********************/
+
+#define XST_OPBARB_INVALID_PRIORITY  1176	/*!< the priority registers have either
+						 * one master assigned to two or more
+						 * priorities, or one master not
+						 * assigned to any priority
+						 */
+#define XST_OPBARB_NOT_SUSPENDED     1177	/*!< an attempt was made to modify the
+						 * priority levels without first
+						 * suspending the use of priority
+						 * levels
+						 */
+#define XST_OPBARB_PARK_NOT_ENABLED  1178	/*!< bus parking by id was enabled but
+						 * bus parking was not enabled
+						 */
+#define XST_OPBARB_NOT_FIXED_PRIORITY 1179	/*!< the arbiter must be in fixed
+						 * priority mode to allow the
+						 * priorities to be changed
+						 */
+/** @} */
+/**
+@name INTC Status Codes 1201 - 1225
+@{
+*/
+/************************ Intc statuses 1201 - 1225 **************************/
+
+#define XST_INTC_FAIL_SELFTEST      1201	/*!< self test failed */
+#define XST_INTC_CONNECT_ERROR      1202	/*!< interrupt already in use */
+/** @} */
+/**
+@name TmrCtr Status Codes 1226 - 1250
+@{
+*/
+/********************** TmrCtr statuses 1226 - 1250 **************************/
+
+#define XST_TMRCTR_TIMER_FAILED     1226	/*!< self test failed */
+/** @} */
+/**
+@name WdtTb Status Codes 1251 - 1275
+@{
+*/
+/********************** WdtTb statuses 1251 - 1275 ***************************/
+
+#define XST_WDTTB_TIMER_FAILED      1251L
+/** @} */
+/**
+@name PlbArb Status Codes 1276 - 1300
+@{
+*/
+/********************** PlbArb statuses 1276 - 1300 **************************/
+
+#define XST_PLBARB_FAIL_SELFTEST    1276L
+/** @} */
+/**
+@name Plb2Opb Status Codes 1301 - 1325
+@{
+*/
+/********************** Plb2Opb statuses 1301 - 1325 *************************/
+
+#define XST_PLB2OPB_FAIL_SELFTEST   1301L
+/** @} */
+/**
+@name Opb2Plb Status Codes 1326 - 1350
+@{
+*/
+/********************** Opb2Plb statuses 1326 - 1350 *************************/
+
+#define XST_OPB2PLB_FAIL_SELFTEST   1326L
+/** @} */
+/**
+@name SysAce Status Codes 1351 - 1360
+@{
+*/
+/********************** SysAce statuses 1351 - 1360 **************************/
+
+#define XST_SYSACE_NO_LOCK          1351L	/*!< No MPU lock has been granted */
+/** @} */
+/**
+@name PCI Bridge Status Codes 1361 - 1375
+@{
+*/
+/********************** PCI Bridge statuses 1361 - 1375 **********************/
+
+#define XST_PCI_INVALID_ADDRESS     1361L
+/** @} */
+/**
+@name FlexRay Constants 1400 - 1409
+@{
+*/
+/********************** FlexRay constants 1400 - 1409 *************************/
+
+#define XST_FR_TX_ERROR			1400
+#define XST_FR_TX_BUSY			1401
+#define XST_FR_BUF_LOCKED		1402
+#define XST_FR_NO_BUF			1403
+/** @} */
+/**
+@name USB constants 1410 - 1420
+@{
+*/
+/****************** USB constants 1410 - 1420  *******************************/
+
+#define XST_USB_ALREADY_CONFIGURED	1410
+#define XST_USB_BUF_ALIGN_ERROR		1411
+#define XST_USB_NO_DESC_AVAILABLE	1412
+#define XST_USB_BUF_TOO_BIG		1413
+#define XST_USB_NO_BUF			1414
+/** @} */
+/**
+@name HWICAP constants 1421 - 1429
+@{
+*/
+/****************** HWICAP constants 1421 - 1429  *****************************/
+
+#define XST_HWICAP_WRITE_DONE		1421
+
+/** @} */
+/**
+@name AXI VDMA constants 1430 - 1440
+@{
+*/
+/****************** AXI VDMA constants 1430 - 1440  *****************************/
+
+#define XST_VDMA_MISMATCH_ERROR		1430
+/** @} */
+/**
+@name NAND Flash Status Codes 1441 - 1459
+@{
+*/
+/*********************** NAND Flash statuses 1441 - 1459  *********************/
+
+#define XST_NAND_BUSY			1441L	/*!< Flash is erasing or
+						 * programming
+						 */
+#define XST_NAND_READY			1442L	/*!< Flash is ready for commands
+						 */
+#define XST_NAND_ERROR			1443L	/*!< Flash had detected an
+						 * internal error.
+						 */
+#define XST_NAND_PART_NOT_SUPPORTED	1444L	/*!< Flash type not supported by
+						 * driver
+						 */
+#define XST_NAND_OPT_NOT_SUPPORTED	1445L	/*!< Operation not supported
+						 */
+#define XST_NAND_TIMEOUT_ERROR		1446L	/*!< Programming or erase
+						 * operation aborted due to a
+						 * timeout
+						 */
+#define XST_NAND_ADDRESS_ERROR		1447L	/*!< Accessed flash outside its
+						 * addressable range
+						 */
+#define XST_NAND_ALIGNMENT_ERROR	1448L	/*!< Write alignment error
+						 */
+#define XST_NAND_PARAM_PAGE_ERROR	1449L	/*!< Failed to read parameter
+						 * page of the device
+						 */
+#define XST_NAND_CACHE_ERROR		1450L	/*!< Flash page buffer error
+						 */
+
+#define XST_NAND_WRITE_PROTECTED	1451L	/*!< Flash is write protected
+						 */
+/** @} */
+
+/**************************** Type Definitions *******************************/
+
+typedef s32 XStatus;
+
+/***************** Macros (Inline Functions) Definitions *********************/
+
+
+/************************** Function Prototypes ******************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of protection macro */
+
+/**
+ *@endcond
+ */
+
+/**
+* @} End of "addtogroup common_status_codes".
+*/
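
Driver entry points in this import typically return these codes as an s32 (XStatus), with XST_SUCCESS as the only success value. A minimal, illustrative caller-side sketch (example_phy_probe and example_find_phy are hypothetical helpers, not part of the imported sources):

#include "xstatus.h"

/* Hypothetical probe helper used purely for the example. */
static XStatus example_phy_probe(u32 PhyAddr)
{
	return (PhyAddr < 32U) ? XST_SUCCESS : XST_INVALID_PARAM;
}

static XStatus example_find_phy(void)
{
	u32 PhyAddr;

	for (PhyAddr = 0U; PhyAddr < 32U; PhyAddr++) {
		if (example_phy_probe(PhyAddr) == XST_SUCCESS) {
			return XST_SUCCESS;	/* the only value callers treat as "ok" */
		}
	}
	return XST_DEVICE_NOT_FOUND;	/* nothing answered at any address */
}
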
-- 
2.30.2


