[rtems-libbsd commit] Import DPAA driver snapshot

Sebastian Huber sebh at rtems.org
Mon Oct 23 07:27:51 UTC 2017


Module:    rtems-libbsd
Branch:    master
Commit:    28ee86a9b0f2e980beeb637da4f787065c74a39e
Changeset: http://git.rtems.org/rtems-libbsd/commit/?id=28ee86a9b0f2e980beeb637da4f787065c74a39e

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Wed Apr 27 11:58:19 2016 +0200

Import DPAA driver snapshot

Imported from the Freescale Linux repository

git://git.freescale.com/ppc/upstream/linux.git

commit 2774c204cd8bfc56a200ff4dcdfc9cdf5b6fc161.

The Linux compatibility layer is partly from FreeBSD.

---

 builder.py                                         |    2 +
 libbsd.py                                          |   44 +
 libbsd_waf.py                                      |   31 +-
 .../drivers/net/ethernet/freescale/dpaa/dpaa_eth.c |  988 +++++++
 .../drivers/net/ethernet/freescale/dpaa/dpaa_eth.h |  519 ++++
 .../net/ethernet/freescale/dpaa/dpaa_eth_common.c  | 1491 ++++++++++
 .../net/ethernet/freescale/dpaa/dpaa_eth_common.h  |  113 +
 .../net/ethernet/freescale/dpaa/dpaa_eth_sg.c      |  710 +++++
 .../net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c   |  171 ++
 .../net/ethernet/freescale/dpaa/dpaa_eth_trace.h   |  143 +
 .../net/ethernet/freescale/dpaa/dpaa_ethtool.c     |  417 +++
 .../net/ethernet/freescale/fman/crc_mac_addr_ext.h |  314 ++
 linux/drivers/net/ethernet/freescale/fman/fman.c   | 2957 +++++++++++++++++++
 linux/drivers/net/ethernet/freescale/fman/fman.h   |  500 ++++
 .../net/ethernet/freescale/fman/fman_dtsec.c       | 1786 ++++++++++++
 .../net/ethernet/freescale/fman/fman_dtsec.h       |   59 +
 .../drivers/net/ethernet/freescale/fman/fman_mac.h |  276 ++
 .../net/ethernet/freescale/fman/fman_memac.c       | 1382 +++++++++
 .../net/ethernet/freescale/fman/fman_memac.h       |   62 +
 .../net/ethernet/freescale/fman/fman_muram.c       |  124 +
 .../net/ethernet/freescale/fman/fman_muram.h       |   90 +
 .../net/ethernet/freescale/fman/fman_port.c        | 1827 ++++++++++++
 .../net/ethernet/freescale/fman/fman_port.h        |  240 ++
 .../drivers/net/ethernet/freescale/fman/fman_sp.c  |  171 ++
 .../drivers/net/ethernet/freescale/fman/fman_sp.h  |  103 +
 .../net/ethernet/freescale/fman/fman_tgec.c        |  853 ++++++
 .../net/ethernet/freescale/fman/fman_tgec.h        |   55 +
 linux/drivers/net/ethernet/freescale/fman/mac.c    | 1180 ++++++++
 linux/drivers/net/ethernet/freescale/fman/mac.h    |  147 +
 linux/drivers/soc/fsl/qbman/bman-debugfs.c         |  121 +
 linux/drivers/soc/fsl/qbman/bman.c                 |  692 +++++
 linux/drivers/soc/fsl/qbman/bman.h                 |  542 ++++
 linux/drivers/soc/fsl/qbman/bman_api.c             | 1123 ++++++++
 linux/drivers/soc/fsl/qbman/bman_portal.c          |  399 +++
 linux/drivers/soc/fsl/qbman/bman_priv.h            |  136 +
 linux/drivers/soc/fsl/qbman/bman_test.c            |   60 +
 linux/drivers/soc/fsl/qbman/bman_test.h            |   34 +
 linux/drivers/soc/fsl/qbman/bman_test_api.c        |  188 ++
 linux/drivers/soc/fsl/qbman/bman_test_thresh.c     |  216 ++
 linux/drivers/soc/fsl/qbman/bman_utils.c           |   76 +
 linux/drivers/soc/fsl/qbman/dpaa_resource.c        |  363 +++
 linux/drivers/soc/fsl/qbman/dpaa_sys.h             |  292 ++
 linux/drivers/soc/fsl/qbman/qman-debugfs.c         | 1317 +++++++++
 linux/drivers/soc/fsl/qbman/qman.c                 | 1106 +++++++
 linux/drivers/soc/fsl/qbman/qman.h                 | 1133 ++++++++
 linux/drivers/soc/fsl/qbman/qman_api.c             | 3026 ++++++++++++++++++++
 linux/drivers/soc/fsl/qbman/qman_driver.c          |   87 +
 linux/drivers/soc/fsl/qbman/qman_portal.c          |  796 +++++
 linux/drivers/soc/fsl/qbman/qman_priv.h            |  293 ++
 linux/drivers/soc/fsl/qbman/qman_test.c            |   61 +
 linux/drivers/soc/fsl/qbman/qman_test.h            |   44 +
 linux/drivers/soc/fsl/qbman/qman_test_api.c        |  222 ++
 linux/drivers/soc/fsl/qbman/qman_test_stash.c      |  540 ++++
 linux/drivers/soc/fsl/qbman/qman_utils.c           |  309 ++
 linux/include/soc/fsl/bman.h                       |  524 ++++
 linux/include/soc/fsl/qman.h                       | 1986 +++++++++++++
 rtemsbsd/include/bsp/nexus-devices.h               |   15 +-
 rtemsbsd/include/rtems/bsd/local/opt_dpaa.h        |   12 +
 rtemsbsd/powerpc/include/asm/atomic.h              |  109 +
 rtemsbsd/powerpc/include/asm/byteorder.h           |   94 +
 rtemsbsd/powerpc/include/asm/cache.h               |   37 +
 rtemsbsd/powerpc/include/asm/cacheflush.h          |    6 +
 rtemsbsd/powerpc/include/asm/fsl_pamu_stash.h      |    0
 rtemsbsd/powerpc/include/asm/mpc85xx.h             |   51 +
 rtemsbsd/powerpc/include/asm/pgtable.h             |    0
 rtemsbsd/powerpc/include/asm/types.h               |   62 +
 rtemsbsd/powerpc/include/fdt_phy.h                 |   53 +
 rtemsbsd/powerpc/include/linux/bitops.h            |  481 ++++
 rtemsbsd/powerpc/include/linux/bitrev.h            |   65 +
 rtemsbsd/powerpc/include/linux/clk.h               |    0
 rtemsbsd/powerpc/include/linux/compiler.h          |   79 +
 rtemsbsd/powerpc/include/linux/completion.h        |   67 +
 rtemsbsd/powerpc/include/linux/ctype.h             |    0
 rtemsbsd/powerpc/include/linux/debugfs.h           |    0
 rtemsbsd/powerpc/include/linux/delay.h             |    0
 rtemsbsd/powerpc/include/linux/device.h            |  501 ++++
 rtemsbsd/powerpc/include/linux/dma-mapping.h       |    0
 rtemsbsd/powerpc/include/linux/err.h               |   84 +
 rtemsbsd/powerpc/include/linux/errno.h             |   49 +
 rtemsbsd/powerpc/include/linux/etherdevice.h       |   49 +
 rtemsbsd/powerpc/include/linux/gfp.h               |  169 ++
 rtemsbsd/powerpc/include/linux/if_ether.h          |   48 +
 rtemsbsd/powerpc/include/linux/init.h              |    0
 rtemsbsd/powerpc/include/linux/interrupt.h         |   67 +
 rtemsbsd/powerpc/include/linux/io.h                |  171 ++
 rtemsbsd/powerpc/include/linux/iommu.h             |    0
 rtemsbsd/powerpc/include/linux/ioport.h            |   48 +
 rtemsbsd/powerpc/include/linux/jiffies.h           |   98 +
 rtemsbsd/powerpc/include/linux/kdev_t.h            |   38 +
 rtemsbsd/powerpc/include/linux/kernel.h            |  236 ++
 rtemsbsd/powerpc/include/linux/kobject.h           |  173 ++
 rtemsbsd/powerpc/include/linux/kref.h              |   90 +
 rtemsbsd/powerpc/include/linux/kthread.h           |  137 +
 rtemsbsd/powerpc/include/linux/list.h              |  434 +++
 rtemsbsd/powerpc/include/linux/log2.h              |  131 +
 rtemsbsd/powerpc/include/linux/module.h            |  102 +
 rtemsbsd/powerpc/include/linux/moduleparam.h       |  234 ++
 rtemsbsd/powerpc/include/linux/netdevice.h         |   57 +
 rtemsbsd/powerpc/include/linux/of.h                |   77 +
 rtemsbsd/powerpc/include/linux/of_address.h        |   44 +
 rtemsbsd/powerpc/include/linux/of_irq.h            |   44 +
 rtemsbsd/powerpc/include/linux/of_mdio.h           |    0
 rtemsbsd/powerpc/include/linux/of_net.h            |   42 +
 rtemsbsd/powerpc/include/linux/of_platform.h       |    1 +
 rtemsbsd/powerpc/include/linux/of_reserved_mem.h   |    0
 rtemsbsd/powerpc/include/linux/page.h              |   53 +
 rtemsbsd/powerpc/include/linux/percpu.h            |   64 +
 rtemsbsd/powerpc/include/linux/phy.h               |   73 +
 rtemsbsd/powerpc/include/linux/phy_fixed.h         |    0
 rtemsbsd/powerpc/include/linux/platform_device.h   |   51 +
 rtemsbsd/powerpc/include/linux/rbtree.h            |  146 +
 rtemsbsd/powerpc/include/linux/rwlock.h            |   66 +
 rtemsbsd/powerpc/include/linux/sched.h             |  125 +
 rtemsbsd/powerpc/include/linux/slab.h              |  113 +
 rtemsbsd/powerpc/include/linux/smp.h               |   57 +
 rtemsbsd/powerpc/include/linux/spinlock.h          |   65 +
 rtemsbsd/powerpc/include/linux/sysfs.h             |  194 ++
 rtemsbsd/powerpc/include/linux/threads.h           |   46 +
 rtemsbsd/powerpc/include/linux/time.h              |  131 +
 rtemsbsd/powerpc/include/linux/timer.h             |   74 +
 rtemsbsd/powerpc/include/linux/types.h             |   66 +
 rtemsbsd/powerpc/include/linux/uaccess.h           |    0
 rtemsbsd/powerpc/include/linux/vmalloc.h           |    0
 rtemsbsd/powerpc/include/linux/wait.h              |  137 +
 rtemsbsd/powerpc/include/linux/workqueue.h         |  231 ++
 rtemsbsd/sys/powerpc/compat.c                      |  297 ++
 .../net/ethernet/freescale/dpaa/if_fmanmac.c       |  801 ++++++
 .../net/ethernet/freescale/dpaa/if_fmanmac.h       |   82 +
 rtemsbsd/sys/powerpc/fdt_phy.c                     |  360 +++
 rtemsbsd/sys/powerpc/fman_muram.c                  |  116 +
 rtemsbsd/sys/powerpc/linux_compat.c                |  965 +++++++
 .../include/rtems/bsd/test/network-config.h.in     |    6 +
 132 files changed, 41012 insertions(+), 3 deletions(-)
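
The bulk of the import lives below linux/ and is compiled against the
Linux compatibility layer under rtemsbsd/powerpc/include/linux/ and
rtemsbsd/sys/powerpc/linux_compat.c.  As a rough, illustrative sketch only
(not code from this commit), such shims map Linux allocation primitives onto
the FreeBSD kernel malloc(9) API that the imported driver already uses in its
__rtems__ paths (M_KMALLOC, M_WAITOK | M_ZERO); the gfp_t typedef and the
kzalloc()/kfree() wrappers shown here are assumptions made for this sketch:

    #include <sys/param.h>
    #include <sys/malloc.h>

    MALLOC_DECLARE(M_KMALLOC);      /* malloc type used by the compat layer */

    typedef unsigned int gfp_t;     /* stand-in for the Linux GFP flag type */
    #define GFP_KERNEL 0

    /* kzalloc(): zeroed allocation that may sleep, like the Linux original */
    static inline void *
    kzalloc(size_t size, gfp_t flags)
    {
    	(void)flags;                /* the RTEMS port can always wait */
    	return malloc(size, M_KMALLOC, M_WAITOK | M_ZERO);
    }

    /* kfree(): release memory obtained from kzalloc() */
    static inline void
    kfree(void *ptr)
    {
    	free(ptr, M_KMALLOC);
    }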

diff --git a/builder.py b/builder.py
index c3010fa..5903777 100755
--- a/builder.py
+++ b/builder.py
@@ -188,6 +188,8 @@ def includes():
             '-Ifreebsd/contrib/expat/lib',
             '-Ifreebsd/contrib/libpcap',
             '-Ifreebsd/contrib/libxo',
+            '-Ilinux/include',
+            '-Ilinux/drivers/net/ethernet/freescale/fman',
             '-Irtemsbsd/sys',
             '-ImDNSResponder/mDNSCore',
             '-ImDNSResponder/mDNSShared',
diff --git a/libbsd.py b/libbsd.py
index bbb7aa7..b7cffe1 100644
--- a/libbsd.py
+++ b/libbsd.py
@@ -4175,7 +4175,51 @@ def mghttpd(mm):
     )
     return mod
 
+def dpaa(mm):
+    mod = builder.Module('dpaa')
+    mod.addCPUDependentLinuxSourceFiles(
+        [ 'powerpc' ],
+        [
+            'drivers/net/ethernet/freescale/dpaa/dpaa_eth.c',
+            'drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c',
+            'drivers/net/ethernet/freescale/fman/fman.c',
+            'drivers/net/ethernet/freescale/fman/fman_dtsec.c',
+            'drivers/net/ethernet/freescale/fman/fman_memac.c',
+            'drivers/net/ethernet/freescale/fman/fman_port.c',
+            'drivers/net/ethernet/freescale/fman/fman_sp.c',
+            'drivers/net/ethernet/freescale/fman/fman_tgec.c',
+            'drivers/net/ethernet/freescale/fman/mac.c',
+            'drivers/soc/fsl/qbman/bman_api.c',
+            'drivers/soc/fsl/qbman/bman.c',
+            'drivers/soc/fsl/qbman/bman_test_api.c',
+            'drivers/soc/fsl/qbman/bman_test.c',
+            'drivers/soc/fsl/qbman/bman_test_thresh.c',
+            'drivers/soc/fsl/qbman/bman_utils.c',
+            'drivers/soc/fsl/qbman/dpaa_resource.c',
+            'drivers/soc/fsl/qbman/qman_api.c',
+            'drivers/soc/fsl/qbman/qman.c',
+            'drivers/soc/fsl/qbman/qman_portal.c',
+            'drivers/soc/fsl/qbman/qman_test_api.c',
+            'drivers/soc/fsl/qbman/qman_test_stash.c',
+            'drivers/soc/fsl/qbman/qman_utils.c',
+        ],
+        mm.generator['source']()
+    )
+    mod.addCPUDependentRTEMSSourceFiles(
+        [ 'powerpc' ],
+        [
+            'sys/powerpc/compat.c',
+            'sys/powerpc/fdt_phy.c',
+            'sys/powerpc/fman_muram.c',
+            'sys/powerpc/linux_compat.c',
+            'sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c',
+        ],
+        mm.generator['source']()
+    )
+    return mod
+
 def sources(mm):
+    mm.addModule(dpaa(mm))
     mm.addModule(rtems(mm))
     mm.addModule(base(mm))
 
diff --git a/libbsd_waf.py b/libbsd_waf.py
index 420f12c..21553ba 100644
--- a/libbsd_waf.py
+++ b/libbsd_waf.py
@@ -78,6 +78,8 @@ def build(bld):
     includes += ["freebsd/contrib/expat/lib"]
     includes += ["freebsd/contrib/libpcap"]
     includes += ["freebsd/contrib/libxo"]
+    includes += ["linux/include"]
+    includes += ["linux/drivers/net/ethernet/freescale/fman"]
     includes += ["rtemsbsd/sys"]
     includes += ["mDNSResponder/mDNSCore"]
     includes += ["mDNSResponder/mDNSShared"]
@@ -2300,7 +2302,34 @@ def build(bld):
     if bld.get_env()["RTEMS_ARCH"] == "nios2":
         source += ['freebsd/sys/mips/mips/in_cksum.c']
     if bld.get_env()["RTEMS_ARCH"] == "powerpc":
-        source += ['freebsd/sys/powerpc/powerpc/in_cksum.c']
+        source += ['freebsd/sys/powerpc/powerpc/in_cksum.c',
+                   'linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c',
+                   'linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c',
+                   'linux/drivers/net/ethernet/freescale/fman/fman.c',
+                   'linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c',
+                   'linux/drivers/net/ethernet/freescale/fman/fman_memac.c',
+                   'linux/drivers/net/ethernet/freescale/fman/fman_port.c',
+                   'linux/drivers/net/ethernet/freescale/fman/fman_sp.c',
+                   'linux/drivers/net/ethernet/freescale/fman/fman_tgec.c',
+                   'linux/drivers/net/ethernet/freescale/fman/mac.c',
+                   'linux/drivers/soc/fsl/qbman/bman.c',
+                   'linux/drivers/soc/fsl/qbman/bman_api.c',
+                   'linux/drivers/soc/fsl/qbman/bman_test.c',
+                   'linux/drivers/soc/fsl/qbman/bman_test_api.c',
+                   'linux/drivers/soc/fsl/qbman/bman_test_thresh.c',
+                   'linux/drivers/soc/fsl/qbman/bman_utils.c',
+                   'linux/drivers/soc/fsl/qbman/dpaa_resource.c',
+                   'linux/drivers/soc/fsl/qbman/qman.c',
+                   'linux/drivers/soc/fsl/qbman/qman_api.c',
+                   'linux/drivers/soc/fsl/qbman/qman_portal.c',
+                   'linux/drivers/soc/fsl/qbman/qman_test_api.c',
+                   'linux/drivers/soc/fsl/qbman/qman_test_stash.c',
+                   'linux/drivers/soc/fsl/qbman/qman_utils.c',
+                   'rtemsbsd/sys/powerpc/compat.c',
+                   'rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c',
+                   'rtemsbsd/sys/powerpc/fdt_phy.c',
+                   'rtemsbsd/sys/powerpc/fman_muram.c',
+                   'rtemsbsd/sys/powerpc/linux_compat.c']
     if bld.get_env()["RTEMS_ARCH"] == "sh":
         source += ['freebsd/sys/mips/mips/in_cksum.c']
     if bld.get_env()["RTEMS_ARCH"] == "sparc":
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
new file mode 100644
index 0000000..73173b8
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -0,0 +1,988 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#ifndef __rtems__
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/icmp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/net.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/percpu.h>
+#include <linux/dma-mapping.h>
+#endif /* __rtems__ */
+#include <soc/fsl/bman.h>
+
+#include "fman.h"
+#include "fman_port.h"
+
+#include "mac.h"
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
+ * using trace events only need to #include <trace/events/sched.h>
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa_eth_trace.h"
+
+#define DPA_NAPI_WEIGHT		64
+
+/* Valid checksum indication */
+#define DPA_CSUM_VALID		0xFFFF
+
+#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"
+
+#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
+/* Ingress congestion threshold on FMan ports
+ * The size in bytes of the ingress tail-drop threshold on FMan ports.
+ * Traffic piling up above this value will be rejected by QMan and discarded
+ * by FMan.
+ */
+
+#ifndef __rtems__
+static u8 debug = -1;
+module_param(debug, byte, S_IRUGO);
+MODULE_PARM_DESC(debug, "Module/Driver verbosity level");
+
+/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
+static u16 tx_timeout = 1000;
+module_param(tx_timeout, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+#endif /* __rtems__ */
+
+/* BM */
+
+#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)
+
+static u8 dpa_priv_common_bpid;
+
+static void _dpa_rx_error(struct net_device *net_dev,
+			  const struct dpa_priv_s *priv,
+			  struct dpa_percpu_priv_s *percpu_priv,
+			  const struct qm_fd *fd,
+			  u32 fqid)
+{
+	/* limit common, possibly innocuous Rx FIFO Overflow errors'
+	 * interference with zero-loss convergence benchmark results.
+	 */
+	if (likely(fd->status & FM_FD_ERR_PHYSICAL))
+		pr_warn_once("non-zero error counters in fman statistics (sysfs)\n");
+	else
+#ifndef __rtems__
+		if (net_ratelimit())
+			netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
+				  fd->status & FM_FD_STAT_RX_ERRORS);
+#else /* __rtems__ */
+		BSD_ASSERT(0);
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+	percpu_priv->stats.rx_errors++;
+#endif /* __rtems__ */
+
+	if (fd->status & FM_FD_ERR_DMA)
+		percpu_priv->rx_errors.dme++;
+	if (fd->status & FM_FD_ERR_PHYSICAL)
+		percpu_priv->rx_errors.fpe++;
+	if (fd->status & FM_FD_ERR_SIZE)
+		percpu_priv->rx_errors.fse++;
+	if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
+		percpu_priv->rx_errors.phe++;
+
+	dpa_fd_release(net_dev, fd);
+}
+
+static void _dpa_tx_error(struct net_device *net_dev,
+			  const struct dpa_priv_s *priv,
+			  struct dpa_percpu_priv_s *percpu_priv,
+			  const struct qm_fd *fd,
+			  u32 fqid)
+{
+#ifndef __rtems__
+	struct sk_buff *skb;
+
+	if (net_ratelimit())
+		netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+			   fd->status & FM_FD_STAT_TX_ERRORS);
+
+	percpu_priv->stats.tx_errors++;
+#else /* __rtems__ */
+	struct ifnet *ifp = net_dev->ifp;
+
+	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+#endif /* __rtems__ */
+
+	/* If we intended the buffers from this frame to go into the bpools
+	 * when the FMan transmit was done, we need to put it in manually.
+	 */
+	if (fd->bpid != 0xff) {
+		dpa_fd_release(net_dev, fd);
+		return;
+	}
+
+#ifndef __rtems__
+	skb = _dpa_cleanup_tx_fd(priv, fd);
+	dev_kfree_skb(skb);
+#else /* __rtems__ */
+	_dpa_cleanup_tx_fd(ifp, fd);
+#endif /* __rtems__ */
+}
+
+#ifndef __rtems__
+static int dpaa_eth_poll(struct napi_struct *napi, int budget)
+{
+	struct dpa_napi_portal *np =
+			container_of(napi, struct dpa_napi_portal, napi);
+
+	int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+	if (cleaned < budget) {
+		int tmp;
+
+		napi_complete(napi);
+		tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+		DPA_ERR_ON(tmp);
+	}
+
+	return cleaned;
+}
+#endif /* __rtems__ */
+
+static void _dpa_tx_conf(struct net_device *net_dev,
+			 const struct dpa_priv_s *priv,
+			 struct dpa_percpu_priv_s *percpu_priv,
+			 const struct qm_fd *fd,
+			 u32 fqid)
+{
+#ifndef __rtems__
+	struct sk_buff	*skb;
+
+	if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+		if (net_ratelimit())
+			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+				   fd->status & FM_FD_STAT_TX_ERRORS);
+
+		percpu_priv->stats.tx_errors++;
+	}
+
+	percpu_priv->tx_confirm++;
+
+	skb = _dpa_cleanup_tx_fd(priv, fd);
+
+	dev_kfree_skb(skb);
+#else /* __rtems__ */
+	struct ifnet *ifp = net_dev->ifp;
+
+	if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
+		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+	}
+
+	_dpa_cleanup_tx_fd(ifp, fd);
+#endif /* __rtems__ */
+}
+
+static enum qman_cb_dqrr_result
+priv_rx_error_dqrr(struct qman_portal *portal,
+		   struct qman_fq *fq,
+		   const struct qm_dqrr_entry *dq)
+{
+	struct net_device *net_dev;
+	struct dpa_priv_s *priv;
+	struct dpa_percpu_priv_s *percpu_priv;
+	int *count_ptr;
+
+	net_dev = ((struct dpa_fq *)fq)->net_dev;
+	priv = netdev_priv(net_dev);
+
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+	count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+
+	if (dpaa_eth_napi_schedule(percpu_priv, portal))
+		return qman_cb_dqrr_stop;
+
+	if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
+		/* Unable to refill the buffer pool due to insufficient
+		 * system memory. Just release the frame back into the pool,
+		 * otherwise we'll soon end up with an empty buffer pool.
+		 */
+		dpa_fd_release(net_dev, &dq->fd);
+	else
+		_dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+priv_rx_default_dqrr(struct qman_portal *portal,
+		     struct qman_fq *fq,
+		     const struct qm_dqrr_entry *dq)
+{
+	struct net_device *net_dev;
+	struct dpa_priv_s *priv;
+	struct dpa_percpu_priv_s *percpu_priv;
+	int *count_ptr;
+	struct dpa_bp *dpa_bp;
+
+	net_dev = ((struct dpa_fq *)fq)->net_dev;
+	priv = netdev_priv(net_dev);
+	dpa_bp = priv->dpa_bp;
+
+#ifndef __rtems__
+	/* Trace the Rx fd */
+	trace_dpa_rx_fd(net_dev, fq, &dq->fd);
+#endif /* __rtems__ */
+
+	/* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+	count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);
+
+	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+		return qman_cb_dqrr_stop;
+
+	/* Vale of plenty: make sure we didn't run out of buffers */
+
+	if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
+#ifdef __rtems__
+	{
+		struct ifnet *ifp = net_dev->ifp;
+		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+#endif /* __rtems__ */
+		/* Unable to refill the buffer pool due to insufficient
+		 * system memory. Just release the frame back into the pool,
+		 * otherwise we'll soon end up with an empty buffer pool.
+		 */
+		dpa_fd_release(net_dev, &dq->fd);
+#ifdef __rtems__
+	}
+#endif /* __rtems__ */
+	else
+		_dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
+			count_ptr);
+
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+priv_tx_conf_error_dqrr(struct qman_portal *portal,
+			struct qman_fq *fq,
+			const struct qm_dqrr_entry *dq)
+{
+	struct net_device *net_dev;
+	struct dpa_priv_s *priv;
+	struct dpa_percpu_priv_s *percpu_priv;
+
+	net_dev = ((struct dpa_fq *)fq)->net_dev;
+	priv = netdev_priv(net_dev);
+
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+	if (dpaa_eth_napi_schedule(percpu_priv, portal))
+		return qman_cb_dqrr_stop;
+
+	_dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+priv_tx_conf_default_dqrr(struct qman_portal *portal,
+			  struct qman_fq *fq,
+			  const struct qm_dqrr_entry *dq)
+{
+	struct net_device *net_dev;
+	struct dpa_priv_s *priv;
+	struct dpa_percpu_priv_s *percpu_priv;
+
+	net_dev = ((struct dpa_fq *)fq)->net_dev;
+	priv = netdev_priv(net_dev);
+
+#ifndef __rtems__
+	/* Trace the fd */
+	trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
+#endif /* __rtems__ */
+
+	/* Non-migratable context, safe to use raw_cpu_ptr */
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+	if (dpaa_eth_napi_schedule(percpu_priv, portal))
+		return qman_cb_dqrr_stop;
+
+	_dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+
+	return qman_cb_dqrr_consume;
+}
+
+static void priv_ern(struct qman_portal *portal,
+		     struct qman_fq *fq,
+		     const struct qm_mr_entry *msg)
+{
+	struct net_device *net_dev;
+	const struct dpa_priv_s *priv;
+#ifndef __rtems__
+	struct sk_buff *skb;
+#else /* __rtems__ */
+	struct ifnet *ifp;
+#endif /* __rtems__ */
+	struct dpa_percpu_priv_s *percpu_priv;
+	const struct qm_fd *fd = &msg->ern.fd;
+
+	net_dev = ((struct dpa_fq *)fq)->net_dev;
+	priv = netdev_priv(net_dev);
+	/* Non-migratable context, safe to use raw_cpu_ptr */
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+#ifndef __rtems__
+	percpu_priv->stats.tx_dropped++;
+	percpu_priv->stats.tx_fifo_errors++;
+#else /* __rtems__ */
+	ifp = net_dev->ifp;
+	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+#endif /* __rtems__ */
+	count_ern(percpu_priv, msg);
+
+	/* If we intended this buffer to go into the pool
+	 * when the FM was done, we need to put it in
+	 * manually.
+	 */
+	if (msg->ern.fd.bpid != 0xff) {
+		dpa_fd_release(net_dev, fd);
+		return;
+	}
+
+#ifndef __rtems__
+	skb = _dpa_cleanup_tx_fd(priv, fd);
+	dev_kfree_skb_any(skb);
+#else /* __rtems__ */
+	_dpa_cleanup_tx_fd(ifp, fd);
+#endif /* __rtems__ */
+}
+
+static const struct dpa_fq_cbs_t private_fq_cbs = {
+	.rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
+	.tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
+	.rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
+	.tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
+	.egress_ern = { .cb = { .ern = priv_ern } }
+};
+
+static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
+{
+#ifndef __rtems__
+	struct dpa_percpu_priv_s *percpu_priv;
+	int i, j;
+
+	for_each_possible_cpu(i) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+		for (j = 0; j < qman_portal_max; j++)
+			napi_enable(&percpu_priv->np[j].napi);
+	}
+#endif /* __rtems__ */
+}
+
+static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
+{
+#ifndef __rtems__
+	struct dpa_percpu_priv_s *percpu_priv;
+	int i, j;
+
+	for_each_possible_cpu(i) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+		for (j = 0; j < qman_portal_max; j++)
+			napi_disable(&percpu_priv->np[j].napi);
+	}
+#endif /* __rtems__ */
+}
+
+#ifndef __rtems__
+static int dpa_eth_priv_start(struct net_device *net_dev)
+#else /* __rtems__ */
+int dpa_eth_priv_start(struct net_device *net_dev)
+#endif /* __rtems__ */
+{
+	int err;
+	struct dpa_priv_s *priv;
+
+	priv = netdev_priv(net_dev);
+
+	dpaa_eth_napi_enable(priv);
+
+	err = dpa_start(net_dev);
+	if (err < 0)
+		dpaa_eth_napi_disable(priv);
+
+	return err;
+}
+
+#ifndef __rtems__
+static int dpa_eth_priv_stop(struct net_device *net_dev)
+#else /* __rtems__ */
+int dpa_eth_priv_stop(struct net_device *net_dev)
+#endif /* __rtems__ */
+{
+	int err;
+	struct dpa_priv_s *priv;
+
+	err = dpa_stop(net_dev);
+	/* Allow NAPI to consume any frame still in the Rx/TxConfirm
+	 * ingress queues. This is to avoid a race between the current
+	 * context and ksoftirqd which could leave NAPI disabled while
+	 * in fact there's still Rx traffic to be processed.
+	 */
+	usleep_range(5000, 10000);
+
+	priv = netdev_priv(net_dev);
+	dpaa_eth_napi_disable(priv);
+
+	return err;
+}
+
+#ifndef __rtems__
+static const struct net_device_ops dpa_private_ops = {
+	.ndo_open = dpa_eth_priv_start,
+	.ndo_start_xmit = dpa_tx,
+	.ndo_stop = dpa_eth_priv_stop,
+	.ndo_tx_timeout = dpa_timeout,
+	.ndo_get_stats64 = dpa_get_stats64,
+	.ndo_set_mac_address = dpa_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+	.ndo_select_queue = dpa_select_queue,
+#endif
+	.ndo_change_mtu = dpa_change_mtu,
+	.ndo_set_rx_mode = dpa_set_rx_mode,
+	.ndo_init = dpa_ndo_init,
+	.ndo_set_features = dpa_set_features,
+	.ndo_fix_features = dpa_fix_features,
+};
+#endif /* __rtems__ */
+
+static int dpa_private_napi_add(struct net_device *net_dev)
+{
+#ifndef __rtems__
+	struct dpa_priv_s *priv = netdev_priv(net_dev);
+	struct dpa_percpu_priv_s *percpu_priv;
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+		percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
+			qman_portal_max * sizeof(struct dpa_napi_portal),
+			GFP_KERNEL);
+
+		if (!percpu_priv->np)
+			return -ENOMEM;
+
+		for (i = 0; i < qman_portal_max; i++)
+			netif_napi_add(net_dev, &percpu_priv->np[i].napi,
+				       dpaa_eth_poll, DPA_NAPI_WEIGHT);
+	}
+#endif /* __rtems__ */
+
+	return 0;
+}
+
+void dpa_private_napi_del(struct net_device *net_dev)
+{
+#ifndef __rtems__
+	struct dpa_priv_s *priv = netdev_priv(net_dev);
+	struct dpa_percpu_priv_s *percpu_priv;
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
+
+		if (percpu_priv->np) {
+			for (i = 0; i < qman_portal_max; i++)
+				netif_napi_del(&percpu_priv->np[i].napi);
+
+			devm_kfree(net_dev->dev.parent, percpu_priv->np);
+		}
+	}
+#endif /* __rtems__ */
+}
+
+static int dpa_private_netdev_init(struct net_device *net_dev)
+{
+	int i;
+	struct dpa_priv_s *priv = netdev_priv(net_dev);
+	struct dpa_percpu_priv_s *percpu_priv;
+#ifndef __rtems__
+	const u8 *mac_addr;
+#endif /* __rtems__ */
+
+	/* Although we access another CPU's private data here
+	 * we do it at initialization so it is safe
+	 */
+#ifndef __rtems__
+	for_each_possible_cpu(i) {
+#else /* __rtems__ */
+	for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
+#endif /* __rtems__ */
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+		percpu_priv->net_dev = net_dev;
+	}
+
+#ifndef __rtems__
+	net_dev->netdev_ops = &dpa_private_ops;
+	mac_addr = priv->mac_dev->addr;
+
+	net_dev->mem_start = priv->mac_dev->res->start;
+	net_dev->mem_end = priv->mac_dev->res->end;
+
+	net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_LLTX);
+
+	/* Advertise S/G and HIGHDMA support for private interfaces */
+	net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
+	/* Recent kernels enable GSO automatically, if
+	 * we declare NETIF_F_SG. For conformity, we'll
+	 * still declare GSO explicitly.
+	 */
+	net_dev->features |= NETIF_F_GSO;
+
+	return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
+#else /* __rtems__ */
+	return 0;
+#endif /* __rtems__ */
+}
+
+static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
+{
+	struct dpa_bp *dpa_bp;
+
+	dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
+	if (!dpa_bp)
+		return ERR_PTR(-ENOMEM);
+
+	dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
+	dpa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
+
+	dpa_bp->seed_cb = dpa_bp_priv_seed;
+	dpa_bp->free_buf_cb = _dpa_bp_free_pf;
+
+	return dpa_bp;
+}
+
+/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
+ * We won't be sending congestion notifications to FMan; for now, we just use
+ * this CGR to generate enqueue rejections to FMan in order to drop the frames
+ * before they reach our ingress queues and eat up memory.
+ */
+static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
+{
+	struct qm_mcc_initcgr initcgr;
+	u32 cs_th;
+	int err;
+
+	err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
+	if (err < 0) {
+		pr_err("Error %d allocating CGR ID\n", err);
+		goto out_error;
+	}
+
+	/* Enable CS TD, but disable Congestion State Change Notifications. */
+	initcgr.we_mask = QM_CGR_WE_CS_THRES;
+	initcgr.cgr.cscn_en = QM_CGR_EN;
+	cs_th = DPAA_INGRESS_CS_THRESHOLD;
+	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+	initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+	initcgr.cgr.cstd_en = QM_CGR_EN;
+
+	/* This is actually a hack, because this CGR will be associated with
+	 * our affine SWP. However, we'll place our ingress FQs in it.
+	 */
+	err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
+			      &initcgr);
+	if (err < 0) {
+		pr_err("Error %d creating ingress CGR with ID %d\n", err,
+		       priv->ingress_cgr.cgrid);
+		qman_release_cgrid(priv->ingress_cgr.cgrid);
+		goto out_error;
+	}
+	pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
+		 priv->ingress_cgr.cgrid, priv->mac_dev->addr);
+
+	/* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
+	 * range), but we have no common initialization path between the
+	 * different variants of the DPAA Eth driver, so we do it here rather
+	 * than modifying every other variant than "private Eth".
+	 */
+	priv->use_ingress_cgr = true;
+
+out_error:
+	return err;
+}
+
+static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
+			      size_t count)
+{
+	struct dpa_priv_s *priv = netdev_priv(net_dev);
+	int i;
+
+	netif_dbg(priv, probe, net_dev,
+		  "Using private BM buffer pools\n");
+
+	priv->bp_count = count;
+
+	for (i = 0; i < count; i++) {
+		int err;
+
+		err = dpa_bp_alloc(&dpa_bp[i]);
+		if (err < 0) {
+			dpa_bp_free(priv);
+			priv->dpa_bp = NULL;
+			return err;
+		}
+
+		priv->dpa_bp = &dpa_bp[i];
+	}
+
+	dpa_priv_common_bpid = priv->dpa_bp->bpid;
+	return 0;
+}
+
+#ifndef __rtems__
+static const struct of_device_id dpa_match[];
+
+static int
+dpaa_eth_priv_probe(struct platform_device *pdev)
+#else /* __rtems__ */
+int
+dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
+#endif /* __rtems__ */
+{
+	int err = 0, i, channel;
+	struct device *dev;
+	struct dpa_bp *dpa_bp;
+	struct dpa_fq *dpa_fq, *tmp;
+	size_t count = 1;
+	struct net_device *net_dev = NULL;
+	struct dpa_priv_s *priv = NULL;
+	struct dpa_percpu_priv_s *percpu_priv;
+	struct fm_port_fqs port_fqs;
+	struct dpa_buffer_layout_s *buf_layout = NULL;
+#ifndef __rtems__
+	struct mac_device *mac_dev;
+	struct task_struct *kth;
+#endif /* __rtems__ */
+
+	dev = &pdev->dev;
+
+	/* Get the buffer pool assigned to this interface;
+	 * run only once the default pool probing code
+	 */
+	dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
+			dpa_priv_bp_probe(dev);
+	if (IS_ERR(dpa_bp))
+		return PTR_ERR(dpa_bp);
+
+#ifndef __rtems__
+	/* Allocate this early, so we can store relevant information in
+	 * the private area
+	 */
+	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
+	if (!net_dev) {
+		dev_err(dev, "alloc_etherdev_mq() failed\n");
+		goto alloc_etherdev_mq_failed;
+	}
+#else /* __rtems__ */
+	net_dev = &mac_dev->net_dev;
+	net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
+#endif /* __rtems__ */
+
+#ifdef CONFIG_FSL_DPAA_ETH_FRIENDLY_IF_NAME
+	snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d",
+		 dpa_mac_fman_index_get(pdev),
+		 dpa_mac_hw_index_get(pdev));
+#endif
+
+	/* Do this here, so we can be verbose early */
+#ifndef __rtems__
+	SET_NETDEV_DEV(net_dev, dev);
+#endif /* __rtems__ */
+	dev_set_drvdata(dev, net_dev);
+
+	priv = netdev_priv(net_dev);
+	priv->net_dev = net_dev;
+
+#ifndef __rtems__
+	priv->msg_enable = netif_msg_init(debug, -1);
+
+	mac_dev = dpa_mac_dev_get(pdev);
+	if (IS_ERR(mac_dev) || !mac_dev) {
+		err = PTR_ERR(mac_dev);
+		goto mac_probe_failed;
+	}
+#endif /* __rtems__ */
+
+	/* We have physical ports, so we need to establish
+	 * the buffer layout.
+	 */
+	buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
+				  GFP_KERNEL);
+	if (!buf_layout)
+		goto alloc_failed;
+
+	dpa_set_buffers_layout(mac_dev, buf_layout);
+
+	/* For private ports, need to compute the size of the default
+	 * buffer pool, based on FMan port buffer layout; also update
+	 * the maximum buffer size for private ports if necessary
+	 */
+	dpa_bp->size = dpa_bp_size(&buf_layout[RX]);
+
+	INIT_LIST_HEAD(&priv->dpa_fq_list);
+
+	memset(&port_fqs, 0, sizeof(port_fqs));
+
+	err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
+	if (!err)
+		err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
+				       &port_fqs, true, TX);
+
+	if (err < 0)
+		goto fq_probe_failed;
+
+	/* bp init */
+
+	err = dpa_priv_bp_create(net_dev, dpa_bp, count);
+
+	if (err < 0)
+		goto bp_create_failed;
+
+	priv->mac_dev = mac_dev;
+
+	channel = dpa_get_channel();
+
+	if (channel < 0) {
+		err = channel;
+		goto get_channel_failed;
+	}
+
+	priv->channel = (u16)channel;
+
+#ifndef __rtems__
+	/* Start a thread that will walk the cpus with affine portals
+	 * and add this pool channel to each's dequeue mask.
+	 */
+	kth = kthread_run(dpaa_eth_add_channel,
+			  (void *)(unsigned long)priv->channel,
+			  "dpaa_%p:%d", net_dev, priv->channel);
+	if (!kth) {
+		err = -ENOMEM;
+		goto add_channel_failed;
+	}
+#else /* __rtems__ */
+	dpaa_eth_add_channel((void *)(unsigned long)priv->channel);
+#endif /* __rtems__ */
+
+	dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port[TX]);
+
+	/* Create a congestion group for this netdev, with
+	 * dynamically-allocated CGR ID.
+	 * Must be executed after probing the MAC, but before
+	 * assigning the egress FQs to the CGRs.
+	 */
+	err = dpaa_eth_cgr_init(priv);
+	if (err < 0) {
+		dev_err(dev, "Error initializing CGR\n");
+		goto tx_cgr_init_failed;
+	}
+	err = dpaa_eth_priv_ingress_cgr_init(priv);
+	if (err < 0) {
+		dev_err(dev, "Error initializing ingress CGR\n");
+		goto rx_cgr_init_failed;
+	}
+
+	/* Add the FQs to the interface, and make them active */
+	list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
+		err = dpa_fq_init(dpa_fq, false);
+		if (err < 0)
+			goto fq_alloc_failed;
+	}
+
+	priv->buf_layout = buf_layout;
+	priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
+	priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);
+
+	/* All real interfaces need their ports initialized */
+	dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
+			    buf_layout, dev);
+
+	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
+
+	if (!priv->percpu_priv) {
+		dev_err(dev, "devm_alloc_percpu() failed\n");
+		err = -ENOMEM;
+		goto alloc_percpu_failed;
+	}
+#ifndef __rtems__
+	for_each_possible_cpu(i) {
+#else /* __rtems__ */
+	for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
+#endif /* __rtems__ */
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+		memset(percpu_priv, 0, sizeof(*percpu_priv));
+	}
+
+	/* Initialize NAPI */
+	err = dpa_private_napi_add(net_dev);
+
+	if (err < 0)
+		goto napi_add_failed;
+
+	err = dpa_private_netdev_init(net_dev);
+
+	if (err < 0)
+		goto netdev_init_failed;
+
+#ifndef __rtems__
+	dpaa_eth_sysfs_init(&net_dev->dev);
+
+	pr_info("Probed interface %s\n", net_dev->name);
+#endif /* __rtems__ */
+
+	return 0;
+
+netdev_init_failed:
+napi_add_failed:
+	dpa_private_napi_del(net_dev);
+alloc_percpu_failed:
+#ifndef __rtems__
+	dpa_fq_free(dev, &priv->dpa_fq_list);
+#endif /* __rtems__ */
+fq_alloc_failed:
+#ifndef __rtems__
+	qman_delete_cgr_safe(&priv->ingress_cgr);
+	qman_release_cgrid(priv->ingress_cgr.cgrid);
+#endif /* __rtems__ */
+rx_cgr_init_failed:
+#ifndef __rtems__
+	qman_delete_cgr_safe(&priv->cgr_data.cgr);
+	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+#endif /* __rtems__ */
+tx_cgr_init_failed:
+#ifndef __rtems__
+add_channel_failed:
+#endif /* __rtems__ */
+get_channel_failed:
+	dpa_bp_free(priv);
+bp_create_failed:
+fq_probe_failed:
+alloc_failed:
+#ifndef __rtems__
+mac_probe_failed:
+#endif /* __rtems__ */
+	dev_set_drvdata(dev, NULL);
+#ifndef __rtems__
+	free_netdev(net_dev);
+alloc_etherdev_mq_failed:
+	if (atomic_read(&dpa_bp->refs) == 0)
+		devm_kfree(dev, dpa_bp);
+#else /* __rtems__ */
+	BSD_ASSERT(0);
+#endif /* __rtems__ */
+
+	return err;
+}
+
+#ifndef __rtems__
+static struct platform_device_id dpa_devtype[] = {
+	{
+		.name = "dpaa-ethernet",
+		.driver_data = 0,
+	}, {
+	}
+};
+MODULE_DEVICE_TABLE(platform, dpa_devtype);
+
+static struct platform_driver dpa_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+	},
+	.id_table = dpa_devtype,
+	.probe = dpaa_eth_priv_probe,
+	.remove = dpa_remove
+};
+
+static int __init dpa_load(void)
+{
+	int err;
+
+	pr_info(DPA_DESCRIPTION "\n");
+
+	/* initialise dpaa_eth mirror values */
+	dpa_rx_extra_headroom = fman_get_rx_extra_headroom();
+	dpa_max_frm = fman_get_max_frm();
+
+	err = platform_driver_register(&dpa_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(dpa_load);
+
+static void __exit dpa_unload(void)
+{
+	platform_driver_unregister(&dpa_driver);
+
+	/* Only one channel is used and needs to be released after all
+	 * interfaces are removed
+	 */
+	dpa_release_channel();
+}
+module_exit(dpa_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Andy Fleming <afleming at freescale.com>");
+MODULE_DESCRIPTION(DPA_DESCRIPTION);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
new file mode 100644
index 0000000..11b11e6
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -0,0 +1,519 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPA_H
+#define __DPA_H
+
+#include <linux/netdevice.h>
+#include <soc/fsl/qman.h>
+
+#include "fman.h"
+#include "mac.h"
+#include "dpaa_eth_trace.h"
+
+#ifndef __rtems__
+extern int dpa_rx_extra_headroom;
+extern int dpa_max_frm;
+
+#define dpa_get_rx_extra_headroom() dpa_rx_extra_headroom
+#define dpa_get_max_frm() dpa_max_frm
+#else /* __rtems__ */
+#define dpa_get_rx_extra_headroom fman_get_rx_extra_headroom
+#define dpa_get_max_frm fman_get_max_frm
+#endif /* __rtems__ */
+
+#define dpa_get_max_mtu()	\
+	(dpa_get_max_frm() - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+
+/* Simple enum of FQ types - used for array indexing */
+enum port_type {RX, TX};
+
+struct dpa_buffer_layout_s {
+	u16 priv_data_size;
+	bool parse_results;
+	bool time_stamp;
+	bool hash_results;
+	u16 data_align;
+};
+
+#define DPA_ERR_ON(cond)
+
+#define DPA_TX_PRIV_DATA_SIZE	16
+#define DPA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+#define DPA_TIME_STAMP_SIZE 8
+#define DPA_HASH_RESULTS_SIZE 8
+#define DPA_RX_PRIV_DATA_SIZE	(DPA_TX_PRIV_DATA_SIZE + \
+					dpa_get_rx_extra_headroom())
+
+#define FM_FD_STAT_RX_ERRORS						\
+	(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL	| \
+	 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
+	 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME	| \
+	 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
+	 FM_FD_ERR_PRS_HDR_ERR)
+
+#define FM_FD_STAT_TX_ERRORS \
+	(FM_FD_ERR_UNSUPPORTED_FORMAT | \
+	 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
+
+/* The raw buffer size must be cacheline aligned.
+ * Normally we use 2K buffers.
+ */
+#define DPA_BP_RAW_SIZE		2048
+
+/* This is what FMan is ever allowed to use.
+ * FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
+ * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
+ * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
+ * half-page-aligned buffers (can we?), so we reserve some more space
+ * for start-of-buffer alignment.
+ */
+#ifndef __rtems__
+#define dpa_bp_size(buffer_layout)	(SKB_WITH_OVERHEAD(DPA_BP_RAW_SIZE) - \
+						SMP_CACHE_BYTES)
+#else /* __rtems__ */
+/*
+ * FIXME: 4 bytes would be enough for the mbuf pointer.  However, jumbo receive
+ * frames overwrite this area if < 64 bytes.
+ */
+#define DPA_OUT_OF_BAND_SIZE 64
+#define DPA_MBUF_POINTER_OFFSET (DPA_BP_RAW_SIZE - DPA_OUT_OF_BAND_SIZE)
+#define dpa_bp_size(buffer_layout) DPA_MBUF_POINTER_OFFSET
+#endif /* __rtems__ */
+/* We must ensure that skb_shinfo is always cacheline-aligned. */
+#define DPA_SKB_SIZE(size)	((size) & ~(SMP_CACHE_BYTES - 1))
+
+/* Largest value that the FQD's OAL field can hold.
+ * This is DPAA-1.x specific.
+ */
+#define FSL_QMAN_MAX_OAL	127
+
+/* Default alignment for start of data in an Rx FD */
+#define DPA_FD_DATA_ALIGNMENT  16
+
+/* Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define FM_L3_PARSE_RESULT_IPV4	0x8000
+/* L3 Type field: First IP Present IPv6 */
+#define FM_L3_PARSE_RESULT_IPV6	0x4000
+
+/* Values for the L4R field of the FM Parse Results
+ * See $8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define FM_L4_PARSE_RESULT_UDP	0x40
+/* L4 Type field: TCP */
+#define FM_L4_PARSE_RESULT_TCP	0x20
+
+/* number of Tx queues to FMan */
+#define DPAA_ETH_TX_QUEUES	NR_CPUS
+
+#define DPAA_ETH_RX_QUEUES	128
+
+#define FSL_DPAA_ETH_MAX_BUF_COUNT	128
+#define FSL_DPAA_ETH_REFILL_THRESHOLD	80
+
+/* More detailed FQ types - used for fine-grained WQ assignments */
+enum dpa_fq_type {
+	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
+	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
+	FQ_TYPE_RX_PCD,		/* User-defined PCDs */
+	FQ_TYPE_TX,		/* "Real" Tx FQs */
+	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
+	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
+	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
+};
+
+struct dpa_fq {
+	struct qman_fq fq_base;
+	struct list_head list;
+	struct net_device *net_dev;
+	bool init;
+	u32 fqid;
+	u32 flags;
+	u16 channel;
+	u8 wq;
+	enum dpa_fq_type fq_type;
+};
+
+struct dpa_fq_cbs_t {
+	struct qman_fq rx_defq;
+	struct qman_fq tx_defq;
+	struct qman_fq rx_errq;
+	struct qman_fq tx_errq;
+	struct qman_fq egress_ern;
+};
+
+struct fqid_cell {
+	u32 start;
+	u32 count;
+};
+
+struct dpa_bp {
+	struct bman_pool *pool;
+	u8 bpid;
+#ifndef __rtems__
+	struct device *dev;
+#endif /* __rtems__ */
+	/* the buffer pools used for the private ports are initialized
+	 * with config_count buffers for each CPU; at runtime the
+	 * number of buffers per CPU is constantly brought back to this
+	 * level
+	 */
+	int config_count;
+	size_t size;
+	bool seed_pool;
+	/* physical address of the contiguous memory used by the pool to store
+	 * the buffers
+	 */
+	dma_addr_t paddr;
+	/* virtual address of the contiguous memory used by the pool to store
+	 * the buffers
+	 */
+	void __iomem *vaddr;
+	/* current number of buffers in the bpool allotted to this CPU */
+	int __percpu *percpu_count;
+	atomic_t refs;
+	/* some bpools need to be seeded before use by this cb */
+	int (*seed_cb)(struct dpa_bp *);
+	/* some bpools need to be emptied before freeing; this cb is used
+	 * for freeing of individual buffers taken from the pool
+	 */
+	void (*free_buf_cb)(void *addr);
+};
+
+struct dpa_rx_errors {
+	u64 dme;		/* DMA Error */
+	u64 fpe;		/* Frame Physical Error */
+	u64 fse;		/* Frame Size Error */
+	u64 phe;		/* Header Error */
+};
+
+/* Counters for QMan ERN frames - one counter per rejection code */
+struct dpa_ern_cnt {
+	u64 cg_tdrop;		/* Congestion group taildrop */
+	u64 wred;		/* WRED congestion */
+	u64 err_cond;		/* Error condition */
+	u64 early_window;	/* Order restoration, frame too early */
+	u64 late_window;	/* Order restoration, frame too late */
+	u64 fq_tdrop;		/* FQ taildrop */
+	u64 fq_retired;		/* FQ is retired */
+	u64 orp_zero;		/* ORP disabled */
+};
+
+struct dpa_napi_portal {
+#ifndef __rtems__
+	struct napi_struct napi;
+#endif /* __rtems__ */
+	struct qman_portal *p;
+};
+
+struct dpa_percpu_priv_s {
+	struct net_device *net_dev;
+	struct dpa_napi_portal *np;
+	u64 in_interrupt;
+	u64 tx_confirm;
+	/* fragmented (non-linear) skbuffs received from the stack */
+	u64 tx_frag_skbuffs;
+#ifndef __rtems__
+	struct rtnl_link_stats64 stats;
+#endif /* __rtems__ */
+	struct dpa_rx_errors rx_errors;
+	struct dpa_ern_cnt ern_cnt;
+};
+
+struct dpa_priv_s {
+	struct dpa_percpu_priv_s __percpu *percpu_priv;
+	struct dpa_bp *dpa_bp;
+	/* Store here the needed Tx headroom for convenience and speed
+	 * (even though it can be computed based on the fields of buf_layout)
+	 */
+	u16 tx_headroom;
+	struct net_device *net_dev;
+	struct mac_device *mac_dev;
+	struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES];
+	struct qman_fq *conf_fqs[DPAA_ETH_TX_QUEUES];
+
+	size_t bp_count;
+
+	u16 channel;	/* "fsl,qman-channel-id" */
+	struct list_head dpa_fq_list;
+
+#ifndef __rtems__
+	u32 msg_enable;	/* net_device message level */
+#endif /* __rtems__ */
+
+	struct {
+		/* All egress queues to a given net device belong to one
+		 * (and the same) congestion group.
+		 */
+		struct qman_cgr cgr;
+		/* If congested, when it began. Used for performance stats. */
+		u32 congestion_start_jiffies;
+		/* Number of jiffies the Tx port was congested. */
+		u32 congested_jiffies;
+		/* Counter for the number of times the CGR
+		 * entered congestion state
+		 */
+		u32 cgr_congested_count;
+	} cgr_data;
+	/* Use a per-port CGR for ingress traffic. */
+	bool use_ingress_cgr;
+	struct qman_cgr ingress_cgr;
+
+	struct dpa_buffer_layout_s *buf_layout;
+	u16 rx_headroom;
+};
+
+struct fm_port_fqs {
+	struct dpa_fq *tx_defq;
+	struct dpa_fq *tx_errq;
+	struct dpa_fq *rx_defq;
+	struct dpa_fq *rx_errq;
+};
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp);
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *count_ptr);
+void _dpa_rx(struct net_device *net_dev,
+	     struct qman_portal *portal,
+	     const struct dpa_priv_s *priv,
+	     struct dpa_percpu_priv_s *percpu_priv,
+	     const struct qm_fd *fd,
+	     u32 fqid,
+	     int *count_ptr);
+#ifndef __rtems__
+int dpa_tx(struct sk_buff *skb, struct net_device *net_dev);
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+				   const struct qm_fd *fd);
+
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv, struct sk_buff *skb,
+		       struct qm_fd *fd, char *parse_results);
+#else /* __rtems__ */
+void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd);
+#endif /* __rtems__ */
+
+static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv,
+					 struct qman_portal *portal)
+{
+#ifndef __rtems__
+	/* In case of threaded ISR for RT enable kernel,
+	 * in_irq() does not return appropriate value, so use
+	 * in_serving_softirq to distinguish softirq or irq context.
+	 */
+	if (unlikely(in_irq() || !in_serving_softirq())) {
+		/* Disable QMan IRQ and invoke NAPI */
+		int ret = qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
+
+		if (likely(!ret)) {
+			const struct qman_portal_config *pc =
+					qman_p_get_portal_config(portal);
+			struct dpa_napi_portal *np =
+					&percpu_priv->np[pc->channel];
+
+			np->p = portal;
+			napi_schedule(&np->napi);
+			percpu_priv->in_interrupt++;
+			return 1;
+		}
+	}
+#else /* __rtems__ */
+	/* FIXME */
+#endif /* __rtems__ */
+	return 0;
+}
+
+static inline ssize_t __const dpa_fd_length(const struct qm_fd *fd)
+{
+	return fd->length20;
+}
+
+static inline ssize_t __const dpa_fd_offset(const struct qm_fd *fd)
+{
+	return fd->offset;
+}
+
+#ifndef __rtems__
+/* Verifies if the skb length is below the interface MTU */
+static inline int dpa_check_rx_mtu(struct sk_buff *skb, int mtu)
+{
+	if (unlikely(skb->len > mtu))
+		if ((skb->protocol != htons(ETH_P_8021Q)) ||
+		    (skb->len > mtu + 4))
+			return -1;
+
+	return 0;
+}
+#endif /* __rtems__ */
+
+static inline u16 dpa_get_headroom(struct dpa_buffer_layout_s *bl)
+{
+	u16 headroom;
+	/* The frame headroom must accommodate:
+	 * - the driver private data area
+	 * - parse results, hash results, timestamp if selected
+	 * If either hash results or time stamp are selected, both will
+	 * be copied to/from the frame headroom, as TS is located between PR and
+	 * HR in the IC and IC copy size has a granularity of 16 bytes
+	 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
+	 *
+	 * Also make sure the headroom is a multiple of data_align bytes
+	 */
+	headroom = (u16)(bl->priv_data_size +
+		   (bl->parse_results ? DPA_PARSE_RESULTS_SIZE : 0) +
+		   (bl->hash_results || bl->time_stamp ?
+		    DPA_TIME_STAMP_SIZE + DPA_HASH_RESULTS_SIZE : 0));
+
+	return bl->data_align ? ALIGN(headroom, bl->data_align) : headroom;
+}
+
+#ifndef __rtems__
+void dpaa_eth_sysfs_remove(struct device *dev);
+void dpaa_eth_sysfs_init(struct device *dev);
+
+void dpa_private_napi_del(struct net_device *net_dev);
+#endif /* __rtems__ */
+
+static inline void clear_fd(struct qm_fd *fd)
+{
+	fd->opaque_addr = 0;
+	fd->opaque = 0;
+	fd->cmd = 0;
+}
+
+static inline int _dpa_tx_fq_to_id(const struct dpa_priv_s *priv,
+		struct qman_fq *tx_fq)
+{
+	int i;
+
+	for (i = 0; i < DPAA_ETH_TX_QUEUES; i++)
+		if (priv->egress_fqs[i] == tx_fq)
+			return i;
+
+	return -EINVAL;
+}
+
+#ifndef __rtems__
+static inline int dpa_xmit(struct dpa_priv_s *priv,
+			   struct rtnl_link_stats64 *percpu_stats,
+			   int queue,
+			   struct qm_fd *fd)
+{
+	int err, i;
+	struct qman_fq *egress_fq;
+
+	egress_fq = priv->egress_fqs[queue];
+	if (fd->bpid == 0xff)
+		fd->cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+
+	/* Trace this Tx fd */
+	trace_dpa_tx_fd(priv->net_dev, egress_fq, fd);
+
+	for (i = 0; i < 100000; i++) {
+		err = qman_enqueue(egress_fq, fd, 0);
+		if (err != -EBUSY)
+			break;
+	}
+
+	if (unlikely(err < 0)) {
+		percpu_stats->tx_errors++;
+		percpu_stats->tx_fifo_errors++;
+		return err;
+	}
+
+	percpu_stats->tx_packets++;
+	percpu_stats->tx_bytes += dpa_fd_length(fd);
+
+	return 0;
+}
+#endif /* __rtems__ */
+
+/* Use multiple WQs for FQ assignment:
+ *	- Tx Confirmation queues go to WQ1.
+ *	- Rx Default and Tx queues go to WQ3 (no differentiation between
+ *	  Rx and Tx traffic).
+ *	- Rx Error and Tx Error queues go to WQ2 (giving them a better chance
+ *	  to be scheduled, in case there are many more FQs in WQ3).
+ * This ensures that Tx-confirmed buffers are timely released. In particular,
+ * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
+ * are greatly outnumbered by other FQs in the system, while
+ * dequeue scheduling is round-robin.
+ */
+static inline void _dpa_assign_wq(struct dpa_fq *fq)
+{
+	switch (fq->fq_type) {
+	case FQ_TYPE_TX_CONFIRM:
+	case FQ_TYPE_TX_CONF_MQ:
+		fq->wq = 1;
+		break;
+	case FQ_TYPE_RX_DEFAULT:
+	case FQ_TYPE_TX:
+		fq->wq = 3;
+		break;
+	case FQ_TYPE_RX_ERROR:
+	case FQ_TYPE_TX_ERROR:
+		fq->wq = 2;
+		break;
+	default:
+		WARN(1, "Invalid FQ type %d for FQID %d!\n",
+		     fq->fq_type, fq->fqid);
+	}
+}
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+/* Use in lieu of skb_get_queue_mapping() */
+#define dpa_get_queue_mapping(skb) \
+	raw_smp_processor_id()
+#else
+/* Use the queue selected by XPS */
+#define dpa_get_queue_mapping(skb) \
+	skb_get_queue_mapping(skb)
+#endif
+
+static inline void _dpa_bp_free_pf(void *addr)
+{
+#ifndef __rtems__
+	put_page(virt_to_head_page(addr));
+#else /* __rtems__ */
+	BSD_ASSERT(0);
+#endif /* __rtems__ */
+}
+
+#endif	/* __DPA_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
new file mode 100644
index 0000000..9a4a218
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.c
@@ -0,0 +1,1491 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/etherdevice.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#ifndef __rtems__
+#include <linux/highmem.h>
+#include <linux/sort.h>
+#endif /* __rtems__ */
+#include <soc/fsl/qman.h>
+#ifndef __rtems__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#endif /* __rtems__ */
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+#include "mac.h"
+
+/* Size in bytes of the FQ taildrop threshold */
+#define DPA_FQ_TD 0x200000
+
+#define DPAA_CS_THRESHOLD_1G 0x06000000
+/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
+ * The size in bytes of the egress Congestion State notification threshold on
+ * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
+ * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
+ * and the larger the frame size, the more acute the problem.
+ * So we have to find a balance between these factors:
+ * - avoiding the device staying congested for a prolonged time (risking
+ *   that the netdev watchdog fires - see also the tx_timeout module param);
+ * - affecting performance of protocols such as TCP, which otherwise
+ *   behave well under the congestion notification mechanism;
+ * - preventing the Tx cores from tightly-looping (as if the congestion
+ *   threshold was too low to be effective);
+ * - running out of memory if the CS threshold is set too high.
+ */
+
+#define DPAA_CS_THRESHOLD_10G 0x10000000
+/* The size in bytes of the egress Congestion State notification threshold on
+ * 10G ports, range 0x1000 .. 0x10000000
+ */
+
+static struct dpa_bp *dpa_bp_array[64];
+
+#ifndef __rtems__
+int dpa_max_frm;
+
+int dpa_rx_extra_headroom;
+#endif /* __rtems__ */
+
+static const struct fqid_cell tx_confirm_fqids[] = {
+	{0, DPAA_ETH_TX_QUEUES}
+};
+
+static const struct fqid_cell default_fqids[][3] = {
+	[RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} },
+	[TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} }
+};
+
+#ifndef __rtems__
+int dpa_netdev_init(struct net_device *net_dev,
+		    const u8 *mac_addr,
+		    u16 tx_timeout)
+{
+	int err;
+	struct dpa_priv_s *priv = netdev_priv(net_dev);
+	struct device *dev = net_dev->dev.parent;
+
+	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	/* we do not want shared skbs on TX */
+	net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
+	net_dev->features |= net_dev->hw_features;
+	net_dev->vlan_features = net_dev->features;
+
+	memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+	memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+
+	net_dev->ethtool_ops = &dpa_ethtool_ops;
+
+	net_dev->needed_headroom = priv->tx_headroom;
+	net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+
+	/* start without the RUNNING flag, phylib controls it later */
+	netif_carrier_off(net_dev);
+
+	err = register_netdev(net_dev);
+	if (err < 0) {
+		dev_err(dev, "register_netdev() = %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+#endif /* __rtems__ */
+
+int dpa_start(struct net_device *net_dev)
+{
+	int err, i;
+	struct dpa_priv_s *priv;
+	struct mac_device *mac_dev;
+
+	priv = netdev_priv(net_dev);
+	mac_dev = priv->mac_dev;
+
+#ifndef __rtems__
+	err = mac_dev->init_phy(net_dev, priv->mac_dev);
+	if (err < 0) {
+		netif_err(priv, ifup, net_dev, "init_phy() = %d\n", err);
+		return err;
+	}
+#endif /* __rtems__ */
+
+	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+		err = fman_port_enable(mac_dev->port[i]);
+		if (err)
+			goto mac_start_failed;
+	}
+
+	err = priv->mac_dev->start(mac_dev);
+	if (err < 0) {
+		netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+		goto mac_start_failed;
+	}
+
+#ifndef __rtems__
+	netif_tx_start_all_queues(net_dev);
+#endif /* __rtems__ */
+
+	return 0;
+
+mac_start_failed:
+	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
+		fman_port_disable(mac_dev->port[i]);
+
+	return err;
+}
+
+int dpa_stop(struct net_device *net_dev)
+{
+	int i, err, error;
+	struct dpa_priv_s *priv;
+	struct mac_device *mac_dev;
+
+	priv = netdev_priv(net_dev);
+	mac_dev = priv->mac_dev;
+
+#ifndef __rtems__
+	netif_tx_stop_all_queues(net_dev);
+#endif /* __rtems__ */
+	/* Allow the Fman (Tx) port to process in-flight frames before we
+	 * try switching it off.
+	 */
+	usleep_range(5000, 10000);
+
+	err = mac_dev->stop(mac_dev);
+	if (err < 0)
+		netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
+			  err);
+
+	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+		error = fman_port_disable(mac_dev->port[i]);
+		if (error)
+			err = error;
+	}
+
+#ifndef __rtems__
+	if (mac_dev->phy_dev)
+		phy_disconnect(mac_dev->phy_dev);
+	mac_dev->phy_dev = NULL;
+#endif /* __rtems__ */
+
+	return err;
+}
+
+#ifndef __rtems__
+void dpa_timeout(struct net_device *net_dev)
+{
+	const struct dpa_priv_s	*priv;
+	struct dpa_percpu_priv_s *percpu_priv;
+
+	priv = netdev_priv(net_dev);
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+
+	netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
+		   jiffies_to_msecs(jiffies - net_dev->trans_start));
+
+	percpu_priv->stats.tx_errors++;
+}
+
+/* Calculates the statistics for the given device by adding the statistics
+ * collected by each CPU.
+ */
+struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
+					  struct rtnl_link_stats64 *stats)
+{
+	struct dpa_priv_s *priv = netdev_priv(net_dev);
+	u64 *cpustats;
+	u64 *netstats = (u64 *)stats;
+	int i, j;
+	struct dpa_percpu_priv_s *percpu_priv;
+	int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
+	for_each_possible_cpu(i) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+		cpustats = (u64 *)&percpu_priv->stats;
+
+		for (j = 0; j < numstats; j++)
+			netstats[j] += cpustats[j];
+	}
+
+	return stats;
+}
+#endif /* __rtems__ */
+
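A minimal standalone sketch of the aggregation pattern used by dpa_get_stats64()
above: each per-CPU statistics block is treated as a flat array of u64 counters and
summed field by field. The structure layout and values below are assumed purely for
illustration and are not the kernel's rtnl_link_stats64:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for rtnl_link_stats64; layout assumed for illustration. */
	struct stats { uint64_t rx_packets, tx_packets, rx_errors, tx_errors; };

	static void aggregate(struct stats *total, const struct stats *percpu,
			      int ncpus)
	{
		const int nfields = sizeof(*total) / sizeof(uint64_t);
		uint64_t *out = (uint64_t *)total;
		int cpu, f;

		for (cpu = 0; cpu < ncpus; cpu++) {
			const uint64_t *in = (const uint64_t *)&percpu[cpu];

			for (f = 0; f < nfields; f++)
				out[f] += in[f];
		}
	}

	int main(void)
	{
		struct stats percpu[2] = { { 1, 2, 0, 0 }, { 3, 4, 1, 0 } };
		struct stats total;

		memset(&total, 0, sizeof(total));
		aggregate(&total, percpu, 2);
		/* Prints: rx_packets=4 tx_packets=6 */
		printf("rx_packets=%llu tx_packets=%llu\n",
		       (unsigned long long)total.rx_packets,
		       (unsigned long long)total.tx_packets);
		return 0;
	}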
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+	const int max_mtu = dpa_get_max_mtu();
+
+	/* Make sure we don't exceed the Ethernet controller's MAXFRM */
+	if (new_mtu < 68 || new_mtu > max_mtu) {
+		netdev_err(net_dev, "Invalid L3 mtu %d (must be between %d and %d).\n",
+			   new_mtu, 68, max_mtu);
+		return -EINVAL;
+	}
+#ifndef __rtems__
+	net_dev->mtu = new_mtu;
+#endif /* __rtems__ */
+
+	return 0;
+}
+
+#ifndef __rtems__
+/* .ndo_init callback */
+int dpa_ndo_init(struct net_device *net_dev)
+{
+	/* If fsl_fm_max_frm is set to a higher value than the standard 1500,
+	 * we choose conservatively and let the user explicitly set a higher
+	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+	 * in the same LAN.
+	 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
+	 * start with the maximum allowed.
+	 */
+	int init_mtu = min(dpa_get_max_mtu(), ETH_DATA_LEN);
+
+	netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
+		   init_mtu);
+	net_dev->mtu = init_mtu;
+
+	return 0;
+}
+
+int dpa_set_features(struct net_device *dev, netdev_features_t features)
+{
+	/* Not much to do here for now */
+	dev->features = features;
+	return 0;
+}
+
+netdev_features_t dpa_fix_features(struct net_device *dev,
+				   netdev_features_t features)
+{
+	netdev_features_t unsupported_features = 0;
+
+	/* In theory we should never be requested to enable features that
+	 * we didn't set in netdev->features and netdev->hw_features at probe
+	 * time, but double check just to be on the safe side.
+	 * We don't support enabling Rx csum through ethtool yet.
+	 */
+	unsupported_features |= NETIF_F_RXCSUM;
+
+	features &= ~unsupported_features;
+
+	return features;
+}
+
+int dpa_remove(struct platform_device *pdev)
+{
+	int err;
+	struct device *dev;
+	struct net_device *net_dev;
+	struct dpa_priv_s *priv;
+
+	dev = &pdev->dev;
+	net_dev = dev_get_drvdata(dev);
+
+	priv = netdev_priv(net_dev);
+
+	dpaa_eth_sysfs_remove(dev);
+
+	dev_set_drvdata(dev, NULL);
+	unregister_netdev(net_dev);
+
+	err = dpa_fq_free(dev, &priv->dpa_fq_list);
+
+	qman_delete_cgr_safe(&priv->ingress_cgr);
+	qman_release_cgrid(priv->ingress_cgr.cgrid);
+	qman_delete_cgr_safe(&priv->cgr_data.cgr);
+	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+
+	dpa_private_napi_del(net_dev);
+
+	dpa_bp_free(priv);
+
+	if (priv->buf_layout)
+		devm_kfree(dev, priv->buf_layout);
+
+	free_netdev(net_dev);
+
+	return err;
+}
+
+struct mac_device *dpa_mac_dev_get(struct platform_device *pdev)
+{
+	struct device *dpa_dev, *dev;
+	struct device_node *mac_node;
+	struct platform_device *of_dev;
+	struct mac_device *mac_dev;
+	struct dpaa_eth_data *eth_data;
+
+	dpa_dev = &pdev->dev;
+	eth_data = dpa_dev->platform_data;
+	if (!eth_data)
+		return ERR_PTR(-ENODEV);
+
+	mac_node = eth_data->mac_node;
+
+	of_dev = of_find_device_by_node(mac_node);
+	if (!of_dev) {
+		dev_err(dpa_dev, "of_find_device_by_node(%s) failed\n",
+			mac_node->full_name);
+		of_node_put(mac_node);
+		return ERR_PTR(-EINVAL);
+	}
+	of_node_put(mac_node);
+
+	dev = &of_dev->dev;
+
+	mac_dev = dev_get_drvdata(dev);
+	if (!mac_dev) {
+		dev_err(dpa_dev, "dev_get_drvdata(%s) failed\n",
+			dev_name(dev));
+		return ERR_PTR(-EINVAL);
+	}
+
+	return mac_dev;
+}
+
+int dpa_mac_hw_index_get(struct platform_device *pdev)
+{
+	struct device *dpa_dev;
+	struct dpaa_eth_data *eth_data;
+
+	dpa_dev = &pdev->dev;
+	eth_data = dpa_dev->platform_data;
+
+	return eth_data->mac_hw_id;
+}
+
+int dpa_mac_fman_index_get(struct platform_device *pdev)
+{
+	struct device *dpa_dev;
+	struct dpaa_eth_data *eth_data;
+
+	dpa_dev = &pdev->dev;
+	eth_data = dpa_dev->platform_data;
+
+	return eth_data->fman_hw_id;
+}
+
+int dpa_set_mac_address(struct net_device *net_dev, void *addr)
+{
+	const struct dpa_priv_s	*priv;
+	int err;
+	struct mac_device *mac_dev;
+
+	priv = netdev_priv(net_dev);
+
+	err = eth_mac_addr(net_dev, addr);
+	if (err < 0) {
+		netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
+		return err;
+	}
+
+	mac_dev = priv->mac_dev;
+
+	err = mac_dev->change_addr(mac_dev->fman_mac,
+				   (enet_addr_t *)net_dev->dev_addr);
+	if (err < 0) {
+		netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
+			  err);
+		return err;
+	}
+
+	return 0;
+}
+
+void dpa_set_rx_mode(struct net_device *net_dev)
+{
+	int err;
+	const struct dpa_priv_s	*priv;
+
+	priv = netdev_priv(net_dev);
+
+	if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
+		priv->mac_dev->promisc = !priv->mac_dev->promisc;
+		err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
+						 priv->mac_dev->promisc);
+		if (err < 0)
+			netif_err(priv, drv, net_dev,
+				  "mac_dev->set_promisc() = %d\n",
+				  err);
+	}
+
+	err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+	if (err < 0)
+		netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+			  err);
+}
+#endif /* __rtems__ */
+
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+			    struct dpa_buffer_layout_s *layout)
+{
+	/* Rx */
+	layout[RX].priv_data_size = (u16)DPA_RX_PRIV_DATA_SIZE;
+	layout[RX].parse_results = true;
+	layout[RX].hash_results = true;
+	layout[RX].data_align = DPA_FD_DATA_ALIGNMENT;
+
+	/* Tx */
+	layout[TX].priv_data_size = DPA_TX_PRIV_DATA_SIZE;
+	layout[TX].parse_results = true;
+	layout[TX].hash_results = true;
+	layout[TX].data_align = DPA_FD_DATA_ALIGNMENT;
+}
+
+int dpa_bp_alloc(struct dpa_bp *dpa_bp)
+{
+	int err;
+	struct bman_pool_params bp_params;
+#ifndef __rtems__
+	struct platform_device *pdev;
+#endif /* __rtems__ */
+
+	if (dpa_bp->size == 0 || dpa_bp->config_count == 0) {
+		pr_err("Buffer pool is not properly initialized! Missing size or initial number of buffers\n");
+		return -EINVAL;
+	}
+
+	memset(&bp_params, 0, sizeof(struct bman_pool_params));
+
+	/* If the pool is already specified, we only create one per bpid */
+	if (dpa_bpid2pool_use(dpa_bp->bpid))
+		return 0;
+
+	if (dpa_bp->bpid == 0)
+		bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
+	else
+		bp_params.bpid = dpa_bp->bpid;
+
+	dpa_bp->pool = bman_new_pool(&bp_params);
+	if (!dpa_bp->pool) {
+		pr_err("bman_new_pool() failed\n");
+		return -ENODEV;
+	}
+
+	dpa_bp->bpid = (u8)bman_get_params(dpa_bp->pool)->bpid;
+
+#ifndef __rtems__
+	pdev = platform_device_register_simple("DPAA_bpool",
+					       dpa_bp->bpid, NULL, 0);
+	if (IS_ERR(pdev)) {
+		err = PTR_ERR(pdev);
+		goto pdev_register_failed;
+	}
+
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+	if (err)
+		goto pdev_mask_failed;
+
+	dpa_bp->dev = &pdev->dev;
+#endif /* __rtems__ */
+
+	if (dpa_bp->seed_cb) {
+		err = dpa_bp->seed_cb(dpa_bp);
+		if (err)
+			goto pool_seed_failed;
+	}
+
+	dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp);
+
+	return 0;
+
+pool_seed_failed:
+#ifndef __rtems__
+pdev_mask_failed:
+	platform_device_unregister(pdev);
+pdev_register_failed:
+#endif /* __rtems__ */
+	bman_free_pool(dpa_bp->pool);
+
+	return err;
+}
+
+void dpa_bp_drain(struct dpa_bp *bp)
+{
+	int ret;
+	u8 num = 8;
+
+	do {
+		struct bm_buffer bmb[8];
+		int i;
+
+		ret = bman_acquire(bp->pool, bmb, num, 0);
+		if (ret < 0) {
+			if (num == 8) {
+				/* we have less than 8 buffers left;
+				 * drain them one by one
+				 */
+				num = 1;
+				ret = 1;
+				continue;
+			} else {
+				/* Pool is fully drained */
+				break;
+			}
+		}
+
+		for (i = 0; i < num; i++) {
+			dma_addr_t addr = bm_buf_addr(&bmb[i]);
+
+#ifndef __rtems__
+			dma_unmap_single(bp->dev, addr, bp->size,
+					 DMA_BIDIRECTIONAL);
+#endif /* __rtems__ */
+
+			bp->free_buf_cb(phys_to_virt(addr));
+		}
+	} while (ret > 0);
+}
+
+static void _dpa_bp_free(struct dpa_bp *dpa_bp)
+{
+	struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
+
+	/* The mapping between bpid and dpa_bp is done very late in the
+	 * allocation procedure; if something failed before the mapping, the bp
+	 * was not configured, so the cleanup steps below are not needed.
+	 */
+	if (!bp)
+		return;
+
+	if (!atomic_dec_and_test(&bp->refs))
+		return;
+
+	if (bp->free_buf_cb)
+		dpa_bp_drain(bp);
+
+	dpa_bp_array[bp->bpid] = NULL;
+	bman_free_pool(bp->pool);
+
+#ifndef __rtems__
+	if (bp->dev)
+		platform_device_unregister(to_platform_device(bp->dev));
+#endif /* __rtems__ */
+}
+
+void dpa_bp_free(struct dpa_priv_s *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->bp_count; i++)
+		_dpa_bp_free(&priv->dpa_bp[i]);
+}
+
+struct dpa_bp *dpa_bpid2pool(int bpid)
+{
+	return dpa_bp_array[bpid];
+}
+
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp)
+{
+	dpa_bp_array[bpid] = dpa_bp;
+	atomic_set(&dpa_bp->refs, 1);
+}
+
+bool dpa_bpid2pool_use(int bpid)
+{
+	if (dpa_bpid2pool(bpid)) {
+		atomic_inc(&dpa_bp_array[bpid]->refs);
+		return true;
+	}
+
+	return false;
+}
+
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+		     void *accel_priv, select_queue_fallback_t fallback)
+{
+	return dpa_get_queue_mapping(skb);
+}
+#endif
+
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+			    const struct fqid_cell *fqids,
+			    struct list_head *list,
+			    enum dpa_fq_type fq_type)
+{
+	int i;
+	struct dpa_fq *dpa_fq;
+
+	dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids->count, GFP_KERNEL);
+	if (!dpa_fq)
+		return NULL;
+
+	for (i = 0; i < fqids->count; i++) {
+		dpa_fq[i].fq_type = fq_type;
+		dpa_fq[i].fqid = fqids->start ? fqids->start + i : 0;
+		list_add_tail(&dpa_fq[i].list, list);
+	}
+
+	for (i = 0; i < fqids->count; i++)
+		_dpa_assign_wq(dpa_fq + i);
+
+	return dpa_fq;
+}
+
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+		     struct fm_port_fqs *port_fqs,
+		     bool alloc_tx_conf_fqs,
+		     enum port_type ptype)
+{
+	const struct fqid_cell *fqids;
+	struct dpa_fq *dpa_fq;
+	int num_ranges;
+	int i;
+
+	if (ptype == TX && alloc_tx_conf_fqs) {
+		if (!dpa_fq_alloc(dev, tx_confirm_fqids, list,
+				  FQ_TYPE_TX_CONF_MQ))
+			goto fq_alloc_failed;
+	}
+
+	fqids = default_fqids[ptype];
+	num_ranges = 3;
+
+	for (i = 0; i < num_ranges; i++) {
+		switch (i) {
+		case 0:
+			/* The first queue is the error queue */
+			if (fqids[i].count != 1)
+				goto invalid_error_queue;
+
+			dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+					      ptype == RX ?
+						FQ_TYPE_RX_ERROR :
+						FQ_TYPE_TX_ERROR);
+			if (!dpa_fq)
+				goto fq_alloc_failed;
+
+			if (ptype == RX)
+				port_fqs->rx_errq = &dpa_fq[0];
+			else
+				port_fqs->tx_errq = &dpa_fq[0];
+			break;
+		case 1:
+			/* The second queue is the default queue */
+			if (fqids[i].count != 1)
+				goto invalid_default_queue;
+
+			dpa_fq = dpa_fq_alloc(dev, &fqids[i], list,
+					      ptype == RX ?
+						FQ_TYPE_RX_DEFAULT :
+						FQ_TYPE_TX_CONFIRM);
+			if (!dpa_fq)
+				goto fq_alloc_failed;
+
+			if (ptype == RX)
+				port_fqs->rx_defq = &dpa_fq[0];
+			else
+				port_fqs->tx_defq = &dpa_fq[0];
+			break;
+		default:
+			/* All subsequent queues are Tx */
+			if (!dpa_fq_alloc(dev, &fqids[i], list, FQ_TYPE_TX))
+				goto fq_alloc_failed;
+			break;
+		}
+	}
+
+	return 0;
+
+fq_alloc_failed:
+	dev_err(dev, "dpa_fq_alloc() failed\n");
+	return -ENOMEM;
+
+invalid_default_queue:
+invalid_error_queue:
+	dev_err(dev, "Too many default or error queues\n");
+	return -EINVAL;
+}
+
+static u32 rx_pool_channel;
+static DEFINE_SPINLOCK(rx_pool_channel_init);
+
+int dpa_get_channel(void)
+{
+	spin_lock(&rx_pool_channel_init);
+	if (!rx_pool_channel) {
+		u32 pool;
+		int ret = qman_alloc_pool(&pool);
+
+		if (!ret)
+			rx_pool_channel = pool;
+	}
+	spin_unlock(&rx_pool_channel_init);
+	if (!rx_pool_channel)
+		return -ENOMEM;
+	return rx_pool_channel;
+}
+
+void dpa_release_channel(void)
+{
+	qman_release_pool(rx_pool_channel);
+}
+
+int dpaa_eth_add_channel(void *__arg)
+{
+#ifndef __rtems__
+	const cpumask_t *cpus = qman_affine_cpus();
+#endif /* __rtems__ */
+	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u16)(unsigned long)__arg);
+	int cpu;
+	struct qman_portal *portal;
+
+#ifndef __rtems__
+	for_each_cpu(cpu, cpus) {
+#else /* __rtems__ */
+	for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
+#endif /* __rtems__ */
+
+		portal = (struct qman_portal *)qman_get_affine_portal(cpu);
+		qman_p_static_dequeue_add(portal, pool);
+	}
+	return 0;
+}
+
+/* Congestion group state change notification callback.
+ * Stops the device's egress queues while they are congested and
+ * wakes them upon exiting congested state.
+ * Also updates some CGR-related stats.
+ */
+static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
+			   int congested)
+{
+	struct dpa_priv_s *priv = (struct dpa_priv_s *)container_of(cgr,
+		struct dpa_priv_s, cgr_data.cgr);
+
+	if (congested) {
+		priv->cgr_data.congestion_start_jiffies = jiffies;
+#ifndef __rtems__
+		netif_tx_stop_all_queues(priv->net_dev);
+#else /* __rtems__ */
+		BSD_ASSERT(0);
+#endif /* __rtems__ */
+		priv->cgr_data.cgr_congested_count++;
+	} else {
+		priv->cgr_data.congested_jiffies +=
+			(jiffies - priv->cgr_data.congestion_start_jiffies);
+#ifndef __rtems__
+		netif_tx_wake_all_queues(priv->net_dev);
+#else /* __rtems__ */
+		BSD_ASSERT(0);
+#endif /* __rtems__ */
+	}
+}
+
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv)
+{
+	struct qm_mcc_initcgr initcgr;
+	u32 cs_th;
+	int err;
+
+	err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
+	if (err < 0) {
+		pr_err("Error %d allocating CGR ID\n", err);
+		goto out_error;
+	}
+	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
+
+	/* Enable Congestion State Change Notifications and CS taildrop */
+	initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+	initcgr.cgr.cscn_en = QM_CGR_EN;
+
+	/* Set different thresholds based on the MAC speed.
+	 * This may turn suboptimal if the MAC is reconfigured at a speed
+	 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
+	 * In such cases, we ought to reconfigure the threshold, too.
+	 */
+#ifndef __rtems__
+	if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+		cs_th = DPAA_CS_THRESHOLD_10G;
+	else
+		cs_th = DPAA_CS_THRESHOLD_1G;
+#else /* __rtems__ */
+	/* FIXME */
+	cs_th = DPAA_CS_THRESHOLD_1G;
+#endif /* __rtems__ */
+	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
+
+	initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
+	initcgr.cgr.cstd_en = QM_CGR_EN;
+
+	err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
+			      &initcgr);
+	if (err < 0) {
+		pr_err("Error %d creating CGR with ID %d\n", err,
+		       priv->cgr_data.cgr.cgrid);
+		qman_release_cgrid(priv->cgr_data.cgr.cgrid);
+		goto out_error;
+	}
+	pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
+		 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
+		 priv->cgr_data.cgr.chan);
+
+out_error:
+	return err;
+}
+
+static inline void dpa_setup_ingress(const struct dpa_priv_s *priv,
+				     struct dpa_fq *fq,
+				     const struct qman_fq *template)
+{
+	fq->fq_base = *template;
+	fq->net_dev = priv->net_dev;
+
+	fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+	fq->channel = priv->channel;
+}
+
+static inline void dpa_setup_egress(const struct dpa_priv_s *priv,
+				    struct dpa_fq *fq,
+				    struct fman_port *port,
+				    const struct qman_fq *template)
+{
+	fq->fq_base = *template;
+	fq->net_dev = priv->net_dev;
+
+	if (port) {
+		fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+		fq->channel = (u16)fman_port_get_qman_channel_id(port);
+	} else {
+		fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+	}
+}
+
+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
+		  struct fman_port *tx_port)
+{
+	struct dpa_fq *fq;
+#ifndef __rtems__
+	u16 portals[NR_CPUS];
+	int cpu, num_portals = 0;
+	const cpumask_t *affine_cpus = qman_affine_cpus();
+#endif /* __rtems__ */
+	int egress_cnt = 0, conf_cnt = 0;
+
+#ifndef __rtems__
+	for_each_cpu(cpu, affine_cpus)
+		portals[num_portals++] = qman_affine_channel(cpu);
+	if (num_portals == 0)
+		dev_err(priv->net_dev->dev.parent,
+			"No Qman software (affine) channels found");
+#else /* __rtems__ */
+	/* FIXME */
+#endif /* __rtems__ */
+
+	/* Initialize each FQ in the list */
+	list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+		switch (fq->fq_type) {
+		case FQ_TYPE_RX_DEFAULT:
+			DPA_ERR_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
+			break;
+		case FQ_TYPE_RX_ERROR:
+			DPA_ERR_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
+			break;
+		case FQ_TYPE_TX:
+			dpa_setup_egress(priv, fq, tx_port,
+					 &fq_cbs->egress_ern);
+			/* If we have more Tx queues than the number of cores,
+			 * just ignore the extra ones.
+			 */
+			if (egress_cnt < DPAA_ETH_TX_QUEUES)
+				priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+			break;
+		case FQ_TYPE_TX_CONFIRM:
+			DPA_ERR_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+			break;
+		case FQ_TYPE_TX_CONF_MQ:
+			DPA_ERR_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
+			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
+			break;
+		case FQ_TYPE_TX_ERROR:
+			DPA_ERR_ON(!priv->mac_dev);
+			dpa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
+			break;
+		default:
+#ifndef __rtems__
+			dev_warn(priv->net_dev->dev.parent,
+				 "Unknown FQ type detected!\n");
+#else /* __rtems__ */
+			BSD_ASSERT(0);
+#endif /* __rtems__ */
+			break;
+		}
+	}
+
+	/* The number of Tx queues may be smaller than the number of cores, if
+	 * the Tx queue range is specified in the device tree instead of being
+	 * dynamically allocated.
+	 * Make sure all CPUs receive a corresponding Tx queue.
+	 */
+	while (egress_cnt < DPAA_ETH_TX_QUEUES) {
+		list_for_each_entry(fq, &priv->dpa_fq_list, list) {
+			if (fq->fq_type != FQ_TYPE_TX)
+				continue;
+			priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+			if (egress_cnt == DPAA_ETH_TX_QUEUES)
+				break;
+		}
+	}
+}
+
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable)
+{
+	int err;
+	const struct dpa_priv_s	*priv;
+#ifndef __rtems__
+	struct device *dev;
+#endif /* __rtems__ */
+	struct qman_fq *fq;
+	struct qm_mcc_initfq initfq;
+	struct qman_fq *confq = NULL;
+	int queue_id;
+
+	priv = netdev_priv(dpa_fq->net_dev);
+#ifndef __rtems__
+	dev = dpa_fq->net_dev->dev.parent;
+#endif /* __rtems__ */
+
+	if (dpa_fq->fqid == 0)
+		dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+	dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
+
+	err = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base);
+	if (err) {
+#ifndef __rtems__
+		dev_err(dev, "qman_create_fq() failed\n");
+#else /* __rtems__ */
+		BSD_ASSERT(0);
+#endif /* __rtems__ */
+		return err;
+	}
+	fq = &dpa_fq->fq_base;
+
+	if (dpa_fq->init) {
+		memset(&initfq, 0, sizeof(initfq));
+
+		initfq.we_mask = QM_INITFQ_WE_FQCTRL;
+		/* Note: we may get to keep an empty FQ in cache */
+		initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+		/* Try to reduce the number of portal interrupts for
+		 * Tx Confirmation FQs.
+		 */
+		if (dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
+			initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+
+		/* FQ placement */
+		initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+
+		initfq.fqd.dest.channel	= dpa_fq->channel;
+		initfq.fqd.dest.wq = dpa_fq->wq;
+
+		/* Put all egress queues in a congestion group of their own.
+		 * Strictly speaking, the Tx confirmation queues are Rx FQs,
+		 * rather than Tx - but they nonetheless account for the
+		 * memory footprint on behalf of egress traffic. We therefore
+		 * place them in the netdev's CGR, along with the Tx FQs.
+		 */
+		if (dpa_fq->fq_type == FQ_TYPE_TX ||
+		    dpa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
+		    dpa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
+			initfq.we_mask |= QM_INITFQ_WE_CGID;
+			initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+			initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
+			/* Set a fixed overhead accounting, in an attempt to
+			 * reduce the impact of fixed-size skb shells and the
+			 * driver's needed headroom on system memory. This is
+			 * especially the case when the egress traffic is
+			 * composed of small datagrams.
+			 * Unfortunately, QMan's OAL value is capped to an
+			 * insufficient value, but even that is better than
+			 * no overhead accounting at all.
+			 */
+			initfq.we_mask |= QM_INITFQ_WE_OAC;
+			initfq.fqd.oac_init.oac = QM_OAC_CG;
+#ifndef __rtems__
+			initfq.fqd.oac_init.oal =
+				(signed char)(min(sizeof(struct sk_buff) +
+						  priv->tx_headroom,
+						  (size_t)FSL_QMAN_MAX_OAL));
+#else /* __rtems__ */
+			/* FIXME */
+			initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
+#endif /* __rtems__ */
+		}
+
+		if (td_enable) {
+			initfq.we_mask |= QM_INITFQ_WE_TDTHRESH;
+			qm_fqd_taildrop_set(&initfq.fqd.td,
+					    DPA_FQ_TD, 1);
+			initfq.fqd.fq_ctrl = QM_FQCTRL_TDE;
+		}
+
+		/* Configure the Tx confirmation queue, now that we know
+		 * which Tx queue it pairs with.
+		 */
+		if (dpa_fq->fq_type == FQ_TYPE_TX) {
+			queue_id = _dpa_tx_fq_to_id(priv, &dpa_fq->fq_base);
+			if (queue_id >= 0)
+				confq = priv->conf_fqs[queue_id];
+			if (confq) {
+				initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+			/* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
+			 *	     A2V=1 (contextA A2 field is valid)
+			 *	     A0V=1 (contextA A0 field is valid)
+			 *	     B0V=1 (contextB field is valid)
+			 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
+			 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
+			 */
+				initfq.fqd.context_a.hi = 0x1e000000;
+				initfq.fqd.context_a.lo = 0x80000000;
+			}
+		}
+
+		/* Put all *private* ingress queues in our "ingress CGR". */
+		if (priv->use_ingress_cgr &&
+		    (dpa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+		     dpa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
+			initfq.we_mask |= QM_INITFQ_WE_CGID;
+			initfq.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+			initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
+			/* Set a fixed overhead accounting, just like for the
+			 * egress CGR.
+			 */
+			initfq.we_mask |= QM_INITFQ_WE_OAC;
+			initfq.fqd.oac_init.oac = QM_OAC_CG;
+#ifndef __rtems__
+			initfq.fqd.oac_init.oal =
+				(signed char)(min(sizeof(struct sk_buff) +
+				priv->tx_headroom, (size_t)FSL_QMAN_MAX_OAL));
+#else /* __rtems__ */
+			/* FIXME */
+			initfq.fqd.oac_init.oal = FSL_QMAN_MAX_OAL;
+#endif /* __rtems__ */
+		}
+
+		/* Initialization common to all ingress queues */
+		if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
+			initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+			initfq.fqd.fq_ctrl |=
+				QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK;
+			initfq.fqd.context_a.stashing.exclusive =
+				QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
+				QM_STASHING_EXCL_ANNOTATION;
+			initfq.fqd.context_a.stashing.data_cl = 2;
+			initfq.fqd.context_a.stashing.annotation_cl = 1;
+			initfq.fqd.context_a.stashing.context_cl =
+				DIV_ROUND_UP(sizeof(struct qman_fq), 64);
+		}
+
+		err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
+		if (err < 0) {
+#ifndef __rtems__
+			dev_err(dev, "qman_init_fq(%u) = %d\n",
+				qman_fq_fqid(fq), err);
+#endif /* __rtems__ */
+			qman_destroy_fq(fq, 0);
+			return err;
+		}
+	}
+
+	dpa_fq->fqid = qman_fq_fqid(fq);
+
+	return 0;
+}
+
+#ifndef __rtems__
+static int _dpa_fq_free(struct device *dev, struct qman_fq *fq)
+{
+	int err, error;
+	struct dpa_fq *dpa_fq;
+	const struct dpa_priv_s	*priv;
+
+	err = 0;
+
+	dpa_fq = container_of(fq, struct dpa_fq, fq_base);
+	priv = netdev_priv(dpa_fq->net_dev);
+
+	if (dpa_fq->init) {
+		err = qman_retire_fq(fq, NULL);
+		if (err < 0 && netif_msg_drv(priv))
+			dev_err(dev, "qman_retire_fq(%u) = %d\n",
+				qman_fq_fqid(fq), err);
+
+		error = qman_oos_fq(fq);
+		if (error < 0 && netif_msg_drv(priv)) {
+			dev_err(dev, "qman_oos_fq(%u) = %d\n",
+				qman_fq_fqid(fq), error);
+			if (err >= 0)
+				err = error;
+		}
+	}
+
+	qman_destroy_fq(fq, 0);
+	list_del(&dpa_fq->list);
+
+	return err;
+}
+
+int dpa_fq_free(struct device *dev, struct list_head *list)
+{
+	int err, error;
+	struct dpa_fq *dpa_fq, *tmp;
+
+	err = 0;
+	list_for_each_entry_safe(dpa_fq, tmp, list, list) {
+		error = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq);
+		if (error < 0 && err >= 0)
+			err = error;
+	}
+
+	return err;
+}
+#endif /* __rtems__ */
+
+static void
+dpaa_eth_init_tx_port(struct fman_port *port, struct dpa_fq *errq,
+		      struct dpa_fq *defq,
+		      struct dpa_buffer_layout_s *buf_layout)
+{
+	struct fman_port_params params;
+	struct fman_buffer_prefix_content buf_prefix_content;
+	int err;
+
+	memset(&params, 0, sizeof(params));
+	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+	buf_prefix_content.pass_prs_result = buf_layout->parse_results;
+	buf_prefix_content.pass_hash_result = buf_layout->hash_results;
+	buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
+	buf_prefix_content.data_align = buf_layout->data_align;
+
+	params.specific_params.non_rx_params.err_fqid = errq->fqid;
+	params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
+
+	err = fman_port_config(port, &params);
+	if (err)
+		pr_info("fman_port_config failed\n");
+
+	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+	if (err)
+		pr_info("fman_port_cfg_buf_prefix_content failed\n");
+
+	err = fman_port_init(port);
+	if (err)
+		pr_err("fman_port_init failed\n");
+}
+
+static void
+dpaa_eth_init_rx_port(struct fman_port *port, struct dpa_bp *bp,
+		      size_t count, struct dpa_fq *errq, struct dpa_fq *defq,
+		      struct dpa_buffer_layout_s *buf_layout)
+{
+	struct fman_port_params params;
+	struct fman_buffer_prefix_content buf_prefix_content;
+	struct fman_port_rx_params *rx_p;
+	int i, err;
+
+	memset(&params, 0, sizeof(params));
+	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
+
+	buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+	buf_prefix_content.pass_prs_result = buf_layout->parse_results;
+	buf_prefix_content.pass_hash_result = buf_layout->hash_results;
+	buf_prefix_content.pass_time_stamp = buf_layout->time_stamp;
+	buf_prefix_content.data_align = buf_layout->data_align;
+
+	rx_p = &params.specific_params.rx_params;
+	rx_p->err_fqid = errq->fqid;
+	rx_p->dflt_fqid = defq->fqid;
+
+	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
+	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
+	for (i = 0; i < count; i++) {
+		rx_p->ext_buf_pools.ext_buf_pool[i].id = bp[i].bpid;
+		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bp[i].size;
+	}
+
+	err = fman_port_config(port, &params);
+	if (err)
+		pr_info("fman_port_config failed\n");
+
+	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
+	if (err)
+		pr_info("fman_port_cfg_buf_prefix_content failed\n");
+
+	err = fman_port_init(port);
+	if (err)
+		pr_err("fman_port_init failed\n");
+}
+
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+			 struct dpa_bp *bp, size_t count,
+			 struct fm_port_fqs *port_fqs,
+			 struct dpa_buffer_layout_s *buf_layout,
+			 struct device *dev)
+{
+	struct fman_port *rxport = mac_dev->port[RX];
+	struct fman_port *txport = mac_dev->port[TX];
+
+	dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+			      port_fqs->tx_defq, &buf_layout[TX]);
+	dpaa_eth_init_rx_port(rxport, bp, count, port_fqs->rx_errq,
+			      port_fqs->rx_defq, &buf_layout[RX]);
+}
+
+void dpa_release_sgt(struct qm_sg_entry *sgt)
+{
+	struct dpa_bp *dpa_bp;
+	struct bm_buffer bmb[DPA_BUFF_RELEASE_MAX];
+	u8 i = 0, j;
+
+	memset(bmb, 0, sizeof(bmb));
+
+	do {
+		dpa_bp = dpa_bpid2pool(sgt[i].bpid);
+		DPA_ERR_ON(!dpa_bp);
+
+		j = 0;
+		do {
+			DPA_ERR_ON(sgt[i].extension);
+
+			bmb[j].hi = sgt[i].addr_hi;
+			bmb[j].lo = be32_to_cpu(sgt[i].addr_lo);
+
+			j++; i++;
+		} while (j < ARRAY_SIZE(bmb) &&
+				!sgt[i - 1].final &&
+				sgt[i - 1].bpid == sgt[i].bpid);
+
+		while (bman_release(dpa_bp->pool, bmb, j, 0))
+			cpu_relax();
+	} while (!sgt[i - 1].final);
+}
+
+void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd)
+{
+	struct qm_sg_entry *sgt;
+	struct dpa_bp *dpa_bp;
+	struct bm_buffer bmb;
+	dma_addr_t addr;
+	void *vaddr;
+
+	memset(&bmb, 0, sizeof(bmb));
+	bm_buffer_set64(&bmb, fd->addr);
+
+	dpa_bp = dpa_bpid2pool(fd->bpid);
+	DPA_ERR_ON(!dpa_bp);
+
+	if (fd->format == qm_fd_sg) {
+		vaddr = phys_to_virt(fd->addr);
+		sgt = vaddr + dpa_fd_offset(fd);
+
+#ifndef __rtems__
+		dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size,
+				 DMA_BIDIRECTIONAL);
+#endif /* __rtems__ */
+
+		dpa_release_sgt(sgt);
+
+#ifndef __rtems__
+		addr = dma_map_single(dpa_bp->dev, vaddr, dpa_bp->size,
+				      DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dpa_bp->dev, addr)) {
+			dev_err(dpa_bp->dev, "DMA mapping failed");
+			return;
+		}
+#else /* __rtems__ */
+		addr = (dma_addr_t)vaddr;
+#endif /* __rtems__ */
+		bm_buffer_set64(&bmb, addr);
+	}
+
+	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+		cpu_relax();
+}
+
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+	       const struct qm_mr_entry *msg)
+{
+	switch (msg->ern.rc & QM_MR_RC_MASK) {
+	case QM_MR_RC_CGR_TAILDROP:
+		percpu_priv->ern_cnt.cg_tdrop++;
+		break;
+	case QM_MR_RC_WRED:
+		percpu_priv->ern_cnt.wred++;
+		break;
+	case QM_MR_RC_ERROR:
+		percpu_priv->ern_cnt.err_cond++;
+		break;
+	case QM_MR_RC_ORPWINDOW_EARLY:
+		percpu_priv->ern_cnt.early_window++;
+		break;
+	case QM_MR_RC_ORPWINDOW_LATE:
+		percpu_priv->ern_cnt.late_window++;
+		break;
+	case QM_MR_RC_FQ_TAILDROP:
+		percpu_priv->ern_cnt.fq_tdrop++;
+		break;
+	case QM_MR_RC_ORPWINDOW_RETIRED:
+		percpu_priv->ern_cnt.fq_retired++;
+		break;
+	case QM_MR_RC_ORP_ZERO:
+		percpu_priv->ern_cnt.orp_zero++;
+		break;
+	}
+}
+
+#ifndef __rtems__
+/* Turn on HW checksum computation for this outgoing frame.
+ * If the current protocol is not something we support in this regard
+ * (or if the stack has already computed the SW checksum), we do nothing.
+ *
+ * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
+ * otherwise.
+ *
+ * Note that this function may modify the fd->cmd field and the skb data buffer
+ * (the Parse Results area).
+ */
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+		       struct sk_buff *skb,
+		       struct qm_fd *fd,
+		       char *parse_results)
+{
+	struct fman_prs_result *parse_result;
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h = NULL;
+	u8 l4_proto;
+	u16 ethertype = ntohs(skb->protocol);
+	int retval = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	/* Note: L3 csum seems to be already computed in sw, but we can't choose
+	 * L4 alone from the FM configuration anyway.
+	 */
+
+	/* Fill in some fields of the Parse Results array, so the FMan
+	 * can find them as if they came from the FMan Parser.
+	 */
+	parse_result = (struct fman_prs_result *)parse_results;
+
+	/* If we're dealing with VLAN, get the real Ethernet type */
+	if (ethertype == ETH_P_8021Q) {
+		/* We can't always assume the MAC header is set correctly
+		 * by the stack, so reset to beginning of skb->data
+		 */
+		skb_reset_mac_header(skb);
+		ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+	}
+
+	/* Fill in the relevant L3 parse result fields
+	 * and read the L4 protocol type
+	 */
+	switch (ethertype) {
+	case ETH_P_IP:
+		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
+		iph = ip_hdr(skb);
+		DPA_ERR_ON(!iph);
+		l4_proto = iph->protocol;
+		break;
+	case ETH_P_IPV6:
+		parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
+		ipv6h = ipv6_hdr(skb);
+		DPA_ERR_ON(!ipv6h);
+		l4_proto = ipv6h->nexthdr;
+		break;
+	default:
+		/* We shouldn't even be here */
+		if (net_ratelimit())
+			netif_alert(priv, tx_err, priv->net_dev,
+				    "Can't compute HW csum for L3 proto 0x%x\n",
+				    ntohs(skb->protocol));
+		retval = -EIO;
+		goto return_error;
+	}
+
+	/* Fill in the relevant L4 parse result fields */
+	switch (l4_proto) {
+	case IPPROTO_UDP:
+		parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
+		break;
+	case IPPROTO_TCP:
+		parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
+		break;
+	default:
+		/* This could just as well be a BUG() */
+		if (net_ratelimit())
+			netif_alert(priv, tx_err, priv->net_dev,
+				    "Can't compute HW csum for L4 proto 0x%x\n",
+				    l4_proto);
+		retval = -EIO;
+		goto return_error;
+	}
+
+	/* At index 0 is IPOffset_1 as defined in the Parse Results */
+	parse_result->ip_off[0] = (u8)skb_network_offset(skb);
+	parse_result->l4_off = (u8)skb_transport_offset(skb);
+
+	/* Enable L3 (and L4, if TCP or UDP) HW checksum. */
+	fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+
+	/* On P1023 and similar platforms fd->cmd interpretation could
+	 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
+	 * is not set so we do not need to check; in the future, if/when
+	 * using context_a we need to check this bit
+	 */
+
+return_error:
+	return retval;
+}
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
new file mode 100644
index 0000000..954de39
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h
@@ -0,0 +1,113 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_ETH_COMMON_H
+#define __DPAA_ETH_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <soc/fsl/bman.h>
+#include <linux/of_platform.h>
+
+#include "dpaa_eth.h"
+
+#define DPA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+#define DPA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
+
+/* used in napi related functions */
+extern u16 qman_portal_max;
+
+/* from dpa_ethtool.c */
+extern const struct ethtool_ops dpa_ethtool_ops;
+
+int dpa_netdev_init(struct net_device *net_dev,
+		    const u8 *mac_addr,
+		    u16 tx_timeout);
+int dpa_start(struct net_device *net_dev);
+int dpa_stop(struct net_device *net_dev);
+void dpa_timeout(struct net_device *net_dev);
+struct rtnl_link_stats64 *dpa_get_stats64(struct net_device *net_dev,
+					  struct rtnl_link_stats64 *stats);
+int dpa_change_mtu(struct net_device *net_dev, int new_mtu);
+int dpa_ndo_init(struct net_device *net_dev);
+#ifndef __rtems__
+int dpa_set_features(struct net_device *dev, netdev_features_t features);
+netdev_features_t dpa_fix_features(struct net_device *dev,
+				   netdev_features_t features);
+#endif /* __rtems__ */
+int dpa_remove(struct platform_device *pdev);
+struct mac_device *dpa_mac_dev_get(struct platform_device *pdev);
+int dpa_mac_hw_index_get(struct platform_device *pdev);
+int dpa_mac_fman_index_get(struct platform_device *pdev);
+int dpa_set_mac_address(struct net_device *net_dev, void *addr);
+void dpa_set_rx_mode(struct net_device *net_dev);
+void dpa_set_buffers_layout(struct mac_device *mac_dev,
+			    struct dpa_buffer_layout_s *layout);
+int dpa_bp_alloc(struct dpa_bp *dpa_bp);
+void dpa_bp_free(struct dpa_priv_s *priv);
+struct dpa_bp *dpa_bpid2pool(int bpid);
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
+bool dpa_bpid2pool_use(int bpid);
+void dpa_bp_drain(struct dpa_bp *bp);
+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+		     void *accel_priv, select_queue_fallback_t fallback);
+#endif
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+			    const struct fqid_cell *fqids,
+			    struct list_head *list,
+			    enum dpa_fq_type fq_type);
+int dpa_fq_probe_mac(struct device *dev, struct list_head *list,
+		     struct fm_port_fqs *port_fqs,
+		     bool tx_conf_fqs_per_core,
+		     enum port_type ptype);
+int dpa_get_channel(void);
+void dpa_release_channel(void);
+int dpaa_eth_add_channel(void *__arg);
+int dpaa_eth_cgr_init(struct dpa_priv_s *priv);
+void dpa_fq_setup(struct dpa_priv_s *priv, const struct dpa_fq_cbs_t *fq_cbs,
+		  struct fman_port *tx_port);
+int dpa_fq_init(struct dpa_fq *dpa_fq, bool td_enable);
+int dpa_fq_free(struct device *dev, struct list_head *list);
+void dpaa_eth_init_ports(struct mac_device *mac_dev,
+			 struct dpa_bp *bp, size_t count,
+			 struct fm_port_fqs *port_fqs,
+			 struct dpa_buffer_layout_s *buf_layout,
+			 struct device *dev);
+void dpa_release_sgt(struct qm_sg_entry *sgt);
+void dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd);
+void count_ern(struct dpa_percpu_priv_s *percpu_priv,
+	       const struct qm_mr_entry *msg);
+#ifndef __rtems__
+int dpa_enable_tx_csum(struct dpa_priv_s *priv,
+		       struct sk_buff *skb,
+		       struct qm_fd *fd,
+		       char *parse_results);
+#endif /* __rtems__ */
+#endif	/* __DPAA_ETH_COMMON_H */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
new file mode 100644
index 0000000..2d0903e
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c
@@ -0,0 +1,710 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/highmem.h>
+#include <soc/fsl/bman.h>
+
+#include "dpaa_eth.h"
+#include "dpaa_eth_common.h"
+
+/* Convenience macros for storing/retrieving the skb back-pointers.
+ *
+ * NB: @off is an offset from a (struct sk_buff **) pointer!
+ */
+#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
+	{ \
+		skbh = (struct sk_buff **)addr; \
+		*(skbh + (off)) = skb; \
+	}
+#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
+	{ \
+		skbh = (struct sk_buff **)addr; \
+		skb = *(skbh + (off)); \
+	}
+
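A minimal standalone sketch of the back-pointer convention these macros implement:
the skb pointer is stored one (struct sk_buff *) slot before the buffer handed to
hardware (offset -1) and read back later. The buffer layout below is assumed for
illustration only:

	#include <stdio.h>

	struct sk_buff { int dummy; };	/* stand-in for the kernel type */

	int main(void)
	{
		struct sk_buff *slots[4];	/* slots[0] holds the back-pointer */
		void *buf = &slots[1];		/* "data buffer" starts here */
		struct sk_buff skb, *read_back, **skbh;

		/* Equivalent of DPA_WRITE_SKB_PTR(skb, skbh, buf, -1) */
		skbh = (struct sk_buff **)buf;
		*(skbh + (-1)) = &skb;

		/* Equivalent of DPA_READ_SKB_PTR(read_back, skbh, buf, -1) */
		skbh = (struct sk_buff **)buf;
		read_back = *(skbh + (-1));

		printf("%s\n", read_back == &skb ? "match" : "mismatch");
		return 0;
	}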
+/* DMA map and add a page frag back into the bpool.
+ * The @vaddr fragment must have been allocated with netdev_alloc_frag(),
+ * specifically sized to fit into @dpa_bp.
+ */
+static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
+				int *count_ptr)
+{
+	struct bm_buffer bmb;
+	dma_addr_t addr;
+
+	addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
+			      DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+		dev_err(dpa_bp->dev, "DMA mapping failed");
+		return;
+	}
+
+	bm_buffer_set64(&bmb, addr);
+
+	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+		cpu_relax();
+
+	(*count_ptr)++;
+}
+
+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
+{
+	struct bm_buffer bmb[8];
+	void *new_buf;
+	dma_addr_t addr;
+	u8 i;
+	struct device *dev = dpa_bp->dev;
+	struct sk_buff *skb, **skbh;
+
+	memset(bmb, 0, sizeof(bmb));
+
+	for (i = 0; i < 8; i++) {
+		/* We'll prepend the skb back-pointer; can't use the DPA
+		 * priv space, because FMan will overwrite it (from offset 0)
+		 * if it ends up being the second, third, etc. fragment
+		 * in an S/G frame.
+		 *
+		 * We only need enough space to store a pointer, but allocate
+		 * an entire cacheline for performance reasons.
+		 */
+		new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
+		if (unlikely(!new_buf))
+			goto netdev_alloc_failed;
+		new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);
+
+		skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+		if (unlikely(!skb)) {
+			put_page(virt_to_head_page(new_buf));
+			goto build_skb_failed;
+		}
+		DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
+
+		addr = dma_map_single(dev, new_buf,
+				      dpa_bp->size, DMA_BIDIRECTIONAL);
+		if (unlikely(dma_mapping_error(dev, addr)))
+			goto dma_map_failed;
+
+		bm_buffer_set64(&bmb[i], addr);
+	}
+
+release_bufs:
+	/* Release the buffers. In case bman is busy, keep trying
+	 * until successful. bman_release() is guaranteed to succeed
+	 * in a reasonable amount of time.
+	 */
+	while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
+		cpu_relax();
+	return i;
+
+dma_map_failed:
+	kfree_skb(skb);
+
+build_skb_failed:
+netdev_alloc_failed:
+	net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
+	WARN_ONCE(1, "Memory allocation failure on Rx\n");
+
+	bm_buffer_set64(&bmb[i], 0);
+	/* Avoid releasing a completely null buffer; bman_release() requires
+	 * at least one buffer.
+	 */
+	if (likely(i))
+		goto release_bufs;
+
+	return 0;
+}
+
+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
+{
+	int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+	*count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
+}
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
+{
+	int i;
+
+	/* Give each CPU an allotment of "config_count" buffers */
+	for_each_possible_cpu(i) {
+		int j;
+
+		/* Although we access another CPU's counters here,
+		 * we do it at boot time, so it is safe.
+		 */
+		for (j = 0; j < dpa_bp->config_count; j += 8)
+			dpa_bp_add_8_bufs(dpa_bp, i);
+	}
+	return 0;
+}
+
+/* Add buffers (pages) for Rx processing whenever the bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
+{
+	int count = *countptr;
+	int new_bufs;
+
+	if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+		do {
+			new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
+			if (unlikely(!new_bufs)) {
+				/* Avoid looping forever if we've temporarily
+				 * run out of memory. We'll try again at the
+				 * next NAPI cycle.
+				 */
+				break;
+			}
+			count += new_bufs;
+		} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+		*countptr = count;
+		if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+ * against cases where not all of the relevant fd fields were filled in.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
+				   const struct qm_fd *fd)
+{
+	const struct qm_sg_entry *sgt;
+	int i;
+	struct dpa_bp *dpa_bp = priv->dpa_bp;
+	dma_addr_t addr = qm_fd_addr(fd);
+	struct sk_buff **skbh;
+	struct sk_buff *skb = NULL;
+	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+	int nr_frags;
+
+	/* retrieve skb back pointer */
+	DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
+
+	if (unlikely(fd->format == qm_fd_sg)) {
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
+				 sizeof(struct qm_sg_entry) * (1 + nr_frags),
+				 dma_dir);
+
+		/* The sgt buffer has been allocated with netdev_alloc_frag(),
+		 * so it is in lowmem.
+		 */
+		sgt = phys_to_virt(addr + dpa_fd_offset(fd));
+
+		/* sgt[0] is in lowmem and was mapped with dma_map_single() */
+		dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
+				 sgt[0].length, dma_dir);
+
+		/* remaining pages were mapped with dma_map_page() */
+		for (i = 1; i < nr_frags; i++) {
+			DPA_ERR_ON(sgt[i].extension);
+
+			dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
+				       sgt[i].length, dma_dir);
+		}
+
+		/* Free the page frag that we allocated on Tx */
+		put_page(virt_to_head_page(sgt));
+	} else {
+		dma_unmap_single(dpa_bp->dev, addr,
+				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+	}
+
+	return skb;
+}
+
+/* Build a linear skb around the received buffer.
+ * We are guaranteed there is enough room at the end of the data buffer to
+ * accommodate the shared info area of the skb.
+ */
+static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv,
+					const struct qm_fd *fd)
+{
+	struct sk_buff *skb = NULL, **skbh;
+	ssize_t fd_off = dpa_fd_offset(fd);
+	dma_addr_t addr = qm_fd_addr(fd);
+	void *vaddr;
+
+	vaddr = phys_to_virt(addr);
+	DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+	/* Retrieve the skb and adjust data and tail pointers, to make sure
+	 * forwarded skbs will have enough space on Tx if extra headers
+	 * are added.
+	 */
+	DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);
+
+	DPA_ERR_ON(fd_off != priv->rx_headroom);
+	skb_reserve(skb, fd_off);
+	skb_put(skb, dpa_fd_length(fd));
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return skb;
+}
+
+/* Build an skb with the data of the first S/G entry in the linear portion and
+ * the rest of the frame as skb fragments.
+ *
+ * The page fragment holding the S/G Table is recycled here.
+ */
+static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv,
+				    const struct qm_fd *fd,
+				    int *count_ptr)
+{
+	const struct qm_sg_entry *sgt;
+	dma_addr_t addr = qm_fd_addr(fd);
+	ssize_t fd_off = dpa_fd_offset(fd);
+	dma_addr_t sg_addr;
+	void *vaddr, *sg_vaddr;
+	struct dpa_bp *dpa_bp;
+	struct page *page, *head_page;
+	int frag_offset, frag_len;
+	int page_offset;
+	int i;
+	struct sk_buff *skb = NULL, *skb_tmp, **skbh;
+
+	vaddr = phys_to_virt(addr);
+	DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+	dpa_bp = priv->dpa_bp;
+	/* Iterate through the SGT entries and add data buffers to the skb */
+	sgt = vaddr + fd_off;
+	for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
+		/* Extension bit is not supported */
+		DPA_ERR_ON(sgt[i].extension);
+
+		/* We use a single global Rx pool */
+		DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));
+
+		sg_addr = qm_sg_addr(&sgt[i]);
+		sg_vaddr = phys_to_virt(sg_addr);
+		DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+				       SMP_CACHE_BYTES));
+
+		dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
+				 DMA_BIDIRECTIONAL);
+		if (i == 0) {
+			DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
+			DPA_ERR_ON(skb->head != sg_vaddr);
+
+			skb->ip_summed = CHECKSUM_NONE;
+
+			/* Make sure forwarded skbs will have enough space
+			 * on Tx, if extra headers are added.
+			 */
+			DPA_ERR_ON(fd_off != priv->rx_headroom);
+			skb_reserve(skb, fd_off);
+			skb_put(skb, sgt[i].length);
+		} else {
+			/* Not the first S/G entry; all data from the buffer
+			 * will be added as an skb fragment; the fragment
+			 * index is offset by one since the first S/G entry
+			 * was incorporated into the linear part of the skb.
+			 *
+			 * Caution: 'page' may be a tail page.
+			 */
+			DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
+			page = virt_to_page(sg_vaddr);
+			head_page = virt_to_head_page(sg_vaddr);
+
+			/* Free (only) the skbuff shell because its data buffer
+			 * is already a frag in the main skb.
+			 */
+			get_page(head_page);
+			dev_kfree_skb(skb_tmp);
+
+			/* Compute offset in (possibly tail) page */
+			page_offset = ((unsigned long)sg_vaddr &
+					(PAGE_SIZE - 1)) +
+				(page_address(page) - page_address(head_page));
+			/* page_offset only refers to the beginning of sgt[i];
+			 * but the buffer itself may have an internal offset.
+			 */
+			frag_offset = sgt[i].offset + page_offset;
+			frag_len = sgt[i].length;
+			/* skb_add_rx_frag() does no checking on the page; if
+			 * we pass it a tail page, we'll end up with
+			 * bad page accounting and eventually with segfaults.
+			 */
+			skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
+					frag_len, dpa_bp->size);
+		}
+		/* Update the pool count for the current {cpu x bpool} */
+		(*count_ptr)--;
+
+		if (sgt[i].final)
+			break;
+	}
+	WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
+
+	/* recycle the SGT fragment */
+	DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
+	dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
+	return skb;
+}
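+
+/* Worked example for the fragment math above (numbers are illustrative):
+ * with PAGE_SIZE = 4096 and sg_vaddr lying 0x1100 bytes past
+ * page_address(head_page) of a two-page compound buffer, virt_to_page()
+ * returns the tail page, so page_offset = (0x1100 & 0xfff) + 0x1000 = 0x1100,
+ * i.e. the offset relative to the head page, and the fragment is attached to
+ * head_page at frag_offset = sgt[i].offset + 0x1100.
+ */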
+
+void _dpa_rx(struct net_device *net_dev,
+	     struct qman_portal *portal,
+	     const struct dpa_priv_s *priv,
+	     struct dpa_percpu_priv_s *percpu_priv,
+	     const struct qm_fd *fd,
+	     u32 fqid,
+	     int *count_ptr)
+{
+	struct dpa_bp *dpa_bp;
+	struct sk_buff *skb;
+	dma_addr_t addr = qm_fd_addr(fd);
+	u32 fd_status = fd->status;
+	unsigned int skb_len;
+	struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;
+
+	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
+		if (net_ratelimit())
+			netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
+				   fd_status & FM_FD_STAT_RX_ERRORS);
+
+		percpu_stats->rx_errors++;
+		goto _release_frame;
+	}
+
+	dpa_bp = priv->dpa_bp;
+	DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
+
+	/* prefetch the first 64 bytes of the frame or the SGT start */
+	dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
+	prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));
+
+	/* The only FD types that we may receive are contig and S/G */
+	DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));
+
+	if (likely(fd->format == qm_fd_contig))
+		skb = contig_fd_to_skb(priv, fd);
+	else
+		skb = sg_fd_to_skb(priv, fd, count_ptr);
+
+	/* Account for either the contig buffer or the SGT buffer (depending on
+	 * which case we were in) having been removed from the pool.
+	 */
+	(*count_ptr)--;
+	skb->protocol = eth_type_trans(skb, net_dev);
+
+	/* IP Reassembled frames are allowed to be larger than MTU */
+	if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
+		     !(fd_status & FM_FD_IPR))) {
+		percpu_stats->rx_dropped++;
+		goto drop_bad_frame;
+	}
+
+	skb_len = skb->len;
+
+	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+		goto packet_dropped;
+
+	percpu_stats->rx_packets++;
+	percpu_stats->rx_bytes += skb_len;
+
+packet_dropped:
+	return;
+
+drop_bad_frame:
+	dev_kfree_skb(skb);
+	return;
+
+_release_frame:
+	dpa_fd_release(net_dev, fd);
+}
+
+static int skb_to_contig_fd(struct dpa_priv_s *priv,
+			    struct sk_buff *skb, struct qm_fd *fd,
+			    int *count_ptr, int *offset)
+{
+	struct sk_buff **skbh;
+	dma_addr_t addr;
+	struct dpa_bp *dpa_bp = priv->dpa_bp;
+	struct net_device *net_dev = priv->net_dev;
+	int err;
+	enum dma_data_direction dma_dir;
+	unsigned char *buffer_start;
+
+	{
+		/* We are guaranteed to have at least tx_headroom bytes
+		 * available, so just use that for offset.
+		 */
+		fd->bpid = 0xff;
+		buffer_start = skb->data - priv->tx_headroom;
+		fd->offset = priv->tx_headroom;
+		dma_dir = DMA_TO_DEVICE;
+
+		DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+	}
+
+	/* Enable L3/L4 hardware checksum computation.
+	 *
+	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+	 * need to write into the skb.
+	 */
+	err = dpa_enable_tx_csum(priv, skb, fd,
+				 ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
+	if (unlikely(err < 0)) {
+		if (net_ratelimit())
+			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+				  err);
+		return err;
+	}
+
+	/* Fill in the rest of the FD fields */
+	fd->format = qm_fd_contig;
+	fd->length20 = skb->len;
+	fd->cmd |= FM_FD_CMD_FCO;
+
+	/* Map the entire buffer size that may be seen by FMan, but no more */
+	addr = dma_map_single(dpa_bp->dev, skbh,
+			      skb_tail_pointer(skb) - buffer_start, dma_dir);
+	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+		if (net_ratelimit())
+			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
+		return -EINVAL;
+	}
+	fd->addr_hi = (u8)upper_32_bits(addr);
+	fd->addr_lo = lower_32_bits(addr);
+
+	return 0;
+}
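+
+/* Sketch of the contiguous Tx buffer produced above (offsets relative to
+ * buffer_start, which fd->addr points to after DMA mapping):
+ *
+ *	0                             skb backpointer (skbh)
+ *	DPA_TX_PRIV_DATA_SIZE         parse results area used for HW checksum
+ *	priv->tx_headroom             frame data (skb->data); fd->offset
+ *	priv->tx_headroom + skb->len  end of frame; fd->length20 = skb->len
+ */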
+
+static int skb_to_sg_fd(struct dpa_priv_s *priv,
+			struct sk_buff *skb, struct qm_fd *fd)
+{
+	struct dpa_bp *dpa_bp = priv->dpa_bp;
+	dma_addr_t addr;
+	struct sk_buff **skbh;
+	struct net_device *net_dev = priv->net_dev;
+	int err;
+
+	struct qm_sg_entry *sgt;
+	void *sgt_buf;
+	void *buffer_start;
+	skb_frag_t *frag;
+	int i, j;
+	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+	const int nr_frags = skb_shinfo(skb)->nr_frags;
+
+	fd->format = qm_fd_sg;
+
+	/* get a page frag to store the SGTable */
+	sgt_buf = netdev_alloc_frag(priv->tx_headroom +
+		sizeof(struct qm_sg_entry) * (1 + nr_frags));
+	if (unlikely(!sgt_buf)) {
+		netdev_err(net_dev, "netdev_alloc_frag() failed\n");
+		return -ENOMEM;
+	}
+
+	/* Enable L3/L4 hardware checksum computation.
+	 *
+	 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
+	 * need to write into the skb.
+	 */
+	err = dpa_enable_tx_csum(priv, skb, fd,
+				 sgt_buf + DPA_TX_PRIV_DATA_SIZE);
+	if (unlikely(err < 0)) {
+		if (net_ratelimit())
+			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
+				  err);
+		goto csum_failed;
+	}
+
+	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+	sgt[0].bpid = 0xff;
+	sgt[0].offset = 0;
+	sgt[0].length = cpu_to_be32(skb_headlen(skb));
+	sgt[0].extension = 0;
+	sgt[0].final = 0;
+	addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
+	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+		dev_err(dpa_bp->dev, "DMA mapping failed");
+		err = -EINVAL;
+		goto sg0_map_failed;
+	}
+	sgt[0].addr_hi = (u8)upper_32_bits(addr);
+	sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));
+
+	/* populate the rest of SGT entries */
+	for (i = 1; i <= nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i - 1];
+		sgt[i].bpid = 0xff;
+		sgt[i].offset = 0;
+		sgt[i].length = cpu_to_be32(frag->size);
+		sgt[i].extension = 0;
+		sgt[i].final = 0;
+
+		DPA_ERR_ON(!skb_frag_page(frag));
+		addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
+					dma_dir);
+		if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+			dev_err(dpa_bp->dev, "DMA mapping failed");
+			err = -EINVAL;
+			goto sg_map_failed;
+		}
+
+		/* keep the offset in the address */
+		sgt[i].addr_hi = (u8)upper_32_bits(addr);
+		sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
+	}
+	sgt[i - 1].final = 1;
+
+	fd->length20 = skb->len;
+	fd->offset = priv->tx_headroom;
+
+	/* DMA map the SGT page */
+	buffer_start = (void *)sgt - priv->tx_headroom;
+	DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+
+	addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
+			      sizeof(struct qm_sg_entry) * (1 + nr_frags),
+			      dma_dir);
+	if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
+		dev_err(dpa_bp->dev, "DMA mapping failed");
+		err = -EINVAL;
+		goto sgt_map_failed;
+	}
+
+	fd->bpid = 0xff;
+	fd->cmd |= FM_FD_CMD_FCO;
+	fd->addr_hi = (u8)upper_32_bits(addr);
+	fd->addr_lo = lower_32_bits(addr);
+
+	return 0;
+
+sgt_map_failed:
+sg_map_failed:
+	for (j = 0; j < i; j++)
+		dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
+			       cpu_to_be32(sgt[j].length), dma_dir);
+sg0_map_failed:
+csum_failed:
+	put_page(virt_to_head_page(sgt_buf));
+
+	return err;
+}
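+
+/* Sketch of the sgt_buf page fragment built above (offsets relative to
+ * sgt_buf, which fd->addr points to after DMA mapping):
+ *
+ *	0                         skb backpointer (skbh)
+ *	DPA_TX_PRIV_DATA_SIZE     parse results area used for HW checksum
+ *	priv->tx_headroom         SGT with 1 + nr_frags entries; entry 0 covers
+ *	                          the linear part of the skb and the last entry
+ *	                          carries the final bit
+ */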
+
+int dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+	struct dpa_priv_s *priv;
+	struct qm_fd fd;
+	struct dpa_percpu_priv_s *percpu_priv;
+	struct rtnl_link_stats64 *percpu_stats;
+	int err = 0;
+	const int queue_mapping = dpa_get_queue_mapping(skb);
+	bool nonlinear = skb_is_nonlinear(skb);
+	int *countptr, offset = 0;
+
+	priv = netdev_priv(net_dev);
+	/* Non-migratable context, safe to use raw_cpu_ptr */
+	percpu_priv = raw_cpu_ptr(priv->percpu_priv);
+	percpu_stats = &percpu_priv->stats;
+	countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);
+
+	clear_fd(&fd);
+
+	if (!nonlinear) {
+		/* We're going to store the skb backpointer at the beginning
+		 * of the data buffer, so we need a privately owned skb.
+		 *
+		 * We've made sure the skb is not shared in dev->priv_flags;
+		 * we still need to verify that the skb head is not cloned.
+		 */
+		if (skb_cow_head(skb, priv->tx_headroom))
+			goto enomem;
+
+		BUG_ON(skb_is_nonlinear(skb));
+	}
+
+	/* MAX_SKB_FRAGS is equal to or larger than our DPA_SGT_MAX_ENTRIES;
+	 * make sure we don't feed FMan with more fragments than it supports.
+	 * Note that we use the first SGT entry to store the linear part of
+	 * the skb, so we're one extra frag short.
+	 */
+	if (nonlinear &&
+	    likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
+		/* Just create a S/G fd based on the skb */
+		err = skb_to_sg_fd(priv, skb, &fd);
+		percpu_priv->tx_frag_skbuffs++;
+	} else {
+		/* If the egress skb contains more fragments than we support
+		 * we have no choice but to linearize it ourselves.
+		 */
+		if (unlikely(nonlinear) && __skb_linearize(skb))
+			goto enomem;
+
+		/* Finally, create a contig FD from this skb */
+		err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
+	}
+	if (unlikely(err < 0))
+		goto skb_to_fd_failed;
+
+	if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+		return NETDEV_TX_OK;
+
+	/* dpa_xmit failed */
+	if (fd.bpid != 0xff) {
+		(*countptr)--;
+		dpa_fd_release(net_dev, &fd);
+		percpu_stats->tx_errors++;
+		return NETDEV_TX_OK;
+	}
+	_dpa_cleanup_tx_fd(priv, &fd);
+skb_to_fd_failed:
+enomem:
+	percpu_stats->tx_errors++;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
new file mode 100644
index 0000000..3edc70c
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -0,0 +1,171 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/io.h>
+#include <linux/of_net.h>
+#include "dpaa_eth.h"
+#include "mac.h"
+
+static ssize_t dpaa_eth_show_addr(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+	struct mac_device *mac_dev = priv->mac_dev;
+
+	if (mac_dev)
+		return sprintf(buf, "%llx",
+				(unsigned long long)mac_dev->res->start);
+	else
+		return sprintf(buf, "none");
+}
+
+static ssize_t dpaa_eth_show_fqids(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+	ssize_t bytes = 0;
+	int i = 0;
+	char *str;
+	struct dpa_fq *fq;
+	struct dpa_fq *tmp;
+	struct dpa_fq *prev = NULL;
+	u32 first_fqid = 0;
+	u32 last_fqid = 0;
+	char *prevstr = NULL;
+
+	list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) {
+		switch (fq->fq_type) {
+		case FQ_TYPE_RX_DEFAULT:
+			str = "Rx default";
+			break;
+		case FQ_TYPE_RX_ERROR:
+			str = "Rx error";
+			break;
+		case FQ_TYPE_TX_CONFIRM:
+			str = "Tx default confirmation";
+			break;
+		case FQ_TYPE_TX_CONF_MQ:
+			str = "Tx confirmation (mq)";
+			break;
+		case FQ_TYPE_TX_ERROR:
+			str = "Tx error";
+			break;
+		case FQ_TYPE_TX:
+			str = "Tx";
+			break;
+		default:
+			str = "Unknown";
+		}
+
+		if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
+			     str != prevstr)) {
+			if (last_fqid == first_fqid)
+				bytes += sprintf(buf + bytes,
+					"%s: %d\n", prevstr, prev->fqid);
+			else
+				bytes += sprintf(buf + bytes,
+					"%s: %d - %d\n", prevstr,
+					first_fqid, last_fqid);
+		}
+
+		if (prev && abs(fq->fqid - prev->fqid) == 1 &&
+		    str == prevstr) {
+			last_fqid = fq->fqid;
+		} else {
+			first_fqid = fq->fqid;
+			last_fqid = fq->fqid;
+		}
+
+		prev = fq;
+		prevstr = str;
+		i++;
+	}
+
+	if (prev) {
+		if (last_fqid == first_fqid)
+			bytes += sprintf(buf + bytes, "%s: %d\n", prevstr,
+					prev->fqid);
+		else
+			bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr,
+					first_fqid, last_fqid);
+	}
+
+	return bytes;
+}
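+
+/* Illustrative output (the FQID values are made up): runs of consecutive
+ * FQIDs of the same type are collapsed into ranges, e.g.
+ *
+ *	Rx error: 258
+ *	Rx default: 259
+ *	Tx error: 260
+ *	Tx default confirmation: 261
+ *	Tx confirmation (mq): 262 - 269
+ *	Tx: 270 - 277
+ */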
+
+static ssize_t dpaa_eth_show_bpids(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	ssize_t bytes = 0;
+	struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev));
+	struct dpa_bp *dpa_bp = priv->dpa_bp;
+	int i = 0;
+
+	for (i = 0; i < priv->bp_count; i++)
+		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+				dpa_bp[i].bpid);
+
+	return bytes;
+}
+
+static struct device_attribute dpaa_eth_attrs[] = {
+	__ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL),
+	__ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL),
+	__ATTR(bpids, S_IRUGO, dpaa_eth_show_bpids, NULL),
+};
+
+void dpaa_eth_sysfs_init(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+		if (device_create_file(dev, &dpaa_eth_attrs[i])) {
+			dev_err(dev, "Error creating sysfs file\n");
+			while (i > 0)
+				device_remove_file(dev, &dpaa_eth_attrs[--i]);
+			return;
+		}
+}
+
+void dpaa_eth_sysfs_remove(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpaa_eth_attrs); i++)
+		device_remove_file(dev, &dpaa_eth_attrs[i]);
+}
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
new file mode 100644
index 0000000..46eca27
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -0,0 +1,143 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __rtems__
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM	dpaa_eth
+
+#if !defined(_DPAA_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa_eth.h"
+#include <linux/tracepoint.h>
+
+#define fd_format_name(format)	{ qm_fd_##format, #format }
+#define fd_format_list	\
+	fd_format_name(contig),	\
+	fd_format_name(sg)
+
+/* This is used to declare a class of events.
+ * Individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor and the FQ on which it was
+ * transmitted/received.
+ */
+DECLARE_EVENT_CLASS(dpaa_eth_fd,
+	/* Trace function prototype */
+	TP_PROTO(struct net_device *netdev,
+		 struct qman_fq *fq,
+		 const struct qm_fd *fd),
+
+	/* Repeat argument list here */
+	TP_ARGS(netdev, fq, fd),
+
+	/* A structure containing the relevant information we want to record.
+	 * Declare name and type for each normal element, name, type and size
+	 * for arrays. Use __string for variable length strings.
+	 */
+	TP_STRUCT__entry(
+		__field(u32,	fqid)
+		__field(u64,	fd_addr)
+		__field(u8,	fd_format)
+		__field(u16,	fd_offset)
+		__field(u32,	fd_length)
+		__field(u32,	fd_status)
+		__string(name,	netdev->name)
+	),
+
+	/* The function that assigns values to the above declared fields */
+	TP_fast_assign(
+		__entry->fqid = fq->fqid;
+		__entry->fd_addr = qm_fd_addr_get64(fd);
+		__entry->fd_format = fd->format;
+		__entry->fd_offset = dpa_fd_offset(fd);
+		__entry->fd_length = dpa_fd_length(fd);
+		__entry->fd_status = fd->status;
+		__assign_str(name, netdev->name);
+	),
+
+	/* This is what gets printed when the trace event is triggered */
+	TP_printk("[%s] fqid=%d, fd: addr=0x%llx, format=%s, off=%u, len=%u, status=0x%08x",
+		  __get_str(name), __entry->fqid, __entry->fd_addr,
+		  __print_symbolic(__entry->fd_format, fd_format_list),
+		  __entry->fd_offset, __entry->fd_length, __entry->fd_status)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_fd,
+
+	TP_PROTO(struct net_device *netdev,
+		 struct qman_fq *fq,
+		 const struct qm_fd *fd),
+
+	TP_ARGS(netdev, fq, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_rx_fd,
+
+	TP_PROTO(struct net_device *netdev,
+		 struct qman_fq *fq,
+		 const struct qm_fd *fd),
+
+	TP_ARGS(netdev, fq, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa_eth_fd, dpa_tx_conf_fd,
+
+	TP_PROTO(struct net_device *netdev,
+		 struct qman_fq *fq,
+		 const struct qm_fd *fd),
+
+	TP_ARGS(netdev, fq, fd)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE	dpaa_eth_trace
+#include <trace/define_trace.h>
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
new file mode 100644
index 0000000..edf8d66
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -0,0 +1,417 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+
+#include "dpaa_eth.h"
+#include "mac.h"
+#include "dpaa_eth_common.h"
+
+static const char dpa_stats_percpu[][ETH_GSTRING_LEN] = {
+	"interrupts",
+	"rx packets",
+	"tx packets",
+	"tx confirm",
+	"tx S/G",
+	"tx error",
+	"rx error",
+	"bp count"
+};
+
+static char dpa_stats_global[][ETH_GSTRING_LEN] = {
+	/* dpa rx errors */
+	"rx dma error",
+	"rx frame physical error",
+	"rx frame size error",
+	"rx header error",
+
+	/* demultiplexing errors */
+	"qman cg_tdrop",
+	"qman wred",
+	"qman error cond",
+	"qman early window",
+	"qman late window",
+	"qman fq tdrop",
+	"qman fq retired",
+	"qman orp disabled",
+
+	/* congestion related stats */
+	"congestion time (ms)",
+	"entered congestion",
+	"congested (0/1)"
+};
+
+#define DPA_STATS_PERCPU_LEN ARRAY_SIZE(dpa_stats_percpu)
+#define DPA_STATS_GLOBAL_LEN ARRAY_SIZE(dpa_stats_global)
+
+static int dpa_get_settings(struct net_device *net_dev,
+			    struct ethtool_cmd *et_cmd)
+{
+	int err;
+	struct dpa_priv_s *priv;
+
+	priv = netdev_priv(net_dev);
+
+	if (!priv->mac_dev->phy_dev) {
+		netdev_dbg(net_dev, "phy device not initialized\n");
+		return 0;
+	}
+
+	err = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd);
+
+	return err;
+}
+
+static int dpa_set_settings(struct net_device *net_dev,
+			    struct ethtool_cmd *et_cmd)
+{
+	int err;
+	struct dpa_priv_s *priv;
+
+	priv = netdev_priv(net_dev);
+
+	if (!priv->mac_dev->phy_dev) {
+		netdev_err(net_dev, "phy device not initialized\n");
+		return -ENODEV;
+	}
+
+	err = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd);
+	if (err < 0)
+		netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err);
+
+	return err;
+}
+
+static void dpa_get_drvinfo(struct net_device *net_dev,
+			    struct ethtool_drvinfo *drvinfo)
+{
+	int len;
+
+	strlcpy(drvinfo->driver, KBUILD_MODNAME,
+		sizeof(drvinfo->driver));
+	len = snprintf(drvinfo->version, sizeof(drvinfo->version),
+		       "%X", 0);
+	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		       "%X", 0);
+
+	if (len >= sizeof(drvinfo->fw_version)) {
+		/* Truncated output */
+		netdev_notice(net_dev, "snprintf() = %d\n", len);
+	}
+	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+		sizeof(drvinfo->bus_info));
+}
+
+static u32 dpa_get_msglevel(struct net_device *net_dev)
+{
+	return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable;
+}
+
+static void dpa_set_msglevel(struct net_device *net_dev,
+			     u32 msg_enable)
+{
+	((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable;
+}
+
+static int dpa_nway_reset(struct net_device *net_dev)
+{
+	int err;
+	struct dpa_priv_s *priv;
+
+	priv = netdev_priv(net_dev);
+
+	if (!priv->mac_dev->phy_dev) {
+		netdev_err(net_dev, "phy device not initialized\n");
+		return -ENODEV;
+	}
+
+	err = 0;
+	if (priv->mac_dev->phy_dev->autoneg) {
+		err = phy_start_aneg(priv->mac_dev->phy_dev);
+		if (err < 0)
+			netdev_err(net_dev, "phy_start_aneg() = %d\n",
+				   err);
+	}
+
+	return err;
+}
+
+static void dpa_get_pauseparam(struct net_device *net_dev,
+			       struct ethtool_pauseparam *epause)
+{
+	struct dpa_priv_s *priv;
+	struct mac_device *mac_dev;
+	struct phy_device *phy_dev;
+
+	priv = netdev_priv(net_dev);
+	mac_dev = priv->mac_dev;
+
+	phy_dev = mac_dev->phy_dev;
+	if (!phy_dev) {
+		netdev_err(net_dev, "phy device not initialized\n");
+		return;
+	}
+
+	epause->autoneg = mac_dev->autoneg_pause;
+	epause->rx_pause = mac_dev->rx_pause_active;
+	epause->tx_pause = mac_dev->tx_pause_active;
+}
+
+static int dpa_set_pauseparam(struct net_device *net_dev,
+			      struct ethtool_pauseparam *epause)
+{
+	struct dpa_priv_s *priv;
+	struct mac_device *mac_dev;
+	struct phy_device *phy_dev;
+	int err;
+	u32 newadv, oldadv;
+	bool rx_pause, tx_pause;
+
+	priv = netdev_priv(net_dev);
+	mac_dev = priv->mac_dev;
+
+	phy_dev = mac_dev->phy_dev;
+	if (!phy_dev) {
+		netdev_err(net_dev, "phy device not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!(phy_dev->supported & SUPPORTED_Pause) ||
+	    (!(phy_dev->supported & SUPPORTED_Asym_Pause) &&
+	    (epause->rx_pause != epause->tx_pause)))
+		return -EINVAL;
+
+	/* The MAC should know how to handle PAUSE frame autonegotiation before
+	 * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
+	 * settings.
+	 */
+	mac_dev->autoneg_pause = !!epause->autoneg;
+	mac_dev->rx_pause_req = !!epause->rx_pause;
+	mac_dev->tx_pause_req = !!epause->tx_pause;
+
+	/* Determine the sym/asym advertised PAUSE capabilities from the desired
+	 * rx/tx pause settings.
+	 */
+	newadv = 0;
+	if (epause->rx_pause)
+		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	if (epause->tx_pause)
+		newadv |= ADVERTISED_Asym_Pause;
+
+	oldadv = phy_dev->advertising &
+			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+	/* If there are differences between the old and the new advertised
+	 * values, restart PHY autonegotiation and advertise the new values.
+	 */
+	if (oldadv != newadv) {
+		phy_dev->advertising &= ~(ADVERTISED_Pause
+				| ADVERTISED_Asym_Pause);
+		phy_dev->advertising |= newadv;
+		if (phy_dev->autoneg) {
+			err = phy_start_aneg(phy_dev);
+			if (err < 0)
+				netdev_err(net_dev, "phy_start_aneg() = %d\n",
+					   err);
+		}
+	}
+
+	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+	if (err < 0)
+		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
+
+	return err;
+}
+
+static int dpa_get_sset_count(struct net_device *net_dev, int type)
+{
+	unsigned int total_stats, num_stats;
+
+	num_stats   = num_online_cpus() + 1;
+	total_stats = num_stats * DPA_STATS_PERCPU_LEN + DPA_STATS_GLOBAL_LEN;
+
+	switch (type) {
+	case ETH_SS_STATS:
+		return total_stats;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void copy_stats(struct dpa_percpu_priv_s *percpu_priv, int num_cpus,
+			int crr_cpu, u64 bp_count, u64 *data)
+{
+	int num_values = num_cpus + 1;
+	int crr = 0;
+
+	/* update current CPU's stats and also add them to the total values */
+	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
+	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
+	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
+	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
+
+	data[crr * num_values + crr_cpu] = bp_count;
+	data[crr++ * num_values + num_cpus] += bp_count;
+}
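+
+/* Shape of the ethtool data[] array filled in above (sketch): each per-CPU
+ * statistic occupies a row of num_online_cpus() + 1 values, with the last
+ * column accumulating the total across CPUs.  With two CPUs, for example:
+ *
+ *	data[0] = interrupts, CPU 0
+ *	data[1] = interrupts, CPU 1
+ *	data[2] = interrupts, total
+ *	data[3] = rx packets, CPU 0
+ *	...
+ *
+ * The global statistics follow at offset (num_cpus + 1) * DPA_STATS_PERCPU_LEN,
+ * matching the string layout produced by dpa_get_strings().
+ */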
+
+static void dpa_get_ethtool_stats(struct net_device *net_dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+	u64 bp_count, cg_time, cg_num, cg_status;
+	struct dpa_percpu_priv_s *percpu_priv;
+	struct qm_mcr_querycgr query_cgr;
+	struct dpa_rx_errors rx_errors;
+	struct dpa_ern_cnt ern_cnt;
+	struct dpa_priv_s *priv;
+	unsigned int num_cpus, offset;
+	struct dpa_bp *dpa_bp;
+	int total_stats, i;
+
+	total_stats = dpa_get_sset_count(net_dev, ETH_SS_STATS);
+	priv     = netdev_priv(net_dev);
+	dpa_bp   = priv->dpa_bp;
+	num_cpus = num_online_cpus();
+	bp_count = 0;
+
+	memset(&rx_errors, 0, sizeof(struct dpa_rx_errors));
+	memset(&ern_cnt, 0, sizeof(struct dpa_ern_cnt));
+	memset(data, 0, total_stats * sizeof(u64));
+
+	for_each_online_cpu(i) {
+		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
+
+		if (dpa_bp->percpu_count)
+			bp_count = *(per_cpu_ptr(dpa_bp->percpu_count, i));
+
+		rx_errors.dme += percpu_priv->rx_errors.dme;
+		rx_errors.fpe += percpu_priv->rx_errors.fpe;
+		rx_errors.fse += percpu_priv->rx_errors.fse;
+		rx_errors.phe += percpu_priv->rx_errors.phe;
+
+		ern_cnt.cg_tdrop     += percpu_priv->ern_cnt.cg_tdrop;
+		ern_cnt.wred         += percpu_priv->ern_cnt.wred;
+		ern_cnt.err_cond     += percpu_priv->ern_cnt.err_cond;
+		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
+		ern_cnt.late_window  += percpu_priv->ern_cnt.late_window;
+		ern_cnt.fq_tdrop     += percpu_priv->ern_cnt.fq_tdrop;
+		ern_cnt.fq_retired   += percpu_priv->ern_cnt.fq_retired;
+		ern_cnt.orp_zero     += percpu_priv->ern_cnt.orp_zero;
+
+		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
+	}
+
+	offset = (num_cpus + 1) * DPA_STATS_PERCPU_LEN;
+	memcpy(data + offset, &rx_errors, sizeof(struct dpa_rx_errors));
+
+	offset += sizeof(struct dpa_rx_errors) / sizeof(u64);
+	memcpy(data + offset, &ern_cnt, sizeof(struct dpa_ern_cnt));
+
+	/* gather congestion related counters */
+	cg_num    = 0;
+	cg_status = 0;
+	cg_time   = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
+	if (qman_query_cgr(&priv->cgr_data.cgr, &query_cgr) == 0) {
+		cg_num    = priv->cgr_data.cgr_congested_count;
+		cg_status = query_cgr.cgr.cs;
+
+		/* reset congestion stats (like the QMan API does) */
+		priv->cgr_data.congested_jiffies   = 0;
+		priv->cgr_data.cgr_congested_count = 0;
+	}
+
+	offset += sizeof(struct dpa_ern_cnt) / sizeof(u64);
+	data[offset++] = cg_time;
+	data[offset++] = cg_num;
+	data[offset++] = cg_status;
+}
+
+static void dpa_get_strings(struct net_device *net_dev, u32 stringset, u8 *data)
+{
+	unsigned int i, j, num_cpus, size;
+	char string_cpu[ETH_GSTRING_LEN];
+	u8 *strings;
+
+	strings   = data;
+	num_cpus  = num_online_cpus();
+	size      = DPA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+
+	for (i = 0; i < DPA_STATS_PERCPU_LEN; i++) {
+		for (j = 0; j < num_cpus; j++) {
+			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
+				dpa_stats_percpu[i], j);
+			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+			strings += ETH_GSTRING_LEN;
+		}
+		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
+			dpa_stats_percpu[i]);
+		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+		strings += ETH_GSTRING_LEN;
+	}
+	memcpy(strings, dpa_stats_global, size);
+}
+
+const struct ethtool_ops dpa_ethtool_ops = {
+	.get_settings = dpa_get_settings,
+	.set_settings = dpa_set_settings,
+	.get_drvinfo = dpa_get_drvinfo,
+	.get_msglevel = dpa_get_msglevel,
+	.set_msglevel = dpa_set_msglevel,
+	.nway_reset = dpa_nway_reset,
+	.get_pauseparam = dpa_get_pauseparam,
+	.set_pauseparam = dpa_set_pauseparam,
+	.get_link = ethtool_op_get_link,
+	.get_sset_count = dpa_get_sset_count,
+	.get_ethtool_stats = dpa_get_ethtool_stats,
+	.get_strings = dpa_get_strings,
+};
diff --git a/linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h b/linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h
new file mode 100644
index 0000000..92f2e87
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/crc_mac_addr_ext.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Define a macro that calculates the CRC value of an Ethernet MAC address
+ * (48-bit address).
+ */
+
+#ifndef __crc_mac_addr_ext_h
+#define __crc_mac_addr_ext_h
+
+#include <linux/types.h>
+
+static u32 crc_table[256] = {
+	0x00000000,
+	0x77073096,
+	0xee0e612c,
+	0x990951ba,
+	0x076dc419,
+	0x706af48f,
+	0xe963a535,
+	0x9e6495a3,
+	0x0edb8832,
+	0x79dcb8a4,
+	0xe0d5e91e,
+	0x97d2d988,
+	0x09b64c2b,
+	0x7eb17cbd,
+	0xe7b82d07,
+	0x90bf1d91,
+	0x1db71064,
+	0x6ab020f2,
+	0xf3b97148,
+	0x84be41de,
+	0x1adad47d,
+	0x6ddde4eb,
+	0xf4d4b551,
+	0x83d385c7,
+	0x136c9856,
+	0x646ba8c0,
+	0xfd62f97a,
+	0x8a65c9ec,
+	0x14015c4f,
+	0x63066cd9,
+	0xfa0f3d63,
+	0x8d080df5,
+	0x3b6e20c8,
+	0x4c69105e,
+	0xd56041e4,
+	0xa2677172,
+	0x3c03e4d1,
+	0x4b04d447,
+	0xd20d85fd,
+	0xa50ab56b,
+	0x35b5a8fa,
+	0x42b2986c,
+	0xdbbbc9d6,
+	0xacbcf940,
+	0x32d86ce3,
+	0x45df5c75,
+	0xdcd60dcf,
+	0xabd13d59,
+	0x26d930ac,
+	0x51de003a,
+	0xc8d75180,
+	0xbfd06116,
+	0x21b4f4b5,
+	0x56b3c423,
+	0xcfba9599,
+	0xb8bda50f,
+	0x2802b89e,
+	0x5f058808,
+	0xc60cd9b2,
+	0xb10be924,
+	0x2f6f7c87,
+	0x58684c11,
+	0xc1611dab,
+	0xb6662d3d,
+	0x76dc4190,
+	0x01db7106,
+	0x98d220bc,
+	0xefd5102a,
+	0x71b18589,
+	0x06b6b51f,
+	0x9fbfe4a5,
+	0xe8b8d433,
+	0x7807c9a2,
+	0x0f00f934,
+	0x9609a88e,
+	0xe10e9818,
+	0x7f6a0dbb,
+	0x086d3d2d,
+	0x91646c97,
+	0xe6635c01,
+	0x6b6b51f4,
+	0x1c6c6162,
+	0x856530d8,
+	0xf262004e,
+	0x6c0695ed,
+	0x1b01a57b,
+	0x8208f4c1,
+	0xf50fc457,
+	0x65b0d9c6,
+	0x12b7e950,
+	0x8bbeb8ea,
+	0xfcb9887c,
+	0x62dd1ddf,
+	0x15da2d49,
+	0x8cd37cf3,
+	0xfbd44c65,
+	0x4db26158,
+	0x3ab551ce,
+	0xa3bc0074,
+	0xd4bb30e2,
+	0x4adfa541,
+	0x3dd895d7,
+	0xa4d1c46d,
+	0xd3d6f4fb,
+	0x4369e96a,
+	0x346ed9fc,
+	0xad678846,
+	0xda60b8d0,
+	0x44042d73,
+	0x33031de5,
+	0xaa0a4c5f,
+	0xdd0d7cc9,
+	0x5005713c,
+	0x270241aa,
+	0xbe0b1010,
+	0xc90c2086,
+	0x5768b525,
+	0x206f85b3,
+	0xb966d409,
+	0xce61e49f,
+	0x5edef90e,
+	0x29d9c998,
+	0xb0d09822,
+	0xc7d7a8b4,
+	0x59b33d17,
+	0x2eb40d81,
+	0xb7bd5c3b,
+	0xc0ba6cad,
+	0xedb88320,
+	0x9abfb3b6,
+	0x03b6e20c,
+	0x74b1d29a,
+	0xead54739,
+	0x9dd277af,
+	0x04db2615,
+	0x73dc1683,
+	0xe3630b12,
+	0x94643b84,
+	0x0d6d6a3e,
+	0x7a6a5aa8,
+	0xe40ecf0b,
+	0x9309ff9d,
+	0x0a00ae27,
+	0x7d079eb1,
+	0xf00f9344,
+	0x8708a3d2,
+	0x1e01f268,
+	0x6906c2fe,
+	0xf762575d,
+	0x806567cb,
+	0x196c3671,
+	0x6e6b06e7,
+	0xfed41b76,
+	0x89d32be0,
+	0x10da7a5a,
+	0x67dd4acc,
+	0xf9b9df6f,
+	0x8ebeeff9,
+	0x17b7be43,
+	0x60b08ed5,
+	0xd6d6a3e8,
+	0xa1d1937e,
+	0x38d8c2c4,
+	0x4fdff252,
+	0xd1bb67f1,
+	0xa6bc5767,
+	0x3fb506dd,
+	0x48b2364b,
+	0xd80d2bda,
+	0xaf0a1b4c,
+	0x36034af6,
+	0x41047a60,
+	0xdf60efc3,
+	0xa867df55,
+	0x316e8eef,
+	0x4669be79,
+	0xcb61b38c,
+	0xbc66831a,
+	0x256fd2a0,
+	0x5268e236,
+	0xcc0c7795,
+	0xbb0b4703,
+	0x220216b9,
+	0x5505262f,
+	0xc5ba3bbe,
+	0xb2bd0b28,
+	0x2bb45a92,
+	0x5cb36a04,
+	0xc2d7ffa7,
+	0xb5d0cf31,
+	0x2cd99e8b,
+	0x5bdeae1d,
+	0x9b64c2b0,
+	0xec63f226,
+	0x756aa39c,
+	0x026d930a,
+	0x9c0906a9,
+	0xeb0e363f,
+	0x72076785,
+	0x05005713,
+	0x95bf4a82,
+	0xe2b87a14,
+	0x7bb12bae,
+	0x0cb61b38,
+	0x92d28e9b,
+	0xe5d5be0d,
+	0x7cdcefb7,
+	0x0bdbdf21,
+	0x86d3d2d4,
+	0xf1d4e242,
+	0x68ddb3f8,
+	0x1fda836e,
+	0x81be16cd,
+	0xf6b9265b,
+	0x6fb077e1,
+	0x18b74777,
+	0x88085ae6,
+	0xff0f6a70,
+	0x66063bca,
+	0x11010b5c,
+	0x8f659eff,
+	0xf862ae69,
+	0x616bffd3,
+	0x166ccf45,
+	0xa00ae278,
+	0xd70dd2ee,
+	0x4e048354,
+	0x3903b3c2,
+	0xa7672661,
+	0xd06016f7,
+	0x4969474d,
+	0x3e6e77db,
+	0xaed16a4a,
+	0xd9d65adc,
+	0x40df0b66,
+	0x37d83bf0,
+	0xa9bcae53,
+	0xdebb9ec5,
+	0x47b2cf7f,
+	0x30b5ffe9,
+	0xbdbdf21c,
+	0xcabac28a,
+	0x53b39330,
+	0x24b4a3a6,
+	0xbad03605,
+	0xcdd70693,
+	0x54de5729,
+	0x23d967bf,
+	0xb3667a2e,
+	0xc4614ab8,
+	0x5d681b02,
+	0x2a6f2b94,
+	0xb40bbe37,
+	0xc30c8ea1,
+	0x5a05df1b,
+	0x2d02ef8d
+};
+
+/* CRC calculation */
+#define GET_MAC_ADDR_CRC(addr, crc)				\
+{								\
+	u32 i;						\
+	u8  data;						\
+	crc = 0xffffffff;					\
+	for (i = 0; i < 6; i++) {				\
+		data = (u8)(addr >> ((5 - i) * 8));	\
+		crc = crc ^ data;				\
+		crc = crc_table[crc & 0xff] ^ (crc >> 8);	\
+	}							\
+}								\
+
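+/* Usage sketch (hypothetical caller, not part of this header): a MAC driver
+ * would pass the 48-bit address in the low six bytes of a u64, most
+ * significant octet first, and use part of the resulting CRC as a hash
+ * bucket, e.g.
+ *
+ *	u64 addr = ((u64)mac[0] << 40) | ((u64)mac[1] << 32) |
+ *		   ((u64)mac[2] << 24) | ((u64)mac[3] << 16) |
+ *		   ((u64)mac[4] << 8) | mac[5];
+ *	u32 crc;
+ *
+ *	GET_MAC_ADDR_CRC(addr, crc);
+ *	bucket = crc & (HASH_TABLE_SIZE - 1);	(HASH_TABLE_SIZE is illustrative)
+ */
+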
+#endif /* __crc_mac_addr_ext_h */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.c b/linux/drivers/net/ethernet/freescale/fman/fman.c
new file mode 100644
index 0000000..5119b40
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.c
@@ -0,0 +1,2957 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman.h"
+#include "fman_muram.h"
+#include <asm/mpc85xx.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#ifdef __rtems__
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+#endif /* __rtems__ */
+
+
+/* General defines */
+#define FMAN_LIODN_TBL			64	/* size of LIODN table */
+#define MAX_NUM_OF_MACS			10
+#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS	4
+#define BASE_RX_PORTID			0x08
+#define BASE_TX_PORTID			0x28
+
+/* Modules registers offsets */
+#define BMI_OFFSET		0x00080000
+#define QMI_OFFSET		0x00080400
+#define DMA_OFFSET		0x000C2000
+#define FPM_OFFSET		0x000C3000
+#define IMEM_OFFSET		0x000C4000
+#define CGP_OFFSET		0x000DB000
+
+/* Exceptions bit map */
+#define EX_DMA_BUS_ERROR		0x80000000
+#define EX_DMA_READ_ECC			0x40000000
+#define EX_DMA_SYSTEM_WRITE_ECC	0x20000000
+#define EX_DMA_FM_WRITE_ECC		0x10000000
+#define EX_FPM_STALL_ON_TASKS		0x08000000
+#define EX_FPM_SINGLE_ECC		0x04000000
+#define EX_FPM_DOUBLE_ECC		0x02000000
+#define EX_QMI_SINGLE_ECC		0x01000000
+#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID	0x00800000
+#define EX_QMI_DOUBLE_ECC		0x00400000
+#define EX_BMI_LIST_RAM_ECC		0x00200000
+#define EX_BMI_STORAGE_PROFILE_ECC	0x00100000
+#define EX_BMI_STATISTICS_RAM_ECC	0x00080000
+#define EX_IRAM_ECC			0x00040000
+#define EX_MURAM_ECC			0x00020000
+#define EX_BMI_DISPATCH_RAM_ECC	0x00010000
+#define EX_DMA_SINGLE_PORT_ECC		0x00008000
+
+#define DFLT_EXCEPTIONS	\
+	 ((EX_DMA_BUS_ERROR)            | \
+	  (EX_DMA_READ_ECC)              | \
+	  (EX_DMA_SYSTEM_WRITE_ECC)      | \
+	  (EX_DMA_FM_WRITE_ECC)          | \
+	  (EX_FPM_STALL_ON_TASKS)        | \
+	  (EX_FPM_SINGLE_ECC)            | \
+	  (EX_FPM_DOUBLE_ECC)            | \
+	  (EX_QMI_DEQ_FROM_UNKNOWN_PORTID) | \
+	  (EX_BMI_LIST_RAM_ECC)          | \
+	  (EX_BMI_STORAGE_PROFILE_ECC)   | \
+	  (EX_BMI_STATISTICS_RAM_ECC)    | \
+	  (EX_MURAM_ECC)                 | \
+	  (EX_BMI_DISPATCH_RAM_ECC)      | \
+	  (EX_QMI_DOUBLE_ECC)            | \
+	  (EX_QMI_SINGLE_ECC))
+
+/* DMA defines */
+/* masks */
+#define DMA_MODE_AID_OR			0x20000000
+#define DMA_MODE_SBER			0x10000000
+#define DMA_MODE_BER			0x00200000
+#define DMA_MODE_ECC			0x00000020
+#define DMA_MODE_SECURE_PROT		0x00000800
+#define DMA_MODE_EMER_READ		0x00080000
+#define DMA_MODE_AXI_DBG_MASK		0x0F000000
+
+#define DMA_TRANSFER_PORTID_MASK	0xFF000000
+#define DMA_TRANSFER_TNUM_MASK		0x00FF0000
+#define DMA_TRANSFER_LIODN_MASK	0x00000FFF
+
+#define DMA_STATUS_BUS_ERR		0x08000000
+#define DMA_STATUS_READ_ECC		0x04000000
+#define DMA_STATUS_SYSTEM_WRITE_ECC	0x02000000
+#define DMA_STATUS_FM_WRITE_ECC	0x01000000
+#define DMA_STATUS_FM_SPDAT_ECC	0x00080000
+
+#define DMA_MODE_CACHE_OR_SHIFT		30
+#define DMA_MODE_AXI_DBG_SHIFT			24
+#define DMA_MODE_CEN_SHIFT			13
+#define DMA_MODE_CEN_MASK			0x00000007
+#define DMA_MODE_DBG_SHIFT			7
+#define DMA_MODE_EMER_LVL_SHIFT		6
+#define DMA_MODE_AID_MODE_SHIFT		4
+
+#define DMA_THRESH_COMMQ_SHIFT			24
+#define DMA_THRESH_READ_INT_BUF_SHIFT		16
+#define DMA_THRESH_READ_INT_BUF_MASK		0x0000003f
+#define DMA_THRESH_WRITE_INT_BUF_MASK		0x0000003f
+
+#define DMA_TRANSFER_PORTID_SHIFT		24
+#define DMA_TRANSFER_TNUM_SHIFT		16
+
+#define DMA_CAM_SIZEOF_ENTRY			0x40
+#define DMA_CAM_UNITS				8
+
+#define DMA_LIODN_SHIFT		16
+#define DMA_LIODN_BASE_MASK	0x00000FFF
+
+/* FPM defines */
+#define FPM_EV_MASK_DOUBLE_ECC		0x80000000
+#define FPM_EV_MASK_STALL		0x40000000
+#define FPM_EV_MASK_SINGLE_ECC		0x20000000
+#define FPM_EV_MASK_RELEASE_FM		0x00010000
+#define FPM_EV_MASK_DOUBLE_ECC_EN	0x00008000
+#define FPM_EV_MASK_STALL_EN		0x00004000
+#define FPM_EV_MASK_SINGLE_ECC_EN	0x00002000
+#define FPM_EV_MASK_EXTERNAL_HALT	0x00000008
+#define FPM_EV_MASK_ECC_ERR_HALT	0x00000004
+
+#define FPM_RAM_MURAM_ECC		0x00008000
+#define FPM_RAM_IRAM_ECC		0x00004000
+#define FPM_RAM_MURAM_TEST_ECC		0x20000000
+#define FPM_RAM_IRAM_TEST_ECC		0x10000000
+#define FPM_IRAM_ECC_ERR_EX_EN		0x00020000
+#define FPM_MURAM_ECC_ERR_EX_EN	0x00040000
+#define FPM_RAM_IRAM_ECC_EN		0x40000000
+#define FPM_RAM_RAMS_ECC_EN		0x80000000
+#define FPM_RAM_RAMS_ECC_EN_SRC_SEL	0x08000000
+
+#define FPM_REV1_MAJOR_MASK		0x0000FF00
+#define FPM_REV1_MINOR_MASK		0x000000FF
+
+#define FPM_DISP_LIMIT_SHIFT		24
+
+#define FPM_PRT_FM_CTL1			0x00000001
+#define FPM_PRT_FM_CTL2			0x00000002
+#define FPM_PORT_FM_CTL_PORTID_SHIFT	24
+#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT	16
+
+#define FPM_THR1_PRS_SHIFT		24
+#define FPM_THR1_KG_SHIFT		16
+#define FPM_THR1_PLCR_SHIFT		8
+#define FPM_THR1_BMI_SHIFT		0
+
+#define FPM_THR2_QMI_ENQ_SHIFT		24
+#define FPM_THR2_QMI_DEQ_SHIFT		0
+#define FPM_THR2_FM_CTL1_SHIFT		16
+#define FPM_THR2_FM_CTL2_SHIFT		8
+
+#define FPM_EV_MASK_CAT_ERR_SHIFT	1
+#define FPM_EV_MASK_DMA_ERR_SHIFT	0
+
+#define FPM_REV1_MAJOR_SHIFT		8
+
+#define FPM_RSTC_FM_RESET		0x80000000
+#define FPM_RSTC_MAC0_RESET		0x40000000
+#define FPM_RSTC_MAC1_RESET		0x20000000
+#define FPM_RSTC_MAC2_RESET		0x10000000
+#define FPM_RSTC_MAC3_RESET		0x08000000
+#define FPM_RSTC_MAC8_RESET		0x04000000
+#define FPM_RSTC_MAC4_RESET		0x02000000
+#define FPM_RSTC_MAC5_RESET		0x01000000
+#define FPM_RSTC_MAC6_RESET		0x00800000
+#define FPM_RSTC_MAC7_RESET		0x00400000
+#define FPM_RSTC_MAC9_RESET		0x00200000
+
+#define FPM_TS_INT_SHIFT		16
+#define FPM_TS_CTL_EN			0x80000000
+
+/* BMI defines */
+#define BMI_INIT_START				0x80000000
+#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC	0x80000000
+#define BMI_ERR_INTR_EN_LIST_RAM_ECC		0x40000000
+#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC	0x20000000
+#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC	0x10000000
+#define BMI_NUM_OF_TASKS_MASK			0x3F000000
+#define BMI_NUM_OF_EXTRA_TASKS_MASK		0x000F0000
+#define BMI_NUM_OF_DMAS_MASK			0x00000F00
+#define BMI_NUM_OF_EXTRA_DMAS_MASK		0x0000000F
+#define BMI_FIFO_SIZE_MASK			0x000003FF
+#define BMI_EXTRA_FIFO_SIZE_MASK		0x03FF0000
+#define BMI_CFG2_DMAS_MASK			0x0000003F
+#define BMI_CFG2_TASKS_MASK			0x0000003F
+
+#define BMI_CFG2_TASKS_SHIFT		16
+#define BMI_CFG2_DMAS_SHIFT		0
+#define BMI_CFG1_FIFO_SIZE_SHIFT	16
+#define BMI_NUM_OF_TASKS_SHIFT		24
+#define BMI_EXTRA_NUM_OF_TASKS_SHIFT	16
+#define BMI_NUM_OF_DMAS_SHIFT		8
+#define BMI_EXTRA_NUM_OF_DMAS_SHIFT	0
+
+#define BMI_FIFO_ALIGN			0x100
+
+#define BMI_EXTRA_FIFO_SIZE_SHIFT	16
+
+/* QMI defines */
+#define QMI_CFG_ENQ_EN			0x80000000
+#define QMI_CFG_DEQ_EN			0x40000000
+#define QMI_CFG_EN_COUNTERS		0x10000000
+#define QMI_CFG_DEQ_MASK		0x0000003F
+#define QMI_CFG_ENQ_MASK		0x00003F00
+#define QMI_CFG_ENQ_SHIFT		8
+
+#define QMI_ERR_INTR_EN_DOUBLE_ECC	0x80000000
+#define QMI_ERR_INTR_EN_DEQ_FROM_DEF	0x40000000
+#define QMI_INTR_EN_SINGLE_ECC		0x80000000
+
+#define QMI_TAPC_TAP			22
+
+#define QMI_GS_HALT_NOT_BUSY		0x00000002
+
+/* IRAM defines */
+#define IRAM_IADD_AIE			0x80000000
+#define IRAM_READY			0x80000000
+
+/* Default values */
+#define DEFAULT_CATASTROPHIC_ERR		0
+#define DEFAULT_DMA_ERR				0
+#define DEFAULT_AID_MODE			FMAN_DMA_AID_OUT_TNUM
+#define DEFAULT_DMA_COMM_Q_LOW			0x2A
+#define DEFAULT_DMA_COMM_Q_HIGH		0x3F
+#define DEFAULT_CACHE_OVERRIDE			0
+#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES		64
+#define DEFAULT_DMA_DBG_CNT_MODE		0
+#define DEFAULT_DMA_SOS_EMERGENCY		0
+#define DEFAULT_DMA_WATCHDOG			0
+#define DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER	0
+#define DEFAULT_DISP_LIMIT			0
+#define DEFAULT_PRS_DISP_TH			16
+#define DEFAULT_PLCR_DISP_TH			16
+#define DEFAULT_KG_DISP_TH			16
+#define DEFAULT_BMI_DISP_TH			16
+#define DEFAULT_QMI_ENQ_DISP_TH		16
+#define DEFAULT_QMI_DEQ_DISP_TH		16
+#define DEFAULT_FM_CTL1_DISP_TH		16
+#define DEFAULT_FM_CTL2_DISP_TH		16
+
+#define DFLT_AXI_DBG_NUM_OF_BEATS		1
+
+#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf)	\
+	((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf)	\
+	((dma_thresh_max_buf + 1) * 3 / 4)
+#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf)	\
+	((dma_thresh_max_buf + 1) / 2)
+#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
+	((dma_thresh_max_buf + 1) * 3 / 4)
+
+#define DMA_COMM_Q_LOW_FMAN_V3		0x2A
+#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq)		\
+	((dma_thresh_max_commq + 1) / 2)
+#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq)	\
+	((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 :		\
+	DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
+
+#define DMA_COMM_Q_HIGH_FMAN_V3	0x3f
+#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq)		\
+	((dma_thresh_max_commq + 1) * 3 / 4)
+#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq)	\
+	((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 :		\
+	DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
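+
+/* Worked example: on an FMan v2 with dma_thresh_max_commq = 31,
+ * DFLT_DMA_COMM_Q_LOW yields (31 + 1) / 2 = 16 and DFLT_DMA_COMM_Q_HIGH
+ * yields (31 + 1) * 3 / 4 = 24; on FMan v3 (major == 6) the fixed values
+ * 0x2A and 0x3F are used instead.
+ */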
+
+#define TOTAL_NUM_OF_TASKS_FMAN_V3L	59
+#define TOTAL_NUM_OF_TASKS_FMAN_V3H	124
+#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks)	\
+	((major == 6) ? ((minor == 1 || minor == 4) ?			\
+	TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) :	\
+	bmi_max_num_of_tasks)
+
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3		64
+#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2		32
+#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major)			\
+	(major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 :		\
+	DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
+
+#define FM_TIMESTAMP_1_USEC_BIT             8
+
+/* Defines used for enabling/disabling FMan interrupts */
+#define ERR_INTR_EN_DMA         0x00010000
+#define ERR_INTR_EN_FPM         0x80000000
+#define ERR_INTR_EN_BMI         0x00800000
+#define ERR_INTR_EN_QMI         0x00400000
+#define ERR_INTR_EN_MURAM       0x00040000
+#define ERR_INTR_EN_MAC0        0x00004000
+#define ERR_INTR_EN_MAC1        0x00002000
+#define ERR_INTR_EN_MAC2        0x00001000
+#define ERR_INTR_EN_MAC3        0x00000800
+#define ERR_INTR_EN_MAC4        0x00000400
+#define ERR_INTR_EN_MAC5        0x00000200
+#define ERR_INTR_EN_MAC6        0x00000100
+#define ERR_INTR_EN_MAC7        0x00000080
+#define ERR_INTR_EN_MAC8        0x00008000
+#define ERR_INTR_EN_MAC9        0x00000040
+
+#define INTR_EN_QMI             0x40000000
+#define INTR_EN_MAC0            0x00080000
+#define INTR_EN_MAC1            0x00040000
+#define INTR_EN_MAC2            0x00020000
+#define INTR_EN_MAC3            0x00010000
+#define INTR_EN_MAC4            0x00000040
+#define INTR_EN_MAC5            0x00000020
+#define INTR_EN_MAC6            0x00000008
+#define INTR_EN_MAC7            0x00000002
+#define INTR_EN_MAC8            0x00200000
+#define INTR_EN_MAC9            0x00100000
+#define INTR_EN_REV0            0x00008000
+#define INTR_EN_REV1            0x00004000
+#define INTR_EN_REV2            0x00002000
+#define INTR_EN_REV3            0x00001000
+#define INTR_EN_TMR             0x01000000
+
+enum fman_dma_aid_mode {
+	FMAN_DMA_AID_OUT_PORT_ID = 0,		  /* 4 LSB of PORT_ID */
+	FMAN_DMA_AID_OUT_TNUM			  /* 4 LSB of TNUM */
+};
+
+struct fman_iram_regs {
+	u32 iadd;	/* FM IRAM instruction address register */
+	u32 idata;	/* FM IRAM instruction data register */
+	u32 itcfg;	/* FM IRAM timing config register */
+	u32 iready;	/* FM IRAM ready register */
+};
+
+struct fman_fpm_regs {
+	u32 fmfp_tnc;		/* FPM TNUM Control 0x00 */
+	u32 fmfp_prc;		/* FPM Port_ID FmCtl Association 0x04 */
+	u32 fmfp_brkc;		/* FPM Breakpoint Control 0x08 */
+	u32 fmfp_mxd;		/* FPM Flush Control 0x0c */
+	u32 fmfp_dist1;		/* FPM Dispatch Thresholds1 0x10 */
+	u32 fmfp_dist2;		/* FPM Dispatch Thresholds2 0x14 */
+	u32 fm_epi;		/* FM Error Pending Interrupts 0x18 */
+	u32 fm_rie;		/* FM Error Interrupt Enable 0x1c */
+	u32 fmfp_fcev[4];	/* FPM FMan-Controller Event 1-4 0x20-0x2f */
+	u32 res0030[4];		/* res 0x30 - 0x3f */
+	u32 fmfp_cee[4];	/* PM FMan-Controller Event 1-4 0x40-0x4f */
+	u32 res0050[4];		/* res 0x50-0x5f */
+	u32 fmfp_tsc1;		/* FPM TimeStamp Control1 0x60 */
+	u32 fmfp_tsc2;		/* FPM TimeStamp Control2 0x64 */
+	u32 fmfp_tsp;		/* FPM Time Stamp 0x68 */
+	u32 fmfp_tsf;		/* FPM Time Stamp Fraction 0x6c */
+	u32 fm_rcr;		/* FM Rams Control 0x70 */
+	u32 fmfp_extc;		/* FPM External Requests Control 0x74 */
+	u32 fmfp_ext1;		/* FPM External Requests Config1 0x78 */
+	u32 fmfp_ext2;		/* FPM External Requests Config2 0x7c */
+	u32 fmfp_drd[16];	/* FPM Data_Ram Data 0-15 0x80 - 0xbf */
+	u32 fmfp_dra;		/* FPM Data Ram Access 0xc0 */
+	u32 fm_ip_rev_1;	/* FM IP Block Revision 1 0xc4 */
+	u32 fm_ip_rev_2;	/* FM IP Block Revision 2 0xc8 */
+	u32 fm_rstc;		/* FM Reset Command 0xcc */
+	u32 fm_cld;		/* FM Classifier Debug 0xd0 */
+	u32 fm_npi;		/* FM Normal Pending Interrupts 0xd4 */
+	u32 fmfp_exte;		/* FPM External Requests Enable 0xd8 */
+	u32 fmfp_ee;		/* FPM Event&Mask 0xdc */
+	u32 fmfp_cev[4];	/* FPM CPU Event 1-4 0xe0-0xef */
+	u32 res00f0[4];		/* res 0xf0-0xff */
+	u32 fmfp_ps[50];	/* FPM Port Status 0x100-0x1c7 */
+	u32 res01c8[14];	/* res 0x1c8-0x1ff */
+	u32 fmfp_clfabc;	/* FPM CLFABC 0x200 */
+	u32 fmfp_clfcc;		/* FPM CLFCC 0x204 */
+	u32 fmfp_clfaval;	/* FPM CLFAVAL 0x208 */
+	u32 fmfp_clfbval;	/* FPM CLFBVAL 0x20c */
+	u32 fmfp_clfcval;	/* FPM CLFCVAL 0x210 */
+	u32 fmfp_clfamsk;	/* FPM CLFAMSK 0x214 */
+	u32 fmfp_clfbmsk;	/* FPM CLFBMSK 0x218 */
+	u32 fmfp_clfcmsk;	/* FPM CLFCMSK 0x21c */
+	u32 fmfp_clfamc;	/* FPM CLFAMC 0x220 */
+	u32 fmfp_clfbmc;	/* FPM CLFBMC 0x224 */
+	u32 fmfp_clfcmc;	/* FPM CLFCMC 0x228 */
+	u32 fmfp_decceh;	/* FPM DECCEH 0x22c */
+	u32 res0230[116];	/* res 0x230 - 0x3ff */
+	u32 fmfp_ts[128];	/* 0x400: FPM Task Status 0x400 - 0x5ff */
+	u32 res0600[0x400 - 384];
+};
+
+struct fman_bmi_regs {
+	u32 fmbm_init;		/* BMI Initialization 0x00 */
+	u32 fmbm_cfg1;		/* BMI Configuration 1 0x04 */
+	u32 fmbm_cfg2;		/* BMI Configuration 2 0x08 */
+	u32 res000c[5];		/* 0x0c - 0x1f */
+	u32 fmbm_ievr;		/* Interrupt Event Register 0x20 */
+	u32 fmbm_ier;		/* Interrupt Enable Register 0x24 */
+	u32 fmbm_ifr;		/* Interrupt Force Register 0x28 */
+	u32 res002c[5];		/* 0x2c - 0x3f */
+	u32 fmbm_arb[8];	/* BMI Arbitration 0x40 - 0x5f */
+	u32 res0060[12];	/* 0x60 - 0x8f */
+	u32 fmbm_dtc[3];	/* Debug Trap Counter 0x90 - 0x9b */
+	u32 res009c;		/* 0x9c */
+	u32 fmbm_dcv[3][4];	/* Debug Compare val 0xa0-0xcf */
+	u32 fmbm_dcm[3][4];	/* Debug Compare Mask 0xd0-0xff */
+	u32 fmbm_gde;		/* BMI Global Debug Enable 0x100 */
+	u32 fmbm_pp[63];	/* BMI Port Parameters 0x104 - 0x1ff */
+	u32 res0200;		/* 0x200 */
+	u32 fmbm_pfs[63];	/* BMI Port FIFO Size 0x204 - 0x2ff */
+	u32 res0300;		/* 0x300 */
+	u32 fmbm_spliodn[63];	/* Port Partition ID 0x304 - 0x3ff */
+};
+
+struct fman_qmi_regs {
+	u32 fmqm_gc;		/* General Configuration Register 0x00 */
+	u32 res0004;		/* 0x04 */
+	u32 fmqm_eie;		/* Error Interrupt Event Register 0x08 */
+	u32 fmqm_eien;		/* Error Interrupt Enable Register 0x0c */
+	u32 fmqm_eif;		/* Error Interrupt Force Register 0x10 */
+	u32 fmqm_ie;		/* Interrupt Event Register 0x14 */
+	u32 fmqm_ien;		/* Interrupt Enable Register 0x18 */
+	u32 fmqm_if;		/* Interrupt Force Register 0x1c */
+	u32 fmqm_gs;		/* Global Status Register 0x20 */
+	u32 fmqm_ts;		/* Task Status Register 0x24 */
+	u32 fmqm_etfc;		/* Enqueue Total Frame Counter 0x28 */
+	u32 fmqm_dtfc;		/* Dequeue Total Frame Counter 0x2c */
+	u32 fmqm_dc0;		/* Dequeue Counter 0 0x30 */
+	u32 fmqm_dc1;		/* Dequeue Counter 1 0x34 */
+	u32 fmqm_dc2;		/* Dequeue Counter 2 0x38 */
+	u32 fmqm_dc3;		/* Dequeue Counter 3 0x3c */
+	u32 fmqm_dfdc;		/* Dequeue FQID from Default Counter 0x40 */
+	u32 fmqm_dfcc;		/* Dequeue FQID from Context Counter 0x44 */
+	u32 fmqm_dffc;		/* Dequeue FQID from FD Counter 0x48 */
+	u32 fmqm_dcc;		/* Dequeue Confirm Counter 0x4c */
+	u32 res0050[7];		/* 0x50 - 0x6b */
+	u32 fmqm_tapc;		/* Tnum Aging Period Control 0x6c */
+	u32 fmqm_dmcvc;		/* Dequeue MAC Command Valid Counter 0x70 */
+	u32 fmqm_difdcc;	/* Dequeue Invalid FD Command Counter 0x74 */
+	u32 fmqm_da1v;		/* Dequeue A1 Valid Counter 0x78 */
+	u32 res007c;		/* 0x7c */
+	u32 fmqm_dtc;		/* Debug Trap Counter 0x80 */
+	u32 fmqm_efddd;		/* Enqueue Frame desc Dynamic dbg 0x84 */
+	u32 res0088[2];		/* 0x88 - 0x8f */
+	struct {
+		u32 fmqm_dtcfg1;	/* 0x90 dbg trap cfg 1 Register 0x00 */
+		u32 fmqm_dtval1;	/* Debug Trap Value 1 Register 0x04 */
+		u32 fmqm_dtm1;		/* Debug Trap Mask 1 Register 0x08 */
+		u32 fmqm_dtc1;		/* Debug Trap Counter 1 Register 0x0c */
+		u32 fmqm_dtcfg2;	/* dbg Trap cfg 2 Register 0x10 */
+		u32 fmqm_dtval2;	/* Debug Trap Value 2 Register 0x14 */
+		u32 fmqm_dtm2;		/* Debug Trap Mask 2 Register 0x18 */
+		u32 res001c;		/* 0x1c */
+	} dbg_traps[3];			/* 0x90 - 0xef */
+	u8 res00f0[0x400 - 0xf0];	/* 0xf0 - 0x3ff */
+};
+
+struct fman_dma_regs {
+	u32 fmdmsr;	/* FM DMA status register 0x00 */
+	u32 fmdmmr;	/* FM DMA mode register 0x04 */
+	u32 fmdmtr;	/* FM DMA bus threshold register 0x08 */
+	u32 fmdmhy;	/* FM DMA bus hysteresis register 0x0c */
+	u32 fmdmsetr;	/* FM DMA SOS emergency Threshold Register 0x10 */
+	u32 fmdmtah;	/* FM DMA transfer bus address high reg 0x14 */
+	u32 fmdmtal;	/* FM DMA transfer bus address low reg 0x18 */
+	u32 fmdmtcid;	/* FM DMA transfer bus communication ID reg 0x1c */
+	u32 fmdmra;	/* FM DMA bus internal ram address register 0x20 */
+	u32 fmdmrd;	/* FM DMA bus internal ram data register 0x24 */
+	u32 fmdmwcr;	/* FM DMA CAM watchdog counter value 0x28 */
+	u32 fmdmebcr;	/* FM DMA CAM base in MURAM register 0x2c */
+	u32 fmdmccqdr;	/* FM DMA CAM and CMD Queue Debug reg 0x30 */
+	u32 fmdmccqvr1;	/* FM DMA CAM and CMD Queue Value reg #1 0x34 */
+	u32 fmdmccqvr2;	/* FM DMA CAM and CMD Queue Value reg #2 0x38 */
+	u32 fmdmcqvr3;	/* FM DMA CMD Queue Value register #3 0x3c */
+	u32 fmdmcqvr4;	/* FM DMA CMD Queue Value register #4 0x40 */
+	u32 fmdmcqvr5;	/* FM DMA CMD Queue Value register #5 0x44 */
+	u32 fmdmsefrc;	/* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
+	u32 fmdmsqfrc;	/* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
+	u32 fmdmssrc;	/* FM DMA Semaphore SYNC Reject Counter 0x50 */
+	u32 fmdmdcr;	/* FM DMA Debug Counter 0x54 */
+	u32 fmdmemsr;	/* FM DMA Emergency Smoother Register 0x58 */
+	u32 res005c;	/* 0x5c */
+	u32 fmdmplr[FMAN_LIODN_TBL / 2];	/* DMA LIODN regs 0x60-0xdf */
+	u32 res00e0[0x400 - 56];
+};
+
+struct fman_rg {
+	struct fman_fpm_regs __iomem *fpm_rg;
+	struct fman_dma_regs __iomem *dma_rg;
+	struct fman_bmi_regs __iomem *bmi_rg;
+	struct fman_qmi_regs __iomem *qmi_rg;
+};
+
+struct fman_state_struct {
+	u8 fm_id;
+	u16 fm_clk_freq;
+	struct fman_rev_info rev_info;
+	bool enabled_time_stamp;
+	u8 count1_micro_bit;
+	u8 total_num_of_tasks;
+	u8 accumulated_num_of_tasks;
+	u32 accumulated_fifo_size;
+	u8 accumulated_num_of_open_dmas;
+	u8 accumulated_num_of_deq_tnums;
+	bool low_end_restriction;
+	u32 exceptions;
+	u32 extra_fifo_pool_size;
+	u8 extra_tasks_pool_size;
+	u8 extra_open_dmas_pool_size;
+	u16 port_mfl[MAX_NUM_OF_MACS];
+	u16 mac_mfl[MAX_NUM_OF_MACS];
+
+	/* SOC specific */
+	u32 fm_iram_size;
+	/* DMA */
+	u32 dma_thresh_max_commq;
+	u32 dma_thresh_max_buf;
+	u32 max_num_of_open_dmas;
+	/* QMI */
+	u32 qmi_max_num_of_tnums;
+	u32 qmi_def_tnums_thresh;
+	/* BMI */
+	u32 bmi_max_num_of_tasks;
+	u32 bmi_max_fifo_size;
+	/* General */
+	u32 fm_port_num_of_cg;
+	u32 num_of_rx_ports;
+	u32 total_fifo_size;
+
+	u32 qman_channel_base;
+	u32 num_of_qman_channels;
+
+	struct resource *res;
+};
+
+struct fman_cfg {
+	u8 disp_limit_tsh;
+	u8 prs_disp_tsh;
+	u8 plcr_disp_tsh;
+	u8 kg_disp_tsh;
+	u8 bmi_disp_tsh;
+	u8 qmi_enq_disp_tsh;
+	u8 qmi_deq_disp_tsh;
+	u8 fm_ctl1_disp_tsh;
+	u8 fm_ctl2_disp_tsh;
+	int dma_cache_override;
+	enum fman_dma_aid_mode dma_aid_mode;
+	bool dma_aid_override;
+	u32 dma_axi_dbg_num_of_beats;
+	u32 dma_cam_num_of_entries;
+	u32 dma_watchdog;
+	u8 dma_comm_qtsh_asrt_emer;
+	u32 dma_write_buf_tsh_asrt_emer;
+	u32 dma_read_buf_tsh_asrt_emer;
+	u8 dma_comm_qtsh_clr_emer;
+	u32 dma_write_buf_tsh_clr_emer;
+	u32 dma_read_buf_tsh_clr_emer;
+	u32 dma_sos_emergency;
+	int dma_dbg_cnt_mode;
+	bool dma_stop_on_bus_error;
+	bool dma_en_emergency;
+	u32 dma_emergency_bus_select;
+	int dma_emergency_level;
+	bool dma_en_emergency_smoother;
+	u32 dma_emergency_switch_counter;
+	bool halt_on_external_activ;
+	bool halt_on_unrecov_ecc_err;
+	int catastrophic_err;
+	int dma_err;
+	bool en_muram_test_mode;
+	bool en_iram_test_mode;
+	bool external_ecc_rams_enable;
+	u16 tnum_aging_period;
+	u32 exceptions;
+	u16 clk_freq;
+	bool pedantic_dma;
+	u32 cam_base_addr;
+	u32 fifo_base_addr;
+	u32 total_fifo_size;
+	u32 total_num_of_tasks;
+	bool qmi_deq_option_support;
+	u32 qmi_def_tnums_thresh;
+};
+
+struct fman_dts_params {
+	void __iomem *base_addr;		/* FMan virtual address */
+#ifndef __rtems__
+	struct resource *res;			/* FMan memory resource */
+#endif /* __rtems__ */
+	u8 id;					/* FMan ID */
+
+	int err_irq;				/* FMan Error IRQ */
+
+	u16 clk_freq;				/* FMan clock freq (In Mhz) */
+
+	u32 qman_channel_base;			/* QMan channels base */
+	u32 num_of_qman_channels;		/* Number of QMan channels */
+
+	phys_addr_t muram_phy_base_addr;	/* MURAM physical address */
+	resource_size_t muram_size;		/* MURAM size */
+};
+
+struct fman {
+	struct device *dev;
+	void __iomem *base_addr;
+	struct fman_intr_src intr_mng[FMAN_EV_CNT];
+
+	struct fman_fpm_regs __iomem *fpm_regs;
+	struct fman_bmi_regs __iomem *bmi_regs;
+	struct fman_qmi_regs __iomem *qmi_regs;
+	struct fman_dma_regs __iomem *dma_regs;
+	fman_exceptions_cb *exception_cb;
+	fman_bus_error_cb *bus_error_cb;
+	/* Spinlock for FMan use */
+	spinlock_t spinlock;
+	struct fman_state_struct *state;
+
+	struct fman_cfg *cfg;
+	struct muram_info *muram;
+	/* cam section in muram */
+	int cam_offset;
+	size_t cam_size;
+	/* Fifo in MURAM */
+	int fifo_offset;
+	size_t fifo_size;
+	bool reset_on_init;
+
+	u32 liodn_base[64];
+	u32 liodn_offset[64];
+
+	struct fman_dts_params dts_params;
+};
+
+static void fman_exceptions(struct fman *fman, enum fman_exceptions exception)
+{
+	pr_debug("FMan[%d] exception %d\n",
+		 fman->state->fm_id, exception);
+}
+
+static void fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+			   u64 __maybe_unused addr, u8 __maybe_unused tnum,
+			   u16 __maybe_unused liodn)
+{
+	pr_debug("FMan[%d] bus error: port_id[%d]\n",
+		 fman->state->fm_id, port_id);
+}
+
+static inline void call_mac_isr(struct fman *fman, u8 id)
+{
+	if (fman->intr_mng[id].isr_cb)
+		fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+}
+
+static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
+{
+	u8 sw_port_id = 0;
+
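+	/* TX ports occupy the hardware port id range starting at
+	 * BASE_TX_PORTID and RX ports the range starting at BASE_RX_PORTID;
+	 * the returned sw id is the offset within the respective range.
+	 * The major argument is unused here.
+	 */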
+	if (hw_port_id >= BASE_TX_PORTID) {
+		sw_port_id = hw_port_id - BASE_TX_PORTID;
+	} else if (hw_port_id >= BASE_RX_PORTID) {
+		sw_port_id = hw_port_id - BASE_RX_PORTID;
+	} else {
+		sw_port_id = 0;
+		WARN_ON(true);
+	}
+
+	return sw_port_id;
+}
+
+static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
+				       u8 port_id)
+{
+	u32 tmp = 0;
+
+	tmp = (u32)(port_id << FPM_PORT_FM_CTL_PORTID_SHIFT);
+
+	tmp |= (FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1);
+
+	/* order restoration: odd ports use FMan ctrl 1, even ports ctrl 2 */
+	if (port_id % 2)
+		tmp |= (FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+	else
+		tmp |= (FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT);
+
+	iowrite32be(tmp, &fpm_rg->fmfp_prc);
+}
+
+static void set_port_liodn(struct fman_rg *fman_rg, u8 port_id,
+			   u32 liodn_base, u32 liodn_ofst)
+{
+	u32 tmp;
+
+	/* set LIODN base for this port; each fmdmplr register holds the bases
+	 * of two ports (even port in the MSBs, odd port in the LSBs)
+	 */
+	tmp = ioread32be(&fman_rg->dma_rg->fmdmplr[port_id / 2]);
+	if (port_id % 2) {
+		tmp &= ~DMA_LIODN_BASE_MASK;
+		tmp |= liodn_base;
+	} else {
+		tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
+		tmp |= liodn_base << DMA_LIODN_SHIFT;
+	}
+	iowrite32be(tmp, &fman_rg->dma_rg->fmdmplr[port_id / 2]);
+	iowrite32be(liodn_ofst, &fman_rg->bmi_rg->fmbm_spliodn[port_id - 1]);
+}
+
+static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+	u32 tmp;
+
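+	/* If FPM_RAM_RAMS_ECC_EN_SRC_SEL is set, the RAMs ECC enable comes
+	 * from an external (COP) source, so only the IRAM ECC enable bit is
+	 * set here; otherwise both the RAMs and IRAM enables are set.
+	 */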
+	tmp = ioread32be(&fpm_rg->fm_rcr);
+	if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+		iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+	else
+		iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
+			    FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+}
+
+static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+{
+	u32 tmp;
+
+	tmp = ioread32be(&fpm_rg->fm_rcr);
+	if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+		iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+	else
+		iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
+			    &fpm_rg->fm_rcr);
+}
+
+static void fman_defconfig(struct fman_cfg *cfg)
+{
+	memset(cfg, 0, sizeof(struct fman_cfg));
+
+	cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
+	cfg->dma_err = DEFAULT_DMA_ERR;
+	cfg->halt_on_external_activ = false;
+	cfg->halt_on_unrecov_ecc_err = false;
+	cfg->en_iram_test_mode = false;
+	cfg->en_muram_test_mode = false;
+	cfg->external_ecc_rams_enable = false;
+	cfg->dma_aid_override = false;
+	cfg->dma_aid_mode = DEFAULT_AID_MODE;
+	cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
+	cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
+	cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
+	cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
+	cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
+	cfg->dma_en_emergency = false;
+	cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
+	cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
+	cfg->dma_en_emergency_smoother = false;
+	cfg->dma_emergency_switch_counter =
+	    DEFAULT_DMA_EMERGENCY_SWITCH_COUNTER;
+	cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
+	cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
+	cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
+	cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
+	cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
+	cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
+	cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
+	cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
+	cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
+
+	cfg->pedantic_dma = false;
+	cfg->tnum_aging_period = 0;
+	cfg->dma_stop_on_bus_error = false;
+	cfg->qmi_deq_option_support = false;
+}
+
+static int dma_init(struct fman *fman)
+{
+	struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+	struct fman_cfg *cfg = fman->cfg;
+	u32 tmp_reg;
+
+	/* Init DMA Registers */
+
+	/* clear status reg events */
+	tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
+		   DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
+	iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
+
+	/* configure mode register */
+	tmp_reg = 0;
+	tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
+	if (cfg->dma_aid_override)
+		tmp_reg |= DMA_MODE_AID_OR;
+	if (cfg->exceptions & EX_DMA_BUS_ERROR)
+		tmp_reg |= DMA_MODE_BER;
+	if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
+	    (cfg->exceptions & EX_DMA_READ_ECC) |
+	    (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
+		tmp_reg |= DMA_MODE_ECC;
+	if (cfg->dma_stop_on_bus_error)
+		tmp_reg |= DMA_MODE_SBER;
+	if (cfg->dma_axi_dbg_num_of_beats)
+		tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
+			((cfg->dma_axi_dbg_num_of_beats - 1)
+			<< DMA_MODE_AXI_DBG_SHIFT));
+
+	if (cfg->dma_en_emergency) {
+		tmp_reg |= cfg->dma_emergency_bus_select;
+		tmp_reg |= cfg->dma_emergency_level << DMA_MODE_EMER_LVL_SHIFT;
+		if (cfg->dma_en_emergency_smoother)
+			iowrite32be(cfg->dma_emergency_switch_counter,
+				    &dma_rg->fmdmemsr);
+	}
+	tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
+		DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
+	tmp_reg |= DMA_MODE_SECURE_PROT;
+	tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
+	tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
+
+	if (cfg->pedantic_dma)
+		tmp_reg |= DMA_MODE_EMER_READ;
+
+	iowrite32be(tmp_reg, &dma_rg->fmdmmr);
+
+	/* configure thresholds register */
+	tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
+		DMA_THRESH_COMMQ_SHIFT);
+	tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
+		DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+	tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
+		DMA_THRESH_WRITE_INT_BUF_MASK;
+
+	iowrite32be(tmp_reg, &dma_rg->fmdmtr);
+
+	/* configure hysteresis register */
+	tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
+		DMA_THRESH_COMMQ_SHIFT);
+	tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
+		DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+	tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
+		DMA_THRESH_WRITE_INT_BUF_MASK;
+
+	iowrite32be(tmp_reg, &dma_rg->fmdmhy);
+
+	/* configure emergency threshold */
+	iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
+
+	/* configure Watchdog */
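+	/* dma_watchdog is presumably in microseconds and clk_freq in MHz
+	 * (mirroring the tnum_aging_period handling in qmi_init()), so the
+	 * product written below is a count of FM clock cycles.
+	 */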
+	iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
+
+	iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
+
+	/* Allocate MURAM for CAM */
+	fman->cam_size =
+		(u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
+	fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
+	if (IS_ERR_VALUE(fman->cam_offset)) {
+		pr_err("MURAM alloc for DMA CAM failed\n");
+		return -ENOMEM;
+	}
+
+	if (fman->state->rev_info.major == 2) {
+		u32 __iomem *cam_base_addr;
+
+		fman_muram_free_mem(fman->muram, fman->cam_offset,
+				    fman->cam_size);
+
+		fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
+		fman->cam_offset = fman_muram_alloc(fman->muram,
+						    fman->cam_size);
+		if (IS_ERR_VALUE(fman->cam_offset)) {
+			pr_err("MURAM alloc for DMA CAM failed\n");
+			return -ENOMEM;
+		}
+
+		if (fman->cfg->dma_cam_num_of_entries % 8 ||
+		    fman->cfg->dma_cam_num_of_entries > 32) {
+			pr_err("wrong dma_cam_num_of_entries\n");
+			return -EINVAL;
+		}
+
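+		/* The write below sets the top dma_cam_num_of_entries bits of
+		 * the first 32-bit word of the CAM area, presumably marking
+		 * those CAM entries as valid in the FMan v2 CAM layout.
+		 */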
+		cam_base_addr = (u32 __iomem *)
+			fman_muram_offset_to_vbase(fman->muram,
+						   fman->cam_offset);
+		out_be32(cam_base_addr,
+			 ~((1 << (32 - fman->cfg->dma_cam_num_of_entries)) -
+			 1));
+	}
+
+	fman->cfg->cam_base_addr = fman->cam_offset;
+
+	return 0;
+}
+
+static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
+{
+	u32 tmp_reg;
+	int i;
+
+	/* Init FPM Registers */
+
+	tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
+	iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
+
+	tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
+		   ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
+		   ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
+		   ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
+	iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
+
+	tmp_reg =
+		(((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
+		 ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
+		 ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
+		 ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
+	iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
+
+	/* define exceptions and error behavior */
+	tmp_reg = 0;
+	/* Clear events */
+	tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
+		    FPM_EV_MASK_SINGLE_ECC);
+	/* enable interrupts */
+	if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
+		tmp_reg |= FPM_EV_MASK_STALL_EN;
+	if (cfg->exceptions & EX_FPM_SINGLE_ECC)
+		tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
+	if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
+		tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
+	tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
+	tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
+	if (!cfg->halt_on_external_activ)
+		tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+	if (!cfg->halt_on_unrecov_ecc_err)
+		tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+	iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
+
+	/* clear all fmCtls event registers */
+	for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
+		iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
+
+	/* RAM ECC -  enable and clear events */
+	/* first we need to clear all parser memory,
+	 * as it is uninitialized and may cause ECC errors
+	 */
+	/* event bits */
+	tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
+	/* RAMs enable is not affected by the RCR bit,
+	 * but by a COP configuration
+	 */
+	if (cfg->external_ecc_rams_enable)
+		tmp_reg |= FPM_RAM_RAMS_ECC_EN_SRC_SEL;
+
+	/* enable test mode */
+	if (cfg->en_muram_test_mode)
+		tmp_reg |= FPM_RAM_MURAM_TEST_ECC;
+	if (cfg->en_iram_test_mode)
+		tmp_reg |= FPM_RAM_IRAM_TEST_ECC;
+	iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
+
+	tmp_reg = 0;
+	if (cfg->exceptions & EX_IRAM_ECC) {
+		tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
+		enable_rams_ecc(fpm_rg);
+	}
+	if (cfg->exceptions & EX_MURAM_ECC) {
+		tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
+		enable_rams_ecc(fpm_rg);
+	}
+	iowrite32be(tmp_reg, &fpm_rg->fm_rie);
+}
+
+static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
+		     struct fman_cfg *cfg)
+{
+	u32 tmp_reg;
+
+	/* Init BMI Registers */
+
+	/* define common resources */
+	tmp_reg = cfg->fifo_base_addr;
+	tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
+
+	tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
+		    BMI_CFG1_FIFO_SIZE_SHIFT);
+	iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
+
+	tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
+		   BMI_CFG2_TASKS_SHIFT;
+	/* num of DMA's will be dynamically updated when each port is set */
+	iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
+
+	/* define unmaskable exceptions, enable and clear events */
+	tmp_reg = 0;
+	iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
+		    BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
+		    BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
+		    BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
+
+	if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
+		tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+	if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
+		tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+	if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
+		tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+	if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
+		tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+	iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
+}
+
+static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
+		     struct fman_cfg *cfg)
+{
+	u32 tmp_reg;
+	u16 period_in_fm_clocks;
+	u8 remainder;
+
+	/* Init QMI Registers */
+
+	/* Clear error interrupt events */
+
+	iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
+		    &qmi_rg->fmqm_eie);
+	tmp_reg = 0;
+	if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
+		tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+	if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
+		tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+	/* enable events */
+	iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
+
+	if (cfg->tnum_aging_period) {
+		/* tnum_aging_period is in units of usec, clk_freq in Mhz */
+		period_in_fm_clocks = (u16)
+			(cfg->tnum_aging_period * cfg->clk_freq);
+		/* period_in_fm_clocks must be a 64 multiple */
+		remainder = (u8)(period_in_fm_clocks % 64);
+		if (remainder) {
+			tmp_reg = (u32)((period_in_fm_clocks / 64) + 1);
+		} else {
+			tmp_reg = (u32)(period_in_fm_clocks / 64);
+			if (!tmp_reg)
+				tmp_reg = 1;
+		}
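+		/* Illustrative (hypothetical) numbers: a 10 usec aging period
+		 * at a 600 MHz FM clock gives 6000 clocks, i.e. tmp = 94
+		 * units of 64 clocks after rounding up.
+		 */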
+		tmp_reg <<= QMI_TAPC_TAP;
+		iowrite32be(tmp_reg, &qmi_rg->fmqm_tapc);
+	}
+	tmp_reg = 0;
+	/* Clear interrupt events */
+	iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
+	if (cfg->exceptions & EX_QMI_SINGLE_ECC)
+		tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
+	/* enable events */
+	iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
+}
+
+static int enable(struct fman_rg *fman_rg, struct fman_cfg *cfg)
+{
+	u32 cfg_reg = 0;
+
+	/* Enable all modules */
+
+	/* clear&enable global counters	 - calculate reg and save for later,
+	 * because it's the same reg for QMI enable
+	 */
+	cfg_reg = QMI_CFG_EN_COUNTERS;
+	if (cfg->qmi_deq_option_support)
+		cfg_reg |= (u32)(((cfg->qmi_def_tnums_thresh) << 8) |
+				  cfg->qmi_def_tnums_thresh);
+
+	iowrite32be(BMI_INIT_START, &fman_rg->bmi_rg->fmbm_init);
+	iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
+		    &fman_rg->qmi_rg->fmqm_gc);
+
+	return 0;
+}
+
+static int set_exception(struct fman_rg *fman_rg,
+			 enum fman_exceptions exception, bool enable)
+{
+	u32 tmp;
+
+	switch (exception) {
+	case FMAN_EX_DMA_BUS_ERROR:
+		tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+		if (enable)
+			tmp |= DMA_MODE_BER;
+		else
+			tmp &= ~DMA_MODE_BER;
+		/* disable bus error */
+		iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+		break;
+	case FMAN_EX_DMA_READ_ECC:
+	case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+	case FMAN_EX_DMA_FM_WRITE_ECC:
+		tmp = ioread32be(&fman_rg->dma_rg->fmdmmr);
+		if (enable)
+			tmp |= DMA_MODE_ECC;
+		else
+			tmp &= ~DMA_MODE_ECC;
+		iowrite32be(tmp, &fman_rg->dma_rg->fmdmmr);
+		break;
+	case FMAN_EX_FPM_STALL_ON_TASKS:
+		tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+		if (enable)
+			tmp |= FPM_EV_MASK_STALL_EN;
+		else
+			tmp &= ~FPM_EV_MASK_STALL_EN;
+		iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+		break;
+	case FMAN_EX_FPM_SINGLE_ECC:
+		tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+		if (enable)
+			tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
+		else
+			tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
+		iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+		break;
+	case FMAN_EX_FPM_DOUBLE_ECC:
+		tmp = ioread32be(&fman_rg->fpm_rg->fmfp_ee);
+		if (enable)
+			tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
+		else
+			tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
+		iowrite32be(tmp, &fman_rg->fpm_rg->fmfp_ee);
+		break;
+	case FMAN_EX_QMI_SINGLE_ECC:
+		tmp = ioread32be(&fman_rg->qmi_rg->fmqm_ien);
+		if (enable)
+			tmp |= QMI_INTR_EN_SINGLE_ECC;
+		else
+			tmp &= ~QMI_INTR_EN_SINGLE_ECC;
+		iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_ien);
+		break;
+	case FMAN_EX_QMI_DOUBLE_ECC:
+		tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+		if (enable)
+			tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+		else
+			tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
+		iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+		break;
+	case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+		tmp = ioread32be(&fman_rg->qmi_rg->fmqm_eien);
+		if (enable)
+			tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+		else
+			tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+		iowrite32be(tmp, &fman_rg->qmi_rg->fmqm_eien);
+		break;
+	case FMAN_EX_BMI_LIST_RAM_ECC:
+		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+		if (enable)
+			tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+		else
+			tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
+		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+		break;
+	case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+		if (enable)
+			tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+		else
+			tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+		break;
+	case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+		if (enable)
+			tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+		else
+			tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+		break;
+	case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+		tmp = ioread32be(&fman_rg->bmi_rg->fmbm_ier);
+		if (enable)
+			tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+		else
+			tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+		iowrite32be(tmp, &fman_rg->bmi_rg->fmbm_ier);
+		break;
+	case FMAN_EX_IRAM_ECC:
+		tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+		if (enable) {
+			/* enable ECC if not enabled */
+			enable_rams_ecc(fman_rg->fpm_rg);
+			/* enable ECC interrupts */
+			tmp |= FPM_IRAM_ECC_ERR_EX_EN;
+		} else {
+			/* ECC mechanism may be disabled,
+			 * depending on driver status
+			 */
+			disable_rams_ecc(fman_rg->fpm_rg);
+			tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
+		}
+		iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+		break;
+	case FMAN_EX_MURAM_ECC:
+		tmp = ioread32be(&fman_rg->fpm_rg->fm_rie);
+		if (enable) {
+			/* enable ECC if not enabled */
+			enable_rams_ecc(fman_rg->fpm_rg);
+			/* enable ECC interrupts */
+			tmp |= FPM_MURAM_ECC_ERR_EX_EN;
+		} else {
+			/* ECC mechanism may be disabled,
+			 * depending on driver status
+			 */
+			disable_rams_ecc(fman_rg->fpm_rg);
+			tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
+		}
+		iowrite32be(tmp, &fman_rg->fpm_rg->fm_rie);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void resume(struct fman_fpm_regs __iomem *fpm_rg)
+{
+	u32 tmp;
+
+	tmp = ioread32be(&fpm_rg->fmfp_ee);
+	/* clear the event bits in tmp in order not to clear standing events */
+	tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
+		 FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
+	tmp |= FPM_EV_MASK_RELEASE_FM;
+
+	iowrite32be(tmp, &fpm_rg->fmfp_ee);
+}
+
+static int fill_soc_specific_params(struct fman_state_struct *state)
+{
+	u8 minor = state->rev_info.minor;
+	/* P4080 - Major 2
+	 * P2041/P3041/P5020/P5040 - Major 3
+	 * Tx/Bx - Major 6
+	 */
+	switch (state->rev_info.major) {
+	case 3:
+		state->bmi_max_fifo_size	= 160 * 1024;
+		state->fm_iram_size		= 64 * 1024;
+		state->dma_thresh_max_commq	= 31;
+		state->dma_thresh_max_buf	= 127;
+		state->qmi_max_num_of_tnums	= 64;
+		state->qmi_def_tnums_thresh	= 48;
+		state->bmi_max_num_of_tasks	= 128;
+		state->max_num_of_open_dmas	= 32;
+		state->fm_port_num_of_cg	= 256;
+		state->num_of_rx_ports	= 6;
+		state->total_fifo_size	= 122 * 1024;
+		break;
+
+	case 2:
+		state->bmi_max_fifo_size	= 160 * 1024;
+		state->fm_iram_size		= 64 * 1024;
+		state->dma_thresh_max_commq	= 31;
+		state->dma_thresh_max_buf	= 127;
+		state->qmi_max_num_of_tnums	= 64;
+		state->qmi_def_tnums_thresh	= 48;
+		state->bmi_max_num_of_tasks	= 128;
+		state->max_num_of_open_dmas	= 32;
+		state->fm_port_num_of_cg	= 256;
+		state->num_of_rx_ports	= 5;
+		state->total_fifo_size	= 100 * 1024;
+		break;
+
+	case 6:
+		state->dma_thresh_max_commq	= 83;
+		state->dma_thresh_max_buf	= 127;
+		state->qmi_max_num_of_tnums	= 64;
+		state->qmi_def_tnums_thresh	= 32;
+		state->fm_port_num_of_cg	= 256;
+
+		/* FManV3L */
+		if (minor == 1 || minor == 4) {
+			state->bmi_max_fifo_size	= 192 * 1024;
+			state->bmi_max_num_of_tasks	= 64;
+			state->max_num_of_open_dmas	= 32;
+			state->num_of_rx_ports		= 5;
+			if (minor == 1)
+				state->fm_iram_size	= 32 * 1024;
+			else
+				state->fm_iram_size	= 64 * 1024;
+			state->total_fifo_size		= 156 * 1024;
+		}
+		/* FManV3H */
+		else if (minor == 0 || minor == 2 || minor == 3) {
+			state->bmi_max_fifo_size	= 384 * 1024;
+			state->fm_iram_size		= 64 * 1024;
+			state->bmi_max_num_of_tasks	= 128;
+			state->max_num_of_open_dmas	= 84;
+			state->num_of_rx_ports		= 8;
+			state->total_fifo_size		= 295 * 1024;
+		} else {
+			pr_err("Unsupported FManv3 version\n");
+			return -EINVAL;
+		}
+
+		break;
+	default:
+		pr_err("Unsupported FMan version\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static bool is_init_done(struct fman_cfg *cfg)
+{
+	/* The driver parameters structure (cfg) is freed once initialization
+	 * completes (see fman_init()), so a NULL cfg means init is done.
+	 */
+	if (!cfg)
+		return true;
+
+	return false;
+}
+
+static void free_init_resources(struct fman *fman)
+{
+	if (fman->cam_offset)
+		fman_muram_free_mem(fman->muram, fman->cam_offset,
+				    fman->cam_size);
+	if (fman->fifo_offset)
+		fman_muram_free_mem(fman->muram, fman->fifo_offset,
+				    fman->fifo_size);
+}
+
+static void bmi_err_event(struct fman *fman)
+{
+	u32 event, mask, force;
+	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+
+	event = ioread32be(&bmi_rg->fmbm_ievr);
+	mask = ioread32be(&bmi_rg->fmbm_ier);
+	event &= mask;
+	/* clear the forced events */
+	force = ioread32be(&bmi_rg->fmbm_ifr);
+	if (force & event)
+		iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
+	/* clear the acknowledged events */
+	iowrite32be(event, &bmi_rg->fmbm_ievr);
+
+	if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
+		fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+	if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
+		fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+	if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
+		fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+	if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
+		fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+}
+
+static void qmi_err_event(struct fman *fman)
+{
+	u32 event, mask, force;
+	struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+
+	event = ioread32be(&qmi_rg->fmqm_eie);
+	mask = ioread32be(&qmi_rg->fmqm_eien);
+	event &= mask;
+
+	/* clear the forced events */
+	force = ioread32be(&qmi_rg->fmqm_eif);
+	if (force & event)
+		iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
+	/* clear the acknowledged events */
+	iowrite32be(event, &qmi_rg->fmqm_eie);
+
+	if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
+		fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+	if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
+		fman->exception_cb(fman, FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+}
+
+static void dma_err_event(struct fman *fman)
+{
+	u32 status, mask, com_id;
+	u8 tnum, port_id, relative_port_id;
+	u16 liodn;
+	struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+
+	status = ioread32be(&dma_rg->fmdmsr);
+	mask = ioread32be(&dma_rg->fmdmmr);
+
+	/* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
+	if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
+		status &= ~DMA_STATUS_BUS_ERR;
+
+	/* clear relevant bits if mask has no DMA_MODE_ECC */
+	if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
+		status &= ~(DMA_STATUS_FM_SPDAT_ECC |
+			    DMA_STATUS_READ_ECC |
+			    DMA_STATUS_SYSTEM_WRITE_ECC |
+			    DMA_STATUS_FM_WRITE_ECC);
+
+	/* clear set events */
+	iowrite32be(status, &dma_rg->fmdmsr);
+
+	if (status & DMA_STATUS_BUS_ERR) {
+		u64 addr;
+
+		addr = (u64)ioread32be(&dma_rg->fmdmtal);
+		addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
+
+		com_id = ioread32be(&dma_rg->fmdmtcid);
+		port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
+			       DMA_TRANSFER_PORTID_SHIFT));
+		relative_port_id =
+		hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+		tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
+			    DMA_TRANSFER_TNUM_SHIFT);
+		liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
+		fman->bus_error_cb(fman, relative_port_id, addr, tnum, liodn);
+	}
+	if (status & DMA_STATUS_FM_SPDAT_ECC)
+		fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+	if (status & DMA_STATUS_READ_ECC)
+		fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+	if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
+		fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+	if (status & DMA_STATUS_FM_WRITE_ECC)
+		fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+}
+
+static void fpm_err_event(struct fman *fman)
+{
+	u32 event;
+	struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+
+	event = ioread32be(&fpm_rg->fmfp_ee);
+	/* clear the all occurred events */
+	iowrite32be(event, &fpm_rg->fmfp_ee);
+
+	if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
+	    (event & FPM_EV_MASK_DOUBLE_ECC_EN))
+		fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+	if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
+		fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+	if ((event & FPM_EV_MASK_SINGLE_ECC) &&
+	    (event & FPM_EV_MASK_SINGLE_ECC_EN))
+		fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+}
+
+static void muram_err_intr(struct fman *fman)
+{
+	u32 event, mask;
+	struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+
+	event = ioread32be(&fpm_rg->fm_rcr);
+	mask = ioread32be(&fpm_rg->fm_rie);
+
+	/* clear MURAM event bit (do not clear IRAM event) */
+	iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
+
+	if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
+		fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+}
+
+static void qmi_event(struct fman *fman)
+{
+	u32 event, mask, force;
+	struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+
+	event = ioread32be(&qmi_rg->fmqm_ie);
+	mask = ioread32be(&qmi_rg->fmqm_ien);
+	event &= mask;
+	/* clear the forced events */
+	force = ioread32be(&qmi_rg->fmqm_if);
+	if (force & event)
+		iowrite32be(force & ~event, &qmi_rg->fmqm_if);
+	/* clear the acknowledged events */
+	iowrite32be(event, &qmi_rg->fmqm_ie);
+
+	if (event & QMI_INTR_EN_SINGLE_ECC)
+		fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+}
+
+static void enable_time_stamp(struct fman *fman)
+{
+	struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+	u16 fm_clk_freq = fman->state->fm_clk_freq;
+	u32 tmp, intgr, ts_freq;
+	u64 frac;
+
+	ts_freq = (u32)(1 << fman->state->count1_micro_bit);
+	/* configure timestamp so that bit 8 will count 1 microsecond
+	 * Find effective count rate at TIMESTAMP least significant bits:
+	 * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
+	 * Find frequency ratio between effective count rate and the clock:
+	 * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
+	 * 256/600 = 0.4266666...
+	 */
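+	/* For that 600 MHz example the fields written below come out as
+	 * intgr = 0 and frac = 27963 (0.4266666 * 2^16, rounded up).
+	 */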
+
+	intgr = ts_freq / fm_clk_freq;
+	/* we multiply by 2^16 to keep the fractional part of the division;
+	 * we do not divide back, since the value is written to the register
+	 * as a fraction (see spec)
+	 */
+
+	frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
+	/* we check remainder of the division in order to round up if not int */
+	if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
+		frac++;
+
+	tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
+	iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
+
+	/* enable timestamp with original clock */
+	iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
+	fman->state->enabled_time_stamp = true;
+}
+
+static int clear_iram(struct fman *fman)
+{
+	struct fman_iram_regs __iomem *iram;
+	int i;
+
+	iram = (struct fman_iram_regs __iomem *)(fman->base_addr + IMEM_OFFSET);
+
+	/* Enable the auto-increment */
+	out_be32(&iram->iadd, IRAM_IADD_AIE);
+	while (in_be32(&iram->iadd) != IRAM_IADD_AIE)
+		;
+
+	for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
+		out_be32(&iram->idata, 0xffffffff);
+
+	out_be32(&iram->iadd, fman->state->fm_iram_size - 4);
+	/* Memory barrier */
+	mb();
+	while (in_be32(&iram->idata) != 0xffffffff)
+		;
+
+	return 0;
+}
+
+static u32 get_exception_flag(enum fman_exceptions exception)
+{
+	u32 bit_mask;
+
+	switch (exception) {
+	case FMAN_EX_DMA_BUS_ERROR:
+		bit_mask = EX_DMA_BUS_ERROR;
+		break;
+	case FMAN_EX_DMA_SINGLE_PORT_ECC:
+		bit_mask = EX_DMA_SINGLE_PORT_ECC;
+		break;
+	case FMAN_EX_DMA_READ_ECC:
+		bit_mask = EX_DMA_READ_ECC;
+		break;
+	case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+		bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
+		break;
+	case FMAN_EX_DMA_FM_WRITE_ECC:
+		bit_mask = EX_DMA_FM_WRITE_ECC;
+		break;
+	case FMAN_EX_FPM_STALL_ON_TASKS:
+		bit_mask = EX_FPM_STALL_ON_TASKS;
+		break;
+	case FMAN_EX_FPM_SINGLE_ECC:
+		bit_mask = EX_FPM_SINGLE_ECC;
+		break;
+	case FMAN_EX_FPM_DOUBLE_ECC:
+		bit_mask = EX_FPM_DOUBLE_ECC;
+		break;
+	case FMAN_EX_QMI_SINGLE_ECC:
+		bit_mask = EX_QMI_SINGLE_ECC;
+		break;
+	case FMAN_EX_QMI_DOUBLE_ECC:
+		bit_mask = EX_QMI_DOUBLE_ECC;
+		break;
+	case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+		bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
+		break;
+	case FMAN_EX_BMI_LIST_RAM_ECC:
+		bit_mask = EX_BMI_LIST_RAM_ECC;
+		break;
+	case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+		bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
+		break;
+	case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+		bit_mask = EX_BMI_STATISTICS_RAM_ECC;
+		break;
+	case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+		bit_mask = EX_BMI_DISPATCH_RAM_ECC;
+		break;
+	case FMAN_EX_MURAM_ECC:
+		bit_mask = EX_MURAM_ECC;
+		break;
+	default:
+		bit_mask = 0;
+		break;
+	}
+
+	return bit_mask;
+}
+
+static int get_module_event(enum fman_event_modules module, u8 mod_id,
+			    enum fman_intr_type intr_type)
+{
+	int event;
+
+	switch (module) {
+	case FMAN_MOD_MAC:
+		event = (intr_type == FMAN_INTR_TYPE_ERR) ?
+			(FMAN_EV_ERR_MAC0 + mod_id) :
+			(FMAN_EV_MAC0 + mod_id);
+		break;
+	case FMAN_MOD_FMAN_CTRL:
+		if (intr_type == FMAN_INTR_TYPE_ERR)
+			event = FMAN_EV_CNT;
+		else
+			event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
+		break;
+	case FMAN_MOD_DUMMY_LAST:
+		event = FMAN_EV_CNT;
+		break;
+	default:
+		event = FMAN_EV_CNT;
+		break;
+	}
+
+	return event;
+}
+
+static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
+			    u32 *extra_size_of_fifo)
+{
+	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+	u32 fifo = *size_of_fifo;
+	u32 extra_fifo = *extra_size_of_fifo;
+	u32 tmp;
+
+	/* if this is the first time a port requires extra_fifo_pool_size,
+	 * the total extra_fifo_pool_size must be initialized to 1 buffer per
+	 * port
+	 */
+	if (extra_fifo && !fman->state->extra_fifo_pool_size)
+		fman->state->extra_fifo_pool_size =
+			fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
+
+	fman->state->extra_fifo_pool_size =
+		max(fman->state->extra_fifo_pool_size, extra_fifo);
+
+	/* check that there is enough uncommitted FIFO size */
+	if ((fman->state->accumulated_fifo_size + fifo) >
+	    (fman->state->total_fifo_size -
+	    fman->state->extra_fifo_pool_size)) {
+		pr_err("Requested fifo size and extra size exceed total FIFO size.\n");
+		return -EAGAIN;
+	}
+
+	/* Read, modify and write to HW */
+	tmp = (u32)((fifo / FMAN_BMI_FIFO_UNITS - 1) |
+		    ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+		    BMI_EXTRA_FIFO_SIZE_SHIFT));
+	iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
+
+	/* update accumulated */
+	fman->state->accumulated_fifo_size += fifo;
+
+	return 0;
+}
+
+static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
+			    u8 *num_of_extra_tasks)
+{
+	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+	u8 tasks = *num_of_tasks;
+	u8 extra_tasks = *num_of_extra_tasks;
+	u32 tmp;
+
+	if (extra_tasks)
+		fman->state->extra_tasks_pool_size =
+		(u8)max(fman->state->extra_tasks_pool_size, extra_tasks);
+
+	/* check that there are enough uncommitted tasks */
+	if ((fman->state->accumulated_num_of_tasks + tasks) >
+	    (fman->state->total_num_of_tasks -
+	     fman->state->extra_tasks_pool_size)) {
+		pr_err("Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+		       fman->state->fm_id);
+		return -EAGAIN;
+	}
+	/* update accumulated */
+	fman->state->accumulated_num_of_tasks += tasks;
+
+	/* Write to HW */
+	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+	    ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
+	tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
+		(u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
+	iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+	return 0;
+}
+
+static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
+				u8 *num_of_open_dmas,
+				u8 *num_of_extra_open_dmas)
+{
+	struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+	u8 open_dmas = *num_of_open_dmas;
+	u8 extra_open_dmas = *num_of_extra_open_dmas;
+	u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
+	u32 tmp;
+
+	if (!open_dmas) {
+		/* Configuration according to values in the HW.
+		 * read the current number of open Dma's
+		 */
+		tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+		current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
+					 BMI_EXTRA_NUM_OF_DMAS_SHIFT);
+
+		tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+		current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
+				   BMI_NUM_OF_DMAS_SHIFT) + 1);
+
+		/* This is the first configuration and user did not
+		 * specify value (!open_dmas), reset values will be used
+		 * and we just save these values for resource management
+		 */
+		fman->state->extra_open_dmas_pool_size =
+			(u8)max(fman->state->extra_open_dmas_pool_size,
+				current_extra_val);
+		fman->state->accumulated_num_of_open_dmas += current_val;
+		*num_of_open_dmas = current_val;
+		*num_of_extra_open_dmas = current_extra_val;
+		return 0;
+	}
+
+	if (extra_open_dmas > current_extra_val)
+		fman->state->extra_open_dmas_pool_size =
+		    (u8)max(fman->state->extra_open_dmas_pool_size,
+			    extra_open_dmas);
+
+	if ((fman->state->rev_info.major < 6) &&
+	    (fman->state->accumulated_num_of_open_dmas - current_val +
+	     open_dmas > fman->state->max_num_of_open_dmas)) {
+		pr_err("Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+		       fman->state->fm_id);
+		return -EAGAIN;
+	} else if ((fman->state->rev_info.major >= 6) &&
+		   !((fman->state->rev_info.major == 6) &&
+		   (fman->state->rev_info.minor == 0)) &&
+		   (fman->state->accumulated_num_of_open_dmas -
+		   current_val + open_dmas >
+		   fman->state->dma_thresh_max_commq + 1)) {
+		pr_err("Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+		       fman->state->fm_id,
+		       fman->state->dma_thresh_max_commq + 1);
+		return -EAGAIN;
+	}
+
+	WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
+	/* update accumulated */
+	fman->state->accumulated_num_of_open_dmas -= current_val;
+	fman->state->accumulated_num_of_open_dmas += open_dmas;
+
+	if (fman->state->rev_info.major < 6)
+		total_num_dmas =
+		    (u8)(fman->state->accumulated_num_of_open_dmas +
+		    fman->state->extra_open_dmas_pool_size);
+
+	/* calculate reg */
+	tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+	    ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
+	tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
+			   (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
+	iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+
+	/* update total num of DMA's with committed number of open DMAS,
+	 * and max uncommitted pool.
+	 */
+	if (total_num_dmas) {
+		tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
+		tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
+		iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
+	}
+
+	return 0;
+}
+
+static int fman_config(struct fman *fman)
+{
+	void __iomem *base_addr;
+	int err;
+
+	base_addr = fman->dts_params.base_addr;
+
+	fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
+	if (!fman->state)
+		goto err_fm_state;
+
+	/* Allocate the FM driver's parameters structure */
+	fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
+	if (!fman->cfg)
+		goto err_fm_drv;
+
+	/* Initialize MURAM block */
+	fman->muram = fman_muram_init(fman->dts_params.muram_phy_base_addr,
+				      fman->dts_params.muram_size);
+	if (!fman->muram)
+		goto err_fm_soc_specific;
+
+	/* Initialize FM parameters which will be kept by the driver */
+	fman->state->fm_id = fman->dts_params.id;
+	fman->state->fm_clk_freq = fman->dts_params.clk_freq;
+	fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
+	fman->state->num_of_qman_channels =
+		fman->dts_params.num_of_qman_channels;
+#ifndef __rtems__
+	fman->state->res = fman->dts_params.res;
+#endif /* __rtems__ */
+	fman->exception_cb = fman_exceptions;
+	fman->bus_error_cb = fman_bus_error;
+	fman->fpm_regs =
+		(struct fman_fpm_regs __iomem *)(base_addr + FPM_OFFSET);
+	fman->bmi_regs =
+		(struct fman_bmi_regs __iomem *)(base_addr + BMI_OFFSET);
+	fman->qmi_regs =
+		(struct fman_qmi_regs __iomem *)(base_addr + QMI_OFFSET);
+	fman->dma_regs =
+		(struct fman_dma_regs __iomem *)(base_addr + DMA_OFFSET);
+	fman->base_addr = base_addr;
+
+	spin_lock_init(&fman->spinlock);
+	fman_defconfig(fman->cfg);
+
+	fman->cfg->qmi_deq_option_support = true;
+
+	fman->state->extra_fifo_pool_size = 0;
+	fman->state->exceptions = DFLT_EXCEPTIONS;
+	fman->reset_on_init = true;
+
+	/* Read FMan revision for future use */
+	fman_get_revision(fman, &fman->state->rev_info);
+
+	err = fill_soc_specific_params(fman->state);
+	if (err)
+		goto err_fm_soc_specific;
+
+	/* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
+	if (fman->state->rev_info.major >= 6)
+		fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
+
+	fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
+
+	fman->state->total_num_of_tasks =
+	(u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
+				    fman->state->rev_info.minor,
+				    fman->state->bmi_max_num_of_tasks);
+
+	if (fman->state->rev_info.major < 6) {
+		fman->cfg->dma_comm_qtsh_clr_emer =
+		(u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
+					fman->state->dma_thresh_max_commq);
+
+		fman->cfg->dma_comm_qtsh_asrt_emer =
+		(u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
+					 fman->state->dma_thresh_max_commq);
+
+		fman->cfg->dma_cam_num_of_entries =
+		DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
+
+		fman->cfg->dma_read_buf_tsh_clr_emer =
+		DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+		fman->cfg->dma_read_buf_tsh_asrt_emer =
+		DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+		fman->cfg->dma_write_buf_tsh_clr_emer =
+		DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+
+		fman->cfg->dma_write_buf_tsh_asrt_emer =
+		DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+
+		fman->cfg->dma_axi_dbg_num_of_beats =
+		DFLT_AXI_DBG_NUM_OF_BEATS;
+	}
+
+	return 0;
+
+err_fm_soc_specific:
+	kfree(fman->cfg);
+err_fm_drv:
+	kfree(fman->state);
+err_fm_state:
+	kfree(fman);
+	return -EINVAL;
+}
+
+static int fman_init(struct fman *fman)
+{
+	struct fman_cfg *cfg = NULL;
+	struct fman_rg fman_rg;
+	int err = 0, i;
+
+	if (is_init_done(fman->cfg))
+		return -EINVAL;
+
+	fman_rg.bmi_rg = fman->bmi_regs;
+	fman_rg.qmi_rg = fman->qmi_regs;
+	fman_rg.fpm_rg = fman->fpm_regs;
+	fman_rg.dma_rg = fman->dma_regs;
+
+	fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
+
+	cfg = fman->cfg;
+
+	/* clear revision-dependent non-existent exceptions */
+	if (fman->state->rev_info.major < 6)
+		fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
+
+	if (fman->state->rev_info.major >= 6)
+		fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
+
+	/* clear CPG */
+	memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
+		  fman->state->fm_port_num_of_cg);
+
+	/* Save LIODN info before FMan reset
+	 * Skipping non-existent port 0 (i = 1)
+	 */
+	for (i = 1; i < FMAN_LIODN_TBL; i++) {
+		u32 liodn_base;
+
+		fman->liodn_offset[i] =
+			ioread32be(&fman_rg.bmi_rg->fmbm_spliodn[i - 1]);
+		liodn_base = ioread32be(&fman_rg.dma_rg->fmdmplr[i / 2]);
+		if (i % 2) {
+			/* FMDM_PLR LSB holds LIODN base for odd ports */
+			liodn_base &= DMA_LIODN_BASE_MASK;
+		} else {
+			/* FMDM_PLR MSB holds LIODN base for even ports */
+			liodn_base >>= DMA_LIODN_SHIFT;
+			liodn_base &= DMA_LIODN_BASE_MASK;
+		}
+		fman->liodn_base[i] = liodn_base;
+	}
+
+	/* Reset the FM if required. */
+	if (fman->reset_on_init) {
+		if (fman->state->rev_info.major >= 6) {
+			/* Errata A007273 */
+			pr_debug("FManV3 reset is not supported!\n");
+		} else {
+			out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
+			/* Memory barrier */
+			mb();
+			usleep_range(100, 300);
+		}
+
+		if (!!(ioread32be(&fman_rg.qmi_rg->fmqm_gs) &
+		    QMI_GS_HALT_NOT_BUSY)) {
+			resume(fman->fpm_regs);
+			usleep_range(100, 300);
+		}
+	}
+
+	if (clear_iram(fman) != 0)
+		return -EINVAL;
+
+	cfg->exceptions = fman->state->exceptions;
+
+	/* Init DMA Registers */
+
+	err = dma_init(fman);
+	if (err != 0) {
+		free_init_resources(fman);
+		return err;
+	}
+
+	/* Init FPM Registers */
+	fpm_init(fman->fpm_regs, fman->cfg);
+
+	/* define common resources */
+	/* allocate MURAM for FIFO according to total size */
+	fman->fifo_offset = fman_muram_alloc(fman->muram,
+					     fman->state->total_fifo_size);
+	if (IS_ERR_VALUE(fman->fifo_offset)) {
+		free_init_resources(fman);
+		pr_err("MURAM alloc for BMI FIFO failed\n");
+		return -ENOMEM;
+	}
+
+	cfg->fifo_base_addr = fman->fifo_offset;
+	cfg->total_fifo_size = fman->state->total_fifo_size;
+	cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
+	cfg->clk_freq = fman->state->fm_clk_freq;
+
+	/* Init BMI Registers */
+	bmi_init(fman->bmi_regs, fman->cfg);
+
+	/* Init QMI Registers */
+	qmi_init(fman->qmi_regs, fman->cfg);
+
+	err = enable(&fman_rg, cfg);
+	if (err != 0)
+		return err;
+
+	enable_time_stamp(fman);
+
+	kfree(fman->cfg);
+	fman->cfg = NULL;
+
+	return 0;
+}
+
+static int fman_set_exception(struct fman *fman,
+			      enum fman_exceptions exception, bool enable)
+{
+	u32 bit_mask = 0;
+	struct fman_rg fman_rg;
+
+	if (!is_init_done(fman->cfg))
+		return -EINVAL;
+
+	fman_rg.bmi_rg = fman->bmi_regs;
+	fman_rg.qmi_rg = fman->qmi_regs;
+	fman_rg.fpm_rg = fman->fpm_regs;
+	fman_rg.dma_rg = fman->dma_regs;
+
+	bit_mask = get_exception_flag(exception);
+	if (bit_mask) {
+		if (enable)
+			fman->state->exceptions |= bit_mask;
+		else
+			fman->state->exceptions &= ~bit_mask;
+	} else {
+		pr_err("Undefined exception\n");
+		return -EINVAL;
+	}
+
+	return set_exception(&fman_rg, exception, enable);
+}
+
+void fman_register_intr(struct fman *fman, enum fman_event_modules module,
+			u8 mod_id, enum fman_intr_type intr_type,
+			void (*isr_cb)(void *src_arg), void *src_arg)
+{
+	int event = 0;
+
+	event = get_module_event(module, mod_id, intr_type);
+	WARN_ON(event >= FMAN_EV_CNT);
+
+	/* register in local FM structure */
+	fman->intr_mng[event].isr_cb = isr_cb;
+	fman->intr_mng[event].src_handle = src_arg;
+}
+
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
+			  u8 mod_id, enum fman_intr_type intr_type)
+{
+	int event = 0;
+
+	event = get_module_event(module, mod_id, intr_type);
+	WARN_ON(event >= FMAN_EV_CNT);
+
+	fman->intr_mng[event].isr_cb = NULL;
+	fman->intr_mng[event].src_handle = NULL;
+}
+
+int fman_set_port_params(struct fman *fman,
+			 struct fman_port_init_params *port_params)
+{
+	int err;
+	unsigned long int_flags;
+	u8 port_id = port_params->port_id, mac_id;
+	struct fman_rg fman_rg;
+
+	fman_rg.bmi_rg = fman->bmi_regs;
+	fman_rg.qmi_rg = fman->qmi_regs;
+	fman_rg.fpm_rg = fman->fpm_regs;
+	fman_rg.dma_rg = fman->dma_regs;
+
+	spin_lock_irqsave(&fman->spinlock, int_flags);
+
+	err = set_num_of_tasks(fman, port_params->port_id,
+			       &port_params->num_of_tasks,
+			       &port_params->num_of_extra_tasks);
+	if (err) {
+		spin_unlock_irqrestore(&fman->spinlock, int_flags);
+		return err;
+	}
+
+	/* TX Ports */
+	if (port_params->port_type != FMAN_PORT_TYPE_RX) {
+		u32 enq_th, deq_th, reg;
+
+		/* update qmi ENQ/DEQ threshold */
+		fman->state->accumulated_num_of_deq_tnums +=
+			port_params->deq_pipeline_depth;
+		enq_th = (ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+			  QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
+		/* if enq_th is too big, we reduce it to the max value
+		 * that is still 0
+		 */
+		if (enq_th >= (fman->state->qmi_max_num_of_tnums -
+		    fman->state->accumulated_num_of_deq_tnums)) {
+			enq_th =
+			fman->state->qmi_max_num_of_tnums -
+			fman->state->accumulated_num_of_deq_tnums - 1;
+
+			reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+			reg &= ~QMI_CFG_ENQ_MASK;
+			reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
+			iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+		}
+
+		deq_th = ioread32be(&fman_rg.qmi_rg->fmqm_gc) &
+				    QMI_CFG_DEQ_MASK;
+		/* if deq_th is too small, we enlarge it to the min
+		 * value that is still 0.
+		 * deq_th may not be larger than 63
+		 * (fman->state->qmi_max_num_of_tnums-1).
+		 */
+		if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
+		    (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
+			deq_th =
+				fman->state->accumulated_num_of_deq_tnums + 1;
+			reg = ioread32be(&fman_rg.qmi_rg->fmqm_gc);
+			reg &= ~QMI_CFG_DEQ_MASK;
+			reg |= deq_th;
+			iowrite32be(reg, &fman_rg.qmi_rg->fmqm_gc);
+		}
+	}
+
+	err = set_size_of_fifo(fman, port_params->port_id,
+			       &port_params->size_of_fifo,
+			       &port_params->extra_size_of_fifo);
+	if (err) {
+		spin_unlock_irqrestore(&fman->spinlock, int_flags);
+		return err;
+	}
+
+	err = set_num_of_open_dmas(fman, port_params->port_id,
+				   &port_params->num_of_open_dmas,
+				   &port_params->num_of_extra_open_dmas);
+	if (err) {
+		spin_unlock_irqrestore(&fman->spinlock, int_flags);
+		return err;
+	}
+
+	set_port_liodn(&fman_rg, port_id, fman->liodn_base[port_id],
+		       fman->liodn_offset[port_id]);
+
+	if (fman->state->rev_info.major < 6)
+		set_port_order_restoration(fman_rg.fpm_rg, port_id);
+
+	mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+
+	if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
+		fman->state->port_mfl[mac_id] = port_params->max_frame_length;
+	} else {
+		pr_warn("Port max_frame_length is smaller than MAC current MTU\n");
+		spin_unlock_irqrestore(&fman->spinlock, int_flags);
+		return -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&fman->spinlock, int_flags);
+
+	return 0;
+}
+
+int fman_reset_mac(struct fman *fman, u8 mac_id)
+{
+	struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+	u32 msk, timeout = 100;
+
+	if (fman->state->rev_info.major >= 6) {
+		pr_warn("FMan MAC reset no available for FMan V3!\n");
+		return -EINVAL;
+	}
+
+	/* Get the relevant bit mask */
+	switch (mac_id) {
+	case 0:
+		msk = FPM_RSTC_MAC0_RESET;
+		break;
+	case 1:
+		msk = FPM_RSTC_MAC1_RESET;
+		break;
+	case 2:
+		msk = FPM_RSTC_MAC2_RESET;
+		break;
+	case 3:
+		msk = FPM_RSTC_MAC3_RESET;
+		break;
+	case 4:
+		msk = FPM_RSTC_MAC4_RESET;
+		break;
+	case 5:
+		msk = FPM_RSTC_MAC5_RESET;
+		break;
+	case 6:
+		msk = FPM_RSTC_MAC6_RESET;
+		break;
+	case 7:
+		msk = FPM_RSTC_MAC7_RESET;
+		break;
+	case 8:
+		msk = FPM_RSTC_MAC8_RESET;
+		break;
+	case 9:
+		msk = FPM_RSTC_MAC9_RESET;
+		break;
+	default:
+		pr_warn("Illegal MAC Id\n");
+		return -EINVAL;
+	}
+
+	/* reset */
+	iowrite32be(msk, &fpm_rg->fm_rstc);
+	while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
+		udelay(10);
+
+	if (!timeout)
+		return -EIO;
+
+	return 0;
+}
+
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
+{
+	/* if port is already initialized, check that MaxFrameLength is smaller
+	 * or equal to the port's max
+	 */
+	if ((!fman->state->port_mfl[mac_id]) ||
+	    (fman->state->port_mfl[mac_id] &&
+	    (mfl <= fman->state->port_mfl[mac_id]))) {
+		fman->state->mac_mfl[mac_id] = mfl;
+	} else {
+		pr_warn("MAC max_frame_length is larger than Port max_frame_length\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+u16 fman_get_clock_freq(struct fman *fman)
+{
+	return fman->state->fm_clk_freq;
+}
+
+u32 fman_get_bmi_max_fifo_size(struct fman *fman)
+{
+	return fman->state->bmi_max_fifo_size;
+}
+
+static void fman_event_isr(struct fman *fman)
+{
+	u32 pending;
+	struct fman_fpm_regs __iomem *fpm_rg;
+
+	if (!is_init_done(fman->cfg))
+		return;
+
+	fpm_rg = fman->fpm_regs;
+
+	/* normal interrupts */
+	pending = ioread32be(&fpm_rg->fm_npi);
+	if (!pending)
+		return;
+
+	if (pending & INTR_EN_QMI)
+		qmi_event(fman);
+
+	/* MAC interrupts */
+	if (pending & INTR_EN_MAC0)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+	if (pending & INTR_EN_MAC1)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+	if (pending & INTR_EN_MAC2)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+	if (pending & INTR_EN_MAC3)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+	if (pending & INTR_EN_MAC4)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+	if (pending & INTR_EN_MAC5)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+	if (pending & INTR_EN_MAC6)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+	if (pending & INTR_EN_MAC7)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+	if (pending & INTR_EN_MAC8)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+	if (pending & INTR_EN_MAC9)
+		call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+}
+
+static int fman_error_isr(struct fman *fman)
+{
+	u32 pending;
+	struct fman_fpm_regs __iomem *fpm_rg;
+
+	if (!is_init_done(fman->cfg))
+		return -EINVAL;
+
+	fpm_rg = fman->fpm_regs;
+
+	/* error interrupts */
+	pending = ioread32be(&fpm_rg->fm_epi);
+	if (!pending)
+		return -EINVAL;
+
+	if (pending & ERR_INTR_EN_BMI)
+		bmi_err_event(fman);
+	if (pending & ERR_INTR_EN_QMI)
+		qmi_err_event(fman);
+	if (pending & ERR_INTR_EN_FPM)
+		fpm_err_event(fman);
+	if (pending & ERR_INTR_EN_DMA)
+		dma_err_event(fman);
+	if (pending & ERR_INTR_EN_MURAM)
+		muram_err_intr(fman);
+
+	/* MAC error interrupts */
+	if (pending & ERR_INTR_EN_MAC0)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+	if (pending & ERR_INTR_EN_MAC1)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+	if (pending & ERR_INTR_EN_MAC2)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+	if (pending & ERR_INTR_EN_MAC3)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+	if (pending & ERR_INTR_EN_MAC4)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+	if (pending & ERR_INTR_EN_MAC5)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+	if (pending & ERR_INTR_EN_MAC6)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+	if (pending & ERR_INTR_EN_MAC7)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+	if (pending & ERR_INTR_EN_MAC8)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+	if (pending & ERR_INTR_EN_MAC9)
+		call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+
+	return 0;
+}
+
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
+{
+	u32 tmp;
+
+	tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
+	rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
+				FPM_REV1_MAJOR_SHIFT);
+	rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
+}
+
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
+{
+	int i;
+
+	if (fman->state->rev_info.major >= 6) {
+		u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
+				  0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+		for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+			if (port_ids[i] == port_id)
+				break;
+		}
+	} else {
+		u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
+				  0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+		for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+			if (port_ids[i] == port_id)
+				break;
+		}
+	}
+
+	if (i == fman->state->num_of_qman_channels)
+		return 0;
+
+	return fman->state->qman_channel_base + i;
+}
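+
+/*
+ * Worked example (illustrative values, not taken from a particular board):
+ * on an FMan v3 device (rev_info.major >= 6) with qman_channel_base 0x800,
+ * hardware port_id 0x28 matches index 2 of the table above, so the function
+ * returns 0x800 + 2 = 0x802.  A port_id that matches no table entry falls
+ * through the loop and yields 0.
+ */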
+
+struct resource *fman_get_mem_region(struct fman *fman)
+{
+	return fman->state->res;
+}
+
+/* Bootargs defines */
+/* Extra headroom for RX buffers - Default, min and max */
+#define FSL_FM_RX_EXTRA_HEADROOM	64
+#define FSL_FM_RX_EXTRA_HEADROOM_MIN	16
+#define FSL_FM_RX_EXTRA_HEADROOM_MAX	384
+
+/* Maximum frame length */
+#define FSL_FM_MAX_FRAME_SIZE			1522
+#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE		9600
+#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE		64
+
+/* Extra headroom for Rx buffers.
+ * FMan is instructed to allocate, on the Rx path, this amount of
+ * space at the beginning of a data buffer, beside the DPA private
+ * data area and the IC fields.
+ * Does not impact Tx buffer layout.
+ * Configurable from bootargs. 64 by default, it is needed in
+ * particular forwarding scenarios that add extra headers to the
+ * forwarded frame.
+ */
+int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+module_param(fsl_fm_rx_extra_headroom, int, 0);
+MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
+
+/* Max frame size, across all interfaces.
+ * Configurable from bootargs, to avoid allocating oversized (socket)
+ * buffers when not using jumbo frames.
+ * Must be large enough to accommodate the network MTU, but small enough
+ * to avoid wasting skb memory.
+ *
+ * Could be overridden once, at boot-time, via the
+ * fm_set_max_frm() callback.
+ */
+#ifndef __rtems__
+int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+#else /* __rtems__ */
+int fsl_fm_max_frm = FSL_FM_MAX_POSSIBLE_FRAME_SIZE;
+#endif /* __rtems__ */
+module_param(fsl_fm_max_frm, int, 0);
+MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+
+u16 fman_get_max_frm(void)
+{
+	static bool fm_check_mfl;
+
+	if (!fm_check_mfl) {
+		if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
+		    fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
+			pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+				fsl_fm_max_frm,
+				FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
+				FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
+				FSL_FM_MAX_FRAME_SIZE);
+			fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+		}
+		fm_check_mfl = true;
+	}
+
+	return fsl_fm_max_frm;
+}
+EXPORT_SYMBOL(fman_get_max_frm);
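+
+/*
+ * Example of the clamping above (bootarg value chosen for illustration):
+ * booting with fsl_fm_max_frm=10000 exceeds FSL_FM_MAX_POSSIBLE_FRAME_SIZE
+ * (9600), so the first call warns and falls back to the 1522 byte default;
+ * subsequent calls return the already validated value without re-checking.
+ */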
+
+int fman_get_rx_extra_headroom(void)
+{
+	static bool fm_check_rx_extra_headroom;
+
+	if (!fm_check_rx_extra_headroom) {
+		if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
+		    fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
+			pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+				fsl_fm_rx_extra_headroom,
+				FSL_FM_RX_EXTRA_HEADROOM_MIN,
+				FSL_FM_RX_EXTRA_HEADROOM_MAX,
+				FSL_FM_RX_EXTRA_HEADROOM);
+			fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+		}
+
+		fm_check_rx_extra_headroom = true;
+		fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
+	}
+
+	return fsl_fm_rx_extra_headroom;
+}
+EXPORT_SYMBOL(fman_get_rx_extra_headroom);
+
+struct fman *fman_bind(struct device *fm_dev)
+{
+	return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
+}
+
+void fman_unbind(struct fman *fman)
+{
+	put_device(fman->dev);
+}
+
+struct device *fman_get_device(struct fman *fman)
+{
+	return fman->dev;
+}
+
+static irqreturn_t fman_irq(int irq, void *fman)
+{
+	fman_event_isr(fman);
+
+	return IRQ_HANDLED;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fman_muram_match[] = {
+	{
+	 .compatible = "fsl,fman-muram"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, fman_muram_match);
+#endif /* __rtems__ */
+
+static struct fman *read_dts_node(struct platform_device *of_dev)
+{
+	struct fman *fman;
+#ifndef __rtems__
+	struct device_node *fm_node, *muram_node;
+	struct resource *res;
+#else /* __rtems__ */
+	const char *fdt = bsp_fdt_get();
+	struct device_node *fm_node;
+#endif /* __rtems__ */
+	const u32 *u32_prop;
+	int lenp, err, irq;
+#ifndef __rtems__
+	struct clk *clk;
+	u32 clk_rate;
+#endif /* __rtems__ */
+	phys_addr_t phys_base_addr;
+#ifndef __rtems__
+	resource_size_t mem_size;
+#endif /* __rtems__ */
+
+	fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+	if (!fman)
+		return NULL;
+
+	fm_node = of_node_get(of_dev->dev.of_node);
+
+	u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
+	if (!u32_prop) {
+		pr_err("of_get_property(%s, cell-index) failed\n",
+		       fm_node->full_name);
+		goto fman_node_put;
+	}
+	if (WARN_ON(lenp != sizeof(u32)))
+		goto fman_node_put;
+
+	fman->dts_params.id = (u8)*u32_prop;
+
+#ifndef __rtems__
+	/* Get the FM interrupt */
+	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("Can't get FMan IRQ resource\n");
+		goto fman_node_put;
+	}
+	irq = res->start;
+
+	/* Get the FM error interrupt */
+	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
+	if (!res) {
+		pr_err("Can't get FMan Error IRQ resource\n");
+		goto fman_node_put;
+	}
+	fman->dts_params.err_irq = res->start;
+
+	/* Get the FM address */
+	res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("Can't get FMan memory resource\n");
+		goto fman_node_put;
+	}
+
+	phys_base_addr = res->start;
+	mem_size = res->end + 1 - res->start;
+#else /* __rtems__ */
+	irq = of_irq_to_resource(fm_node, 0, NULL);
+	fman->dts_params.err_irq = of_irq_to_resource(fm_node, 1, NULL);
+	phys_base_addr = of_dev->dev.base;
+	fman->dts_params.base_addr = (void *)(uintptr_t)phys_base_addr;
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+	clk = of_clk_get_by_name(fm_node, NULL);
+	if (IS_ERR(clk)) {
+		pr_err("Failed to get FM%d clock structure\n",
+		       fman->dts_params.id);
+		goto fman_node_put;
+	}
+
+	clk_rate = clk_get_rate(clk);
+	if (!clk_rate) {
+		pr_err("Failed to determine FM%d clock rate\n",
+		       fman->dts_params.id);
+		goto fman_node_put;
+	}
+	/* Rounding to MHz */
+	fman->dts_params.clk_freq = (u16)((clk_rate + 500000) / 1000000);
+#else /* __rtems__ */
+	/* FIXME */
+	fman->dts_params.clk_freq = 733;
+#endif /* __rtems__ */
+
+	u32_prop = (const u32 *)of_get_property(fm_node,
+						"fsl,qman-channel-range",
+						&lenp);
+	if (!u32_prop) {
+		pr_err("of_get_property(%s, fsl,qman-channel-range) failed\n",
+		       fm_node->full_name);
+		goto fman_node_put;
+	}
+	if (WARN_ON(lenp != sizeof(u32) * 2))
+		goto fman_node_put;
+	fman->dts_params.qman_channel_base = u32_prop[0];
+	fman->dts_params.num_of_qman_channels = u32_prop[1];
+
+	/* Get the MURAM base address and size */
+#ifndef __rtems__
+	/* FIXME */
+	muram_node = of_find_matching_node(fm_node, fman_muram_match);
+	if (!muram_node) {
+		pr_err("could not find MURAM node\n");
+		goto fman_node_put;
+	}
+
+	err = of_address_to_resource(muram_node, 0, res);
+	if (err) {
+		of_node_put(muram_node);
+		pr_err("of_address_to_resource() = %d\n", err);
+		goto fman_node_put;
+	}
+
+	fman->dts_params.muram_phy_base_addr = res->start;
+	fman->dts_params.muram_size = res->end + 1 - res->start;
+#else /* __rtems__ */
+	{
+		int node = fdt_node_offset_by_compatible(fdt,
+		    fm_node->offset, "fsl,fman-muram");
+		struct device_node muram_node = {
+			.offset = node
+		};
+		struct resource res;
+
+		err = of_address_to_resource(&muram_node, 0, &res);
+		if (err != 0) {
+			pr_err("could not find MURAM node\n");
+			goto fman_node_put;
+		}
+		fman->dts_params.muram_phy_base_addr = phys_base_addr +
+		    res.start;
+		fman->dts_params.muram_size = res.end - res.start;
+	}
+#endif /* __rtems__ */
+	{
+		/* In B4 rev 2.0 (and above) the MURAM size is 512KB.
+		 * Check the SVR and update MURAM size if required.
+		 */
+		u32 svr;
+
+		svr = mfspr(SPRN_SVR);
+
+		if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) >= 2))
+			fman->dts_params.muram_size = 0x80000;
+	}
+
+#ifndef __rtems__
+	of_node_put(muram_node);
+#endif /* __rtems__ */
+	of_node_put(fm_node);
+
+	err = devm_request_irq(&of_dev->dev, irq, fman_irq,
+			       IRQF_NO_SUSPEND, "fman", fman);
+	if (err < 0) {
+		pr_err("Error: allocating irq %d (error = %d)\n", irq, err);
+		goto fman_free;
+	}
+
+#ifndef __rtems__
+	fman->dts_params.res =
+		devm_request_mem_region(&of_dev->dev, phys_base_addr,
+					mem_size, "fman");
+	if (!fman->dts_params.res) {
+		pr_err("request_mem_region() failed\n");
+		goto fman_free;
+	}
+
+	fman->dts_params.base_addr =
+		devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
+	if (fman->dts_params.base_addr == 0) {
+		pr_err("devm_ioremap() failed\n");
+		goto fman_free;
+	}
+#endif /* __rtems__ */
+
+	return fman;
+
+fman_node_put:
+	of_node_put(fm_node);
+fman_free:
+	kfree(fman);
+	return NULL;
+}
+
+static irqreturn_t fman_err_irq(int irq, void *fman)
+{
+	if (fman_error_isr(fman) == 0)
+		return IRQ_HANDLED;
+
+	return IRQ_NONE;
+}
+
+static int fman_probe(struct platform_device *of_dev)
+{
+	struct fman *fman;
+	struct device *dev;
+	int err;
+
+	dev = &of_dev->dev;
+
+	fman = read_dts_node(of_dev);
+	if (!fman)
+		return -EIO;
+
+	if (fman->dts_params.err_irq != 0) {
+		err = devm_request_irq(dev, fman->dts_params.err_irq,
+				       fman_err_irq,
+				       IRQF_SHARED | IRQF_NO_SUSPEND,
+				       "fman-err", fman);
+		if (err < 0) {
+			pr_err("Error: allocating irq %d (error = %d)\n",
+			       fman->dts_params.err_irq, err);
+			return -EINVAL;
+		}
+	}
+
+	err = fman_config(fman);
+	if (err) {
+		pr_err("FMan config failed\n");
+		return -EINVAL;
+	}
+
+	if (fman_init(fman) != 0) {
+		pr_err("FMan init failed\n");
+		return -EINVAL;
+	}
+
+	if (fman->dts_params.err_irq == 0) {
+		fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
+		fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
+		fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
+		fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
+		fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
+		fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
+		fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
+		fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
+		fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
+		fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
+		fman_set_exception(fman,
+				   FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
+		fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
+		fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
+				   false);
+		fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
+		fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
+	}
+
+	dev_set_drvdata(dev, fman);
+
+	fman->dev = dev;
+
+	pr_debug("FM%d probed\n", fman->dts_params.id);
+
+	return 0;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fman_match[] = {
+	{
+	 .compatible = "fsl,fman"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, fman_match);
+
+static struct platform_driver fman_driver = {
+	.driver = {
+		   .name = "fsl-fman",
+		   .of_match_table = fman_match,
+		   },
+	.probe = fman_probe,
+};
+
+builtin_platform_driver(fman_driver);
+#else /* __rtems__ */
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+void
+fman_reset(struct fman *fman)
+{
+
+	/*
+	 * Ignore errata A007273, since we do not disable the Ethernet MAC
+	 * clocks.
+	 */
+
+	out_be32(&fman->fpm_regs->fm_rstc, FPM_RSTC_FM_RESET);
+	/* Memory barrier */
+	mb();
+	usleep_range(100, 300);
+
+	if (!!(ioread32be(&fman->qmi_regs->fmqm_gs) &
+	    QMI_GS_HALT_NOT_BUSY)) {
+		usleep_range(100, 300);
+	}
+}
+
+struct fman_softc {
+	struct platform_device of_dev;
+	struct device_node dn;
+};
+
+static int
+fman_dev_probe_fdt(struct fman_softc *sc, int unit)
+{
+	const char *fdt = bsp_fdt_get();
+	const char *name = "fsl,fman";
+	int node = 0;
+
+	while (1) {
+		node = fdt_node_offset_by_compatible(fdt, node, name);
+		if (node >= 0) {
+			int len;
+			const fdt32_t *p = fdt_getprop(fdt, node, "cell-index", &len);
+
+			if (p != NULL && len == sizeof(*p)) {
+				if (fdt32_to_cpu(*p) == (uint32_t)unit) {
+					sc->dn.offset = node;
+					sc->dn.full_name = name;
+					sc->of_dev.dev.of_node = &sc->dn;
+					sc->of_dev.dev.base = (uintptr_t)&qoriq.fman[unit];
+					return (BUS_PROBE_DEFAULT);
+				}
+			} else {
+				return (ENXIO);
+			}
+		} else {
+			return (ENXIO);
+		}
+	}
+}
+
+static int
+fman_dev_probe(device_t dev)
+{
+	struct fman_softc *sc = device_get_softc(dev);
+
+	device_set_desc(dev, "FMan");
+
+	return (fman_dev_probe_fdt(sc, device_get_unit(dev)));
+}
+
+static int
+fman_dev_attach(device_t dev)
+{
+	const char *fdt = bsp_fdt_get();
+	struct fman_softc *sc = device_get_softc(dev);
+	int node;
+	int err;
+
+	err = fman_probe(&sc->of_dev);
+	if (err != 0) {
+		return (ENXIO);
+	}
+
+	node = fdt_first_subnode(fdt, sc->dn.offset);
+	while (node >= 0) {
+		struct fman_ivars *ivars =
+		    kzalloc(sizeof(*ivars), GFP_KERNEL);
+		device_t child;
+
+		if (ivars == NULL) {
+			return (ENOMEM);
+		}
+
+		ivars->dn.offset = node;
+		ivars->of_dev.dev.of_node = &ivars->dn;
+		ivars->of_dev.dev.base = sc->of_dev.dev.base;
+		ivars->fman = dev_get_drvdata(&sc->of_dev.dev);
+
+		child = device_add_child(dev, NULL, -1);
+		if (child == NULL) {
+			kfree(ivars);
+			return (ENOMEM);
+		}
+
+		device_set_ivars(child, ivars);
+
+		err = device_probe_and_attach(child);
+		if (err != 0) {
+			kfree(ivars);
+		}
+
+		node = fdt_next_subnode(fdt, node);
+	}
+
+	return (0);
+}
+
+static int
+fman_dev_detach(device_t dev)
+{
+	struct fman_softc *sc = device_get_softc(dev);
+	int err;
+
+	err = bus_generic_detach(dev);
+	if (err == 0) {
+		fman_reset(dev_get_drvdata(&sc->of_dev.dev));
+	}
+
+	return (err);
+}
+
+static device_method_t fman_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, fman_dev_probe),
+	DEVMETHOD(device_attach, fman_dev_attach),
+	DEVMETHOD(device_detach, fman_dev_detach),
+	DEVMETHOD(device_suspend, bus_generic_suspend),
+	DEVMETHOD(device_resume, bus_generic_resume),
+	DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+	DEVMETHOD_END
+};
+
+driver_t fman_driver = {
+	.name = "fman",
+	.methods = fman_methods,
+	.size = sizeof(struct fman_softc),
+};
+
+static devclass_t fman_devclass;
+
+DRIVER_MODULE(fman, nexus, fman_driver, fman_devclass, 0, 0);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman.h b/linux/drivers/net/ethernet/freescale/fman/fman.h
new file mode 100644
index 0000000..291d263
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman.h
@@ -0,0 +1,500 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FM_H
+#define __FM_H
+
+#include <linux/io.h>
+
+/* FM Frame descriptor macros  */
+/* Frame queue Context Override */
+#define FM_FD_CMD_FCO                   0x80000000
+#define FM_FD_CMD_RPD                   0x40000000  /* Read Prepended Data */
+#define FM_FD_CMD_DTC                   0x10000000  /* Do L4 Checksum */
+
+/* TX-Port: Unsupported Format */
+#define FM_FD_ERR_UNSUPPORTED_FORMAT    0x04000000
+/* TX Port: Length Error */
+#define FM_FD_ERR_LENGTH                0x02000000
+#define FM_FD_ERR_DMA                   0x01000000  /* DMA Data error */
+
+/* IPR frame (not error) */
+#define FM_FD_IPR                       0x00000001
+/* IPR non-consistent-sp */
+#define FM_FD_ERR_IPR_NCSP              (0x00100000 | FM_FD_IPR)
+/* IPR error */
+#define FM_FD_ERR_IPR                   (0x00200000 | FM_FD_IPR)
+/* IPR timeout */
+#define FM_FD_ERR_IPR_TO                (0x00300000 | FM_FD_IPR)
+/* IPR error without the IPR frame-indication bit */
+#define FM_FD_ERR_IPRE                  (FM_FD_ERR_IPR & ~FM_FD_IPR)
+
+
+/* Rx FIFO overflow, FCS error, code error, running disparity error
+ * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
+ * PHY error control character detected.
+ */
+#define FM_FD_ERR_PHYSICAL              0x00080000
+/* Frame too long OR Frame size exceeds max_length_frame  */
+#define FM_FD_ERR_SIZE                  0x00040000
+/* classification discard */
+#define FM_FD_ERR_CLS_DISCARD           0x00020000
+/* Extract Out of Frame */
+#define FM_FD_ERR_EXTRACTION            0x00008000
+/* No Scheme Selected */
+#define FM_FD_ERR_NO_SCHEME             0x00004000
+/* Keysize Overflow */
+#define FM_FD_ERR_KEYSIZE_OVERFLOW      0x00002000
+/* Frame color is red */
+#define FM_FD_ERR_COLOR_RED             0x00000800
+/* Frame color is yellow */
+#define FM_FD_ERR_COLOR_YELLOW          0x00000400
+/* Parser Time out Exceed */
+#define FM_FD_ERR_PRS_TIMEOUT           0x00000080
+/* Invalid Soft Parser instruction */
+#define FM_FD_ERR_PRS_ILL_INSTRUCT      0x00000040
+/* Header error was identified during parsing */
+#define FM_FD_ERR_PRS_HDR_ERR           0x00000020
+/* Frame parsed beyond the first 256 bytes */
+#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED  0x00000008
+
+/* non Frame-Manager error */
+#define FM_FD_RX_STATUS_ERR_NON_FM      0x00400000
+
+/* FMan driver defines */
+#define FMAN_BMI_FIFO_UNITS		0x100
+#define OFFSET_UNITS			16
+
+/* BMan defines */
+#define BM_MAX_NUM_OF_POOLS		64 /* Buffers pools */
+#define FMAN_PORT_MAX_EXT_POOLS_NUM	8  /* External BM pools per Rx port */
+
+struct fman; /* FMan data */
+#ifdef __rtems__
+#include <linux/platform_device.h>
+
+struct fman_ivars {
+	struct platform_device of_dev;
+	struct device_node dn;
+	struct fman *fman;
+};
+#endif /* __rtems__ */
+
+/* Enum for defining port types */
+enum fman_port_type {
+	FMAN_PORT_TYPE_TX = 0,	/* TX Port */
+	FMAN_PORT_TYPE_RX,	/* RX Port */
+};
+
+struct fman_rev_info {
+	u8 major;			/* Major revision */
+	u8 minor;			/* Minor revision */
+};
+
+enum fman_exceptions {
+	FMAN_EX_DMA_BUS_ERROR = 0,	/* DMA bus error. */
+	FMAN_EX_DMA_READ_ECC,		/* Read Buffer ECC error */
+	FMAN_EX_DMA_SYSTEM_WRITE_ECC,	/* Write Buffer ECC err on sys side */
+	FMAN_EX_DMA_FM_WRITE_ECC,	/* Write Buffer ECC error on FM side */
+	FMAN_EX_DMA_SINGLE_PORT_ECC,	/* Single Port ECC error on FM side */
+	FMAN_EX_FPM_STALL_ON_TASKS,	/* Stall of tasks on FPM */
+	FMAN_EX_FPM_SINGLE_ECC,		/* Single ECC on FPM. */
+	FMAN_EX_FPM_DOUBLE_ECC,		/* Double ECC error on FPM ram access */
+	FMAN_EX_QMI_SINGLE_ECC,	/* Single ECC on QMI. */
+	FMAN_EX_QMI_DOUBLE_ECC,	/* Double bit ECC occurred on QMI */
+	FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
+	FMAN_EX_BMI_LIST_RAM_ECC,	/* Linked List RAM ECC error */
+	FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
+	FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
+	FMAN_EX_BMI_DISPATCH_RAM_ECC,	/* Dispatch RAM ECC Error Enable */
+	FMAN_EX_IRAM_ECC,		/* Double bit ECC occurred on IRAM */
+	FMAN_EX_MURAM_ECC		/* Double bit ECC occurred on MURAM */
+};
+
+/* Parse results memory layout */
+struct fman_prs_result {
+	u8 lpid;		/* Logical port id */
+	u8 shimr;		/* Shim header result  */
+	u16 l2r;		/* Layer 2 result */
+	u16 l3r;		/* Layer 3 result */
+	u8 l4r;		/* Layer 4 result */
+	u8 cplan;		/* Classification plan id */
+	u16 nxthdr;		/* Next Header  */
+	u16 cksum;		/* Running-sum */
+	/* Flags&fragment-offset field of the last IP-header */
+	u16 flags_frag_off;
+	/* Routing type field of a IPV6 routing extension header */
+	u8 route_type;
+	/* Routing Extension Header Present; last bit is IP valid */
+	u8 rhp_ip_valid;
+	u8 shim_off[2];		/* Shim offset */
+	u8 ip_pid_off;		/* IP PID (last IP-proto) offset */
+	u8 eth_off;		/* ETH offset */
+	u8 llc_snap_off;	/* LLC_SNAP offset */
+	u8 vlan_off[2];		/* VLAN offset */
+	u8 etype_off;		/* ETYPE offset */
+	u8 pppoe_off;		/* PPP offset */
+	u8 mpls_off[2];		/* MPLS offset */
+	u8 ip_off[2];		/* IP offset */
+	u8 gre_off;		/* GRE offset */
+	u8 l4_off;		/* Layer 4 offset */
+	u8 nxthdr_off;		/* Parser end point */
+} __attribute__((__packed__));
+
+/* A structure for defining buffer prefix area content. */
+struct fman_buffer_prefix_content {
+	/* Number of bytes to be left at the beginning of the external
+	 * buffer; Note that the private-area will start from the base
+	 * of the buffer address.
+	 */
+	u16 priv_data_size;
+	/* true to pass the parse result to/from the FM;
+	 * User may use FM_PORT_GetBufferPrsResult() in
+	 * order to get the parser-result from a buffer.
+	 */
+	bool pass_prs_result;
+	/* true to pass the timeStamp to/from the FM User */
+	bool pass_time_stamp;
+	/* true to pass the KG hash result to/from the FM;
+	 * User may use FM_PORT_GetBufferHashResult() in order to get the
+	 * hash-result from a buffer.
+	 */
+	bool pass_hash_result;
+	/* Add all other Internal-Context information: AD,
+	 * hash-result, key, etc.
+	 */
+	u16 data_align;
+};
+
+/* A structure of information about each of the external
+ * buffer pools used by a port or storage-profile.
+ */
+struct fman_ext_pool_params {
+	u8 id;		    /* External buffer pool id */
+	u16 size;		    /* External buffer pool buffer size */
+};
+
+/* A structure for informing the driver about the external
+ * buffer pools allocated in the BM and used by a port or a
+ * storage-profile.
+ */
+struct fman_ext_pools {
+	u8 num_of_pools_used; /* Number of pools used by this port */
+	struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+					/* Parameters for each pool */
+};
+
+/* A structure for defining BM pool depletion criteria */
+struct fman_buf_pool_depletion {
+	/* select mode in which pause frames will be sent after a
+	 * number of pools (all together!) are depleted
+	 */
+	bool pools_grp_mode_enable;
+	/* the number of depleted pools that will invoke pause
+	 * frames transmission.
+	 */
+	u8 num_of_pools;
+	/* For each pool, true if it should be considered for
+	 * depletion (Note - this pool must be used by this port!).
+	 */
+	bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
+	/* select mode in which pause frames will be sent
+	 * after a single-pool is depleted;
+	 */
+	bool single_pool_mode_enable;
+	/* For each pool, true if it should be considered
+	 * for depletion (Note - this pool must be used by this port!)
+	 */
+	bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
+};
+
+/** fman_exceptions_cb
+ * fman		- Pointer to FMan
+ * exception	- The exception.
+ *
+ * Exceptions user callback routine, will be called upon an exception
+ * passing the exception identification.
+ */
+typedef void (fman_exceptions_cb)(struct fman *fman,
+				  enum fman_exceptions exception);
+
+/** fman_bus_error_cb
+ * fman		- Pointer to FMan
+ * port_id	- Port id
+ * addr		- Address that caused the error
+ * tnum		- Owner of error
+ * liodn	- Logical IO device number
+ *
+ * Bus error user callback routine, will be called upon bus error,
+ * passing parameters describing the errors and the owner.
+ */
+typedef void (fman_bus_error_cb)(struct fman *fman, u8 port_id, u64 addr,
+				 u8 tnum, u16 liodn);
+
+/* Enum for inter-module interrupts registration */
+enum fman_event_modules {
+	FMAN_MOD_MAC = 0,		/* MAC event */
+	FMAN_MOD_FMAN_CTRL,	/* FMAN Controller */
+	FMAN_MOD_DUMMY_LAST
+};
+
+/* Enum for interrupts types */
+enum fman_intr_type {
+	FMAN_INTR_TYPE_ERR,
+	FMAN_INTR_TYPE_NORMAL
+};
+
+/* Enum for inter-module interrupts registration */
+enum fman_inter_module_event {
+	FMAN_EV_ERR_MAC0 = 0,	/* MAC 0 error event */
+	FMAN_EV_ERR_MAC1,		/* MAC 1 error event */
+	FMAN_EV_ERR_MAC2,		/* MAC 2 error event */
+	FMAN_EV_ERR_MAC3,		/* MAC 3 error event */
+	FMAN_EV_ERR_MAC4,		/* MAC 4 error event */
+	FMAN_EV_ERR_MAC5,		/* MAC 5 error event */
+	FMAN_EV_ERR_MAC6,		/* MAC 6 error event */
+	FMAN_EV_ERR_MAC7,		/* MAC 7 error event */
+	FMAN_EV_ERR_MAC8,		/* MAC 8 error event */
+	FMAN_EV_ERR_MAC9,		/* MAC 9 error event */
+	FMAN_EV_MAC0,		/* MAC 0 event (Magic packet detection) */
+	FMAN_EV_MAC1,		/* MAC 1 event (Magic packet detection) */
+	FMAN_EV_MAC2,		/* MAC 2 (Magic packet detection) */
+	FMAN_EV_MAC3,		/* MAC 3 (Magic packet detection) */
+	FMAN_EV_MAC4,		/* MAC 4 (Magic packet detection) */
+	FMAN_EV_MAC5,		/* MAC 5 (Magic packet detection) */
+	FMAN_EV_MAC6,		/* MAC 6 (Magic packet detection) */
+	FMAN_EV_MAC7,		/* MAC 7 (Magic packet detection) */
+	FMAN_EV_MAC8,		/* MAC 8 event (Magic packet detection) */
+	FMAN_EV_MAC9,		/* MAC 9 event (Magic packet detection) */
+	FMAN_EV_FMAN_CTRL_0,	/* Fman controller event 0 */
+	FMAN_EV_FMAN_CTRL_1,	/* Fman controller event 1 */
+	FMAN_EV_FMAN_CTRL_2,	/* Fman controller event 2 */
+	FMAN_EV_FMAN_CTRL_3,	/* Fman controller event 3 */
+	FMAN_EV_CNT
+};
+
+struct fman_intr_src {
+	void (*isr_cb)(void *src_arg);
+	void *src_handle;
+};
+
+/* Structure for port-FM communication during fman_port_init. */
+struct fman_port_init_params {
+	u8 port_id;			/* port Id */
+	enum fman_port_type port_type;	/* Port type */
+	u16 port_speed;			/* Port speed */
+	u16 liodn_offset;		/* Port's requested resource */
+	u8 num_of_tasks;		/* Port's requested resource */
+	u8 num_of_extra_tasks;		/* Port's requested resource */
+	u8 num_of_open_dmas;		/* Port's requested resource */
+	u8 num_of_extra_open_dmas;	/* Port's requested resource */
+	u32 size_of_fifo;		/* Port's requested resource */
+	u32 extra_size_of_fifo;		/* Port's requested resource */
+	u8 deq_pipeline_depth;		/* Port's requested resource */
+	u16 max_frame_length;		/* Port's max frame length. */
+	u16 liodn_base;
+	/* LIODN base for this port, to be used together with LIODN offset. */
+};
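+
+/*
+ * Minimal usage sketch (the values and the local variables "fman" and "err"
+ * are placeholders chosen only to illustrate the structure; real numbers
+ * come from the port configuration):
+ *
+ *	struct fman_port_init_params params = {
+ *		.port_id = 0x28,
+ *		.port_type = FMAN_PORT_TYPE_RX,
+ *		.port_speed = 1000,
+ *		.num_of_tasks = 4,
+ *		.num_of_open_dmas = 2,
+ *		.size_of_fifo = 4 * 1024,
+ *		.max_frame_length = fman_get_max_frm(),
+ *	};
+ *
+ *	err = fman_set_port_params(fman, &params);
+ */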
+
+struct fman;
+
+/**
+ * fman_get_revision
+ * @fman		- Pointer to the FMan module
+ * @rev_info		- A structure of revision information parameters.
+ *
+ * Returns the FM revision
+ *
+ * Allowed only following fman_init().
+ *
+ * Return: none.
+ */
+void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
+
+/**
+ * fman_register_intr
+ * @fman:	A Pointer to FMan device
+ * @mod:	Calling module
+ * @mod_id:	Module id (if more than 1 exists, '0' if not)
+ * @intr_type:	Interrupt type (error/normal) selection.
+ * @f_isr:	The interrupt service routine.
+ * @h_src_arg:	Argument to be passed to f_isr.
+ *
+ * Used to register an event handler to be processed by FMan
+ *
+ * Return: none.
+ */
+void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
+			u8 mod_id, enum fman_intr_type intr_type,
+			void (*f_isr)(void *h_src_arg), void *h_src_arg);
+
+/**
+ * fman_unregister_intr
+ * @fman:	A Pointer to FMan device
+ * @mod:	Calling module
+ * @mod_id:	Module id (if more than 1 exists, '0' if not)
+ * @intr_type:	Interrupt type (error/normal) selection.
+ *
+ * Used to unregister a previously registered event handler from FMan
+ *
+ * Return: none.
+ */
+void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
+			  u8 mod_id, enum fman_intr_type intr_type);
+
+/**
+ * fman_set_port_params
+ * @fman:		A Pointer to FMan device
+ * @port_params:	Port parameters
+ *
+ * Used by FMan Port to pass parameters to the FMan
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_port_params(struct fman *fman,
+			 struct fman_port_init_params *port_params);
+
+/**
+ * fman_reset_mac
+ * @fman:	A Pointer to FMan device
+ * @mac_id:	MAC id to be reset
+ *
+ * Reset a specific MAC
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_reset_mac(struct fman *fman, u8 mac_id);
+
+/**
+ * fman_get_clock_freq
+ * @fman:	A Pointer to FMan device
+ *
+ * Get FMan clock frequency
+ *
+ * Return: FMan clock frequency
+ */
+u16 fman_get_clock_freq(struct fman *fman);
+
+/**
+ * fman_get_bmi_max_fifo_size
+ * @fman:	A Pointer to FMan device
+ *
+ * Get FMan maximum FIFO size
+ *
+ * Return: FMan Maximum FIFO size
+ */
+u32 fman_get_bmi_max_fifo_size(struct fman *fman);
+
+/**
+ * fman_set_mac_max_frame
+ * @fman:	A Pointer to FMan device
+ * @mac_id:	MAC id
+ * @mfl:	Maximum frame length
+ *
+ * Set maximum frame length of specific MAC in FMan driver
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
+
+/**
+ * fman_get_qman_channel_id
+ * @fman:	A Pointer to FMan device
+ * @port_id:	Port id
+ *
+ * Get QMan channel ID associated to the Port id
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
+
+/**
+ * fman_get_mem_region
+ * @fman:	A Pointer to FMan device
+ *
+ * Get FMan memory region
+ *
+ * Return: A structure with FMan memory region information
+ */
+struct resource *fman_get_mem_region(struct fman *fman);
+
+/**
+ * fman_get_max_frm
+ *
+ * Return: Max frame length configured in the FM driver
+ */
+u16 fman_get_max_frm(void);
+
+/**
+ * fman_get_rx_extra_headroom
+ *
+ * Return: Extra headroom size configured in the FM driver
+ */
+int fman_get_rx_extra_headroom(void);
+
+/**
+ * fman_bind
+ * @dev:	FMan OF device pointer
+ *
+ * Bind to a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan device
+ */
+struct fman *fman_bind(struct device *dev);
+
+/**
+ * fman_unbind
+ * @fman:	Pointer to the FMan device
+ *
+ * Un-bind from a specific FMan device.
+ *
+ * Allowed only after the port was created.
+ */
+void fman_unbind(struct fman *fman);
+
+/**
+ * fman_get_device
+ * @fman:	A pointer to the FMan device.
+ *
+ * Get the FMan device pointer
+ *
+ * Return: Pointer to FMan device.
+ */
+struct device *fman_get_device(struct fman *fman);
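+
+/*
+ * Typical call sequence sketch for a MAC driver ("of_dev" and "mac_id" are
+ * placeholders for the FMan OF device and the MAC index, not defined here):
+ *
+ *	struct fman *fman = fman_bind(&of_dev->dev);
+ *	struct fman_rev_info rev;
+ *
+ *	fman_get_revision(fman, &rev);
+ *	fman_set_mac_max_frame(fman, mac_id, fman_get_max_frm());
+ *	...
+ *	fman_unbind(fman);
+ */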
+#ifdef __rtems__
+void fman_reset(struct fman *fman);
+#endif /* __rtems__ */
+
+#endif /* __FM_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
new file mode 100644
index 0000000..5be951b
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -0,0 +1,1786 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "crc_mac_addr_ext.h"
+
+#include "fman_dtsec.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+/* MII	Management Command Register */
+#define MIIMCOM_READ_CYCLE		0x00000001
+
+/* MII	Management Address Register */
+#define MIIMADD_PHY_ADDR_SHIFT		8
+
+/* MII Management Indicator Register */
+#define MIIMIND_BUSY			0x00000001
+
+/* PHY Control Register */
+#define PHY_CR_PHY_RESET	0x8000
+#define PHY_CR_SPEED0		0x2000
+#define PHY_CR_ANE		0x1000
+#define PHY_CR_RESET_AN		0x0200
+#define PHY_CR_FULLDUPLEX	0x0100
+#define PHY_CR_SPEED1		0x0040
+
+#define PHY_TBICON_SRESET	0x8000
+#define PHY_TBICON_CLK_SEL	0x0020
+#define PHY_TBIANA_SGMII	0x4001
+#define PHY_TBIANA_1000X	0x01a0
+
+#define DTSEC_TO_MII_OFFSET	0x1000
+
+/* Interrupt Mask Register (IMASK) */
+#define DTSEC_IMASK_BREN	0x80000000
+#define DTSEC_IMASK_RXCEN	0x40000000
+#define DTSEC_IMASK_MSROEN	0x04000000
+#define DTSEC_IMASK_GTSCEN	0x02000000
+#define DTSEC_IMASK_BTEN	0x01000000
+#define DTSEC_IMASK_TXCEN	0x00800000
+#define DTSEC_IMASK_TXEEN	0x00400000
+#define DTSEC_IMASK_LCEN	0x00040000
+#define DTSEC_IMASK_CRLEN	0x00020000
+#define DTSEC_IMASK_XFUNEN	0x00010000
+#define DTSEC_IMASK_ABRTEN	0x00008000
+#define DTSEC_IMASK_IFERREN	0x00004000
+#define DTSEC_IMASK_MAGEN	0x00000800
+#define DTSEC_IMASK_MMRDEN	0x00000400
+#define DTSEC_IMASK_MMWREN	0x00000200
+#define DTSEC_IMASK_GRSCEN	0x00000100
+#define DTSEC_IMASK_TDPEEN	0x00000002
+#define DTSEC_IMASK_RDPEEN	0x00000001
+
+#define DTSEC_EVENTS_MASK		\
+	 ((u32)(DTSEC_IMASK_BREN    |	\
+		DTSEC_IMASK_RXCEN   |	\
+		DTSEC_IMASK_BTEN    |	\
+		DTSEC_IMASK_TXCEN   |	\
+		DTSEC_IMASK_TXEEN   |	\
+		DTSEC_IMASK_ABRTEN  |	\
+		DTSEC_IMASK_LCEN    |	\
+		DTSEC_IMASK_CRLEN   |	\
+		DTSEC_IMASK_XFUNEN  |	\
+		DTSEC_IMASK_IFERREN |	\
+		DTSEC_IMASK_MAGEN   |	\
+		DTSEC_IMASK_TDPEEN  |	\
+		DTSEC_IMASK_RDPEEN))
+
+/* dtsec timestamp event bits */
+#define TMR_PEMASK_TSREEN	0x00010000
+#define TMR_PEVENT_TSRE		0x00010000
+
+/* Group address bit indication */
+#define MAC_GROUP_ADDRESS	0x0000010000000000ULL
+
+/* Defaults */
+#define DEFAULT_HALFDUP_RETRANSMIT		0xf
+#define DEFAULT_HALFDUP_COLL_WINDOW		0x37
+#define DEFAULT_HALFDUP_ALT_BACKOFF_VAL	0x0A
+#define DEFAULT_TX_PAUSE_TIME			0xf000
+#define DEFAULT_TBIPA				5
+#define DEFAULT_RX_PREPEND			0
+#define DEFAULT_PREAMBLE_LEN			7
+#define DEFAULT_TX_PAUSE_TIME_EXTD		0
+#define DEFAULT_NON_BACK_TO_BACK_IPG1		0x40
+#define DEFAULT_NON_BACK_TO_BACK_IPG2		0x60
+#define DEFAULT_MIN_IFG_ENFORCEMENT		0x50
+#define DEFAULT_BACK_TO_BACK_IPG		0x60
+#define DEFAULT_MAXIMUM_FRAME			0x600
+#define DEFAULT_TBI_PHY_ADDR			5
+
+#define DTSEC_DEFAULT_EXCEPTIONS		 \
+	((u32)((DTSEC_IMASK_BREN)		|\
+			(DTSEC_IMASK_RXCEN)	|\
+			(DTSEC_IMASK_BTEN)	|\
+			(DTSEC_IMASK_TXCEN)	|\
+			(DTSEC_IMASK_TXEEN)	|\
+			(DTSEC_IMASK_ABRTEN)	|\
+			(DTSEC_IMASK_LCEN)	|\
+			(DTSEC_IMASK_CRLEN)	|\
+			(DTSEC_IMASK_XFUNEN)	|\
+			(DTSEC_IMASK_IFERREN)	|\
+			(DTSEC_IMASK_MAGEN)	|\
+			(DTSEC_IMASK_TDPEEN)	|\
+			(DTSEC_IMASK_RDPEEN)))
+
+/* register related defines (bits, field offsets..) */
+#define DTSEC_ID2_INT_REDUCED_OFF	0x00010000
+
+#define DTSEC_ECNTRL_GMIIM		0x00000040
+#define DTSEC_ECNTRL_TBIM		0x00000020
+#define DTSEC_ECNTRL_SGMIIM		0x00000002
+#define DTSEC_ECNTRL_RPM		0x00000010
+#define DTSEC_ECNTRL_R100M		0x00000008
+#define DTSEC_ECNTRL_QSGMIIM		0x00000001
+
+#define DTSEC_TCTRL_THDF		0x00000800
+#define DTSEC_TCTRL_TTSE		0x00000040
+#define DTSEC_TCTRL_GTS			0x00000020
+
+#define RCTRL_PAL_MASK			0x001f0000
+#define RCTRL_PAL_SHIFT			16
+#define RCTRL_CFA			0x00008000
+#define RCTRL_GHTX			0x00000400
+#define RCTRL_RTSE			0x00000040
+#define RCTRL_GRS			0x00000020
+#define RCTRL_BC_REJ			0x00000010
+#define RCTRL_MPROM			0x00000008
+#define RCTRL_RSF			0x00000004
+#define RCTRL_UPROM			0x00000001
+#define RCTRL_PROM			(RCTRL_UPROM | RCTRL_MPROM)
+
+#define MACCFG1_SOFT_RESET		0x80000000
+#define MACCFG1_LOOPBACK		0x00000100
+#define MACCFG1_RX_FLOW			0x00000020
+#define MACCFG1_TX_FLOW			0x00000010
+#define MACCFG1_TX_EN			0x00000001
+#define MACCFG1_RX_EN			0x00000004
+
+#define MACCFG2_NIBBLE_MODE		0x00000100
+#define MACCFG2_BYTE_MODE		0x00000200
+#define MACCFG2_PRE_AM_RX_EN		0x00000080
+#define MACCFG2_PRE_AM_TX_EN		0x00000040
+#define MACCFG2_LENGTH_CHECK		0x00000010
+#define MACCFG2_PAD_CRC_EN		0x00000004
+#define MACCFG2_CRC_EN			0x00000002
+#define MACCFG2_FULL_DUPLEX		0x00000001
+#define MACCFG2_PREAMBLE_LENGTH_MASK	0x0000f000
+#define MACCFG2_PREAMBLE_LENGTH_SHIFT	12
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT	24
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT	16
+#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT	8
+
+#define IPGIFG_NON_BACK_TO_BACK_IPG_1	0x7F000000
+#define IPGIFG_NON_BACK_TO_BACK_IPG_2	0x007F0000
+#define IPGIFG_MIN_IFG_ENFORCEMENT	0x0000FF00
+#define IPGIFG_BACK_TO_BACK_IPG	0x0000007F
+
+#define HAFDUP_ALT_BEB				0x00080000
+#define HAFDUP_BP_NO_BACKOFF			0x00040000
+#define HAFDUP_NO_BACKOFF			0x00020000
+#define HAFDUP_EXCESS_DEFER			0x00010000
+#define HAFDUP_COLLISION_WINDOW		0x000003ff
+#define HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK	0x00f00000
+#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT	20
+#define HAFDUP_RETRANSMISSION_MAX_SHIFT	12
+#define HAFDUP_RETRANSMISSION_MAX		0x0000f000
+
+#define NUM_OF_HASH_REGS	8	/* Number of hash table registers */
+
+#define PTV_PTE_MASK		0xffff0000
+#define PTV_PT_MASK		0x0000ffff
+#define PTV_PTE_SHIFT		16
+
+#define MAX_PACKET_ALIGNMENT		31
+#define MAX_INTER_PACKET_GAP		0x7f
+#define MAX_INTER_PALTERNATE_BEB	0x0f
+#define MAX_RETRANSMISSION		0x0f
+#define MAX_COLLISION_WINDOW		0x03ff
+
+/* Hash table size (32 bits*8 regs) */
+#define DTSEC_HASH_TABLE_SIZE		256
+/* Extended Hash table size (32 bits*16 regs) */
+#define EXTENDED_HASH_TABLE_SIZE	512
+
+/* maximum number of phys */
+#define MAX_PHYS			32
+
+/* MII Configuration Control Memory Map Registers */
+struct dtsec_mii_regs {
+	u32 reserved1[72];
+	u32 miimcfg;	/* MII Mgmt:configuration */
+	u32 miimcom;	/* MII Mgmt:command	  */
+	u32 miimadd;	/* MII Mgmt:address	  */
+	u32 miimcon;	/* MII Mgmt:control 3	  */
+	u32 miimstat;	/* MII Mgmt:status	  */
+	u32 miimind;	/* MII Mgmt:indicators	  */
+};
+
+/* dTSEC Memory Map registers */
+struct dtsec_regs {
+	/* dTSEC General Control and Status Registers */
+	u32 tsec_id;		/* 0x000 ETSEC_ID register */
+	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
+	u32 ievent;		/* 0x008 Interrupt event register */
+	u32 imask;		/* 0x00C Interrupt mask register */
+	u32 reserved0010[1];
+	u32 ecntrl;		/* 0x014 E control register */
+	u32 ptv;		/* 0x018 Pause time value register */
+	u32 tbipa;		/* 0x01C TBI PHY address register */
+	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
+	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
+	u32 tmr_pemask;		/* 0x028 Timer event mask register */
+	u32 reserved002c[5];
+	u32 tctrl;		/* 0x040 Transmit control register */
+	u32 reserved0044[3];
+	u32 rctrl;		/* 0x050 Receive control register */
+	u32 reserved0054[11];
+	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
+	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
+	u32 reserved00c0[16];
+	u32 maccfg1;		/* 0x100 MAC configuration #1 */
+	u32 maccfg2;		/* 0x104 MAC configuration #2 */
+	u32 ipgifg;		/* 0x108 IPG/IFG */
+	u32 hafdup;		/* 0x10C Half-duplex */
+	u32 maxfrm;		/* 0x110 Maximum frame */
+	u32 reserved0114[10];
+	u32 ifstat;		/* 0x13C Interface status */
+	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
+	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
+	struct {
+		u32 exact_match1;	/* octets 1-4 */
+		u32 exact_match2;	/* octets 5-6 */
+	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
+	u32 reserved01c0[16];
+	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
+	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
+	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
+	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
+	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
+	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
+	u32 trmgv;
+	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
+	u32 rbyt;	/* 0x21C receive byte counter */
+	u32 rpkt;	/* 0x220 receive packet counter */
+	u32 rfcs;	/* 0x224 receive FCS error counter */
+	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
+	u32 rbca;	/* 0x22C Rx broadcast packet counter */
+	u32 rxcf;	/* 0x230 Rx control frame packet counter */
+	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
+	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
+	u32 raln;	/* 0x23C Rx alignment error counter */
+	u32 rflr;	/* 0x240 Rx frame length error counter */
+	u32 rcde;	/* 0x244 Rx code error counter */
+	u32 rcse;	/* 0x248 Rx carrier sense error counter */
+	u32 rund;	/* 0x24C Rx undersize packet counter */
+	u32 rovr;	/* 0x250 Rx oversize packet counter */
+	u32 rfrg;	/* 0x254 Rx fragments counter */
+	u32 rjbr;	/* 0x258 Rx jabber counter */
+	u32 rdrp;	/* 0x25C Rx drop */
+	u32 tbyt;	/* 0x260 Tx byte counter */
+	u32 tpkt;	/* 0x264 Tx packet counter */
+	u32 tmca;	/* 0x268 Tx multicast packet counter */
+	u32 tbca;	/* 0x26C Tx broadcast packet counter */
+	u32 txpf;	/* 0x270 Tx pause control frame counter */
+	u32 tdfr;	/* 0x274 Tx deferral packet counter */
+	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
+	u32 tscl;	/* 0x27C Tx single collision packet counter */
+	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
+	u32 tlcl;	/* 0x284 Tx late collision packet counter */
+	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
+	u32 tncl;	/* 0x28C Tx total collision counter */
+	u32 reserved0290[1];
+	u32 tdrp;	/* 0x294 Tx drop frame counter */
+	u32 tjbr;	/* 0x298 Tx jabber frame counter */
+	u32 tfcs;	/* 0x29C Tx FCS error counter */
+	u32 txcf;	/* 0x2A0 Tx control frame counter */
+	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
+	u32 tund;	/* 0x2A8 Tx undersize frame counter */
+	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
+	u32 car1;	/* 0x2B0 carry register one register* */
+	u32 car2;	/* 0x2B4 carry register two register* */
+	u32 cam1;	/* 0x2B8 carry register one mask register */
+	u32 cam2;	/* 0x2BC carry register two mask register */
+	u32 reserved02c0[848];
+};
+
+/* struct dtsec_cfg - dTSEC configuration
+ * halfdup_on:
+ * Transmit half-duplex flow control, under software control for 10/100-Mbps
+ * half-duplex media. If set, back pressure is applied to media by raising
+ * carrier.
+ * halfdup_retransmit:
+ * Number of retransmission attempts following a collision.
+ * If this is exceeded dTSEC aborts transmission due to excessive collisions.
+ * The standard specifies the attempt limit to be 15.
+ * halfdup_coll_window:
+ * The number of bytes of the frame during which collisions may occur.
+ * The default value of 55 corresponds to the frame byte at the end of the
+ * standard 512-bit slot time window. If collisions are detected after this
+ * byte, the late collision event is asserted and transmission of current
+ * frame is aborted.
+ * rx_drop_bcast:
+ * Discard broadcast frames. If set, all broadcast frames will be discarded
+ * by dTSEC.
+ * rx_short_frm:
+ * Accept short frames. If set, dTSEC will accept frames of length 14-63 bytes.
+ * rx_len_check:
+ * Length check for received frames. If set, the MAC checks the frame's length
+ * field on receive to ensure it matches the actual data field length.
+ * This only works for received frames with length field less than 1500.
+ * No check is performed for larger frames.
+ * tx_pad_crc:
+ * Pad and append CRC. If set, the MAC pads all transmitted short frames and
+ * appends a CRC to every frame regardless of padding requirement.
+ * tx_crc:
+ * Transmission CRC enable. If set, the MAC appends a CRC to all frames.
+ * If frames presented to the MAC have a valid length and contain a valid CRC,
+ * tx_crc should be reset. This field is ignored if tx_pad_crc is set.
+ * rx_ctrl_acc:
+ * Control frame accept. If set, this overrides 802.3 standard control frame
+ * behavior, and all Ethernet frames that have an ethertype of 0x8808 are
+ * treated as normal Ethernet frames and passed up to the packet interface on
+ * a DA match. Received pause control frames are passed to the packet
+ * interface only if Rx flow control is also disabled.
+ * See dtsec_accept_rx_pause_frames() function.
+ * tx_pause_time:
+ * Transmit pause time value. This pause value is used as part of the pause
+ * frame to be sent when a transmit pause frame is initiated.
+ * If set to 0 this disables transmission of pause frames.
+ * rx_preamble:
+ * Receive preamble enable. If set, the MAC recovers the received Ethernet
+ * 7-byte preamble and passes it to the packet interface at the start of each
+ * received frame.
+ * This field should be reset for internal MAC loop-back mode.
+ * tx_preamble:
+ * User defined preamble enable for transmitted frames.
+ * If set, a user-defined preamble must be passed to the MAC and it is
+ * transmitted instead of the standard preamble.
+ * preamble_len:
+ * Length, in bytes, of the preamble field preceding each Ethernet
+ * start-of-frame delimiter byte. The default value of 0x7 should be used in
+ * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
+ * rx_prepend:
+ * Packet alignment padding length. The specified number of bytes (1-31)
+ * of zero padding are inserted before the start of each received frame.
+ * For Ethernet, where optional preamble extraction is enabled, the padding
+ * appears before the preamble, otherwise the padding precedes the
+ * layer 2 header.
+ *
+ * This structure contains basic dTSEC configuration and must be passed to
+ * the init() function. A default set of configuration values can be
+ * obtained by calling set_dflts().
+ */
+struct dtsec_cfg {
+	bool halfdup_on;
+	bool halfdup_alt_backoff_en;
+	bool halfdup_excess_defer;
+	bool halfdup_no_backoff;
+	bool halfdup_bp_no_backoff;
+	u32 halfdup_alt_backoff_val;
+	u16 halfdup_retransmit;
+	u16 halfdup_coll_window;
+	bool rx_drop_bcast;
+	bool rx_short_frm;
+	bool rx_len_check;
+	bool tx_pad_crc;
+	bool tx_crc;
+	bool rx_ctrl_acc;
+	u16 tx_pause_time;
+	u16 tbipa;
+	bool ptp_tsu_en;
+	bool ptp_exception_en;
+	bool rx_preamble;
+	bool tx_preamble;
+	u32 preamble_len;
+	u32 rx_prepend;
+	bool loopback;
+	bool rx_time_stamp_en;
+	bool tx_time_stamp_en;
+	bool rx_flow;
+	bool tx_flow;
+	bool rx_group_hash_exd;
+	bool rx_promisc;
+	u8 tbi_phy_addr;
+	u16 tx_pause_time_extd;
+	u16 maximum_frame;
+	u32 non_back_to_back_ipg1;
+	u32 non_back_to_back_ipg2;
+	u32 min_ifg_enforcement;
+	u32 back_to_back_ipg;
+};
+
+struct fman_mac {
+	/* pointer to dTSEC memory mapped registers */
+	struct dtsec_regs __iomem *regs;
+	/* pointer to dTSEC MII memory mapped registers */
+	struct dtsec_mii_regs __iomem *mii_regs;
+	/* MII management clock */
+	u16 mii_mgmt_clk;
+	/* MAC address of device */
+	u64 addr;
+	/* Ethernet physical interface */
+	phy_interface_t phy_if;
+	u16 max_speed;
+	void *dev_id; /* device cookie used by the exception cbs */
+	fman_mac_exception_cb *exception_cb;
+	fman_mac_exception_cb *event_cb;
+	/* Number of individual addresses in registers for this station */
+	u8 num_of_ind_addr_in_regs;
+	/* pointer to driver's global address hash table */
+	struct eth_hash_t *multicast_addr_hash;
+	/* pointer to driver's individual address hash table */
+	struct eth_hash_t *unicast_addr_hash;
+	u8 mac_id;
+	u8 tbi_phy_addr;
+	u32 exceptions;
+	bool ptp_tsu_enabled;
+	bool en_tsu_err_exeption;
+	struct dtsec_cfg *dtsec_drv_param;
+	void *fm;
+	struct fman_rev_info fm_rev_info;
+	bool basex_if;
+};
+
+static u32 calc_mii_mgmt_clk(struct fman_mac *dtsec)
+{
+	u16 fm_clk_freq, dtsec_freq;
+	u32 mgmt_clk;
+
+	fm_clk_freq = fman_get_clock_freq(dtsec->fm);
+	if (fm_clk_freq  == 0) {
+		pr_err("Can't get clock for MAC!\n");
+		return 0;
+	}
+
+	dtsec_freq = (u16)(fm_clk_freq >> 1);
+
+	if (dtsec_freq < 80)
+		mgmt_clk = 1;
+	else if (dtsec_freq < 120)
+		mgmt_clk = 2;
+	else if (dtsec_freq < 160)
+		mgmt_clk = 3;
+	else if (dtsec_freq < 200)
+		mgmt_clk = 4;
+	else if (dtsec_freq < 280)
+		mgmt_clk = 5;
+	else if (dtsec_freq < 400)
+		mgmt_clk = 6;
+	else
+		mgmt_clk = 7;
+
+	return mgmt_clk;
+}
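+
+/*
+ * Worked example of the divisor selection above: with the 733 MHz FMan
+ * clock used on the RTEMS side, dtsec_freq = 733 / 2 = 366 (truncated),
+ * which falls into the "< 400" bucket, so the MII management clock divisor
+ * is 6.
+ */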
+
+static int mii_write_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 data)
+{
+	struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
+	u32 tmp;
+	int count;
+
+	/* Setup the MII Mgmt clock speed */
+	iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
+
+	/* Stop the MII management read cycle */
+	iowrite32be(0, &regs->miimcom);
+	/* Dummy read to make sure MIIMCOM is written */
+	tmp = ioread32be(&regs->miimcom);
+
+	/* Setting up MII Management Address Register */
+	tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
+	iowrite32be(tmp, &regs->miimadd);
+
+	/* Setting up MII Management Control Register with data */
+	iowrite32be((u32)data, &regs->miimcon);
+	/* Dummy read to make sure MIIMCON is written */
+	tmp = ioread32be(&regs->miimcon);
+
+	/* Wait until MII management write is complete */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
+
+	if (count < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int mii_read_reg(struct fman_mac *dtsec, u8 addr, u8 reg, u16 *data)
+{
+	struct dtsec_mii_regs __iomem *regs = dtsec->mii_regs;
+	u32 tmp;
+	int count;
+
+	/* Setup the MII Mgmt clock speed */
+	iowrite32be(dtsec->mii_mgmt_clk, &regs->miimcfg);
+
+	/* Setting up the MII Management Address Register */
+	tmp = (u32)((addr << MIIMADD_PHY_ADDR_SHIFT) | reg);
+	iowrite32be(tmp, &regs->miimadd);
+
+	/* Perform an MII management read cycle */
+	iowrite32be(MIIMCOM_READ_CYCLE, &regs->miimcom);
+	/* Dummy read to make sure MIIMCOM is written */
+	tmp = ioread32be(&regs->miimcom);
+
+	/* Wait until MII management read is complete */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&regs->miimind)) & MIIMIND_BUSY) && count--);
+
+	if (count < 0)
+		return -EBUSY;
+
+	/* Read MII management status  */
+	*data = (u16)ioread32be(&regs->miimstat);
+
+	iowrite32be(0, &regs->miimcom);
+	/* Dummy read to make sure MIIMCOM is written */
+	tmp = ioread32be(&regs->miimcom);
+
+	if (*data == 0xffff) {
+		pr_warn("Read wrong data (0xffff): phy_addr 0x%x, reg 0x%x\n",
+			addr, reg);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static void set_dflts(struct dtsec_cfg *cfg)
+{
+	cfg->halfdup_on = false;
+	cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
+	cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
+	cfg->halfdup_excess_defer = true;
+	cfg->halfdup_no_backoff = false;
+	cfg->halfdup_bp_no_backoff = false;
+	cfg->halfdup_alt_backoff_val = DEFAULT_HALFDUP_ALT_BACKOFF_VAL;
+	cfg->halfdup_alt_backoff_en = false;
+	cfg->rx_drop_bcast = false;
+	cfg->rx_short_frm = true;
+	cfg->rx_len_check = false;
+	cfg->tx_pad_crc = true;
+	cfg->tx_crc = false;
+	cfg->rx_ctrl_acc = false;
+	cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
+	/* PHY address 0 is reserved (DPAA RM) */
+	cfg->tbipa = DEFAULT_TBIPA;
+	cfg->rx_prepend = DEFAULT_RX_PREPEND;
+	cfg->ptp_tsu_en = true;
+	cfg->ptp_exception_en = true;
+	cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
+	cfg->rx_preamble = false;
+	cfg->tx_preamble = false;
+	cfg->loopback = false;
+	cfg->rx_time_stamp_en = false;
+	cfg->tx_time_stamp_en = false;
+	cfg->rx_flow = true;
+	cfg->tx_flow = true;
+	cfg->rx_group_hash_exd = false;
+	cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
+	cfg->rx_promisc = false;
+	cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
+	cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
+	cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
+	cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
+	cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
+	cfg->tbi_phy_addr = DEFAULT_TBI_PHY_ADDR;
+}
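+
+/*
+ * Sketch of the intended configuration flow (per the struct dtsec_cfg
+ * comment above; "regs", "iface", "iface_speed", "macaddr" and "exceptions"
+ * are placeholders and the overridden fields are only examples):
+ *
+ *	struct dtsec_cfg cfg;
+ *
+ *	set_dflts(&cfg);
+ *	cfg.maximum_frame = fman_get_max_frm();
+ *	cfg.loopback = true;	(e.g. for an internal loop-back self test)
+ *	err = init(regs, &cfg, iface, iface_speed, macaddr, exceptions);
+ */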
+
+static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
+		phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+		u32 exception_mask)
+{
+	bool is_rgmii, is_sgmii, is_qsgmii;
+	int i;
+	u32 tmp;
+
+	/* Soft reset */
+	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
+	iowrite32be(0, &regs->maccfg1);
+
+	/* dtsec_id2 */
+	tmp = ioread32be(&regs->tsec_id2);
+
+	/* check RGMII support */
+	if (iface == PHY_INTERFACE_MODE_RGMII ||
+	    iface == PHY_INTERFACE_MODE_RMII)
+		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+			return -EINVAL;
+
+	if (iface == PHY_INTERFACE_MODE_SGMII ||
+	    iface == PHY_INTERFACE_MODE_MII)
+		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+			return -EINVAL;
+
+	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
+	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
+
+	tmp = 0;
+	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
+		tmp |= DTSEC_ECNTRL_GMIIM;
+	if (is_sgmii)
+		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
+	if (is_qsgmii)
+		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
+			DTSEC_ECNTRL_QSGMIIM);
+	if (is_rgmii)
+		tmp |= DTSEC_ECNTRL_RPM;
+	if (iface_speed == SPEED_100)
+		tmp |= DTSEC_ECNTRL_R100M;
+
+	iowrite32be(tmp, &regs->ecntrl);
+
+	tmp = 0;
+	if (cfg->halfdup_on)
+		tmp |= DTSEC_TCTRL_THDF;
+	if (cfg->tx_time_stamp_en)
+		tmp |= DTSEC_TCTRL_TTSE;
+
+	iowrite32be(tmp, &regs->tctrl);
+
+	tmp = 0;
+
+	if (cfg->tx_pause_time)
+		tmp |= cfg->tx_pause_time;
+	if (cfg->tx_pause_time_extd)
+		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
+	iowrite32be(tmp, &regs->ptv);
+
+	tmp = 0;
+	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
+	if (cfg->rx_ctrl_acc)
+		tmp |= RCTRL_CFA;
+	if (cfg->rx_group_hash_exd)
+		tmp |= RCTRL_GHTX;
+	if (cfg->rx_time_stamp_en)
+		tmp |= RCTRL_RTSE;
+	if (cfg->rx_drop_bcast)
+		tmp |= RCTRL_BC_REJ;
+	if (cfg->rx_short_frm)
+		tmp |= RCTRL_RSF;
+	if (cfg->rx_promisc)
+		tmp |= RCTRL_PROM;
+
+	iowrite32be(tmp, &regs->rctrl);
+
+	/* Assign a Phy Address to the TBI (TBIPA).
+	 * Done also in cases where TBI is not selected to avoid conflict with
+	 * the external PHY's Physical address
+	 */
+	iowrite32be(cfg->tbipa, &regs->tbipa);
+
+	iowrite32be(0, &regs->tmr_ctrl);
+
+	if (cfg->ptp_tsu_en) {
+		tmp = 0;
+		tmp |= TMR_PEVENT_TSRE;
+		iowrite32be(tmp, &regs->tmr_pevent);
+
+		if (cfg->ptp_exception_en) {
+			tmp = 0;
+			tmp |= TMR_PEMASK_TSREEN;
+			iowrite32be(tmp, &regs->tmr_pemask);
+		}
+	}
+
+	tmp = 0;
+	if (cfg->loopback)
+		tmp |= MACCFG1_LOOPBACK;
+	if (cfg->rx_flow)
+		tmp |= MACCFG1_RX_FLOW;
+	if (cfg->tx_flow)
+		tmp |= MACCFG1_TX_FLOW;
+	iowrite32be(tmp, &regs->maccfg1);
+
+	tmp = 0;
+
+	if (iface_speed < SPEED_1000)
+		tmp |= MACCFG2_NIBBLE_MODE;
+	else if (iface_speed == SPEED_1000)
+		tmp |= MACCFG2_BYTE_MODE;
+
+	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
+		MACCFG2_PREAMBLE_LENGTH_MASK;
+	if (cfg->rx_preamble)
+		tmp |= MACCFG2_PRE_AM_RX_EN;
+	if (cfg->tx_preamble)
+		tmp |= MACCFG2_PRE_AM_TX_EN;
+	if (cfg->rx_len_check)
+		tmp |= MACCFG2_LENGTH_CHECK;
+	if (cfg->tx_pad_crc)
+		tmp |= MACCFG2_PAD_CRC_EN;
+	if (cfg->tx_crc)
+		tmp |= MACCFG2_CRC_EN;
+	if (!cfg->halfdup_on)
+		tmp |= MACCFG2_FULL_DUPLEX;
+	iowrite32be(tmp, &regs->maccfg2);
+
+	tmp = (((cfg->non_back_to_back_ipg1 <<
+		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
+		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
+	       | ((cfg->non_back_to_back_ipg2 <<
+		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
+		 & IPGIFG_NON_BACK_TO_BACK_IPG_2)
+	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
+		 & IPGIFG_MIN_IFG_ENFORCEMENT)
+	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
+	iowrite32be(tmp, &regs->ipgifg);
+
+	tmp = 0;
+
+	if (cfg->halfdup_alt_backoff_en) {
+		tmp = HAFDUP_ALT_BEB;
+		tmp |= (cfg->halfdup_alt_backoff_val <<
+			HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT) &
+			HAFDUP_ALTERNATE_BEB_TRUNCATION_MASK;
+	}
+	if (cfg->halfdup_bp_no_backoff)
+		tmp |= HAFDUP_BP_NO_BACKOFF;
+	if (cfg->halfdup_no_backoff)
+		tmp |= HAFDUP_NO_BACKOFF;
+	if (cfg->halfdup_excess_defer)
+		tmp |= HAFDUP_EXCESS_DEFER;
+	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
+		& HAFDUP_RETRANSMISSION_MAX);
+	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
+
+	iowrite32be(tmp, &regs->hafdup);
+
+	/* Initialize Maximum frame length */
+	iowrite32be(cfg->maximum_frame, &regs->maxfrm);
+
+	iowrite32be(0xffffffff, &regs->cam1);
+	iowrite32be(0xffffffff, &regs->cam2);
+
+	iowrite32be(exception_mask, &regs->imask);
+
+	iowrite32be(0xffffffff, &regs->ievent);
+
+	tmp = (u32)((macaddr[5] << 24) |
+		    (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
+	iowrite32be(tmp, &regs->macstnaddr1);
+
+	tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
+	iowrite32be(tmp, &regs->macstnaddr2);
+
+	/* HASH */
+	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
+		/* Initialize IADDRx */
+		iowrite32be(0, &regs->igaddr[i]);
+		/* Initialize GADDRx */
+		iowrite32be(0, &regs->gaddr[i]);
+	}
+
+	return 0;
+}
+
+static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+{
+	u32 tmp;
+
+	tmp = (u32)((adr[5] << 24) |
+		    (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+	iowrite32be(tmp, &regs->macstnaddr1);
+
+	tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+	iowrite32be(tmp, &regs->macstnaddr2);
+}
+
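+/* Each hash bucket maps to one bit in the igaddr/gaddr register files:
+ * buckets 0-255 select igaddr[0..7] and buckets 256-511 select gaddr[0..7].
+ * For example (illustration only), bucket 0x1a3 gives
+ * reg_idx = (0x1a3 >> 5) & 0xf = 13, i.e. gaddr[5], and
+ * bit_idx = 0x1a3 & 0x1f = 3, i.e. bit_mask = 0x80000000 >> 3 = 0x10000000.
+ */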
+static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
+		       bool enable)
+{
+	int reg_idx = (bucket >> 5) & 0xf;
+	int bit_idx = bucket & 0x1f;
+	u32 bit_mask = 0x80000000 >> bit_idx;
+	u32 __iomem *reg;
+
+	if (reg_idx > 7)
+		reg = &regs->gaddr[reg_idx - 8];
+	else
+		reg = &regs->igaddr[reg_idx];
+
+	if (enable)
+		iowrite32be(ioread32be(reg) | bit_mask, reg);
+	else
+		iowrite32be(ioread32be(reg) & (~bit_mask), reg);
+}
+
+static int check_init_parameters(struct fman_mac *dtsec)
+{
+	if (dtsec->max_speed >= SPEED_10000) {
+		pr_err("1G MAC driver supports 1G or lower speeds\n");
+		return -EINVAL;
+	}
+	if (dtsec->addr == 0) {
+		pr_err("Ethernet MAC Must have a valid MAC Address\n");
+		return -EINVAL;
+	}
+	if (dtsec->max_speed >= SPEED_1000 &&
+	    dtsec->dtsec_drv_param->halfdup_on) {
+		pr_err("Ethernet MAC 1G can't work in half duplex\n");
+		return -EINVAL;
+	}
+
+	/* FM_RX_PREAM_4_ERRATA_DTSEC_A001 Errata workaround */
+	if (dtsec->dtsec_drv_param->rx_preamble) {
+		pr_err("preamble_rx_en\n");
+		return -EINVAL;
+	}
+
+	if (((dtsec->dtsec_drv_param)->tx_preamble ||
+	     (dtsec->dtsec_drv_param)->rx_preamble) &&
+	    ((dtsec->dtsec_drv_param)->preamble_len != 0x7)) {
+		pr_err("Preamble length should be 0x7 bytes\n");
+		return -EINVAL;
+	}
+	if ((dtsec->dtsec_drv_param)->halfdup_on &&
+	    (dtsec->dtsec_drv_param->tx_time_stamp_en ||
+	     dtsec->dtsec_drv_param->rx_time_stamp_en)) {
+		pr_err("1588 timeStamp disabled in half duplex mode\n");
+		return -EINVAL;
+	}
+	if ((dtsec->dtsec_drv_param)->rx_flow &&
+	    (dtsec->dtsec_drv_param)->rx_ctrl_acc) {
+		pr_err("Receive control frame can not be accepted\n");
+		return -EINVAL;
+	}
+	if ((dtsec->dtsec_drv_param)->rx_prepend >
+	    MAX_PACKET_ALIGNMENT) {
+		pr_err("packetAlignmentPadding can't be > than %d\n",
+		       MAX_PACKET_ALIGNMENT);
+		return -EINVAL;
+	}
+	if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
+	     MAX_INTER_PACKET_GAP) ||
+	    ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
+	     MAX_INTER_PACKET_GAP) ||
+	     ((dtsec->dtsec_drv_param)->back_to_back_ipg >
+	      MAX_INTER_PACKET_GAP)) {
+		pr_err("Inter packet gap can't be greater than %d\n",
+		       MAX_INTER_PACKET_GAP);
+		return -EINVAL;
+	}
+	if ((dtsec->dtsec_drv_param)->halfdup_alt_backoff_val >
+	    MAX_INTER_PALTERNATE_BEB) {
+		pr_err("alternateBackoffVal can't be greater than %d\n",
+		       MAX_INTER_PALTERNATE_BEB);
+		return -EINVAL;
+	}
+	if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
+	    MAX_RETRANSMISSION) {
+		pr_err("maxRetransmission can't be greater than %d\n",
+		       MAX_RETRANSMISSION);
+		return -EINVAL;
+	}
+	if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
+	    MAX_COLLISION_WINDOW) {
+		pr_err("collisionWindow can't be greater than %d\n",
+		       MAX_COLLISION_WINDOW);
+		return -EINVAL;
+	}
+	/* If the auto-negotiation process is disabled, the PHY needs to be
+	 * set up using the MII Management Interface.
+	 */
+	if (dtsec->dtsec_drv_param->tbipa > MAX_PHYS) {
+		pr_err("PHY address out of range (should be 0-%d)\n", MAX_PHYS);
+		return -ERANGE;
+	}
+	if (!dtsec->exception_cb) {
+		pr_err("uninitialized exception_cb\n");
+		return -EINVAL;
+	}
+	if (!dtsec->event_cb) {
+		pr_err("uninitialized event_cb\n");
+		return -EINVAL;
+	}
+
+	/* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
+	if (dtsec->dtsec_drv_param->rx_len_check) {
+		pr_warn("Length Check!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+	u32 bit_mask;
+
+	switch (exception) {
+	case FM_MAC_EX_1G_BAB_RX:
+		bit_mask = DTSEC_IMASK_BREN;
+		break;
+	case FM_MAC_EX_1G_RX_CTL:
+		bit_mask = DTSEC_IMASK_RXCEN;
+		break;
+	case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
+		bit_mask = DTSEC_IMASK_GTSCEN;
+		break;
+	case FM_MAC_EX_1G_BAB_TX:
+		bit_mask = DTSEC_IMASK_BTEN;
+		break;
+	case FM_MAC_EX_1G_TX_CTL:
+		bit_mask = DTSEC_IMASK_TXCEN;
+		break;
+	case FM_MAC_EX_1G_TX_ERR:
+		bit_mask = DTSEC_IMASK_TXEEN;
+		break;
+	case FM_MAC_EX_1G_LATE_COL:
+		bit_mask = DTSEC_IMASK_LCEN;
+		break;
+	case FM_MAC_EX_1G_COL_RET_LMT:
+		bit_mask = DTSEC_IMASK_CRLEN;
+		break;
+	case FM_MAC_EX_1G_TX_FIFO_UNDRN:
+		bit_mask = DTSEC_IMASK_XFUNEN;
+		break;
+	case FM_MAC_EX_1G_MAG_PCKT:
+		bit_mask = DTSEC_IMASK_MAGEN;
+		break;
+	case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
+		bit_mask = DTSEC_IMASK_MMRDEN;
+		break;
+	case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
+		bit_mask = DTSEC_IMASK_MMWREN;
+		break;
+	case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
+		bit_mask = DTSEC_IMASK_GRSCEN;
+		break;
+	case FM_MAC_EX_1G_DATA_ERR:
+		bit_mask = DTSEC_IMASK_TDPEEN;
+		break;
+	case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
+		bit_mask = DTSEC_IMASK_MSROEN;
+		break;
+	default:
+		bit_mask = 0;
+		break;
+	}
+
+	return bit_mask;
+}
+
+static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
+{
+	/* The dTSEC driver parameters are freed once initialization
+	 * completes, so a NULL pointer means the MAC is already initialized.
+	 */
+	if (!dtsec_drv_params)
+		return true;
+
+	return false;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+	u32 crc;
+
+	/* CRC calculation */
+	GET_MAC_ADDR_CRC(eth_addr, crc);
+
+	crc = bitrev32(crc);
+
+	return crc;
+}
+
+static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+
+	if (is_init_done(dtsec->dtsec_drv_param))
+		return 0;
+
+	return (u16)ioread32be(&regs->maxfrm);
+}
+
+static void dtsec_isr(void *handle)
+{
+	struct fman_mac *dtsec = (struct fman_mac *)handle;
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 event;
+
+	/* do not handle MDIO events */
+	event = ioread32be(&regs->ievent) &
+		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
+
+	event &= ioread32be(&regs->imask);
+
+	iowrite32be(event, &regs->ievent);
+
+	if (event & DTSEC_IMASK_BREN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
+	if (event & DTSEC_IMASK_RXCEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
+	if (event & DTSEC_IMASK_GTSCEN)
+		dtsec->exception_cb(dtsec->dev_id,
+				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
+	if (event & DTSEC_IMASK_BTEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
+	if (event & DTSEC_IMASK_TXCEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
+	if (event & DTSEC_IMASK_TXEEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
+	if (event & DTSEC_IMASK_LCEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
+	if (event & DTSEC_IMASK_CRLEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
+	if (event & DTSEC_IMASK_XFUNEN) {
+		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+		if (dtsec->fm_rev_info.major == 2) {
+			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
+			/* a. Write 0x00E0_0C00 to DTSEC_ID
+			 *	This is a read only register
+			 * b. Read and save the value of TPKT
+			 */
+			tpkt1 = in_be32(&regs->tpkt);
+
+			/* c. Read the register at dTSEC address offset 0x32C */
+			tmp_reg1 = in_be32(&regs->reserved02c0[27]);
+
+			/* d. Compare bits [9:15] to bits [25:31] of the
+			 * register at address offset 0x32C.
+			 */
+			if ((tmp_reg1 & 0x007F0000) !=
+				(tmp_reg1 & 0x0000007F)) {
+				/* If they are not equal, save the value of
+				 * this register and wait for at least
+				 * MAXFRM*16 ns
+				 */
+				usleep_range((u32)min(
+					dtsec_get_max_frame_length(dtsec) *
+					16 / 1000, 1),
+					(u32)(min(
+					dtsec_get_max_frame_length(dtsec) *
+					16 / 1000, 1) + 1));
+			}
+
+			/* e. Read and save TPKT again and read the register
+			 * at dTSEC address offset 0x32C again
+			 */
+			tpkt2 = in_be32(&regs->tpkt);
+			tmp_reg2 = in_be32(&regs->reserved02c0[27]);
+
+			/* f. Compare the value of TPKT saved in step b to
+			 * value read in step e. Also compare bits [9:15] of
+			 * the register at offset 0x32C saved in step d to the
+			 * value of bits [9:15] saved in step e. If the two
+			 * registers values are unchanged, then the transmit
+			 * portion of the dTSEC controller is locked up and
+			 * the user should proceed to the recover sequence.
+			 */
+			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
+				(tmp_reg2 & 0x007F0000))) {
+				/* recover sequence */
+
+				/* a.Write a 1 to RCTRL[GRS] */
+
+				out_be32(&regs->rctrl,
+					 in_be32(&regs->rctrl) | RCTRL_GRS);
+
+				/* b.Wait until IEVENT[GRSC]=1, or at least
+				 * 100 us has elapsed.
+				 */
+				for (i = 0; i < 100; i++) {
+					if (in_be32(&regs->ievent) &
+					    DTSEC_IMASK_GRSCEN)
+						break;
+					udelay(1);
+				}
+				if (in_be32(&regs->ievent) & DTSEC_IMASK_GRSCEN)
+					out_be32(&regs->ievent,
+						 DTSEC_IMASK_GRSCEN);
+				else
+					pr_debug("Rx lockup due to Tx lockup\n");
+
+				/* c.Write a 1 to bit n of FM_RSTC
+				 * (offset 0x0CC of FPM)
+				 */
+				fman_reset_mac(dtsec->fm, dtsec->mac_id);
+
+				/* d.Wait 4 Tx clocks (32 ns) */
+				udelay(1);
+
+				/* e.Write a 0 to bit n of FM_RSTC. */
+				/* cleared by FMAN
+				 */
+			}
+		}
+
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
+	}
+	if (event & DTSEC_IMASK_MAGEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
+	if (event & DTSEC_IMASK_GRSCEN)
+		dtsec->exception_cb(dtsec->dev_id,
+				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
+	if (event & DTSEC_IMASK_TDPEEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
+	if (event & DTSEC_IMASK_RDPEEN)
+		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
+
+	/* masked interrupts */
+	WARN_ON(event & DTSEC_IMASK_ABRTEN);
+	WARN_ON(event & DTSEC_IMASK_IFERREN);
+}
+
+static void dtsec_1588_isr(void *handle)
+{
+	struct fman_mac *dtsec = (struct fman_mac *)handle;
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 event;
+
+	if (dtsec->ptp_tsu_enabled) {
+		event = ioread32be(&regs->tmr_pevent);
+		event &= ioread32be(&regs->tmr_pemask);
+
+		if (event) {
+			iowrite32be(event, &regs->tmr_pevent);
+			WARN_ON(event & TMR_PEVENT_TSRE);
+			dtsec->exception_cb(dtsec->dev_id,
+					    FM_MAC_EX_1G_1588_TS_RX_ERR);
+		}
+	}
+}
+
+static void free_init_resources(struct fman_mac *dtsec)
+{
+	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+			     FMAN_INTR_TYPE_ERR);
+	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+			     FMAN_INTR_TYPE_NORMAL);
+
+	/* release the driver's group hash table */
+	free_hash_table(dtsec->multicast_addr_hash);
+	dtsec->multicast_addr_hash = NULL;
+
+	/* release the driver's individual hash table */
+	free_hash_table(dtsec->unicast_addr_hash);
+	dtsec->unicast_addr_hash = NULL;
+}
+
+int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
+{
+	if (is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	dtsec->dtsec_drv_param->maximum_frame = new_val;
+
+	return 0;
+}
+
+int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
+{
+	if (is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	dtsec->dtsec_drv_param->tx_pad_crc = new_val;
+
+	return 0;
+}
+
+int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 tmp;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	/* Enable */
+	tmp = ioread32be(&regs->maccfg1);
+	if (mode & COMM_MODE_RX)
+		tmp |= MACCFG1_RX_EN;
+	if (mode & COMM_MODE_TX)
+		tmp |= MACCFG1_TX_EN;
+
+	iowrite32be(tmp, &regs->maccfg1);
+
+	/* Graceful start - clear the graceful transmit/receive stop bits */
+	if (mode & COMM_MODE_TX)
+		iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
+			    &regs->tctrl);
+	if (mode & COMM_MODE_RX)
+		iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
+			    &regs->rctrl);
+
+	return 0;
+}
+
+int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 tmp;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	/* Graceful stop - assert the graceful receive stop bit */
+	if (mode & COMM_MODE_RX) {
+		tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+		iowrite32be(tmp, &regs->rctrl);
+
+		if (dtsec->fm_rev_info.major == 2)
+			usleep_range(100, 200);
+		else
+			udelay(10);
+	}
+
+	if (mode & COMM_MODE_TX) {
+		if (dtsec->fm_rev_info.major == 2)
+			pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
+		else
+			pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
+	}
+
+	tmp = ioread32be(&regs->maccfg1);
+	if (mode & COMM_MODE_RX)
+		tmp &= ~MACCFG1_RX_EN;
+	if (mode & COMM_MODE_TX)
+		tmp &= ~MACCFG1_TX_EN;
+
+	iowrite32be(tmp, &regs->maccfg1);
+
+	return 0;
+}
+
+int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+			      u8 __maybe_unused priority,
+			      u16 pause_time, u16 __maybe_unused thresh_time)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 ptv = 0;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+	if (dtsec->fm_rev_info.major == 2)
+		if (0 < pause_time && pause_time <= 320) {
+			pr_warn("pause-time: %d illegal.Should be > 320\n",
+				pause_time);
+			return -EINVAL;
+		}
+
+	if (pause_time) {
+		ptv = ioread32be(&regs->ptv);
+		ptv &= PTV_PTE_MASK;
+		ptv |= pause_time & PTV_PT_MASK;
+		iowrite32be(ptv, &regs->ptv);
+
+		/* trigger the transmission of a flow-control pause frame */
+		iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
+			    &regs->maccfg1);
+	} else
+		iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
+			    &regs->maccfg1);
+
+	return 0;
+}
+
+int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 tmp;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->maccfg1);
+	if (en)
+		tmp |= MACCFG1_RX_FLOW;
+	else
+		tmp &= ~MACCFG1_RX_FLOW;
+	iowrite32be(tmp, &regs->maccfg1);
+
+	return 0;
+}
+
+int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+{
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	/* Initialize MAC Station Address registers (1 & 2).
+	 * The station address has to be swapped (big endian to little
+	 * endian).
+	 */
+	dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
+	set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+
+	return 0;
+}
+
+int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	struct eth_hash_entry *hash_entry;
+	u64 addr;
+	s32 bucket;
+	u32 crc;
+	bool mcast, ghtx;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
+	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
+
+	/* Cannot handle unicast mac addr when GHTX is on */
+	if (ghtx && !mcast) {
+		pr_err("Could not compute hash bucket\n");
+		return -EINVAL;
+	}
+	crc = get_mac_addr_hash_code(addr);
+
+	/* Considering the 9 highest order bits in crc H[8:0]:
+	 * if ghtx = 0, H[8:6] (highest order 3 bits) identify the hash
+	 * register and H[5:1] (next 5 bits) identify the hash bit;
+	 * if ghtx = 1, H[8:5] (highest order 4 bits) identify the hash
+	 * register and H[4:0] (next 5 bits) identify the hash bit.
+	 *
+	 * In the bucket index output the low 5 bits identify the hash
+	 * register bit, while the higher 4 bits identify the hash register.
+	 */
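+	/* For example (illustration only): with ghtx = 0 and
+	 * crc = 0xa3000000, bucket = (crc >> 24) & 0xff = 0xa3; since the
+	 * address is multicast, 0x100 is added and bucket = 0x1a3, so
+	 * set_bucket() programs a gaddr register rather than an igaddr one.
+	 */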
+
+	if (ghtx) {
+		bucket = (s32)((crc >> 23) & 0x1ff);
+	} else {
+		bucket = (s32)((crc >> 24) & 0xff);
+		/* if !ghtx and mcast the bit must be set in gaddr instead of
+		 * igaddr.
+		 */
+		if (mcast)
+			bucket += 0x100;
+	}
+
+	set_bucket(dtsec->regs, bucket, true);
+
+	/* Create element to be added to the driver hash table */
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	if (!hash_entry)
+		return -ENOMEM;
+	hash_entry->addr = addr;
+	INIT_LIST_HEAD(&hash_entry->node);
+
+	if (addr & MAC_GROUP_ADDRESS)
+		/* Group Address */
+		list_add_tail(&hash_entry->node,
+			      &dtsec->multicast_addr_hash->lsts[bucket]);
+	else
+		list_add_tail(&hash_entry->node,
+			      &dtsec->unicast_addr_hash->lsts[bucket]);
+
+	return 0;
+}
+
+int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	struct list_head *pos;
+	struct eth_hash_entry *hash_entry = NULL;
+	u64 addr;
+	s32 bucket;
+	u32 crc;
+	bool mcast, ghtx;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
+	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
+
+	/* Cannot handle unicast mac addr when GHTX is on */
+	if (ghtx && !mcast) {
+		pr_err("Could not compute hash bucket\n");
+		return -EINVAL;
+	}
+	crc = get_mac_addr_hash_code(addr);
+
+	if (ghtx) {
+		bucket = (s32)((crc >> 23) & 0x1ff);
+	} else {
+		bucket = (s32)((crc >> 24) & 0xff);
+		/* if !ghtx and mcast the bit must be set
+		 * in gaddr instead of igaddr.
+		 */
+		if (mcast)
+			bucket += 0x100;
+	}
+
+	if (addr & MAC_GROUP_ADDRESS) {
+		/* Group Address */
+		list_for_each(pos,
+			      &dtsec->multicast_addr_hash->lsts[bucket]) {
+			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+			if (hash_entry->addr == addr) {
+				list_del_init(&hash_entry->node);
+				kfree(hash_entry);
+				break;
+			}
+		}
+		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
+			set_bucket(dtsec->regs, bucket, false);
+	} else {
+		/* Individual Address */
+		list_for_each(pos,
+			      &dtsec->unicast_addr_hash->lsts[bucket]) {
+			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+			if (hash_entry->addr == addr) {
+				list_del_init(&hash_entry->node);
+				kfree(hash_entry);
+				break;
+			}
+		}
+		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
+			set_bucket(dtsec->regs, bucket, false);
+	}
+
+	/* address does not exist */
+	WARN_ON(!hash_entry);
+
+	return 0;
+}
+
+int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 tmp;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	/* Set unicast promiscuous */
+	tmp = ioread32be(&regs->rctrl);
+	if (new_val)
+		tmp |= RCTRL_UPROM;
+	else
+		tmp &= ~RCTRL_UPROM;
+
+	iowrite32be(tmp, &regs->rctrl);
+
+	/* Set multicast promiscuous */
+	tmp = ioread32be(&regs->rctrl);
+	if (new_val)
+		tmp |= RCTRL_MPROM;
+	else
+		tmp &= ~RCTRL_MPROM;
+
+	iowrite32be(tmp, &regs->rctrl);
+
+	return 0;
+}
+
+int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 tmp;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->maccfg2);
+
+	/* Full Duplex */
+	tmp |= MACCFG2_FULL_DUPLEX;
+
+	tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
+	if (speed < SPEED_1000)
+		tmp |= MACCFG2_NIBBLE_MODE;
+	else if (speed == SPEED_1000)
+		tmp |= MACCFG2_BYTE_MODE;
+	iowrite32be(tmp, &regs->maccfg2);
+
+	tmp = ioread32be(&regs->ecntrl);
+	if (speed == SPEED_100)
+		tmp |= DTSEC_ECNTRL_R100M;
+	else
+		tmp &= ~DTSEC_ECNTRL_R100M;
+	iowrite32be(tmp, &regs->ecntrl);
+
+	return 0;
+}
+
+int dtsec_restart_autoneg(struct fman_mac *dtsec)
+{
+	u16 tmp_reg16;
+	int err;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	err = mii_read_reg(dtsec, dtsec->tbi_phy_addr, 0, &tmp_reg16);
+	if (err) {
+		pr_err("Autonegotiation restart failed\n");
+		return err;
+	}
+
+	tmp_reg16 &= ~(PHY_CR_SPEED0 | PHY_CR_SPEED1);
+	tmp_reg16 |=
+	    (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX | PHY_CR_SPEED1);
+
+	mii_write_reg(dtsec, dtsec->tbi_phy_addr, 0, tmp_reg16);
+
+	return 0;
+}
+
+int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	*mac_version = ioread32be(&regs->tsec_id);
+
+	return 0;
+}
+
+int dtsec_set_exception(struct fman_mac *dtsec,
+			enum fman_mac_exceptions exception, bool enable)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	u32 bit_mask = 0;
+
+	if (!is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
+		bit_mask = get_exception_flag(exception);
+		if (bit_mask) {
+			if (enable)
+				dtsec->exceptions |= bit_mask;
+			else
+				dtsec->exceptions &= ~bit_mask;
+		} else {
+			pr_err("Undefined exception\n");
+			return -EINVAL;
+		}
+		if (enable)
+			iowrite32be(ioread32be(&regs->imask) | bit_mask,
+				    &regs->imask);
+		else
+			iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
+				    &regs->imask);
+	} else {
+		if (!dtsec->ptp_tsu_enabled) {
+			pr_err("Exception valid for 1588 only\n");
+			return -EINVAL;
+		}
+		switch (exception) {
+		case FM_MAC_EX_1G_1588_TS_RX_ERR:
+			if (enable) {
+				dtsec->en_tsu_err_exeption = true;
+				iowrite32be(ioread32be(&regs->tmr_pemask) |
+					    TMR_PEMASK_TSREEN,
+					    &regs->tmr_pemask);
+			} else {
+				dtsec->en_tsu_err_exeption = false;
+				iowrite32be(ioread32be(&regs->tmr_pemask) &
+					    ~TMR_PEMASK_TSREEN,
+					    &regs->tmr_pemask);
+			}
+			break;
+		default:
+			pr_err("Undefined exception\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int dtsec_init(struct fman_mac *dtsec)
+{
+	struct dtsec_regs __iomem *regs = dtsec->regs;
+	struct dtsec_cfg *dtsec_drv_param;
+	int err;
+	u16 max_frm_ln;
+	enet_addr_t eth_addr;
+
+	if (is_init_done(dtsec->dtsec_drv_param))
+		return -EINVAL;
+
+	if (DEFAULT_RESET_ON_INIT &&
+	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
+		pr_err("Can't reset MAC!\n");
+		return -EINVAL;
+	}
+
+	err = check_init_parameters(dtsec);
+	if (err)
+		return err;
+
+	dtsec_drv_param = dtsec->dtsec_drv_param;
+
+	MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
+
+	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
+		   dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions);
+	if (err) {
+		free_init_resources(dtsec);
+		pr_err("DTSEC version doesn't support this i/f mode\n");
+		return err;
+	}
+
+	if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
+		u16 tmp_reg16;
+
+		/* Configure the TBI PHY Control Register */
+		tmp_reg16 = PHY_TBICON_CLK_SEL | PHY_TBICON_SRESET;
+		mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
+			      tmp_reg16);
+
+		tmp_reg16 = PHY_TBICON_CLK_SEL;
+		mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 17,
+			      tmp_reg16);
+
+		tmp_reg16 =
+		    (PHY_CR_PHY_RESET | PHY_CR_ANE | PHY_CR_FULLDUPLEX |
+		     PHY_CR_SPEED1);
+		mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+
+		if (dtsec->basex_if)
+			tmp_reg16 = PHY_TBIANA_1000X;
+		else
+			tmp_reg16 = PHY_TBIANA_SGMII;
+		mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 4, tmp_reg16);
+
+		tmp_reg16 =
+		    (PHY_CR_ANE | PHY_CR_RESET_AN | PHY_CR_FULLDUPLEX |
+		     PHY_CR_SPEED1);
+
+		mii_write_reg(dtsec, (u8)dtsec_drv_param->tbipa, 0, tmp_reg16);
+	}
+
+	/* Max Frame Length */
+	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
+	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
+	if (err) {
+		pr_err("Setting max frame length failed\n");
+		free_init_resources(dtsec);
+		return -EINVAL;
+	}
+
+	dtsec->multicast_addr_hash =
+		alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
+	if (!dtsec->multicast_addr_hash) {
+		free_init_resources(dtsec);
+		pr_err("MC hash table is failed\n");
+		return -ENOMEM;
+	}
+
+	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
+	if (!dtsec->unicast_addr_hash) {
+		free_init_resources(dtsec);
+		pr_err("UC hash table is failed\n");
+		return -ENOMEM;
+	}
+
+	/* register err intr handler for dtsec to FPM (err) */
+	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
+	/* register 1588 intr handler for TMR to FPM (normal) */
+	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
+
+	kfree(dtsec_drv_param);
+	dtsec->dtsec_drv_param = NULL;
+
+	return 0;
+}
+
+int dtsec_free(struct fman_mac *dtsec)
+{
+	free_init_resources(dtsec);
+
+	kfree(dtsec->dtsec_drv_param);
+	dtsec->dtsec_drv_param = NULL;
+	kfree(dtsec);
+
+	return 0;
+}
+
+struct fman_mac *dtsec_config(struct fman_mac_params *params)
+{
+	struct fman_mac *dtsec;
+	struct dtsec_cfg *dtsec_drv_param;
+	void __iomem *base_addr;
+
+	base_addr = params->base_addr;
+
+	/* allocate memory for the dTSEC MAC driver data structure. */
+	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
+	if (!dtsec)
+		return NULL;
+
+	/* allocate memory for the dTSEC driver parameters data structure. */
+	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
+	if (!dtsec_drv_param)
+		goto err_dtsec;
+
+	/* Plant parameter structure pointer */
+	dtsec->dtsec_drv_param = dtsec_drv_param;
+
+	set_dflts(dtsec_drv_param);
+
+	dtsec->regs = (struct dtsec_regs __iomem *)(base_addr);
+	dtsec->mii_regs = (struct dtsec_mii_regs __iomem *)
+		(base_addr + DTSEC_TO_MII_OFFSET);
+	dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+	dtsec->max_speed = params->max_speed;
+	dtsec->phy_if = params->phy_if;
+	dtsec->mac_id = params->mac_id;
+	dtsec->exceptions = DTSEC_DEFAULT_EXCEPTIONS;
+	dtsec->exception_cb = params->exception_cb;
+	dtsec->event_cb = params->event_cb;
+	dtsec->dev_id = params->dev_id;
+	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
+	dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
+	dtsec->tbi_phy_addr = dtsec->dtsec_drv_param->tbi_phy_addr;
+
+	dtsec->fm = params->fm;
+	dtsec->basex_if = params->basex_if;
+	dtsec->mii_mgmt_clk = calc_mii_mgmt_clk(dtsec);
+	if (dtsec->mii_mgmt_clk == 0) {
+		pr_err("Can't calculate MII management clock\n");
+		goto err_dtsec;
+	}
+
+	/* Save FMan revision */
+	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+	return dtsec;
+
+err_dtsec:
+	kfree(dtsec);
+	return NULL;
+}
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h
new file mode 100644
index 0000000..c4467c0
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DTSEC_H
+#define __DTSEC_H
+
+#include "fman_mac.h"
+
+struct fman_mac *dtsec_config(struct fman_mac_params *params);
+int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_adjust_link(struct fman_mac *dtsec,
+		      u16 speed);
+int dtsec_restart_autoneg(struct fman_mac *dtsec);
+int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
+int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
+int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
+int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
+int dtsec_init(struct fman_mac *dtsec);
+int dtsec_free(struct fman_mac *dtsec);
+int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
+int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
+			      u16 pause_time, u16 thresh_time);
+int dtsec_set_exception(struct fman_mac *dtsec,
+			enum fman_mac_exceptions exception, bool enable);
+int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
+int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
+int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
+
+#endif /* __DTSEC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_mac.h b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
new file mode 100644
index 0000000..7a5e752
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* FM MAC ... */
+#ifndef __FM_MAC_H
+#define __FM_MAC_H
+
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/if_ether.h>
+
+struct fman_mac;
+
+/* Ethernet Address */
+typedef u8 enet_addr_t[ETH_ALEN];
+
+#define ENET_ADDR_TO_UINT64(_enet_addr)		\
+	(u64)(((u64)(_enet_addr)[0] << 40) |		\
+	      ((u64)(_enet_addr)[1] << 32) |		\
+	      ((u64)(_enet_addr)[2] << 24) |		\
+	      ((u64)(_enet_addr)[3] << 16) |		\
+	      ((u64)(_enet_addr)[4] << 8) |		\
+	      ((u64)(_enet_addr)[5]))
+
+#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
+	do { \
+		int i; \
+		for (i = 0; i < ETH_ALEN; i++) \
+			(_enet_addr)[i] = \
+			(u8)((_addr64) >> ((5 - i) * 8)); \
+	} while (0)
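+/* For example (illustration only), the address 00:04:9f:01:02:03 converts
+ * to 0x00049f010203ULL via ENET_ADDR_TO_UINT64(), and
+ * MAKE_ENET_ADDR_FROM_UINT64() performs the inverse conversion, placing the
+ * first octet of the address in the highest of the six address bytes.
+ */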
+
+/* defaults */
+#define DEFAULT_RESET_ON_INIT                 false
+
+/* PFC defines */
+#define FSL_FM_PAUSE_TIME_ENABLE	0xf000
+#define FSL_FM_PAUSE_TIME_DISABLE	0
+#define FSL_FM_PAUSE_THRESH_DEFAULT	0
+
+#define FM_MAC_NO_PFC   0xff
+
+/* HASH defines */
+#define ETH_HASH_ENTRY_OBJ(ptr)	\
+	hlist_entry_safe(ptr, struct eth_hash_entry, node)
+
+/* Enumeration (bit flags) of communication modes (Transmit,
+ * receive or both).
+ */
+enum comm_mode {
+	COMM_MODE_NONE = 0,	/* No transmit/receive communication */
+	COMM_MODE_RX = 1,	/* Only receive communication */
+	COMM_MODE_TX = 2,	/* Only transmit communication */
+	COMM_MODE_RX_AND_TX = 3	/* Both transmit and receive communication */
+};
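+
+/* These flags combine bitwise: COMM_MODE_RX_AND_TX equals
+ * (COMM_MODE_RX | COMM_MODE_TX), so a caller can test each direction with
+ * e.g. (mode & COMM_MODE_RX), as dtsec_enable() and dtsec_disable() do.
+ */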
+
+/* FM MAC Exceptions */
+enum fman_mac_exceptions {
+	FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
+	/* 10GEC MDIO scan event interrupt */
+	, FM_MAC_EX_10G_MDIO_CMD_CMPL
+	/* 10GEC MDIO command completion interrupt */
+	, FM_MAC_EX_10G_REM_FAULT
+	/* 10GEC, mEMAC Remote fault interrupt */
+	, FM_MAC_EX_10G_LOC_FAULT
+	/* 10GEC, mEMAC Local fault interrupt */
+	, FM_MAC_EX_10G_TX_ECC_ER
+	/* 10GEC, mEMAC Transmit frame ECC error interrupt */
+	, FM_MAC_EX_10G_TX_FIFO_UNFL
+	/* 10GEC, mEMAC Transmit FIFO underflow interrupt */
+	, FM_MAC_EX_10G_TX_FIFO_OVFL
+	/* 10GEC, mEMAC Transmit FIFO overflow interrupt */
+	, FM_MAC_EX_10G_TX_ER
+	/* 10GEC Transmit frame error interrupt */
+	, FM_MAC_EX_10G_RX_FIFO_OVFL
+	/* 10GEC, mEMAC Receive FIFO overflow interrupt */
+	, FM_MAC_EX_10G_RX_ECC_ER
+	/* 10GEC, mEMAC Receive frame ECC error interrupt */
+	, FM_MAC_EX_10G_RX_JAB_FRM
+	/* 10GEC Receive jabber frame interrupt */
+	, FM_MAC_EX_10G_RX_OVRSZ_FRM
+	/* 10GEC Receive oversized frame interrupt */
+	, FM_MAC_EX_10G_RX_RUNT_FRM
+	/* 10GEC Receive runt frame interrupt */
+	, FM_MAC_EX_10G_RX_FRAG_FRM
+	/* 10GEC Receive fragment frame interrupt */
+	, FM_MAC_EX_10G_RX_LEN_ER
+	/* 10GEC Receive payload length error interrupt */
+	, FM_MAC_EX_10G_RX_CRC_ER
+	/* 10GEC Receive CRC error interrupt */
+	, FM_MAC_EX_10G_RX_ALIGN_ER
+	/* 10GEC Receive alignment error interrupt */
+	, FM_MAC_EX_1G_BAB_RX
+	/* dTSEC Babbling receive error */
+	, FM_MAC_EX_1G_RX_CTL
+	/* dTSEC Receive control (pause frame) interrupt */
+	, FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
+	/* dTSEC Graceful transmit stop complete */
+	, FM_MAC_EX_1G_BAB_TX
+	/* dTSEC Babbling transmit error */
+	, FM_MAC_EX_1G_TX_CTL
+	/* dTSEC Transmit control (pause frame) interrupt */
+	, FM_MAC_EX_1G_TX_ERR
+	/* dTSEC Transmit error */
+	, FM_MAC_EX_1G_LATE_COL
+	/* dTSEC Late collision */
+	, FM_MAC_EX_1G_COL_RET_LMT
+	/* dTSEC Collision retry limit */
+	, FM_MAC_EX_1G_TX_FIFO_UNDRN
+	/* dTSEC Transmit FIFO underrun */
+	, FM_MAC_EX_1G_MAG_PCKT
+	/* dTSEC Magic Packet detection */
+	, FM_MAC_EX_1G_MII_MNG_RD_COMPLET
+	/* dTSEC MII management read completion */
+	, FM_MAC_EX_1G_MII_MNG_WR_COMPLET
+	/* dTSEC MII management write completion */
+	, FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
+	/* dTSEC Graceful receive stop complete */
+	, FM_MAC_EX_1G_DATA_ERR
+	/* dTSEC Internal data error on transmit */
+	, FM_MAC_1G_RX_DATA_ERR
+	/* dTSEC Internal data error on receive */
+	, FM_MAC_EX_1G_1588_TS_RX_ERR
+	/* dTSEC Time-Stamp Receive Error */
+	, FM_MAC_EX_1G_RX_MIB_CNT_OVFL
+	/* dTSEC MIB counter overflow */
+	, FM_MAC_EX_TS_FIFO_ECC_ERR
+	/* mEMAC Time-stamp FIFO ECC error interrupt;
+	 * not supported on T4240/B4860 rev1 chips
+	 */
+	, FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
+	/* mEMAC Magic Packet Indication Interrupt */
+};
+
+struct eth_hash_entry {
+	u64 addr;		/* Ethernet Address  */
+	struct list_head node;
+};
+
+typedef void (fman_mac_exception_cb)(void *dev_id,
+				    enum fman_mac_exceptions exceptions);
+
+/* FMan MAC config input */
+struct fman_mac_params {
+	/* Base of memory mapped FM MAC registers */
+	void __iomem *base_addr;
+	/* MAC address of device; First octet is sent first */
+	enet_addr_t addr;
+	/* MAC ID; numbering of dTSEC and 1G-mEMAC:
+	 * 0 - FM_MAX_NUM_OF_1G_MACS;
+	 * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
+	 * 0 - FM_MAX_NUM_OF_10G_MACS
+	 */
+	u8 mac_id;
+	/* PHY interface */
+	phy_interface_t	 phy_if;
+	/* Note that the speed should indicate the maximum rate that
+	 * this MAC should support rather than the actual speed;
+	 */
+	u16 max_speed;
+	/* A handle to the FM object this port related to */
+	void *fm;
+	/* MDIO exceptions interrupt source - not valid for all
+	 * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+	 * mdio-irq, or for polling
+	 */
+	void *dev_id; /* device cookie used by the exception cbs */
+	fman_mac_exception_cb *event_cb;    /* MDIO Events Callback Routine */
+	fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
+	/* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
+	 * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
+	 * to interface between MAC and phy/backplane, SGMII phy can still
+	 * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
+	*/
+	bool basex_if;
+};
+
+struct eth_hash_t {
+	u16 size;
+	struct list_head *lsts;
+};
+
+static inline struct eth_hash_entry
+*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
+{
+	struct eth_hash_entry *hash_entry = NULL;
+
+	if (!list_empty(addr_lst)) {
+		hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
+		list_del_init(&hash_entry->node);
+	}
+	return hash_entry;
+}
+
+static inline void free_hash_table(struct eth_hash_t *hash)
+{
+	struct eth_hash_entry *hash_entry;
+	int i = 0;
+
+	if (hash) {
+		if (hash->lsts) {
+			for (i = 0; i < hash->size; i++) {
+				hash_entry =
+				dequeue_addr_from_hash_entry(&hash->lsts[i]);
+				while (hash_entry) {
+					kfree(hash_entry);
+					hash_entry =
+					dequeue_addr_from_hash_entry(
+						&hash->lsts[i]);
+				}
+			}
+
+			kfree(hash->lsts);
+		}
+
+		kfree(hash);
+	}
+}
+
+static inline struct eth_hash_t *alloc_hash_table(u16 size)
+{
+	u32 i;
+	struct eth_hash_t *hash;
+
+	/* Allocate address hash table */
+	hash = kmalloc(sizeof(*hash), GFP_KERNEL);
+	if (!hash)
+		return NULL;
+
+	hash->size = size;
+
+	hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
+				   GFP_KERNEL);
+	if (!hash->lsts) {
+		kfree(hash);
+		return NULL;
+	}
+
+	for (i = 0; i < hash->size; i++)
+		INIT_LIST_HEAD(&hash->lsts[i]);
+
+	return hash;
+}
+
+#endif /* __FM_MAC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_memac.c b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
new file mode 100644
index 0000000..5730194
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -0,0 +1,1382 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_memac.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/phy.h>
+
+/* MII Management Registers */
+#define MDIO_CFG_CLK_DIV_MASK		0x0080ff80
+#define MDIO_CFG_HOLD_MASK		0x0000001c
+#define MDIO_CFG_ENC45			0x00000040
+#define MDIO_CFG_BSY			0x00000001
+
+#define MDIO_CTL_PHY_ADDR_SHIFT	5
+
+#define MDIO_DATA_BSY			0x80000000
+
+/* Internal PHY access */
+#define PHY_MDIO_ADDR			0
+
+/* Internal PHY Registers - SGMII */
+#define PHY_SGMII_CR_RESET_AN           0x0200
+#define PHY_SGMII_CR_AN_ENABLE          0x1000
+#define PHY_SGMII_CR_DEF_VAL            0x1140
+#define PHY_SGMII_DEV_ABILITY_SGMII     0x4001
+#define PHY_SGMII_DEV_ABILITY_1000X     0x01A0
+#define PHY_SGMII_IF_MODE_DUPLEX_FULL   0x0000
+#define PHY_SGMII_IF_MODE_DUPLEX_HALF   0x0010
+#define PHY_SGMII_IF_MODE_SPEED_GB      0x0008
+#define PHY_SGMII_IF_MODE_SPEED_100M    0x0004
+#define PHY_SGMII_IF_MODE_SPEED_10M     0x0000
+#define PHY_SGMII_IF_MODE_AN            0x0002
+#define PHY_SGMII_IF_MODE_SGMII         0x0001
+#define PHY_SGMII_IF_MODE_1000X         0x0000
+
+/* Offset from the MEM map to the MDIO mem map */
+#define MEMAC_TO_MII_OFFSET         0x030
+/* Num of additional exact match MAC adr regs */
+#define MEMAC_NUM_OF_PADDRS 7
+
+/* Control and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_MG		0x80000000 /* 00 Magic Packet detection */
+#define CMD_CFG_REG_LOWP_RXETY	0x01000000 /* 07 Rx low power indication */
+#define CMD_CFG_TX_LOWP_ENA	0x00800000 /* 08 Tx Low Power Idle Enable */
+#define CMD_CFG_SFD_ANY		0x00200000 /* 10 Disable SFD check */
+#define CMD_CFG_PFC_MODE	0x00080000 /* 12 Enable PFC */
+#define CMD_CFG_NO_LEN_CHK	0x00020000 /* 14 Payload length check disable */
+#define CMD_CFG_SEND_IDLE	0x00010000 /* 15 Force idle generation */
+#define CMD_CFG_CNT_FRM_EN	0x00002000 /* 18 Control frame rx enable */
+#define CMD_CFG_SW_RESET	0x00001000 /* 19 S/W Reset, self clearing bit */
+#define CMD_CFG_TX_PAD_EN	0x00000800 /* 20 Enable Tx padding of frames */
+#define CMD_CFG_LOOPBACK_EN	0x00000400 /* 21 XGMII/GMII loopback enable */
+#define CMD_CFG_TX_ADDR_INS	0x00000200 /* 22 Tx source MAC addr insertion */
+#define CMD_CFG_PAUSE_IGNORE	0x00000100 /* 23 Ignore Pause frame quanta */
+#define CMD_CFG_PAUSE_FWD	0x00000080 /* 24 Terminate/frwd Pause frames */
+#define CMD_CFG_CRC_FWD		0x00000040 /* 25 Terminate/frwd CRC of frames */
+#define CMD_CFG_PAD_EN		0x00000020 /* 26 Frame padding removal */
+#define CMD_CFG_PROMIS_EN	0x00000010 /* 27 Promiscuous operation enable */
+#define CMD_CFG_WAN_MODE	0x00000008 /* 28 WAN mode enable */
+#define CMD_CFG_RX_EN		0x00000002 /* 30 MAC receive path enable */
+#define CMD_CFG_TX_EN		0x00000001 /* 31 MAC transmit path enable */
+
+/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
+#define TX_FIFO_SECTIONS_TX_EMPTY_MASK			0xFFFF0000
+#define TX_FIFO_SECTIONS_TX_AVAIL_MASK			0x0000FFFF
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G		0x00400000
+#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G		0x00100000
+#define TX_FIFO_SECTIONS_TX_AVAIL_10G			0x00000019
+#define TX_FIFO_SECTIONS_TX_AVAIL_1G			0x00000020
+#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G		0x00000060
+
+#define GET_TX_EMPTY_DEFAULT_VALUE(_val)				\
+do {									\
+	_val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK;			\
+	((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ?			\
+			(_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
+			(_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
+} while (0)
+
+/* Interface Mode Register (IF_MODE) */
+
+#define IF_MODE_MASK		0x00000003 /* 30-31 Mask on i/f mode bits */
+#define IF_MODE_XGMII		0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_GMII		0x00000002 /* 30-31 GMII (1G) interface */
+#define IF_MODE_RGMII		0x00000004
+#define IF_MODE_RGMII_AUTO	0x00008000
+#define IF_MODE_RGMII_1000	0x00004000 /* 10 - 1000Mbps RGMII */
+#define IF_MODE_RGMII_100	0x00000000 /* 00 - 100Mbps RGMII */
+#define IF_MODE_RGMII_10	0x00002000 /* 01 - 10Mbps RGMII */
+#define IF_MODE_RGMII_SP_MASK	0x00006000 /* Setsp mask bits */
+#define IF_MODE_RGMII_FD	0x00001000 /* Full duplex RGMII */
+#define IF_MODE_HD		0x00000040 /* Half duplex operation */
+
+/* Hash table Control Register (HASHTABLE_CTRL) */
+#define HASH_CTRL_MCAST_EN	0x00000100
+/* 26-31 Hash table address code */
+#define HASH_CTRL_ADDR_MASK	0x0000003F
+/* MAC mcast indication */
+#define GROUP_ADDRESS		0x0000010000000000LL
+#define HASH_TABLE_SIZE		64	/* Hash tbl size */
+
+/* Interrupt Mask Register (IMASK) */
+#define MEMAC_IMASK_MGI		0x40000000 /* 1 Magic pkt detect indication */
+#define MEMAC_IMASK_TSECC_ER	0x20000000 /* 2 Timestamp FIFO ECC error evnt */
+#define MEMAC_IMASK_TECC_ER	0x02000000 /* 6 Transmit frame ECC error evnt */
+#define MEMAC_IMASK_RECC_ER	0x01000000 /* 7 Receive frame ECC error evnt */
+
+#define MEMAC_ALL_ERRS_IMASK					\
+		((u32)(MEMAC_IMASK_TSECC_ER	|	\
+		       MEMAC_IMASK_TECC_ER		|	\
+		       MEMAC_IMASK_RECC_ER		|	\
+		       MEMAC_IMASK_MGI))
+
+#define MEMAC_IEVNT_PCS			0x80000000 /* PCS (XG). Link sync (G) */
+#define MEMAC_IEVNT_AN			0x40000000 /* Auto-negotiation */
+#define MEMAC_IEVNT_LT			0x20000000 /* Link Training/New page */
+#define MEMAC_IEVNT_MGI			0x00004000 /* Magic pkt detection */
+#define MEMAC_IEVNT_TS_ECC_ER		0x00002000 /* Timestamp FIFO ECC error*/
+#define MEMAC_IEVNT_RX_FIFO_OVFL	0x00001000 /* Rx FIFO overflow */
+#define MEMAC_IEVNT_TX_FIFO_UNFL	0x00000800 /* Tx FIFO underflow */
+#define MEMAC_IEVNT_TX_FIFO_OVFL	0x00000400 /* Tx FIFO overflow */
+#define MEMAC_IEVNT_TX_ECC_ER		0x00000200 /* Tx frame ECC error */
+#define MEMAC_IEVNT_RX_ECC_ER		0x00000100 /* Rx frame ECC error */
+#define MEMAC_IEVNT_LI_FAULT		0x00000080 /* Link Interruption flt */
+#define MEMAC_IEVNT_RX_EMPTY		0x00000040 /* Rx FIFO empty */
+#define MEMAC_IEVNT_TX_EMPTY		0x00000020 /* Tx FIFO empty */
+#define MEMAC_IEVNT_RX_LOWP		0x00000010 /* Low Power Idle */
+#define MEMAC_IEVNT_PHY_LOS		0x00000004 /* Phy loss of signal */
+#define MEMAC_IEVNT_REM_FAULT		0x00000002 /* Remote fault (XGMII) */
+#define MEMAC_IEVNT_LOC_FAULT		0x00000001 /* Local fault (XGMII) */
+
+#define DEFAULT_PAUSE_QUANTA	0xf000
+#define DEFAULT_FRAME_LENGTH	0x600
+#define DEFAULT_TX_IPG_LENGTH	12
+
+#define MEMAC_DEFAULT_EXCEPTIONS				\
+	((u32)(MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |	\
+		MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI))
+
+#define CLXY_PAUSE_QUANTA_CLX_PQNT	0x0000FFFF
+#define CLXY_PAUSE_QUANTA_CLY_PQNT	0xFFFF0000
+#define CLXY_PAUSE_THRESH_CLX_QTH	0x0000FFFF
+#define CLXY_PAUSE_THRESH_CLY_QTH	0xFFFF0000
+
+struct mac_addr {
+	/* Lower 32 bits of 48-bit MAC address */
+	u32 mac_addr_l;
+	/* Upper 16 bits of 48-bit MAC address */
+	u32 mac_addr_u;
+};
+
+/* MII Configuration Control Memory Map Registers */
+struct memac_mii_regs {
+	u32 mdio_cfg;	/* 0x030  */
+	u32 mdio_ctrl;	/* 0x034  */
+	u32 mdio_data;	/* 0x038  */
+	u32 mdio_addr;	/* 0x03c  */
+};
+
+/* memory map */
+struct memac_regs {
+	u32 res0000[2];			/* General Control and Status */
+	u32 command_config;		/* 0x008 Ctrl and cfg */
+	struct mac_addr mac_addr0;	/* 0x00C-0x010 MAC_ADDR_0...1 */
+	u32 maxfrm;			/* 0x014 Max frame length */
+	u32 res0018[1];
+	u32 rx_fifo_sections;		/* Receive FIFO configuration reg */
+	u32 tx_fifo_sections;		/* Transmit FIFO configuration reg */
+	u32 res0024[2];
+	u32 hashtable_ctrl;		/* 0x02C Hash table control */
+	u32 res0030[4];
+	u32 ievent;			/* 0x040 Interrupt event */
+	u32 tx_ipg_length;		/* 0x044 Transmitter inter-packet-gap */
+	u32 res0048;
+	u32 imask;			/* 0x04C Interrupt mask */
+	u32 res0050;
+	u32 pause_quanta[4];		/* 0x054 Pause quanta */
+	u32 pause_thresh[4];		/* 0x064 Pause quanta threshold */
+	u32 rx_pause_status;		/* 0x074 Receive pause status */
+	u32 res0078[2];
+	struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
+	u32 lpwake_timer;		/* 0x0B8 Low Power Wakeup Timer */
+	u32 sleep_timer;		/* 0x0BC Transmit EEE Low Power Timer */
+	u32 res00c0[8];
+	u32 statn_config;		/* 0x0E0 Statistics configuration */
+	u32 res00e4[7];
+	/* Rx Statistics Counter */
+	u32 reoct_l;
+	u32 reoct_u;
+	u32 roct_l;
+	u32 roct_u;
+	u32 raln_l;
+	u32 raln_u;
+	u32 rxpf_l;
+	u32 rxpf_u;
+	u32 rfrm_l;
+	u32 rfrm_u;
+	u32 rfcs_l;
+	u32 rfcs_u;
+	u32 rvlan_l;
+	u32 rvlan_u;
+	u32 rerr_l;
+	u32 rerr_u;
+	u32 ruca_l;
+	u32 ruca_u;
+	u32 rmca_l;
+	u32 rmca_u;
+	u32 rbca_l;
+	u32 rbca_u;
+	u32 rdrp_l;
+	u32 rdrp_u;
+	u32 rpkt_l;
+	u32 rpkt_u;
+	u32 rund_l;
+	u32 rund_u;
+	u32 r64_l;
+	u32 r64_u;
+	u32 r127_l;
+	u32 r127_u;
+	u32 r255_l;
+	u32 r255_u;
+	u32 r511_l;
+	u32 r511_u;
+	u32 r1023_l;
+	u32 r1023_u;
+	u32 r1518_l;
+	u32 r1518_u;
+	u32 r1519x_l;
+	u32 r1519x_u;
+	u32 rovr_l;
+	u32 rovr_u;
+	u32 rjbr_l;
+	u32 rjbr_u;
+	u32 rfrg_l;
+	u32 rfrg_u;
+	u32 rcnp_l;
+	u32 rcnp_u;
+	u32 rdrntp_l;
+	u32 rdrntp_u;
+	u32 res01d0[12];
+	/* Tx Statistics Counter */
+	u32 teoct_l;
+	u32 teoct_u;
+	u32 toct_l;
+	u32 toct_u;
+	u32 res0210[2];
+	u32 txpf_l;
+	u32 txpf_u;
+	u32 tfrm_l;
+	u32 tfrm_u;
+	u32 tfcs_l;
+	u32 tfcs_u;
+	u32 tvlan_l;
+	u32 tvlan_u;
+	u32 terr_l;
+	u32 terr_u;
+	u32 tuca_l;
+	u32 tuca_u;
+	u32 tmca_l;
+	u32 tmca_u;
+	u32 tbca_l;
+	u32 tbca_u;
+	u32 res0258[2];
+	u32 tpkt_l;
+	u32 tpkt_u;
+	u32 tund_l;
+	u32 tund_u;
+	u32 t64_l;
+	u32 t64_u;
+	u32 t127_l;
+	u32 t127_u;
+	u32 t255_l;
+	u32 t255_u;
+	u32 t511_l;
+	u32 t511_u;
+	u32 t1023_l;
+	u32 t1023_u;
+	u32 t1518_l;
+	u32 t1518_u;
+	u32 t1519x_l;
+	u32 t1519x_u;
+	u32 res02a8[6];
+	u32 tcnp_l;
+	u32 tcnp_u;
+	u32 res02c8[14];
+	/* Line Interface Control */
+	u32 if_mode;		/* 0x300 Interface Mode Control */
+	u32 if_status;		/* 0x304 Interface Status */
+	u32 res0308[14];
+	/* HiGig/2 */
+	u32 hg_config;		/* 0x340 Control and cfg */
+	u32 res0344[3];
+	u32 hg_pause_quanta;	/* 0x350 Pause quanta */
+	u32 res0354[3];
+	u32 hg_pause_thresh;	/* 0x360 Pause quanta threshold */
+	u32 res0364[3];
+	u32 hgrx_pause_status;	/* 0x370 Receive pause status */
+	u32 hg_fifos_status;	/* 0x374 fifos status */
+	u32 rhm;		/* 0x378 rx messages counter */
+	u32 thm;		/* 0x37C tx messages counter */
+};
+
+struct memac_cfg {
+	bool reset_on_init;
+	bool rx_error_discard;
+	bool pause_ignore;
+	bool pause_forward_enable;
+	bool no_length_check_enable;
+	bool cmd_frame_enable;
+	bool send_idle_enable;
+	bool wan_mode_enable;
+	bool promiscuous_mode_enable;
+	bool tx_addr_ins_enable;
+	bool loopback_enable;
+	bool lgth_check_nostdr;
+	bool time_stamp_enable;
+	bool pad_enable;
+	bool phy_tx_ena_on;
+	bool rx_sfd_any;
+	bool rx_pbl_fwd;
+	bool tx_pbl_fwd;
+	bool debug_mode;
+	bool wake_on_lan;
+	struct fixed_phy_status *fixed_link;
+	u16 max_frame_length;
+	u16 pause_quanta;
+	u32 tx_ipg_length;
+};
+
+struct fman_mac {
+	/* Pointer to MAC memory mapped registers */
+	struct memac_regs __iomem *regs;
+	/* Pointer to MII memory mapped registers */
+	struct memac_mii_regs __iomem *mii_regs;
+	/* MAC address of device */
+	u64 addr;
+	/* Ethernet physical interface */
+	phy_interface_t phy_if;
+	u16 max_speed;
+	void *dev_id; /* device cookie used by the exception cbs */
+	fman_mac_exception_cb *exception_cb;
+	fman_mac_exception_cb *event_cb;
+	/* Pointer to driver's global address hash table  */
+	struct eth_hash_t *multicast_addr_hash;
+	/* Pointer to driver's individual address hash table  */
+	struct eth_hash_t *unicast_addr_hash;
+	bool debug_mode;
+	u8 mac_id;
+	u32 exceptions;
+	struct memac_cfg *memac_drv_param;
+	void *fm;
+	struct fman_rev_info fm_rev_info;
+	bool basex_if;
+};
+
+static int write_phy_reg_10g(struct memac_mii_regs __iomem *mii_regs,
+			     u8 phy_addr, u8 reg, u16 data)
+{
+	u32 tmp_reg;
+	int count;
+
+	tmp_reg = ioread32be(&mii_regs->mdio_cfg);
+	/* Leave only MDIO_CLK_DIV bits set on */
+	tmp_reg &= MDIO_CFG_CLK_DIV_MASK;
+	/* Set maximum MDIO_HOLD value to allow phy to see
+	 * change of data signal
+	 */
+	tmp_reg |= MDIO_CFG_HOLD_MASK;
+	/* Add 10G interface mode */
+	tmp_reg |= MDIO_CFG_ENC45;
+	iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
+
+	/* Wait for command completion */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	/* Specify phy and register to be accessed */
+	iowrite32be(phy_addr, &mii_regs->mdio_ctrl);
+	iowrite32be(reg, &mii_regs->mdio_addr);
+
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	/* Write data */
+	iowrite32be(data, &mii_regs->mdio_data);
+
+	/* Wait for write transaction end */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
+		 --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int write_phy_reg_1g(struct memac_mii_regs __iomem *mii_regs,
+			    u8 phy_addr, u8 reg, u16 data)
+{
+	u32 tmp_reg;
+	int count;
+
+	/* Leave only MDIO_CLK_DIV and MDIO_HOLD bits set on */
+	tmp_reg = ioread32be(&mii_regs->mdio_cfg);
+	tmp_reg &= (MDIO_CFG_CLK_DIV_MASK | MDIO_CFG_HOLD_MASK);
+	iowrite32be(tmp_reg, &mii_regs->mdio_cfg);
+
+	/* Wait for command completion */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	/* Write transaction */
+	tmp_reg = (phy_addr << MDIO_CTL_PHY_ADDR_SHIFT);
+	tmp_reg |= reg;
+	iowrite32be(tmp_reg, &mii_regs->mdio_ctrl);
+
+	/* Wait for command completion */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&mii_regs->mdio_cfg)) & MDIO_CFG_BSY) && --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	iowrite32be(data, &mii_regs->mdio_data);
+
+	/* Wait for write transaction to end */
+	count = 100;
+	do {
+		udelay(1);
+	} while (((ioread32be(&mii_regs->mdio_data)) & MDIO_DATA_BSY) &&
+		 --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int mii_write_phy_reg(struct fman_mac *memac, u8 phy_addr, u8 reg,
+			     u16 data)
+{
+	int err = 0;
+	/* Figure out interface type - 10G vs 1G.
+	 * In a 10G interface both phy_addr and devAddr are present.
+	 */
+	if (memac->max_speed == SPEED_10000)
+		err = write_phy_reg_10g(memac->mii_regs, phy_addr, reg, data);
+	else
+		err = write_phy_reg_1g(memac->mii_regs, phy_addr, reg, data);
+
+	return err;
+}
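
Both write paths above repeat the same busy-wait on an MDIO status bit. A minimal sketch of that polling pattern factored into a helper (the helper name is made up; the 100 x 1 us budget mirrors the inline code):

static int example_mdio_wait(void __iomem *reg, u32 busy_bit)
{
	int count = 100;

	/* Poll the big-endian MDIO register until the busy bit clears */
	do {
		udelay(1);
	} while ((ioread32be(reg) & busy_bit) && --count);

	return count ? 0 : -EBUSY;
}
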
+
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+			      u8 paddr_num)
+{
+	u32 tmp0, tmp1;
+
+	tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+	tmp1 = (u32)(adr[4] | adr[5] << 8);
+
+	if (paddr_num == 0) {
+		iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
+		iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
+	} else {
+		iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
+		iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u);
+	}
+}
+
+static int reset(struct memac_regs __iomem *regs)
+{
+	u32 tmp;
+	int count;
+
+	tmp = ioread32be(&regs->command_config);
+
+	tmp |= CMD_CFG_SW_RESET;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	count = 100;
+	do {
+		udelay(1);
+	} while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) &&
+		 --count);
+
+	if (count == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static void set_exception(struct memac_regs __iomem *regs, u32 val,
+			  bool enable)
+{
+	u32 tmp;
+
+	tmp = ioread32be(&regs->imask);
+	if (enable)
+		tmp |= val;
+	else
+		tmp &= ~val;
+
+	iowrite32be(tmp, &regs->imask);
+}
+
+static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
+		phy_interface_t phy_if, u16 speed, bool slow_10g_if,
+		u32 exceptions)
+{
+	u32 tmp;
+
+	/* Config */
+	tmp = 0;
+	if (cfg->wan_mode_enable)
+		tmp |= CMD_CFG_WAN_MODE;
+	if (cfg->promiscuous_mode_enable)
+		tmp |= CMD_CFG_PROMIS_EN;
+	if (cfg->pause_forward_enable)
+		tmp |= CMD_CFG_PAUSE_FWD;
+	if (cfg->pause_ignore)
+		tmp |= CMD_CFG_PAUSE_IGNORE;
+	if (cfg->tx_addr_ins_enable)
+		tmp |= CMD_CFG_TX_ADDR_INS;
+	if (cfg->loopback_enable)
+		tmp |= CMD_CFG_LOOPBACK_EN;
+	if (cfg->cmd_frame_enable)
+		tmp |= CMD_CFG_CNT_FRM_EN;
+	if (cfg->send_idle_enable)
+		tmp |= CMD_CFG_SEND_IDLE;
+	if (cfg->no_length_check_enable)
+		tmp |= CMD_CFG_NO_LEN_CHK;
+	if (cfg->rx_sfd_any)
+		tmp |= CMD_CFG_SFD_ANY;
+	if (cfg->pad_enable)
+		tmp |= CMD_CFG_TX_PAD_EN;
+	if (cfg->wake_on_lan)
+		tmp |= CMD_CFG_MG;
+
+	tmp |= CMD_CFG_CRC_FWD;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	/* Max Frame Length */
+	iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+
+	/* Pause Time */
+	iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
+	iowrite32be((u32)0, &regs->pause_thresh[0]);
+
+	/* IF_MODE */
+	tmp = 0;
+	switch (phy_if) {
+	case PHY_INTERFACE_MODE_XGMII:
+		tmp |= IF_MODE_XGMII;
+		break;
+	default:
+		tmp |= IF_MODE_GMII;
+		if (phy_if == PHY_INTERFACE_MODE_RGMII && !cfg->loopback_enable)
+			tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
+	}
+	iowrite32be(tmp, &regs->if_mode);
+
+	/* TX_FIFO_SECTIONS */
+	tmp = 0;
+	if (phy_if == PHY_INTERFACE_MODE_XGMII) {
+		if (slow_10g_if) {
+			tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
+				TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+		} else {
+			tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
+				TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
+		}
+	} else {
+		tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
+			TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
+	}
+	iowrite32be(tmp, &regs->tx_fifo_sections);
+
+	/* clear all pending events and set-up interrupts */
+	iowrite32be(0xffffffff, &regs->ievent);
+	set_exception(regs, exceptions, true);
+
+	return 0;
+}
+
+static void set_dflts(struct memac_cfg *cfg)
+{
+	cfg->reset_on_init = false;
+	cfg->wan_mode_enable = false;
+	cfg->promiscuous_mode_enable = false;
+	cfg->pause_forward_enable = false;
+	cfg->pause_ignore = false;
+	cfg->tx_addr_ins_enable = false;
+	cfg->loopback_enable = false;
+	cfg->cmd_frame_enable = false;
+	cfg->rx_error_discard = false;
+	cfg->send_idle_enable = false;
+	cfg->no_length_check_enable = true;
+	cfg->lgth_check_nostdr = false;
+	cfg->time_stamp_enable = false;
+	cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+	cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
+	cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
+	cfg->pad_enable = true;
+	cfg->phy_tx_ena_on = false;
+	cfg->rx_sfd_any = false;
+	cfg->rx_pbl_fwd = false;
+	cfg->tx_pbl_fwd = false;
+	cfg->debug_mode = false;
+	cfg->wake_on_lan = false;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+	u64 mask1, mask2;
+	u32 xor_val = 0;
+	u8 i, j;
+
+	for (i = 0; i < 6; i++) {
+		mask1 = eth_addr & (u64)0x01;
+		eth_addr >>= 1;
+
+		for (j = 0; j < 7; j++) {
+			mask2 = eth_addr & (u64)0x01;
+			mask1 ^= mask2;
+			eth_addr >>= 1;
+		}
+
+		xor_val |= (mask1 << (5 - i));
+	}
+
+	return xor_val;
+}
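
The routine above folds the 64-bit representation of the address eight bits at a time into a parity bit, producing a 6-bit index into the 64-entry multicast hash table. A minimal sketch of how the add/del helpers further below use it (the wrapper name is made up for illustration):

static u32 example_hash_bucket(u64 addr)
{
	/* Same reduction memac_add_hash_mac_address() performs */
	return get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
}
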
+
+static void setup_sgmii_internal_phy(struct fman_mac *memac, u8 phy_addr,
+				     struct fixed_phy_status *fixed_link)
+{
+	u16 tmp_reg16, speed;
+
+	/* In case the higher MACs are used (i.e. the MACs that should
+	 * support 10G), speed=10000 is provided for SGMII ports.
+	 * Temporarily modify the ENET mode to 1G so that the MII
+	 * functions work correctly.
+	 */
+	speed = memac->max_speed;
+	memac->max_speed = SPEED_1000;
+
+	/* SGMII mode */
+	tmp_reg16 = PHY_SGMII_IF_MODE_SGMII;
+	if (!fixed_link)
+		/* AN enable */
+		tmp_reg16 |= PHY_SGMII_IF_MODE_AN;
+	else {
+#ifndef __rtems__
+		switch (fixed_link->speed) {
+		case 10:
+			tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_10M;
+		break;
+		case 100:
+			tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_100M;
+		break;
+		case 1000: /* fallthrough */
+		default:
+			tmp_reg16 |= PHY_SGMII_IF_MODE_SPEED_GB;
+		break;
+		}
+		if (fixed_link->duplex)
+			tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_FULL;
+		else
+			tmp_reg16 |= PHY_SGMII_IF_MODE_DUPLEX_HALF;
+#endif /* __rtems__ */
+	}
+	mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+
+	/* Device ability according to SGMII specification */
+	tmp_reg16 = PHY_SGMII_DEV_ABILITY_SGMII;
+	mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+
+	/* Adjust link timer for SGMII  -
+	 * According to Cisco SGMII specification the timer should be 1.6 ms.
+	 * The link_timer register is configured in units of the clock.
+	 * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+	 * unit = 1 / (125*10^6 Hz) = 8 ns.
+	 * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
+	 * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+	 * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+	 * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
+	 * Since the 1G SGMII link_timer value would be too short for
+	 * 2.5G SGMII, we always program the 2.5G SGMII value here.
+	 */
+	mii_write_phy_reg(memac, phy_addr, 0x13, 0x0007);
+	mii_write_phy_reg(memac, phy_addr, 0x12, 0xa120);
+
+	if (!fixed_link)
+		/* Restart AN */
+		tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
+	else
+		/* AN disabled */
+		tmp_reg16 = PHY_SGMII_CR_DEF_VAL & ~PHY_SGMII_CR_AN_ENABLE;
+	mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
+
+	/* Restore original speed */
+	memac->max_speed = speed;
+}
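
As a worked check of the constants programmed just above: 1.6 ms in 3.2 ns units is 1.6 ms / 3.2 ns = 500000 = 0x0007a120, which the code splits across the two 16-bit link_timer PHY registers as 0x0007 (register 0x13) and 0xa120 (register 0x12). The 1G figure, 1.6 ms / 8 ns = 200000 = 0x00030d40, would split as 0x0003 and 0x0d40 in the same way.
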
+
+static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac, u8 phy_addr)
+{
+	u16 tmp_reg16, speed;
+
+	/* In case the higher MACs are used (i.e. the MACs that
+	 * should support 10G), speed=10000 is provided for SGMII ports.
+	 * Temporarily modify the ENET mode to 1G so that the MII
+	 * functions work correctly.
+	 */
+	speed = memac->max_speed;
+	memac->max_speed = SPEED_1000;
+
+	/* 1000BaseX mode */
+	tmp_reg16 = PHY_SGMII_IF_MODE_1000X;
+	mii_write_phy_reg(memac, phy_addr, 0x14, tmp_reg16);
+
+	/* AN Device capability  */
+	tmp_reg16 = PHY_SGMII_DEV_ABILITY_1000X;
+	mii_write_phy_reg(memac, phy_addr, 0x4, tmp_reg16);
+
+	/* Adjust link timer for SGMII  -
+	 * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
+	 * The link_timer register is configured in units of the clock.
+	 * - When running as 1G SGMII, Serdes clock is 125 MHz, so
+	 * unit = 1 / (125*10^6 Hz) = 8 ns.
+	 * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
+	 * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
+	 * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
+	 * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
+	 * Since the 1G SGMII link_timer value would be too short for
+	 * 2.5G SGMII, we always program the 2.5G SGMII value here.
+	 */
+	mii_write_phy_reg(memac, phy_addr, 0x13, 0x002f);
+	mii_write_phy_reg(memac, phy_addr, 0x12, 0xaf08);
+
+	/* Restart AN */
+	tmp_reg16 = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
+	mii_write_phy_reg(memac, phy_addr, 0x0, tmp_reg16);
+
+	/* Restore original speed */
+	memac->max_speed = speed;
+}
+
+static int check_init_parameters(struct fman_mac *memac)
+{
+	if (memac->addr == 0) {
+		pr_err("Ethernet MAC must have a valid MAC address\n");
+		return -EINVAL;
+	}
+	if (!memac->exception_cb) {
+		pr_err("Uninitialized exception handler\n");
+		return -EINVAL;
+	}
+	if (!memac->event_cb) {
+		pr_warn("Uninitialized event handler\n");
+		return -EINVAL;
+	}
+
+	/* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
+	if (!memac->memac_drv_param->no_length_check_enable) {
+		pr_err("Length Check!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+	u32 bit_mask;
+
+	switch (exception) {
+	case FM_MAC_EX_10G_TX_ECC_ER:
+		bit_mask = MEMAC_IMASK_TECC_ER;
+		break;
+	case FM_MAC_EX_10G_RX_ECC_ER:
+		bit_mask = MEMAC_IMASK_RECC_ER;
+		break;
+	case FM_MAC_EX_TS_FIFO_ECC_ERR:
+		bit_mask = MEMAC_IMASK_TSECC_ER;
+		break;
+	case FM_MAC_EX_MAGIC_PACKET_INDICATION:
+		bit_mask = MEMAC_IMASK_MGI;
+		break;
+	default:
+		bit_mask = 0;
+		break;
+	}
+
+	return bit_mask;
+}
+
+static void memac_err_exception(void *handle)
+{
+	struct fman_mac *memac = (struct fman_mac *)handle;
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 event, imask;
+
+	event = ioread32be(&regs->ievent);
+	imask = ioread32be(&regs->imask);
+
+	/* The imask includes both error and notification/event bits.
+	 * Keep only the error bits that are enabled by imask.
+	 * The imask error bits are offset by 16 bits from their
+	 * corresponding locations in ievent - hence the >> 16.
+	 */
+	event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+	iowrite32be(event, &regs->ievent);
+
+	if (event & MEMAC_IEVNT_TS_ECC_ER)
+		memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
+	if (event & MEMAC_IEVNT_TX_ECC_ER)
+		memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+	if (event & MEMAC_IEVNT_RX_ECC_ER)
+		memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+}
+
+static void memac_exception(void *handle)
+{
+	struct fman_mac *memac = (struct fman_mac *)handle;
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 event, imask;
+
+	event = ioread32be(&regs->ievent);
+	imask = ioread32be(&regs->imask);
+
+	/* The imask includes both error and notification/event bits.
+	 * Keep only the error bits that are enabled by imask.
+	 * The imask error bits are offset by 16 bits from their
+	 * corresponding locations in ievent - hence the >> 16.
+	 */
+	event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
+
+	iowrite32be(event, &regs->ievent);
+
+	if (event & MEMAC_IEVNT_MGI)
+		memac->exception_cb(memac->dev_id,
+				    FM_MAC_EX_MAGIC_PACKET_INDICATION);
+}
+
+static void free_init_resources(struct fman_mac *memac)
+{
+	fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+			     FMAN_INTR_TYPE_ERR);
+
+	fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+			     FMAN_INTR_TYPE_NORMAL);
+
+	/* release the driver's group hash table */
+	free_hash_table(memac->multicast_addr_hash);
+	memac->multicast_addr_hash = NULL;
+
+	/* release the driver's individual hash table */
+	free_hash_table(memac->unicast_addr_hash);
+	memac->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct memac_cfg *memac_drv_params)
+{
+	/* Init is done once memac_init() has freed the driver parameters */
+	if (!memac_drv_params)
+		return true;
+
+	return false;
+}
+
+int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 tmp;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (mode & COMM_MODE_RX)
+		tmp |= CMD_CFG_RX_EN;
+	if (mode & COMM_MODE_TX)
+		tmp |= CMD_CFG_TX_EN;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 tmp;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (mode & COMM_MODE_RX)
+		tmp &= ~CMD_CFG_RX_EN;
+	if (mode & COMM_MODE_TX)
+		tmp &= ~CMD_CFG_TX_EN;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 tmp;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (new_val)
+		tmp |= CMD_CFG_PROMIS_EN;
+	else
+		tmp &= ~CMD_CFG_PROMIS_EN;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int memac_adjust_link(struct fman_mac *memac, u16 speed)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 tmp;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->if_mode);
+
+	/* Set full duplex */
+	tmp &= ~IF_MODE_HD;
+
+	if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
+		/* Configure RGMII in manual mode */
+		tmp &= ~IF_MODE_RGMII_AUTO;
+		tmp &= ~IF_MODE_RGMII_SP_MASK;
+		/* Full duplex */
+		tmp |= IF_MODE_RGMII_FD;
+
+		switch (speed) {
+		case SPEED_1000:
+			tmp |= IF_MODE_RGMII_1000;
+			break;
+		case SPEED_100:
+			tmp |= IF_MODE_RGMII_100;
+			break;
+		case SPEED_10:
+			tmp |= IF_MODE_RGMII_10;
+			break;
+		default:
+			break;
+		}
+	}
+
+	iowrite32be(tmp, &regs->if_mode);
+
+	return 0;
+}
+
+int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
+{
+	if (is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	memac->memac_drv_param->max_frame_length = new_val;
+
+	return 0;
+}
+
+int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
+{
+	if (is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	memac->memac_drv_param->reset_on_init = enable;
+
+	return 0;
+}
+
+#ifndef __rtems__
+int memac_cfg_fixed_link(struct fman_mac *memac,
+			 struct fixed_phy_status *fixed_link)
+{
+	if (is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	memac->memac_drv_param->fixed_link = fixed_link;
+
+	return 0;
+}
+#endif /* __rtems__ */
+
+int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+			      u16 pause_time, u16 thresh_time)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 tmp;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->tx_fifo_sections);
+
+	GET_TX_EMPTY_DEFAULT_VALUE(tmp);
+	iowrite32be(tmp, &regs->tx_fifo_sections);
+
+	tmp = ioread32be(&regs->command_config);
+	tmp &= ~CMD_CFG_PFC_MODE;
+	priority = 0;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	tmp = ioread32be(&regs->pause_quanta[priority / 2]);
+	if (priority % 2)
+		tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
+	else
+		tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
+	tmp |= ((u32)pause_time << (16 * (priority % 2)));
+	iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+
+	tmp = ioread32be(&regs->pause_thresh[priority / 2]);
+	if (priority % 2)
+		tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
+	else
+		tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
+	tmp |= ((u32)thresh_time << (16 * (priority % 2)));
+	iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+
+	return 0;
+}
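
For reference, each 32-bit pause_quanta/pause_thresh register carries two 16-bit per-priority fields: the register index is priority / 2, and the shift 16 * (priority % 2) places even priorities in bits 15:0 and odd priorities in bits 31:16. A tiny sketch of that packing (the helper is illustrative only):

static u32 example_pause_field(u8 priority, u16 value)
{
	/* Even priorities land in the low half-word, odd in the high one */
	return (u32)value << (16 * (priority % 2));
}
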
+
+int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	u32 tmp;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (en)
+		tmp &= ~CMD_CFG_PAUSE_IGNORE;
+	else
+		tmp |= CMD_CFG_PAUSE_IGNORE;
+
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+{
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+
+	return 0;
+}
+
+int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	struct eth_hash_entry *hash_entry;
+	u32 hash;
+	u64 addr;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+	if (!(addr & GROUP_ADDRESS)) {
+		/* Unicast addresses not supported in hash */
+		pr_err("Unicast Address\n");
+		return -EINVAL;
+	}
+	hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+	/* Create element to be added to the driver hash table */
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	if (!hash_entry)
+		return -ENOMEM;
+	hash_entry->addr = addr;
+	INIT_LIST_HEAD(&hash_entry->node);
+
+	list_add_tail(&hash_entry->node,
+		      &memac->multicast_addr_hash->lsts[hash]);
+	iowrite32be(hash | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+	return 0;
+}
+
+int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+{
+	struct memac_regs __iomem *regs = memac->regs;
+	struct eth_hash_entry *hash_entry = NULL;
+	struct list_head *pos;
+	u32 hash;
+	u64 addr;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+	hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+	list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
+		hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+		if (hash_entry->addr == addr) {
+			list_del_init(&hash_entry->node);
+			kfree(hash_entry);
+			break;
+		}
+	}
+	if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
+		iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+	return 0;
+}
+
+int memac_set_exception(struct fman_mac *memac,
+			enum fman_mac_exceptions exception, bool enable)
+{
+	u32 bit_mask = 0;
+
+	if (!is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	bit_mask = get_exception_flag(exception);
+	if (bit_mask) {
+		if (enable)
+			memac->exceptions |= bit_mask;
+		else
+			memac->exceptions &= ~bit_mask;
+	} else {
+		pr_err("Undefined exception\n");
+		return -EINVAL;
+	}
+	set_exception(memac->regs, bit_mask, enable);
+
+	return 0;
+}
+
+int memac_init(struct fman_mac *memac)
+{
+	struct memac_cfg *memac_drv_param;
+	u8 i, phy_addr;
+	enet_addr_t eth_addr;
+	bool slow_10g_if = false;
+	struct fixed_phy_status *fixed_link;
+	int err;
+	u32 reg32 = 0;
+
+	if (is_init_done(memac->memac_drv_param))
+		return -EINVAL;
+
+	err = check_init_parameters(memac);
+	if (err)
+		return err;
+
+	memac_drv_param = memac->memac_drv_param;
+
+	if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
+		slow_10g_if = true;
+
+	/* First, reset the MAC if desired. */
+	if (memac_drv_param->reset_on_init) {
+		err = reset(memac->regs);
+		if (err) {
+			pr_err("mEMAC reset failed\n");
+			return err;
+		}
+	}
+
+	/* MAC Address */
+	MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+	add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+
+	fixed_link = memac_drv_param->fixed_link;
+
+	init(memac->regs, memac->memac_drv_param, memac->phy_if,
+	     memac->max_speed, slow_10g_if, memac->exceptions);
+
+	/* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
+	 * Exists only in FMan 6.0 and 6.3.
+	 */
+	if ((memac->fm_rev_info.major == 6) &&
+	    ((memac->fm_rev_info.minor == 0) ||
+	    (memac->fm_rev_info.minor == 3))) {
+		/* MAC strips CRC from received frames - this workaround
+		 * should decrease the likelihood of bug appearance
+		 */
+		reg32 = in_be32(&memac->regs->command_config);
+		reg32 &= ~CMD_CFG_CRC_FWD;
+		out_be32(&memac->regs->command_config, reg32);
+	}
+
+	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+		/* Configure internal SGMII PHY */
+		if (memac->basex_if)
+			setup_sgmii_internal_phy_base_x(memac, PHY_MDIO_ADDR);
+		else
+			setup_sgmii_internal_phy(memac, PHY_MDIO_ADDR,
+						 fixed_link);
+	} else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
+		/* Configure 4 internal SGMII PHYs */
+		for (i = 0; i < 4; i++) {
+			/* QSGMII PHY address occupies 3 upper bits of 5-bit
+			 * phy_address; the lower 2 bits are used to extend
+			 * register address space and access each one of 4
+			 * ports inside QSGMII.
+			 */
+			phy_addr = (u8)((PHY_MDIO_ADDR << 2) | i);
+			if (memac->basex_if)
+				setup_sgmii_internal_phy_base_x(memac,
+								phy_addr);
+			else
+				setup_sgmii_internal_phy(memac, phy_addr,
+							 fixed_link);
+		}
+	}
+
+	/* Max Frame Length */
+	err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
+				     memac_drv_param->max_frame_length);
+	if (err) {
+		pr_err("Setting MAC max frame length failed\n");
+		return err;
+	}
+
+	memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+	if (!memac->multicast_addr_hash) {
+		free_init_resources(memac);
+		pr_err("Hash table allocation failed\n");
+		return -ENOMEM;
+	}
+
+	memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
+	if (!memac->unicast_addr_hash) {
+		free_init_resources(memac);
+		pr_err("Hash table allocation failed\n");
+		return -ENOMEM;
+	}
+
+	fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+			   FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
+
+	fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
+			   FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
+
+	kfree(memac_drv_param);
+	memac->memac_drv_param = NULL;
+
+	return 0;
+}
+
+int memac_free(struct fman_mac *memac)
+{
+	free_init_resources(memac);
+
+	kfree(memac->memac_drv_param);
+	kfree(memac);
+
+	return 0;
+}
+
+struct fman_mac *memac_config(struct fman_mac_params *params)
+{
+	struct fman_mac *memac;
+	struct memac_cfg *memac_drv_param;
+	void __iomem *base_addr;
+
+	base_addr = params->base_addr;
+	/* allocate memory for the m_emac data structure */
+	memac = kzalloc(sizeof(*memac), GFP_KERNEL);
+	if (!memac)
+		return NULL;
+
+	/* allocate memory for the m_emac driver parameters data structure */
+	memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
+	if (!memac_drv_param) {
+		memac_free(memac);
+		return NULL;
+	}
+
+	/* Plant parameter structure pointer */
+	memac->memac_drv_param = memac_drv_param;
+
+	set_dflts(memac_drv_param);
+
+	memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+
+	memac->regs = (struct memac_regs __iomem *)(base_addr);
+	memac->mii_regs = (struct memac_mii_regs __iomem *)
+		(base_addr + MEMAC_TO_MII_OFFSET);
+	memac->max_speed = params->max_speed;
+	memac->phy_if = params->phy_if;
+	memac->mac_id = params->mac_id;
+	memac->exceptions = MEMAC_DEFAULT_EXCEPTIONS;
+	memac->exception_cb = params->exception_cb;
+	memac->event_cb = params->event_cb;
+	memac->dev_id = params->dev_id;
+	memac->fm = params->fm;
+	memac->basex_if = params->basex_if;
+
+	/* Save FMan revision */
+	fman_get_revision(memac->fm, &memac->fm_rev_info);
+
+	return memac;
+}
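
Taken together, the functions above define a two-stage life cycle: memac_config() allocates the fman_mac together with a default-filled memac_cfg, the memac_cfg_*() setters may adjust it, and memac_init() then programs the hardware and frees the parameter block (after which is_init_done() reports true and further configuration calls fail). A minimal sketch of the intended call order, assuming the caller has already filled a struct fman_mac_params as the mac.c glue in this import does (the 1522 byte frame length is a placeholder):

static struct fman_mac *example_memac_bringup(struct fman_mac_params *params)
{
	struct fman_mac *memac = memac_config(params);

	if (!memac)
		return NULL;

	/* Configuration overrides are only legal before memac_init() */
	memac_cfg_reset_on_init(memac, true);
	memac_cfg_max_frame_len(memac, 1522);

	/* memac_init() consumes and frees memac_drv_param */
	if (memac_init(memac) != 0) {
		memac_free(memac);
		return NULL;
	}

	return memac;
}
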
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_memac.h b/linux/drivers/net/ethernet/freescale/fman/fman_memac.h
new file mode 100644
index 0000000..ae01dd0
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEMAC_H
+#define __MEMAC_H
+
+#include "fman_mac.h"
+
+#include <linux/netdevice.h>
+
+struct fman_mac *memac_config(struct fman_mac_params *params);
+int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
+int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_adjust_link(struct fman_mac *memac, u16 speed);
+int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
+int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
+#ifndef __rtems__
+int memac_cfg_fixed_link(struct fman_mac *memac,
+			 struct fixed_phy_status *fixed_link);
+#endif /* __rtems__ */
+int memac_enable(struct fman_mac *memac, enum comm_mode mode);
+int memac_disable(struct fman_mac *memac, enum comm_mode mode);
+int memac_init(struct fman_mac *memac);
+int memac_free(struct fman_mac *memac);
+int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
+int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+			      u16 pause_time, u16 thresh_time);
+int memac_set_exception(struct fman_mac *memac,
+			enum fman_mac_exceptions exception, bool enable);
+int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+
+#endif /* __MEMAC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_muram.c b/linux/drivers/net/ethernet/freescale/fman/fman_muram.c
new file mode 100644
index 0000000..9762d72
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -0,0 +1,124 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fman_muram.h"
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+struct muram_info {
+	struct gen_pool *pool;
+	void __iomem *vbase;
+	size_t size;
+	phys_addr_t pbase;
+};
+
+static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
+						unsigned long vaddr)
+{
+	return vaddr - (unsigned long)muram->vbase;
+}
+
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
+{
+	struct muram_info *muram;
+	void __iomem *vaddr;
+	int ret;
+
+	muram = kzalloc(sizeof(*muram), GFP_KERNEL);
+	if (!muram)
+		return NULL;
+
+	muram->pool = gen_pool_create(ilog2(64), -1);
+	if (!muram->pool) {
+		pr_err("%s(): MURAM pool create failed\n", __func__);
+		goto  muram_free;
+	}
+
+	vaddr = ioremap(base, size);
+	if (!vaddr) {
+		pr_err("%s(): MURAM ioremap failed\n", __func__);
+		goto pool_destroy;
+	}
+
+	ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
+				base, size, -1);
+	if (ret < 0) {
+		pr_err("%s(): MURAM pool add failed\n", __func__);
+		iounmap(vaddr);
+		goto pool_destroy;
+	}
+
+	memset_io(vaddr, 0, (int)size);
+
+	muram->vbase = vaddr;
+	muram->pbase = base;
+	return muram;
+
+pool_destroy:
+	gen_pool_destroy(muram->pool);
+muram_free:
+	kfree(muram);
+	return NULL;
+}
+
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+					 unsigned long offset)
+{
+	return offset + (unsigned long)muram->vbase;
+}
+
+int fman_muram_alloc(struct muram_info *muram, size_t size)
+{
+	unsigned long vaddr;
+
+	vaddr = gen_pool_alloc(muram->pool, size);
+	if (!vaddr)
+		return -ENOMEM;
+
+	memset_io((void __iomem *)vaddr, 0, size);
+
+	return fman_muram_vbase_to_offset(muram, vaddr);
+}
+
+void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+{
+	unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
+
+	gen_pool_free(muram->pool, addr, size);
+}
+
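
In this allocator, fman_muram_alloc() hands back a byte offset into the MURAM partition (or -ENOMEM), and fman_muram_offset_to_vbase() maps an offset back to a CPU-visible address. A minimal usage sketch, with the base, size and 0x200 block size as placeholders:

static int example_muram_use(phys_addr_t base, size_t size)
{
	struct muram_info *muram = fman_muram_init(base, size);
	int offset;

	if (!muram)
		return -ENOMEM;

	offset = fman_muram_alloc(muram, 0x200);
	if (offset < 0)
		return offset;

	/* Touch the block through its virtual address, then release it */
	memset_io((void __iomem *)fman_muram_offset_to_vbase(muram, offset),
		  0, 0x200);
	fman_muram_free_mem(muram, offset, 0x200);

	return 0;
}
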
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_muram.h b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
new file mode 100644
index 0000000..c715795
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FM_MURAM_EXT
+#define __FM_MURAM_EXT
+
+#include <linux/types.h>
+
+#define FM_MURAM_INVALID_ALLOCATION	-1
+
+/* Structure for FM MURAM information */
+struct muram_info;
+
+/**
+ * fman_muram_init
+ * @base:	Pointer to base of memory mapped FM-MURAM.
+ * @size:	Size of the FM-MURAM partition.
+ *
+ * Creates a partition in the MURAM.
+ * The routine returns a pointer to the MURAM partition.
+ * This pointer must be passed to all other FM-MURAM function calls.
+ * No actual initialization or configuration of FM_MURAM hardware is done by
+ * this routine.
+ *
+ * Return: pointer to FM-MURAM object, or NULL on failure.
+ */
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
+
+/**
+ * fman_muram_offset_to_vbase
+ * @muram:	FM-MURAM module pointer.
+ * @offset:	the offset of the memory block
+ *
+ * Gives the address of the memory region at the specified offset.
+ *
+ * Return: The address of the memory block
+ */
+unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
+					 unsigned long offset);
+
+/**
+ * fman_muram_alloc
+ * @muram:	FM-MURAM module pointer.
+ * @size:	Size of the memory to be allocated.
+ *
+ * Allocate some memory from FM-MURAM partition.
+ *
+ * Return: offset of the allocated memory within the FM-MURAM partition,
+ * or -ENOMEM on failure.
+ */
+int fman_muram_alloc(struct muram_info *muram, size_t size);
+
+/**
+ * fman_muram_free_mem
+ * @muram:	FM-MURAM module pointer.
+ * @offset:	offset of the memory region to be freed.
+ * @size:	size of the memory to be freed.
+ *
+ * Free an allocated memory from FM-MURAM partition.
+ */
+void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+
+#endif /* __FM_MURAM_EXT */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.c b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
new file mode 100644
index 0000000..e42ac1c
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -0,0 +1,1827 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_sp.h"
+
+#include <asm/mpc85xx.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+/* Queue ID */
+#define DFLT_FQ_ID		0x00FFFFFF
+
+/* General defines */
+#define PORT_BMI_FIFO_UNITS		0x100
+
+#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)	\
+	min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
+
+#define PORT_CG_MAP_NUM			8
+#define PORT_PRS_RESULT_WORDS_NUM	8
+#define PORT_IC_OFFSET_UNITS		0x10
+
+#define MIN_EXT_BUF_SIZE		64
+
+#define BMI_PORT_REGS_OFFSET				0
+#define QMI_PORT_REGS_OFFSET				0x400
+
+/* Default values */
+#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN		\
+	DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
+
+#define DFLT_PORT_CUT_BYTES_FROM_END		4
+
+#define DFLT_PORT_ERRORS_TO_DISCARD		FM_PORT_FRM_ERR_CLS_DISCARD
+#define DFLT_PORT_MAX_FRAME_LENGTH		9600
+
+#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size)	\
+	MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
+
+#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size)	\
+	(major == 6 ?						\
+	MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) :		\
+	(MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4))	\
+
+#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS		0
+
+/* QMI defines */
+#define QMI_DEQ_CFG_SUBPORTAL_MASK		0x1f
+
+#define QMI_PORT_CFG_EN				0x80000000
+#define QMI_PORT_STATUS_DEQ_FD_BSY		0x20000000
+
+#define QMI_DEQ_CFG_PRI				0x80000000
+#define QMI_DEQ_CFG_TYPE1			0x10000000
+#define QMI_DEQ_CFG_TYPE2			0x20000000
+#define QMI_DEQ_CFG_TYPE3			0x30000000
+#define QMI_DEQ_CFG_PREFETCH_PARTIAL		0x01000000
+#define QMI_DEQ_CFG_PREFETCH_FULL		0x03000000
+#define QMI_DEQ_CFG_SP_MASK			0xf
+#define QMI_DEQ_CFG_SP_SHIFT			20
+
+#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type)	\
+	(_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
+
+/* BMI defines */
+#define BMI_EBD_EN				0x80000000
+
+#define BMI_PORT_CFG_EN				0x80000000
+#define BMI_PORT_CFG_FDOVR			0x02000000
+
+#define BMI_PORT_STATUS_BSY			0x80000000
+
+#define BMI_DMA_ATTR_SWP_SHIFT			FMAN_SP_DMA_ATTR_SWP_SHIFT
+#define BMI_DMA_ATTR_IC_STASH_ON		0x10000000
+#define BMI_DMA_ATTR_HDR_STASH_ON		0x04000000
+#define BMI_DMA_ATTR_SG_STASH_ON		0x01000000
+#define BMI_DMA_ATTR_WRITE_OPTIMIZE		FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
+
+#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT	16
+#define BMI_RX_FIFO_THRESHOLD_ETHE		0x80000000
+
+#define BMI_FRAME_END_CS_IGNORE_SHIFT		24
+#define BMI_FRAME_END_CS_IGNORE_MASK		0x0000001f
+
+#define BMI_RX_FRAME_END_CUT_SHIFT		16
+#define BMI_RX_FRAME_END_CUT_MASK		0x0000001f
+
+#define BMI_IC_TO_EXT_SHIFT			FMAN_SP_IC_TO_EXT_SHIFT
+#define BMI_IC_TO_EXT_MASK			0x0000001f
+#define BMI_IC_FROM_INT_SHIFT			FMAN_SP_IC_FROM_INT_SHIFT
+#define BMI_IC_FROM_INT_MASK			0x0000000f
+#define BMI_IC_SIZE_MASK			0x0000001f
+
+#define BMI_INT_BUF_MARG_SHIFT			28
+#define BMI_INT_BUF_MARG_MASK			0x0000000f
+#define BMI_EXT_BUF_MARG_START_SHIFT		FMAN_SP_EXT_BUF_MARG_START_SHIFT
+#define BMI_EXT_BUF_MARG_START_MASK		0x000001ff
+#define BMI_EXT_BUF_MARG_END_MASK		0x000001ff
+
+#define BMI_CMD_MR_LEAC				0x00200000
+#define BMI_CMD_MR_SLEAC			0x00100000
+#define BMI_CMD_MR_MA				0x00080000
+#define BMI_CMD_MR_DEAS				0x00040000
+#define BMI_CMD_RX_MR_DEF			(BMI_CMD_MR_LEAC | \
+						BMI_CMD_MR_SLEAC | \
+						BMI_CMD_MR_MA | \
+						BMI_CMD_MR_DEAS)
+#define BMI_CMD_TX_MR_DEF			0
+
+#define BMI_CMD_ATTR_ORDER			0x80000000
+#define BMI_CMD_ATTR_SYNC			0x02000000
+#define BMI_CMD_ATTR_COLOR_SHIFT		26
+
+#define BMI_FIFO_PIPELINE_DEPTH_SHIFT		12
+#define BMI_FIFO_PIPELINE_DEPTH_MASK		0x0000000f
+#define BMI_NEXT_ENG_FD_BITS_SHIFT		24
+
+#define BMI_EXT_BUF_POOL_VALID			FMAN_SP_EXT_BUF_POOL_VALID
+#define BMI_EXT_BUF_POOL_EN_COUNTER		FMAN_SP_EXT_BUF_POOL_EN_COUNTER
+#define BMI_EXT_BUF_POOL_BACKUP		FMAN_SP_EXT_BUF_POOL_BACKUP
+#define BMI_EXT_BUF_POOL_ID_SHIFT		16
+#define BMI_EXT_BUF_POOL_ID_MASK		0x003F0000
+#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT	16
+
+#define BMI_TX_FIFO_MIN_FILL_SHIFT		16
+
+#define BMI_SG_DISABLE				FMAN_SP_SG_DISABLE
+
+#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+#define BMI_FIFO_THRESHOLD	      ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
+
+#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed)		\
+	((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
+
+#define BMI_PORT_RFNE_FRWD_RPD                  0x40000000
+
+#define RX_ERRS_TO_ENQ				  \
+	(FM_PORT_FRM_ERR_DMA			| \
+	FM_PORT_FRM_ERR_PHYSICAL		| \
+	FM_PORT_FRM_ERR_SIZE			| \
+	FM_PORT_FRM_ERR_EXTRACTION		| \
+	FM_PORT_FRM_ERR_NO_SCHEME		| \
+	FM_PORT_FRM_ERR_PRS_TIMEOUT		| \
+	FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT	| \
+	FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED	| \
+	FM_PORT_FRM_ERR_PRS_HDR_ERR		| \
+	FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW	| \
+	FM_PORT_FRM_ERR_IPRE)
+
+/* NIA defines */
+#define NIA_ORDER_RESTOR				0x00800000
+#define NIA_ENG_FM_CTL					0x00000000
+#define NIA_ENG_BMI					0x00500000
+#define NIA_ENG_QMI_ENQ					0x00540000
+#define NIA_ENG_QMI_DEQ					0x00580000
+
+#define NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME	0x00000028
+#define NIA_BMI_AC_ENQ_FRAME				0x00000002
+#define NIA_BMI_AC_TX_RELEASE				0x000002C0
+#define NIA_BMI_AC_RELEASE				0x000000C0
+#define NIA_BMI_AC_TX					0x00000274
+#define NIA_BMI_AC_FETCH_ALL_FRAME			0x0000020c
+
+/* Port IDs */
+#define TX_10G_PORT_BASE		0x30
+#define RX_10G_PORT_BASE		0x10
+
+/* BMI Rx port register map */
+struct fman_port_rx_bmi_regs {
+	u32 fmbm_rcfg;		/* Rx Configuration */
+	u32 fmbm_rst;		/* Rx Status */
+	u32 fmbm_rda;		/* Rx DMA attributes */
+	u32 fmbm_rfp;		/* Rx FIFO Parameters */
+	u32 fmbm_rfed;		/* Rx Frame End Data */
+	u32 fmbm_ricp;		/* Rx Internal Context Parameters */
+	u32 fmbm_rim;		/* Rx Internal Buffer Margins */
+	u32 fmbm_rebm;		/* Rx External Buffer Margins */
+	u32 fmbm_rfne;		/* Rx Frame Next Engine */
+	u32 fmbm_rfca;		/* Rx Frame Command Attributes. */
+	u32 fmbm_rfpne;		/* Rx Frame Parser Next Engine */
+	u32 fmbm_rpso;		/* Rx Parse Start Offset */
+	u32 fmbm_rpp;		/* Rx Policer Profile  */
+	u32 fmbm_rccb;		/* Rx Coarse Classification Base */
+	u32 fmbm_reth;		/* Rx Excessive Threshold */
+	u32 reserved003c[1];	/* (0x03C 0x03F) */
+	u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
+	/* Rx Parse Results Array Init */
+	u32 fmbm_rfqid;		/* Rx Frame Queue ID */
+	u32 fmbm_refqid;	/* Rx Error Frame Queue ID */
+	u32 fmbm_rfsdm;		/* Rx Frame Status Discard Mask */
+	u32 fmbm_rfsem;		/* Rx Frame Status Error Mask */
+	u32 fmbm_rfene;		/* Rx Frame Enqueue Next Engine */
+	u32 reserved0074[0x2];	/* (0x074-0x07C)  */
+	u32 fmbm_rcmne;		/* Rx Frame Continuous Mode Next Engine */
+	u32 reserved0080[0x20];	/* (0x080 0x0FF)  */
+	u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
+	/* Buffer Manager pool Information- */
+	u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];	/* Allocate Counter- */
+	u32 reserved0130[8];	/* 0x130/0x140 - 0x15F reserved - */
+	u32 fmbm_rcgm[PORT_CG_MAP_NUM];	/* Congestion Group Map */
+	u32 fmbm_mpd;		/* BM Pool Depletion  */
+	u32 reserved0184[0x1F];	/* (0x184 0x1FF) */
+	u32 fmbm_rstc;		/* Rx Statistics Counters */
+	u32 fmbm_rfrc;		/* Rx Frame Counter */
+	u32 fmbm_rfbc;		/* Rx Bad Frames Counter */
+	u32 fmbm_rlfc;		/* Rx Large Frames Counter */
+	u32 fmbm_rffc;		/* Rx Filter Frames Counter */
+	u32 fmbm_rfdc;		/* Rx Frame Discard Counter */
+	u32 fmbm_rfldec;		/* Rx Frames List DMA Error Counter */
+	u32 fmbm_rodc;		/* Rx Out of Buffers Discard Cntr */
+	u32 fmbm_rbdc;		/* Rx Buffers Deallocate Counter */
+	u32 fmbm_rpec;		/* RX Prepare to enqueue Counter */
+	u32 reserved0224[0x16];	/* (0x224 0x27F) */
+	u32 fmbm_rpc;		/* Rx Performance Counters */
+	u32 fmbm_rpcp;		/* Rx Performance Count Parameters */
+	u32 fmbm_rccn;		/* Rx Cycle Counter */
+	u32 fmbm_rtuc;		/* Rx Tasks Utilization Counter */
+	u32 fmbm_rrquc;		/* Rx Receive Queue Utilization cntr */
+	u32 fmbm_rduc;		/* Rx DMA Utilization Counter */
+	u32 fmbm_rfuc;		/* Rx FIFO Utilization Counter */
+	u32 fmbm_rpac;		/* Rx Pause Activation Counter */
+	u32 reserved02a0[0x18];	/* (0x2A0 0x2FF) */
+	u32 fmbm_rdcfg[0x3];	/* Rx Debug Configuration */
+	u32 fmbm_rgpr;		/* Rx General Purpose Register */
+	u32 reserved0310[0x3a];
+};
+
+/* BMI Tx port register map */
+struct fman_port_tx_bmi_regs {
+	u32 fmbm_tcfg;		/* Tx Configuration */
+	u32 fmbm_tst;		/* Tx Status */
+	u32 fmbm_tda;		/* Tx DMA attributes */
+	u32 fmbm_tfp;		/* Tx FIFO Parameters */
+	u32 fmbm_tfed;		/* Tx Frame End Data */
+	u32 fmbm_ticp;		/* Tx Internal Context Parameters */
+	u32 fmbm_tfdne;		/* Tx Frame Dequeue Next Engine. */
+	u32 fmbm_tfca;		/* Tx Frame Command attribute. */
+	u32 fmbm_tcfqid;	/* Tx Confirmation Frame Queue ID. */
+	u32 fmbm_tefqid;	/* Tx Frame Error Queue ID */
+	u32 fmbm_tfene;		/* Tx Frame Enqueue Next Engine */
+	u32 fmbm_trlmts;	/* Tx Rate Limiter Scale */
+	u32 fmbm_trlmt;		/* Tx Rate Limiter */
+	u32 reserved0034[0x0e];	/* (0x034-0x6c) */
+	u32 fmbm_tccb;		/* Tx Coarse Classification base */
+	u32 fmbm_tfne;		/* Tx Frame Next Engine */
+	u32 fmbm_tpfcm[0x02];
+	/* Tx Priority based Flow Control (PFC) Mapping */
+	u32 fmbm_tcmne;		/* Tx Frame Continuous Mode Next Engine */
+	u32 reserved0080[0x60];	/* (0x080-0x200) */
+	u32 fmbm_tstc;		/* Tx Statistics Counters */
+	u32 fmbm_tfrc;		/* Tx Frame Counter */
+	u32 fmbm_tfdc;		/* Tx Frames Discard Counter */
+	u32 fmbm_tfledc;	/* Tx Frame len error discard cntr */
+	u32 fmbm_tfufdc;	/* Tx Frame unsupported format discard cntr */
+	u32 fmbm_tbdc;		/* Tx Buffers Deallocate Counter */
+	u32 reserved0218[0x1A];	/* (0x218-0x280) */
+	u32 fmbm_tpc;		/* Tx Performance Counters */
+	u32 fmbm_tpcp;		/* Tx Performance Count Parameters */
+	u32 fmbm_tccn;		/* Tx Cycle Counter */
+	u32 fmbm_ttuc;		/* Tx Tasks Utilization Counter */
+	u32 fmbm_ttcquc;	/* Tx Transmit conf Q util Counter */
+	u32 fmbm_tduc;		/* Tx DMA Utilization Counter */
+	u32 fmbm_tfuc;		/* Tx FIFO Utilization Counter */
+	u32 reserved029c[16];	/* (0x29C-0x2FF) */
+	u32 fmbm_tdcfg[0x3];	/* Tx Debug Configuration */
+	u32 fmbm_tgpr;		/* Tx General Purpose Register */
+	u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
+};
+
+/* BMI port register map */
+union fman_port_bmi_regs {
+	struct fman_port_rx_bmi_regs rx;
+	struct fman_port_tx_bmi_regs tx;
+};
+
+/* QMI port register map */
+struct fman_port_qmi_regs {
+	u32 fmqm_pnc;		/* PortID n Configuration Register */
+	u32 fmqm_pns;		/* PortID n Status Register */
+	u32 fmqm_pnts;		/* PortID n Task Status Register */
+	u32 reserved00c[4];	/* 0xn00C - 0xn01B */
+	u32 fmqm_pnen;		/* PortID n Enqueue NIA Register */
+	u32 fmqm_pnetfc;		/* PortID n Enq Total Frame Counter */
+	u32 reserved024[2];	/* 0xn024 - 0x02B */
+	u32 fmqm_pndn;		/* PortID n Dequeue NIA Register */
+	u32 fmqm_pndc;		/* PortID n Dequeue Config Register */
+	u32 fmqm_pndtfc;		/* PortID n Dequeue tot Frame cntr */
+	u32 fmqm_pndfdc;		/* PortID n Dequeue FQID Dflt Cntr */
+	u32 fmqm_pndcc;		/* PortID n Dequeue Confirm Counter */
+};
+
+/* QMI dequeue prefetch modes */
+enum fman_port_deq_prefetch {
+	FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
+	FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
+	FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
+};
+
+/* A structure for defining FM port resources */
+struct fman_port_rsrc {
+	u32 num; /* Committed required resource */
+	u32 extra; /* Extra (not committed) required resource */
+};
+
+enum fman_port_dma_swap {
+	FMAN_PORT_DMA_NO_SWAP,	/* No swap, transfer data as is */
+	FMAN_PORT_DMA_SWAP_LE,
+	/* The transferred data should be swapped in PPC Little Endian mode */
+	FMAN_PORT_DMA_SWAP_BE
+	/* The transferred data should be swapped in Big Endian mode */
+};
+
+/* Default port color */
+enum fman_port_color {
+	FMAN_PORT_COLOR_GREEN,	/* Default port color is green */
+	FMAN_PORT_COLOR_YELLOW,	/* Default port color is yellow */
+	FMAN_PORT_COLOR_RED,		/* Default port color is red */
+	FMAN_PORT_COLOR_OVERRIDE	/* Ignore color */
+};
+
+/* QMI dequeue from the SP channel - types */
+enum fman_port_deq_type {
+	FMAN_PORT_DEQ_BY_PRI,
+	/* Priority precedence and Intra-Class scheduling */
+	FMAN_PORT_DEQ_ACTIVE_FQ,
+	/* Active FQ precedence and Intra-Class scheduling */
+	FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
+	/* Active FQ precedence and override Intra-Class scheduling */
+};
+
+/* External buffer pools configuration */
+struct fman_port_bpools {
+	u8 count;			/* Num of pools to set up */
+	bool counters_enable;		/* Enable allocate counters */
+	u8 grp_bp_depleted_num;
+	/* Number of depleted pools - if reached, the BMI signals
+	 * the MAC to send a pause frame
+	 */
+	struct {
+		u8 bpid;		/* BM pool ID */
+		u16 size;
+		/* Pool's size - must be in ascending order */
+		bool is_backup;
+		/* If this is a backup pool */
+		bool grp_bp_depleted;
+		/* Consider this buffer in multiple pools depletion criteria */
+		bool single_bp_depleted;
+		/* Consider this buffer in single pool depletion criteria */
+	} bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+};
+
+struct fman_port_cfg {
+	u32 dflt_fqid;
+	u32 err_fqid;
+	u8 deq_sp;
+	bool deq_high_priority;
+	enum fman_port_deq_type deq_type;
+	enum fman_port_deq_prefetch deq_prefetch_option;
+	u16 deq_byte_cnt;
+	u8 cheksum_last_bytes_ignore;
+	u8 rx_cut_end_bytes;
+	struct fman_buf_pool_depletion buf_pool_depletion;
+	bool discard_override;
+	bool en_buf_pool_depletion;
+	struct fman_ext_pools ext_buf_pools;
+	u32 tx_fifo_min_level;
+	u32 tx_fifo_low_comf_level;
+	u32 rx_pri_elevation;
+	u32 rx_fifo_thr;
+	struct fman_sp_buf_margins buf_margins;
+	u32 int_buf_start_margin;
+	struct fman_sp_int_context_data_copy int_context;
+	u32 discard_mask;
+	u32 err_mask;
+	bool forward_reuse_int_context;
+	struct fman_buffer_prefix_content buffer_prefix_content;
+	bool dont_release_buf;
+	bool set_num_of_tasks;
+	bool set_num_of_open_dmas;
+	bool set_size_of_fifo;
+	bool bcb_workaround;
+
+	u8 rx_fd_bits;
+	u32 tx_fifo_deq_pipeline_depth;
+	bool errata_A006675;
+	bool errata_A006320;
+	bool excessive_threshold_register;
+	bool fmbm_rebm_has_sgd;
+	bool fmbm_tfne_has_features;
+	bool qmi_deq_options_support;
+
+	enum fman_port_dma_swap dma_swap_data;
+	bool dma_ic_stash_on;
+	bool dma_header_stash_on;
+	bool dma_sg_stash_on;
+	bool dma_write_optimize;
+	enum fman_port_color color;
+	bool sync_req;
+
+	bool no_scatter_gather;
+};
+
+struct fman_port_rx_pools_params {
+	u8 num_of_pools;
+	u16 second_largest_buf_size;
+	u16 largest_buf_size;
+};
+
+struct fman_port_dts_params {
+	void __iomem *base_addr;	/* FMan port virtual memory */
+	enum fman_port_type type;	/* Port type */
+	u16 speed;			/* Port speed */
+	u8 id;				/* HW Port Id */
+	u32 qman_channel_id;		/* QMan channel id (non RX only) */
+	struct fman *fman;		/* FMan Handle */
+};
+
+struct fman_port {
+	void *fm;
+	struct fman_rev_info rev_info;
+	u8 port_id;
+	enum fman_port_type port_type;
+	u16 port_speed;
+
+	union fman_port_bmi_regs __iomem *bmi_regs;
+	struct fman_port_qmi_regs __iomem *qmi_regs;
+
+	struct fman_sp_buffer_offsets buffer_offsets;
+
+	u8 internal_buf_offset;
+	struct fman_ext_pools ext_buf_pools;
+
+	u16 max_frame_length;
+	struct fman_port_rsrc open_dmas;
+	struct fman_port_rsrc tasks;
+	struct fman_port_rsrc fifo_bufs;
+	struct fman_port_rx_pools_params rx_pools_params;
+
+	struct fman_port_cfg *cfg;
+	struct fman_port_dts_params dts_params;
+
+	u8 ext_pools_num;
+	u32 max_port_fifo_size;
+	u32 max_num_of_ext_pools;
+	u32 max_num_of_sub_portals;
+	u32 bm_max_num_of_pools;
+};
+
+static int init_bmi_rx(struct fman_port *port)
+{
+	struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
+	struct fman_port_cfg *cfg = port->cfg;
+	u32 tmp;
+
+	/* Rx Configuration register */
+	tmp = 0;
+	if (cfg->discard_override)
+		tmp |= BMI_PORT_CFG_FDOVR;
+	iowrite32be(tmp, &regs->fmbm_rcfg);
+
+	/* DMA attributes */
+	tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+	if (cfg->dma_ic_stash_on)
+		tmp |= BMI_DMA_ATTR_IC_STASH_ON;
+	if (cfg->dma_header_stash_on)
+		tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
+	if (cfg->dma_sg_stash_on)
+		tmp |= BMI_DMA_ATTR_SG_STASH_ON;
+	if (cfg->dma_write_optimize)
+		tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
+	iowrite32be(tmp, &regs->fmbm_rda);
+
+	/* Rx FIFO parameters */
+	tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
+		BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
+	tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
+	iowrite32be(tmp, &regs->fmbm_rfp);
+
+	if (cfg->excessive_threshold_register)
+		/* always allow access to the extra resources */
+		iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
+
+	/* Frame end data */
+	tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+		BMI_FRAME_END_CS_IGNORE_SHIFT;
+	tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
+		BMI_RX_FRAME_END_CUT_SHIFT;
+	if (cfg->errata_A006320)
+		tmp &= 0xffe0ffff;
+	iowrite32be(tmp, &regs->fmbm_rfed);
+
+	/* Internal context parameters */
+	tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+		BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+	tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+		BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+	tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+		BMI_IC_SIZE_MASK;
+	iowrite32be(tmp, &regs->fmbm_ricp);
+
+	/* Internal buffer offset */
+	tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
+		BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
+	iowrite32be(tmp, &regs->fmbm_rim);
+
+	/* External buffer margins */
+	tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
+		BMI_EXT_BUF_MARG_START_SHIFT;
+	tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
+	if (cfg->fmbm_rebm_has_sgd && cfg->no_scatter_gather)
+		tmp |= BMI_SG_DISABLE;
+	iowrite32be(tmp, &regs->fmbm_rebm);
+
+	/* Frame attributes */
+	tmp = BMI_CMD_RX_MR_DEF;
+	tmp |= BMI_CMD_ATTR_ORDER;
+	tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+	if (cfg->sync_req)
+		tmp |= BMI_CMD_ATTR_SYNC;
+
+	iowrite32be(tmp, &regs->fmbm_rfca);
+
+	/* NIA */
+	tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
+
+	if (cfg->errata_A006675)
+		tmp |= NIA_ENG_FM_CTL |
+		       NIA_FM_CTL_AC_NO_IPACC_PRE_BMI_ENQ_FRAME;
+	else
+		tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+	iowrite32be(tmp, &regs->fmbm_rfne);
+
+	/* Enqueue NIA */
+	iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
+
+	/* Default/error queues */
+	iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), &regs->fmbm_rfqid);
+	iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_refqid);
+
+	/* Discard/error masks */
+	iowrite32be(cfg->discard_mask, &regs->fmbm_rfsdm);
+	iowrite32be(cfg->err_mask, &regs->fmbm_rfsem);
+
+	return 0;
+}
+
+static int init_bmi_tx(struct fman_port *port)
+{
+	struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
+	struct fman_port_cfg *cfg = port->cfg;
+	u32 tmp;
+
+	/* Tx Configuration register */
+	tmp = 0;
+	iowrite32be(tmp, &regs->fmbm_tcfg);
+
+	/* DMA attributes */
+	tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
+	if (cfg->dma_ic_stash_on)
+		tmp |= BMI_DMA_ATTR_IC_STASH_ON;
+	if (cfg->dma_header_stash_on)
+		tmp |= BMI_DMA_ATTR_HDR_STASH_ON;
+	if (cfg->dma_sg_stash_on)
+		tmp |= BMI_DMA_ATTR_SG_STASH_ON;
+	iowrite32be(tmp, &regs->fmbm_tda);
+
+	/* Tx FIFO parameters */
+	tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
+		BMI_TX_FIFO_MIN_FILL_SHIFT;
+	tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
+		BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
+	tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
+	iowrite32be(tmp, &regs->fmbm_tfp);
+
+	/* Frame end data */
+	tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
+		BMI_FRAME_END_CS_IGNORE_SHIFT;
+	iowrite32be(tmp, &regs->fmbm_tfed);
+
+	/* Internal context parameters */
+	tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
+		BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
+	tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
+		BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
+	tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
+		BMI_IC_SIZE_MASK;
+	iowrite32be(tmp, &regs->fmbm_ticp);
+
+	/* Frame attributes */
+	tmp = BMI_CMD_TX_MR_DEF;
+	tmp |= BMI_CMD_ATTR_ORDER;
+	tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
+	iowrite32be(tmp, &regs->fmbm_tfca);
+
+	/* Dequeue NIA + enqueue NIA */
+	iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
+	iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
+	if (cfg->fmbm_tfne_has_features)
+		iowrite32be(!cfg->dflt_fqid ?
+			    BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
+			    NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
+	if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+		iowrite32be(DFLT_FQ_ID, &regs->fmbm_tcfqid);
+		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+			    &regs->fmbm_tfene);
+		if (cfg->fmbm_tfne_has_features)
+			iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
+				    &regs->fmbm_tfne);
+	}
+
+	/* Confirmation/error queues */
+	if (cfg->dflt_fqid || !cfg->dont_release_buf)
+		iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, &regs->fmbm_tcfqid);
+	iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_tefqid);
+
+	return 0;
+}
+
+static int init_qmi(struct fman_port *port)
+{
+	struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
+	struct fman_port_cfg *cfg = port->cfg;
+	u32 tmp;
+
+	/* Rx port configuration */
+	if (port->port_type == FMAN_PORT_TYPE_RX) {
+		/* Enqueue NIA */
+		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
+		return 0;
+	}
+
+	/* Continue with Tx port configuration */
+	if (port->port_type == FMAN_PORT_TYPE_TX) {
+		/* Enqueue NIA */
+		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
+			    &regs->fmqm_pnen);
+		/* Dequeue NIA */
+		iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
+	}
+
+	/* Dequeue Configuration register */
+	tmp = 0;
+	if (cfg->deq_high_priority)
+		tmp |= QMI_DEQ_CFG_PRI;
+
+	switch (cfg->deq_type) {
+	case FMAN_PORT_DEQ_BY_PRI:
+		tmp |= QMI_DEQ_CFG_TYPE1;
+		break;
+	case FMAN_PORT_DEQ_ACTIVE_FQ:
+		tmp |= QMI_DEQ_CFG_TYPE2;
+		break;
+	case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
+		tmp |= QMI_DEQ_CFG_TYPE3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cfg->qmi_deq_options_support) {
+		switch (cfg->deq_prefetch_option) {
+		case FMAN_PORT_DEQ_NO_PREFETCH:
+			break;
+		case FMAN_PORT_DEQ_PART_PREFETCH:
+			tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
+			break;
+		case FMAN_PORT_DEQ_FULL_PREFETCH:
+			tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
+	tmp |= cfg->deq_byte_cnt;
+	iowrite32be(tmp, &regs->fmqm_pndc);
+
+	return 0;
+}
+
+static int init(struct fman_port *port)
+{
+	int err;
+
+	/* Init BMI registers */
+	switch (port->port_type) {
+	case FMAN_PORT_TYPE_RX:
+		err = init_bmi_rx(port);
+		break;
+	case FMAN_PORT_TYPE_TX:
+		err = init_bmi_tx(port);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (err)
+		return err;
+
+	/* Init QMI registers */
+	err = init_qmi(port);
+	return err;
+}
+
+static int set_bpools(const struct fman_port *port,
+		      const struct fman_port_bpools *bp)
+{
+	u32 __iomem *bp_reg, *bp_depl_reg;
+	u32 tmp;
+	u8 i, max_bp_num;
+	bool grp_depl_used = false, rx_port;
+
+	switch (port->port_type) {
+	case FMAN_PORT_TYPE_RX:
+		max_bp_num = port->ext_pools_num;
+		rx_port = true;
+		bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
+		bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rx_port) {
+		/* Check buffers are provided in ascending order */
+		for (i = 0; (i < (bp->count - 1) &&
+			     (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
+			if (bp->bpool[i].size > bp->bpool[i + 1].size)
+				return -EINVAL;
+		}
+	}
+
+	/* Set up external buffers pools */
+	for (i = 0; i < bp->count; i++) {
+		tmp = BMI_EXT_BUF_POOL_VALID;
+		tmp |= ((u32)bp->bpool[i].bpid <<
+			BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
+
+		if (rx_port) {
+			if (bp->counters_enable)
+				tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
+
+			if (bp->bpool[i].is_backup)
+				tmp |= BMI_EXT_BUF_POOL_BACKUP;
+
+			tmp |= (u32)bp->bpool[i].size;
+		}
+
+		iowrite32be(tmp, &bp_reg[i]);
+	}
+
+	/* Clear unused pools */
+	for (i = bp->count; i < max_bp_num; i++)
+		iowrite32be(0, &bp_reg[i]);
+
+	/* Pools depletion */
+	tmp = 0;
+	for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
+		if (bp->bpool[i].grp_bp_depleted) {
+			grp_depl_used = true;
+			tmp |= 0x80000000 >> i;
+		}
+
+		if (bp->bpool[i].single_bp_depleted)
+			tmp |= 0x80 >> i;
+	}
+
+	if (grp_depl_used)
+		tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
+		    BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
+
+	iowrite32be(tmp, bp_depl_reg);
+	return 0;
+}
+
+static bool is_init_done(struct fman_port_cfg *cfg)
+{
+	/* port->cfg is freed and set to NULL at the end of fman_port_init(),
+	 * so a NULL cfg indicates that the port initialization is complete.
+	 */
+	if (!cfg)
+		return true;
+
+	return false;
+}
+
+static int verify_size_of_fifo(struct fman_port *port)
+{
+	u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
+
+	/* TX Ports */
+	if (port->port_type == FMAN_PORT_TYPE_TX) {
+		min_fifo_size_required = (u32)
+		    (roundup(port->max_frame_length,
+			     FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
+
+		min_fifo_size_required +=
+		    port->cfg->tx_fifo_deq_pipeline_depth *
+		    FMAN_BMI_FIFO_UNITS;
+
+		opt_fifo_size_for_b2b = min_fifo_size_required;
+
+		/* Add some margin for back-to-back capability to improve
+		 * performance; this allows the hardware to pipeline the DMA
+		 * of a new frame while the previous frame has not yet been
+		 * transmitted.
+		 */
+		if (port->port_speed == 10000)
+			opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+		else
+			opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
+	}
+
+	/* RX Ports */
+	else if (port->port_type == FMAN_PORT_TYPE_RX) {
+		if (port->rev_info.major >= 6)
+			min_fifo_size_required = (u32)
+			(roundup(port->max_frame_length,
+				 FMAN_BMI_FIFO_UNITS) +
+				 (5 * FMAN_BMI_FIFO_UNITS));
+			/* 4 according to spec + 1 for FOF>0 */
+		else
+			min_fifo_size_required = (u32)
+			(roundup(min(port->max_frame_length,
+				     port->rx_pools_params.largest_buf_size),
+				     FMAN_BMI_FIFO_UNITS) +
+				     (7 * FMAN_BMI_FIFO_UNITS));
+
+		opt_fifo_size_for_b2b = min_fifo_size_required;
+
+		/* Add some margin for back-to-back capability to improve
+		 * performance; this allows the hardware to pipeline the DMA
+		 * of a new frame while the previous frame has not yet been
+		 * transmitted.
+		 */
+		if (port->port_speed == 10000)
+			opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
+		else
+			opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
+	}
+
+	WARN_ON(min_fifo_size_required <= 0);
+	WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
+
+	/* Verify the size  */
+	if (port->fifo_bufs.num < min_fifo_size_required)
+		pr_debug("FIFO size should be enlarged to %d bytes\n",
+			 min_fifo_size_required);
+	else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
+		pr_debug("For b2b processing, FIFO may be enlarged to %d bytes\n",
+			 opt_fifo_size_for_b2b);
+
+	return 0;
+}
+
+static int set_ext_buffer_pools(struct fman_port *port)
+{
+	struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
+	struct fman_buf_pool_depletion *buf_pool_depletion =
+	&port->cfg->buf_pool_depletion;
+	u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
+	u16 sizes_array[BM_MAX_NUM_OF_POOLS];
+	int i = 0, j = 0, err;
+	struct fman_port_bpools bpools;
+
+	memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
+	memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
+	memcpy(&port->ext_buf_pools, ext_buf_pools,
+	       sizeof(struct fman_ext_pools));
+
+	fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
+							ordered_array,
+							sizes_array);
+
+	memset(&bpools, 0, sizeof(struct fman_port_bpools));
+	bpools.count = ext_buf_pools->num_of_pools_used;
+	bpools.counters_enable = true;
+	for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
+		bpools.bpool[i].bpid = ordered_array[i];
+		bpools.bpool[i].size = sizes_array[ordered_array[i]];
+	}
+
+	/* save pools parameters for later use */
+	port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
+	port->rx_pools_params.largest_buf_size =
+	    sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
+	port->rx_pools_params.second_largest_buf_size =
+	    sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
+
+	/* FMBM_RMPD reg. - pool depletion */
+	if (buf_pool_depletion->pools_grp_mode_enable) {
+		bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
+		for (i = 0; i < port->bm_max_num_of_pools; i++) {
+			if (buf_pool_depletion->pools_to_consider[i]) {
+				for (j = 0; j < ext_buf_pools->
+				     num_of_pools_used; j++) {
+					if (i == ordered_array[j]) {
+						bpools.bpool[j].
+						    grp_bp_depleted = true;
+						break;
+					}
+				}
+			}
+		}
+	}
+
+	if (buf_pool_depletion->single_pool_mode_enable) {
+		for (i = 0; i < port->bm_max_num_of_pools; i++) {
+			if (buf_pool_depletion->
+			    pools_to_consider_for_single_mode[i]) {
+				for (j = 0; j < ext_buf_pools->
+				     num_of_pools_used; j++) {
+					if (i == ordered_array[j]) {
+						bpools.bpool[j].
+						    single_bp_depleted = true;
+						break;
+					}
+				}
+			}
+		}
+	}
+
+	err = set_bpools(port, &bpools);
+	if (err != 0) {
+		pr_err("FMan port: set_bpools\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int init_low_level_driver(struct fman_port *port)
+{
+	struct fman_port_cfg *cfg = port->cfg;
+	u32 tmp_val;
+
+	switch (port->port_type) {
+	case FMAN_PORT_TYPE_RX:
+		cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
+		if (cfg->forward_reuse_int_context)
+			cfg->rx_fd_bits = (u8)(BMI_PORT_RFNE_FRWD_RPD >> 24);
+		break;
+	default:
+		break;
+	}
+
+	tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
+		(port->internal_buf_offset / OFFSET_UNITS + 1) :
+		(port->internal_buf_offset / OFFSET_UNITS));
+	port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
+	port->cfg->int_buf_start_margin = port->internal_buf_offset;
+
+	if (init(port) != 0) {
+		pr_err("fman_port_init\n");
+		return -ENODEV;
+	}
+
+	/* The code below is a trick so the FM will neither release the buffer
+	 * to the BM nor try to enqueue the frame to the QM
+	 */
+	if (port->port_type == FMAN_PORT_TYPE_TX) {
+		if (!cfg->dflt_fqid && cfg->dont_release_buf) {
+			/* override fmbm_tcfqid 0 with a false non-0 value.
+			 * This will force FM to act according to tfene.
+			 * Otherwise, if fmbm_tcfqid is 0 the FM will release
+			 * buffers to BM regardless of fmbm_tfene
+			 */
+			out_be32(&port->bmi_regs->tx.fmbm_tcfqid, 0xFFFFFF);
+			out_be32(&port->bmi_regs->tx.fmbm_tfene,
+				 NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE);
+		}
+	}
+
+	return 0;
+}
+
+static int fill_soc_specific_params(struct fman_port *port)
+{
+	u32 bmi_max_fifo_size;
+
+	bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
+	port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
+	port->bm_max_num_of_pools = 64;
+
+	/* P4080 - Major 2
+	 * P2041/P3041/P5020/P5040 - Major 3
+	 * Tx/Bx - Major 6
+	 */
+	switch (port->rev_info.major) {
+	case 2:
+	case 3:
+		port->max_num_of_ext_pools		= 4;
+		port->max_num_of_sub_portals		= 12;
+		break;
+
+	case 6:
+		port->max_num_of_ext_pools		= 8;
+		port->max_num_of_sub_portals		= 16;
+		break;
+
+	default:
+		pr_err("Unsupported FMan version\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
+					    u16 speed)
+{
+	switch (type) {
+	case FMAN_PORT_TYPE_RX:
+	case FMAN_PORT_TYPE_TX:
+		switch (speed) {
+		case 10000:
+			return 4;
+		case 1000:
+			if (major >= 6)
+				return 2;
+			else
+				return 1;
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+}
+
+static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
+				 u16 speed)
+{
+	switch (type) {
+	case FMAN_PORT_TYPE_RX:
+	case FMAN_PORT_TYPE_TX:
+		switch (speed) {
+		case 10000:
+			return 16;
+		case 1000:
+			if (major >= 6)
+				return 4;
+			else
+				return 3;
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+}
+
+static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
+				       u16 speed)
+{
+	switch (type) {
+	case FMAN_PORT_TYPE_RX:
+		/* FMan V3 */
+		if (major >= 6)
+			return 0;
+
+		/* FMan V2 */
+		if (speed == 10000)
+			return 8;
+		else
+			return 2;
+	case FMAN_PORT_TYPE_TX:
+	default:
+		return 0;
+	}
+}
+
+static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
+				     u16 speed)
+{
+	int val;
+
+	if (major >= 6) {
+		switch (type) {
+		case FMAN_PORT_TYPE_TX:
+			if (speed == 10000)
+				val = 12;
+			else
+				val = 3;
+			break;
+		case FMAN_PORT_TYPE_RX:
+			if (speed == 10000)
+				val = 8;
+			else
+				val = 2;
+			break;
+		default:
+			return 0;
+		}
+	} else {
+		switch (type) {
+		case FMAN_PORT_TYPE_TX:
+		case FMAN_PORT_TYPE_RX:
+			if (speed == 10000)
+				val = 8;
+			else
+				val = 1;
+			break;
+		default:
+			val = 0;
+		}
+	}
+
+	return val;
+}
+
+static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
+					   u16 speed)
+{
+	/* FMan V3 */
+	if (major >= 6)
+		return 0;
+
+	/* FMan V2 */
+	switch (type) {
+	case FMAN_PORT_TYPE_RX:
+	case FMAN_PORT_TYPE_TX:
+		if (speed == 10000)
+			return 8;
+		else
+			return 1;
+	default:
+		return 0;
+	}
+}
+
+static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
+				     u16 speed)
+{
+	int val;
+
+	if (major >= 6) {
+		switch (type) {
+		case FMAN_PORT_TYPE_TX:
+			if (speed == 10000)
+				val = 64;
+			else
+				val = 50;
+			break;
+		case FMAN_PORT_TYPE_RX:
+			if (speed == 10000)
+				val = 96;
+			else
+				val = 50;
+			break;
+		default:
+			val = 0;
+		}
+	} else {
+		switch (type) {
+		case FMAN_PORT_TYPE_TX:
+			if (speed == 10000)
+				val = 48;
+			else
+				val = 44;
+			break;
+		case FMAN_PORT_TYPE_RX:
+			if (speed == 10000)
+				val = 48;
+			else
+				val = 45;
+			break;
+		default:
+			val = 0;
+		}
+	}
+
+	return val;
+}
+
+static void set_dflt_cfg(struct fman_port *port,
+			 struct fman_port_params *port_params)
+{
+	struct fman_port_cfg *cfg = port->cfg;
+
+	cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
+	cfg->dma_write_optimize = true;
+	cfg->color = FMAN_PORT_COLOR_GREEN;
+	cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
+	cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
+	cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
+	cfg->tx_fifo_low_comf_level = (5 * 1024);
+	cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
+	cfg->sync_req = true;
+	cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
+	cfg->tx_fifo_deq_pipeline_depth =
+		BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
+	cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
+
+	cfg->rx_pri_elevation =
+		DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
+	port->cfg->rx_fifo_thr =
+		DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
+					    port->max_port_fifo_size);
+
+	if ((port->rev_info.major == 6) &&
+	    ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
+		cfg->errata_A006320 = true;
+
+	/* Excessive Threshold register - exists for pre-FMv3 chips only */
+	if (port->rev_info.major < 6) {
+		cfg->excessive_threshold_register = true;
+	} else {
+		cfg->fmbm_rebm_has_sgd = true;
+		cfg->fmbm_tfne_has_features = true;
+	}
+
+	cfg->qmi_deq_options_support = true;
+
+	cfg->buffer_prefix_content.data_align =
+		DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+}
+
+static void set_rx_dflt_cfg(struct fman_port *port,
+			    struct fman_port_params *port_params)
+{
+	port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
+
+	memcpy(&port->cfg->ext_buf_pools,
+	       &port_params->specific_params.rx_params.ext_buf_pools,
+	       sizeof(struct fman_ext_pools));
+	port->cfg->err_fqid =
+		port_params->specific_params.rx_params.err_fqid;
+	port->cfg->dflt_fqid =
+		port_params->specific_params.rx_params.dflt_fqid;
+
+	/* Set BCB workaround on Rx ports, only for B4860 rev1 */
+	if (port->rev_info.major >= 6) {
+		unsigned int svr;
+
+		svr = mfspr(SPRN_SVR);
+		if ((SVR_SOC_VER(svr) == SVR_B4860) && (SVR_MAJ(svr) == 1))
+			port->cfg->bcb_workaround = true;
+	}
+}
+
+static void set_tx_dflt_cfg(struct fman_port *port,
+			    struct fman_port_params *port_params,
+			    struct fman_port_dts_params *dts_params)
+{
+	port->cfg->tx_fifo_deq_pipeline_depth =
+		get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
+						 port->port_type,
+						 port->port_speed);
+	port->cfg->err_fqid =
+		port_params->specific_params.non_rx_params.err_fqid;
+	port->cfg->deq_sp =
+		(u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
+	port->cfg->dflt_fqid =
+		port_params->specific_params.non_rx_params.dflt_fqid;
+	port->cfg->deq_high_priority = true;
+}
+
+int fman_port_config(struct fman_port *port, struct fman_port_params *params)
+{
+	void __iomem *base_addr = port->dts_params.base_addr;
+	int err;
+
+	/* Allocate the FM driver's parameters structure */
+	port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
+	if (!port->cfg)
+		goto err_params;
+
+	/* Initialize FM port parameters which will be kept by the driver */
+	port->port_type = port->dts_params.type;
+	port->port_speed = port->dts_params.speed;
+	port->port_id = port->dts_params.id;
+	port->fm = port->dts_params.fman;
+	port->ext_pools_num = (u8)8;
+
+	/* get FM revision */
+	fman_get_revision(port->fm, &port->rev_info);
+
+	err = fill_soc_specific_params(port);
+	if (err)
+		goto err_port_cfg;
+
+	switch (port->port_type) {
+	case FMAN_PORT_TYPE_RX:
+		set_rx_dflt_cfg(port, params);
+		/* fall through */
+	case FMAN_PORT_TYPE_TX:
+		set_tx_dflt_cfg(port, params, &port->dts_params);
+		/* fall through */
+	default:
+		set_dflt_cfg(port, params);
+	}
+
+	/* Continue with other parameters */
+	/* set memory map pointers */
+	port->bmi_regs = (union fman_port_bmi_regs __iomem *)
+			 (base_addr + BMI_PORT_REGS_OFFSET);
+	port->qmi_regs = (struct fman_port_qmi_regs __iomem *)
+			 (base_addr + QMI_PORT_REGS_OFFSET);
+
+	port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
+	/* resource distribution. */
+
+	port->fifo_bufs.num =
+	get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
+				  port->port_speed) * FMAN_BMI_FIFO_UNITS;
+	port->fifo_bufs.extra =
+	DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
+
+	port->open_dmas.num =
+	get_dflt_num_of_open_dmas(port->rev_info.major,
+				  port->port_type, port->port_speed);
+	port->open_dmas.extra =
+	get_dflt_extra_num_of_open_dmas(port->rev_info.major,
+					port->port_type, port->port_speed);
+	port->tasks.num =
+	get_dflt_num_of_tasks(port->rev_info.major,
+			      port->port_type, port->port_speed);
+	port->tasks.extra =
+	get_dflt_extra_num_of_tasks(port->rev_info.major,
+				    port->port_type, port->port_speed);
+
+	/* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
+	 * workaround
+	 */
+	if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
+	    (((port->port_type == FMAN_PORT_TYPE_TX) &&
+	    (port->port_speed == 1000)))) {
+		port->open_dmas.num = 16;
+		port->open_dmas.extra = 0;
+	}
+
+	if (port->rev_info.major >= 6 &&
+	    port->port_type == FMAN_PORT_TYPE_TX &&
+	    port->port_speed == 1000) {
+		/* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
+		 * workaround
+		 */
+		u32 reg = 0x00001013;
+
+		out_be32(&port->bmi_regs->tx.fmbm_tfp, reg);
+	}
+
+	return 0;
+
+err_port_cfg:
+	kfree(port->cfg);
+err_params:
+	kfree(port);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(fman_port_config);
+
+int fman_port_init(struct fman_port *port)
+{
+	struct fman_port_cfg *cfg;
+	int err;
+	struct fman_port_init_params params;
+
+	if (is_init_done(port->cfg))
+		return -EINVAL;
+
+	err = fman_sp_build_buffer_struct(&port->cfg->int_context,
+					  &port->cfg->buffer_prefix_content,
+					  &port->cfg->buf_margins,
+					  &port->buffer_offsets,
+					  &port->internal_buf_offset);
+	if (err)
+		return err;
+
+	/* FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 Errata workaround */
+	if (port->rev_info.major >= 6 && (port->cfg->bcb_workaround) &&
+	    ((port->port_type == FMAN_PORT_TYPE_RX) &&
+	    (port->port_speed == 1000))) {
+		port->cfg->discard_mask |= FM_PORT_FRM_ERR_PHYSICAL;
+		port->fifo_bufs.num += 4 * 1024;
+	}
+
+	cfg = port->cfg;
+
+	if (port->port_type == FMAN_PORT_TYPE_RX) {
+		/* Call the external Buffer routine which also checks fifo
+		 * size and updates it if necessary
+		 */
+		/* define external buffer pools and pool depletion */
+		err = set_ext_buffer_pools(port);
+		if (err)
+			return err;
+		/* check if the largest external buffer pool is large enough */
+		if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
+		    cfg->buf_margins.end_margins >
+		    port->rx_pools_params.largest_buf_size) {
+			pr_err("buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
+			       cfg->buf_margins.start_margins,
+			       cfg->buf_margins.end_margins,
+			       port->rx_pools_params.largest_buf_size);
+			return -EINVAL;
+		}
+	}
+
+	/* Call FM module routine for communicating parameters */
+	memset(&params, 0, sizeof(params));
+	params.port_id = port->port_id;
+	params.port_type = port->port_type;
+	params.port_speed = port->port_speed;
+	params.num_of_tasks = (u8)port->tasks.num;
+	params.num_of_extra_tasks = (u8)port->tasks.extra;
+	params.num_of_open_dmas = (u8)port->open_dmas.num;
+	params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
+
+	if (port->fifo_bufs.num) {
+		err = verify_size_of_fifo(port);
+		if (err)
+			return err;
+	}
+	params.size_of_fifo = port->fifo_bufs.num;
+	params.extra_size_of_fifo = port->fifo_bufs.extra;
+	params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
+	params.max_frame_length = port->max_frame_length;
+
+	err = fman_set_port_params(port->fm, &params);
+	if (err)
+		return err;
+
+	err = init_low_level_driver(port);
+	if (err)
+		return err;
+
+	kfree(port->cfg);
+	port->cfg = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(fman_port_init);
+
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+				     struct fman_buffer_prefix_content *
+				     buffer_prefix_content)
+{
+	if (is_init_done(port->cfg))
+		return -EINVAL;
+
+	memcpy(&port->cfg->buffer_prefix_content,
+	       buffer_prefix_content,
+	       sizeof(struct fman_buffer_prefix_content));
+	/* if data_align was not initialized by the user,
+	 * fall back to the driver's default
+	 */
+	if (!port->cfg->buffer_prefix_content.data_align)
+		port->cfg->buffer_prefix_content.data_align =
+		DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
+
+	return 0;
+}
+EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
+
+int fman_port_disable(struct fman_port *port)
+{
+	u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+	bool rx_port, failure = false;
+	int count;
+
+	if (!is_init_done(port->cfg))
+		return -EINVAL;
+
+	switch (port->port_type) {
+	case FMAN_PORT_TYPE_RX:
+		bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+		bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
+		rx_port = true;
+		break;
+	case FMAN_PORT_TYPE_TX:
+		bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+		bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
+		rx_port = false;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Disable QMI */
+	if (!rx_port) {
+		tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
+		iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+
+		/* Wait for QMI to finish FD handling */
+		count = 100;
+		do {
+			udelay(10);
+			tmp = ioread32be(&port->qmi_regs->fmqm_pns);
+		} while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
+
+		if (count == 0) {
+			/* Timeout */
+			failure = true;
+		}
+	}
+
+	/* Disable BMI */
+	tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
+	iowrite32be(tmp, bmi_cfg_reg);
+
+	/* Wait for graceful stop end */
+	count = 500;
+	do {
+		udelay(10);
+		tmp = ioread32be(bmi_status_reg);
+	} while ((tmp & BMI_PORT_STATUS_BSY) && --count);
+
+	if (count == 0) {
+		/* Timeout */
+		failure = true;
+	}
+
+	if (failure)
+		pr_debug("FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
+			 port->port_id);
+
+	return 0;
+}
+EXPORT_SYMBOL(fman_port_disable);
+
+int fman_port_enable(struct fman_port *port)
+{
+	u32 __iomem *bmi_cfg_reg, tmp;
+	bool rx_port;
+
+	if (!is_init_done(port->cfg))
+		return -EINVAL;
+
+	switch (port->port_type) {
+	case FMAN_PORT_TYPE_RX:
+		bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
+		rx_port = true;
+		break;
+	case FMAN_PORT_TYPE_TX:
+		bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
+		rx_port = false;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Enable QMI */
+	if (!rx_port) {
+		tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
+		iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
+	}
+
+	/* Enable BMI */
+	tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
+	iowrite32be(tmp, bmi_cfg_reg);
+
+	return 0;
+}
+EXPORT_SYMBOL(fman_port_enable);
+
+struct fman_port *fman_port_bind(struct device *dev)
+{
+	return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
+}
+EXPORT_SYMBOL(fman_port_bind);
+
+u32 fman_port_get_qman_channel_id(struct fman_port *port)
+{
+	return port->dts_params.qman_channel_id;
+}
+EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+
+#ifndef __rtems__
+static int fman_port_probe(struct platform_device *of_dev)
+#else /* __rtems__ */
+static int fman_port_probe(struct platform_device *of_dev, struct fman *fman)
+#endif /* __rtems__ */
+{
+	struct fman_port *port;
+#ifndef __rtems__
+	struct fman *fman;
+	struct device_node *fm_node, *port_node;
+#else /* __rtems__ */
+	struct device_node *port_node;
+#endif /* __rtems__ */
+	struct resource res;
+#ifndef __rtems__
+	struct resource *dev_res;
+#endif /* __rtems__ */
+	const u32 *u32_prop;
+	int err = 0, lenp;
+	enum fman_port_type port_type;
+	u16 port_speed;
+	u8 port_id;
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port_node = of_node_get(of_dev->dev.of_node);
+
+	/* Get the FM node */
+#ifndef __rtems__
+	fm_node = of_get_parent(port_node);
+	if (!fm_node) {
+		pr_err("of_get_parent() failed\n");
+		err = -ENODEV;
+		goto return_err;
+	}
+
+	fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
+	of_node_put(fm_node);
+	if (!fman) {
+		err = -EINVAL;
+		goto return_err;
+	}
+#endif /* __rtems__ */
+
+	u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
+	if (!u32_prop) {
+		pr_err("of_get_property(%s, cell-index) failed\n",
+		       port_node->full_name);
+		err = -EINVAL;
+		goto return_err;
+	}
+	if (WARN_ON(lenp != sizeof(u32))) {
+		err = -EINVAL;
+		goto return_err;
+	}
+	port_id = (u8)*u32_prop;
+
+	port->dts_params.id = port_id;
+
+	if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
+		port_type = FMAN_PORT_TYPE_TX;
+		port_speed = 1000;
+		u32_prop = (const u32 *)of_get_property(port_node,
+							"fsl,fman-10g-port",
+							&lenp);
+		if (u32_prop)
+			port_speed = 10000;
+
+	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
+		if (port_id >= TX_10G_PORT_BASE)
+			port_speed = 10000;
+		else
+			port_speed = 1000;
+		port_type = FMAN_PORT_TYPE_TX;
+
+	} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
+		port_type = FMAN_PORT_TYPE_RX;
+		port_speed = 1000;
+		u32_prop = (const u32 *)of_get_property(port_node,
+						  "fsl,fman-10g-port", &lenp);
+		if (u32_prop)
+			port_speed = 10000;
+
+	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
+		if (port_id >= RX_10G_PORT_BASE)
+			port_speed = 10000;
+		else
+			port_speed = 1000;
+		port_type = FMAN_PORT_TYPE_RX;
+
+	} else {
+		pr_err("Illegal port type\n");
+		err = -EINVAL;
+		goto return_err;
+	}
+
+	port->dts_params.type = port_type;
+	port->dts_params.speed = port_speed;
+
+	if (port_type == FMAN_PORT_TYPE_TX) {
+		u32 qman_channel_id;
+
+		qman_channel_id = fman_get_qman_channel_id(fman, port_id);
+		if (qman_channel_id == 0) {
+			pr_err("incorrect qman-channel-id\n");
+			err = -EINVAL;
+			goto return_err;
+		}
+		port->dts_params.qman_channel_id = qman_channel_id;
+	}
+
+	err = of_address_to_resource(port_node, 0, &res);
+	if (err < 0) {
+		pr_err("of_address_to_resource() failed\n");
+		err = -ENOMEM;
+		goto return_err;
+	}
+
+	port->dts_params.fman = fman;
+
+	of_node_put(port_node);
+
+#ifndef __rtems__
+	dev_res = __devm_request_region(fman_get_device(fman), &res,
+					res.start, (res.end + 1 - res.start),
+					"fman-port");
+	if (!dev_res) {
+		pr_err("__devm_request_region() failed\n");
+		err = -EINVAL;
+		goto free_port;
+	}
+#endif /* __rtems__ */
+
+	port->dts_params.base_addr = devm_ioremap(fman_get_device(fman),
+						  res.start,
+						  (res.end + 1 - res.start));
+	if (port->dts_params.base_addr == 0)
+		pr_err("devm_ioremap() failed\n");
+
+	dev_set_drvdata(&of_dev->dev, port);
+
+	return 0;
+
+return_err:
+	of_node_put(port_node);
+#ifndef __rtems__
+free_port:
+#endif /* __rtems__ */
+	kfree(port);
+	return err;
+}
+
+#ifndef __rtems__
+static const struct of_device_id fman_port_match[] = {
+	{.compatible = "fsl,fman-v3-port-rx"},
+	{.compatible = "fsl,fman-v2-port-rx"},
+	{.compatible = "fsl,fman-v3-port-tx"},
+	{.compatible = "fsl,fman-v2-port-tx"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, fman_port_match);
+
+static struct platform_driver fman_port_driver = {
+	.driver = {
+		   .name = "fsl-fman-port",
+		   .of_match_table = fman_port_match,
+		   },
+	.probe = fman_port_probe,
+};
+
+builtin_platform_driver(fman_port_driver);
+
+#else /* __rtems__ */
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+static int
+fman_port_dev_probe(device_t dev)
+{
+	struct fman_ivars *ivars = device_get_ivars(dev);
+	int err;
+
+	err = fman_port_probe(&ivars->of_dev, ivars->fman);
+	if (err == 0) {
+		device_set_desc(dev, "FMan Port");
+		return (BUS_PROBE_DEFAULT);
+	} else {
+		return (ENXIO);
+	}
+}
+
+static device_method_t fman_port_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, fman_port_dev_probe),
+	DEVMETHOD(device_attach, bus_generic_attach),
+	DEVMETHOD(device_detach, bus_generic_detach),
+	DEVMETHOD(device_suspend, bus_generic_suspend),
+	DEVMETHOD(device_resume, bus_generic_resume),
+	DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+	DEVMETHOD_END
+};
+
+driver_t fman_port_driver = {
+	.name = "fman_port",
+	.methods = fman_port_methods
+};
+
+static devclass_t fman_port_devclass;
+
+DRIVER_MODULE(fman_port, fman_mac, fman_port_driver, fman_port_devclass, 0, 0);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_port.h b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
new file mode 100644
index 0000000..56c1d02
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FMAN_PORT_H
+#define __FMAN_PORT_H
+
+#include "fman.h"
+
+/* FM Port API
+ * The FM uses a general module called "port" to represent a Tx port (MAC)
+ * or an Rx port (MAC).
+ * The number of ports in an FM varies between SoCs.
+ * The SW driver manages these ports as sub-modules of the FM, i.e. after an
+ * FM is initialized, its ports may be initialized and operated upon.
+ * The port is initialized aware of its type, but other functions on a port
+ * may be indifferent to its type. When necessary, the driver verifies
+ * coherence and returns an error if applicable.
+ * On initialization, the user specifies the port type and its index
+ * (relative to the port type), always starting at 0.
+ */
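+
+/* Illustrative lifecycle sketch (not taken from this import; the device
+ * pointer and frame-queue ids are placeholders): a consumer is expected to
+ * bind, configure, initialize and then enable a port, roughly as follows.
+ *
+ *	struct fman_port *port = fman_port_bind(port_dev);
+ *	struct fman_port_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.specific_params.non_rx_params.err_fqid = tx_err_fqid;
+ *	params.specific_params.non_rx_params.dflt_fqid = tx_conf_fqid;
+ *
+ *	if (fman_port_config(port, &params) == 0 &&
+ *	    fman_port_init(port) == 0)
+ *		fman_port_enable(port);
+ */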
+
+/* FM Frame error */
+/* Frame Descriptor errors */
+/* Not for Rx-Port! Unsupported Format */
+#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT	FM_FD_ERR_UNSUPPORTED_FORMAT
+/* Not for Rx-Port! Length Error */
+#define FM_PORT_FRM_ERR_LENGTH			FM_FD_ERR_LENGTH
+/* DMA Data error */
+#define FM_PORT_FRM_ERR_DMA			FM_FD_ERR_DMA
+/* non Frame-Manager error; probably comes from a SEC chained to the FM */
+#define FM_PORT_FRM_ERR_NON_FM			FM_FD_RX_STATUS_ERR_NON_FM
+ /* IPR error */
+#define FM_PORT_FRM_ERR_IPRE			(FM_FD_ERR_IPR & ~FM_FD_IPR)
+/* IPR non-consistent-sp */
+#define FM_PORT_FRM_ERR_IPR_NCSP		(FM_FD_ERR_IPR_NCSP &	\
+						~FM_FD_IPR)
+
+/* Rx FIFO overflow, FCS error, code error, running disparity
+ * error (SGMII and TBI modes), FIFO parity error.
+ * PHY Sequence error, PHY error control character detected.
+ */
+#define FM_PORT_FRM_ERR_PHYSICAL                FM_FD_ERR_PHYSICAL
+/* Frame too long OR Frame size exceeds max_length_frame  */
+#define FM_PORT_FRM_ERR_SIZE                    FM_FD_ERR_SIZE
+/* indicates a classifier "drop" operation */
+#define FM_PORT_FRM_ERR_CLS_DISCARD             FM_FD_ERR_CLS_DISCARD
+/* Extract Out of Frame */
+#define FM_PORT_FRM_ERR_EXTRACTION              FM_FD_ERR_EXTRACTION
+/* No Scheme Selected */
+#define FM_PORT_FRM_ERR_NO_SCHEME               FM_FD_ERR_NO_SCHEME
+/* Keysize Overflow */
+#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW        FM_FD_ERR_KEYSIZE_OVERFLOW
+/* Frame color is red */
+#define FM_PORT_FRM_ERR_COLOR_RED               FM_FD_ERR_COLOR_RED
+/* Frame color is yellow */
+#define FM_PORT_FRM_ERR_COLOR_YELLOW            FM_FD_ERR_COLOR_YELLOW
+/* Parser Time out Exceed */
+#define FM_PORT_FRM_ERR_PRS_TIMEOUT             FM_FD_ERR_PRS_TIMEOUT
+/* Invalid Soft Parser instruction */
+#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT        FM_FD_ERR_PRS_ILL_INSTRUCT
+/* Header error was identified during parsing */
+#define FM_PORT_FRM_ERR_PRS_HDR_ERR             FM_FD_ERR_PRS_HDR_ERR
+/* Frame parsed beyond the first 256 bytes */
+#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED    FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
+/* FPM Frame Processing Timeout Exceeded */
+#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT         0x00000001
+
+struct fman_port;
+
+/* A structure for additional Rx port parameters */
+struct fman_port_rx_params {
+	u32 err_fqid;			/* Error Queue Id. */
+	u32 dflt_fqid;			/* Default Queue Id. */
+	/* Which external buffer pools are used
+	 * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
+	 */
+	struct fman_ext_pools ext_buf_pools;
+};
+
+/* A structure for additional non-Rx port parameters */
+struct fman_port_non_rx_params {
+	/* Error Queue Id. */
+	u32 err_fqid;
+	/* For Tx - Default Confirmation queue, 0 means no Tx confirmation
+	 * for processed frames. For OP port - default Rx queue.
+	 */
+	u32 dflt_fqid;
+};
+
+/* A union for additional parameters depending on port type */
+union fman_port_specific_params {
+	/* Rx port parameters structure */
+	struct fman_port_rx_params rx_params;
+	/* Non-Rx port parameters structure */
+	struct fman_port_non_rx_params non_rx_params;
+};
+
+/* A structure representing FM initialization parameters */
+struct fman_port_params {
+	/* Virtual Address of memory mapped FM Port registers. */
+	void *fm;
+	union fman_port_specific_params specific_params;
+	/* Additional parameters depending on port type. */
+};
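+
+/* Sketch of the Rx-specific members (the pool id, buffer size and
+ * frame-queue ids below are placeholders chosen only for illustration):
+ *
+ *	struct fman_port_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.specific_params.rx_params.err_fqid = rx_err_fqid;
+ *	params.specific_params.rx_params.dflt_fqid = rx_dflt_fqid;
+ *	params.specific_params.rx_params.ext_buf_pools.num_of_pools_used = 1;
+ *	params.specific_params.rx_params.ext_buf_pools.ext_buf_pool[0].id = 0;
+ *	params.specific_params.rx_params.ext_buf_pools.ext_buf_pool[0].size = 2048;
+ */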
+
+/**
+ * fman_port_config
+ * @port:	Pointer to the port structure
+ * @params:	Pointer to data structure of parameters
+ *
+ * Creates a descriptor for the FM PORT module.
+ * The routine returns a pointer to the FM PORT object.
+ * This descriptor must be passed as first parameter to all other FM PORT
+ * function calls.
+ * No actual initialization or configuration of FM hardware is done by this
+ * routine.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_config(struct fman_port *port, struct fman_port_params *params);
+
+/**
+ * fman_port_init
+ * port:	A pointer to a FM Port module.
+ * Initializes the FM PORT module by defining the software structure and
+ * configuring the hardware registers.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_init(struct fman_port *port);
+
+/**
+ * fman_port_cfg_buf_prefix_content
+ * @port			A pointer to a FM Port module.
+ * @buffer_prefix_content	A structure of parameters describing
+ *				the structure of the buffer.
+ *				Out parameter:
+ *				Start margin - offset of data from
+ *				start of external buffer.
+ * Defines the structure, size and content of the application buffer.
+ * For Tx ports, if 'pass_prs_result' is requested, the application should
+ * place the corresponding values at their offsets in the prefix. The FM
+ * reserves the first 'priv_data_size' bytes and then, depending on
+ * 'pass_prs_result' and 'pass_time_stamp', copies the parse result and the
+ * timestamp, followed by the packet itself (in this order), into the
+ * application buffer at the corresponding offsets.
+ * Calling this routine changes the buffer margins definitions in the
+ * internal driver database from its default configuration:
+ * Data size:  [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
+ * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
+ * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
+ * May be used for all ports
+ *
+ * Allowed only following fman_port_config() and before fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_cfg_buf_prefix_content(struct fman_port *port,
+				     struct fman_buffer_prefix_content
+				     *buffer_prefix_content);
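+
+/* A minimal sketch of how the prefix could be configured between
+ * fman_port_config() and fman_port_init(); the values are placeholders
+ * chosen only for illustration:
+ *
+ *	struct fman_buffer_prefix_content buf_prefix;
+ *
+ *	memset(&buf_prefix, 0, sizeof(buf_prefix));
+ *	buf_prefix.priv_data_size = 16;
+ *	buf_prefix.pass_prs_result = true;
+ *	buf_prefix.pass_time_stamp = true;
+ *	buf_prefix.data_align = 64;
+ *
+ *	fman_port_cfg_buf_prefix_content(port, &buf_prefix);
+ */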
+
+/**
+ * fman_port_disable
+ * port:	A pointer to a FM Port module.
+ *
+ * Gracefully disable an FM port. The port stops accepting new tasks, and the
+ * routine returns only after all tasks associated with the port have
+ * terminated.
+ *
+ * This is a blocking routine; it returns after the port is gracefully
+ * stopped, i.e. the port will not accept new frames, but it will finish all
+ * frames or tasks that had already begun.
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_disable(struct fman_port *port);
+
+/**
+ * fman_port_enable
+ * port:	A pointer to a FM Port module.
+ *
+ * A runtime routine provided to allow disable/enable of port.
+ *
+ * Allowed only following fman_port_init().
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_port_enable(struct fman_port *port);
+
+/**
+ * fman_port_get_qman_channel_id
+ * port:	Pointer to the FMan port device
+ *
+ * Get the QMan channel ID for the specific port
+ *
+ * Return: QMan channel ID
+ */
+u32 fman_port_get_qman_channel_id(struct fman_port *port);
+
+/**
+ * fman_port_bind
+ * dev:		FMan Port OF device pointer
+ *
+ * Bind to a specific FMan Port.
+ *
+ * Allowed only after the port was created.
+ *
+ * Return: A pointer to the FMan port device.
+ */
+struct fman_port *fman_port_bind(struct device *dev);
+
+#endif /* __FMAN_PORT_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_sp.c b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
new file mode 100644
index 0000000..2fcfa6c
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -0,0 +1,171 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fman_sp.h"
+#include "fman.h"
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+						     *fm_ext_pools,
+						     u8 *ordered_array,
+						     u16 *sizes_array)
+{
+	u16 buf_size = 0;
+	int i = 0, j = 0, k = 0;
+
+	/* First we copy the external buffer pools information
+	 * to an ordered local array
+	 */
+	for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
+		/* get pool size */
+		buf_size = fm_ext_pools->ext_buf_pool[i].size;
+
+		/* keep sizes in an array according to poolId
+		 * for direct access
+		 */
+		sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
+
+		/* save poolId in an ordered array according to size */
+		for (j = 0; j <= i; j++) {
+			/* this is the next free place in the array */
+			if (j == i)
+				ordered_array[i] =
+				    fm_ext_pools->ext_buf_pool[i].id;
+			else {
+				/* find the right place for this poolId */
+				if (buf_size < sizes_array[ordered_array[j]]) {
+					/* move the pool_ids one place ahead
+					 * to make room for this poolId
+					 */
+					for (k = i; k > j; k--)
+						ordered_array[k] =
+						    ordered_array[k - 1];
+
+					/* now k==j, this is the place for
+					 * the new size
+					 */
+					ordered_array[k] =
+					    fm_ext_pools->ext_buf_pool[i].id;
+					break;
+				}
+			}
+		}
+	}
+}
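+
+/* Worked example (pool ids and sizes are hypothetical): with three pools
+ * given in the order (id 3, size 2048), (id 0, size 512), (id 5, size 1024),
+ * the routine above yields ordered_array = {0, 5, 3} (pool ids sorted by
+ * ascending buffer size) and sizes_array[0] = 512, sizes_array[5] = 1024,
+ * sizes_array[3] = 2048.
+ */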
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
+				int_context_data_copy,
+				struct fman_buffer_prefix_content *
+				buffer_prefix_content,
+				struct fman_sp_buf_margins *buf_margins,
+				struct fman_sp_buffer_offsets *buffer_offsets,
+				u8 *internal_buf_offset)
+{
+	u32 tmp;
+
+	/* Align start of internal context data to a 16 byte boundary */
+	int_context_data_copy->ext_buf_offset = (u16)
+		((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
+		((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
+			~(u16)(OFFSET_UNITS - 1)) :
+		buffer_prefix_content->priv_data_size);
+
+	/* Translate margin and int_context params to FM parameters */
+	/* Initialize with illegal value. Later we'll set legal values. */
+	buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
+	buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
+	buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
+
+	/* Internally the driver supports 4 options
+	 * 1. prsResult/timestamp/hashResult selection (in fact 8 options,
+	 *    but for simplicity we'll relate to it as 1).
+	 * 2. All IC context (from AD) not including debug.
+	 */
+
+	/* This case covers the options under 1 */
+	/* Copy size must be in 16-byte granularity. */
+	int_context_data_copy->size =
+	    (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
+		  ((buffer_prefix_content->pass_time_stamp ||
+		  buffer_prefix_content->pass_hash_result) ? 16 : 0));
+
+	/* Align start of internal context data to a 16 byte boundary */
+	int_context_data_copy->int_context_offset =
+	    (u8)(buffer_prefix_content->pass_prs_result ? 32 :
+		 ((buffer_prefix_content->pass_time_stamp ||
+		 buffer_prefix_content->pass_hash_result) ? 64 : 0));
+
+	if (buffer_prefix_content->pass_prs_result)
+		buffer_offsets->prs_result_offset =
+		    int_context_data_copy->ext_buf_offset;
+	if (buffer_prefix_content->pass_time_stamp)
+		buffer_offsets->time_stamp_offset =
+		    buffer_prefix_content->pass_prs_result ?
+		    (int_context_data_copy->ext_buf_offset +
+			sizeof(struct fman_prs_result)) :
+		    int_context_data_copy->ext_buf_offset;
+	if (buffer_prefix_content->pass_hash_result)
+		/* If PR is not requested, whether TS is
+		 * requested or not, IC will be copied from TS
+		 */
+		buffer_offsets->hash_result_offset =
+		buffer_prefix_content->pass_prs_result ?
+			(int_context_data_copy->ext_buf_offset +
+				sizeof(struct fman_prs_result) + 8) :
+			int_context_data_copy->ext_buf_offset + 8;
+
+	if (int_context_data_copy->size)
+		buf_margins->start_margins =
+		    (u16)(int_context_data_copy->ext_buf_offset +
+			  int_context_data_copy->size);
+	else
+		/* No internal context passing; the start margin is
+		 * immediately after private_info
+		 */
+		buf_margins->start_margins =
+		    buffer_prefix_content->priv_data_size;
+
+	/* align data start */
+	tmp = (u32)(buf_margins->start_margins %
+		    buffer_prefix_content->data_align);
+	if (tmp)
+		buf_margins->start_margins +=
+		    (buffer_prefix_content->data_align - tmp);
+	buffer_offsets->data_offset = buf_margins->start_margins;
+
+	return 0;
+}
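+
+/* Worked example (illustrative prefix values, assuming OFFSET_UNITS is 16):
+ * with priv_data_size = 16, pass_prs_result and pass_time_stamp set, no hash
+ * result and data_align = 64, the routine above yields ext_buf_offset = 16,
+ * a copy size of 32 + 16 = 48 bytes, int_context_offset = 32,
+ * prs_result_offset = 16, time_stamp_offset = 16 +
+ * sizeof(struct fman_prs_result), start_margins = 16 + 48 = 64 and, since 64
+ * is already a multiple of data_align, data_offset = 64.
+ */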
+
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_sp.h b/linux/drivers/net/ethernet/freescale/fman/fman_sp.h
new file mode 100644
index 0000000..820b7f6
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FM_SP_H
+#define __FM_SP_H
+
+#include "fman.h"
+#include <linux/types.h>
+
+#define ILLEGAL_BASE    (~0)
+
+/* defaults */
+#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN	64
+
+/* Registers bit fields */
+#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER		0x40000000
+#define FMAN_SP_EXT_BUF_POOL_VALID			0x80000000
+#define FMAN_SP_EXT_BUF_POOL_BACKUP			0x20000000
+#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE		0x00100000
+#define FMAN_SP_SG_DISABLE				0x80000000
+
+/* shifts */
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT		16
+#define FMAN_SP_DMA_ATTR_SWP_SHIFT			30
+#define FMAN_SP_IC_TO_EXT_SHIFT			16
+#define FMAN_SP_IC_FROM_INT_SHIFT			8
+
+/* structure for defining internal context copying */
+struct fman_sp_int_context_data_copy {
+	/* Offset in the external buffer to which the internal
+	 * context is copied (Rx) or from which it is taken (Tx, Op).
+	 */
+	u16 ext_buf_offset;
+	/* Offset within internal context to copy from
+	 * (Rx) or to copy to (Tx, Op).
+	 */
+	u8 int_context_offset;
+	/* Internal offset size to be copied */
+	u16 size;
+};
+
+/*  struct for defining external buffer margins */
+struct fman_sp_buf_margins {
+	/* Number of bytes to be left at the beginning
+	 * of the external buffer (must be divisible by 16)
+	 */
+	u16 start_margins;
+	/* number of bytes to be left at the end
+	 * of the external buffer (must be divisible by 16)
+	 */
+	u16 end_margins;
+};
+
+struct fman_sp_buffer_offsets {
+	u32 data_offset;
+	u32 prs_result_offset;
+	u32 time_stamp_offset;
+	u32 hash_result_offset;
+};
+
+int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
+				*int_context_data_copy,
+				struct fman_buffer_prefix_content
+				*buffer_prefix_content,
+				struct fman_sp_buf_margins *buf_margins,
+				struct fman_sp_buffer_offsets
+				*buffer_offsets,
+				u8 *internal_buf_offset);
+
+void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
+						     *fm_ext_pools,
+						     u8 *ordered_array,
+						     u16 *sizes_array);
+
+#endif	/* __FM_SP_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
new file mode 100644
index 0000000..5b22a04
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -0,0 +1,853 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "crc_mac_addr_ext.h"
+
+#include "fman_tgec.h"
+#include "fman.h"
+
+#include <linux/slab.h>
+#include <linux/bitrev.h>
+#include <linux/io.h>
+
+/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
+#define TGEC_TX_IPG_LENGTH_MASK	0x000003ff
+
+/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP		0x00100000
+#define CMD_CFG_NO_LEN_CHK		0x00020000
+#define CMD_CFG_SEND_IDLE		0x00010000
+#define CMD_CFG_RX_ER_DISC		0x00004000
+#define CMD_CFG_CMD_FRM_EN		0x00002000
+#define CMD_CFG_LOOPBACK_EN		0x00000400
+#define CMD_CFG_TX_ADDR_INS		0x00000200
+#define CMD_CFG_PAUSE_IGNORE		0x00000100
+#define CMD_CFG_PAUSE_FWD		0x00000080
+#define CMF_CFG_CRC_FWD			0x00000040
+#define CMD_CFG_PROMIS_EN		0x00000010
+#define CMD_CFG_WAN_MODE		0x00000008
+#define CMD_CFG_RX_EN			0x00000002
+#define CMD_CFG_TX_EN			0x00000001
+
+/* Interrupt Mask Register (IMASK) */
+#define TGEC_IMASK_MDIO_SCAN_EVENT	0x00010000
+#define TGEC_IMASK_MDIO_CMD_CMPL	0x00008000
+#define TGEC_IMASK_REM_FAULT		0x00004000
+#define TGEC_IMASK_LOC_FAULT		0x00002000
+#define TGEC_IMASK_TX_ECC_ER		0x00001000
+#define TGEC_IMASK_TX_FIFO_UNFL	0x00000800
+#define TGEC_IMASK_TX_FIFO_OVFL	0x00000400
+#define TGEC_IMASK_TX_ER		0x00000200
+#define TGEC_IMASK_RX_FIFO_OVFL	0x00000100
+#define TGEC_IMASK_RX_ECC_ER		0x00000080
+#define TGEC_IMASK_RX_JAB_FRM		0x00000040
+#define TGEC_IMASK_RX_OVRSZ_FRM	0x00000020
+#define TGEC_IMASK_RX_RUNT_FRM		0x00000010
+#define TGEC_IMASK_RX_FRAG_FRM		0x00000008
+#define TGEC_IMASK_RX_LEN_ER		0x00000004
+#define TGEC_IMASK_RX_CRC_ER		0x00000002
+#define TGEC_IMASK_RX_ALIGN_ER		0x00000001
+
+/* Hashtable Control Register (HASHTABLE_CTRL) */
+#define TGEC_HASH_MCAST_SHIFT		23
+#define TGEC_HASH_MCAST_EN		0x00000200
+#define TGEC_HASH_ADR_MSK		0x000001ff
+
+#define DEFAULT_TX_IPG_LENGTH			12
+#define DEFAULT_MAX_FRAME_LENGTH		0x600
+#define DEFAULT_PAUSE_QUANT			0xf000
+
+#define TGEC_DEFAULT_EXCEPTIONS			 \
+	((u32)((TGEC_IMASK_MDIO_SCAN_EVENT)		|\
+		(TGEC_IMASK_REM_FAULT)			|\
+		(TGEC_IMASK_LOC_FAULT)			|\
+		(TGEC_IMASK_TX_ECC_ER)			|\
+		(TGEC_IMASK_TX_FIFO_UNFL)		|\
+		(TGEC_IMASK_TX_FIFO_OVFL)		|\
+		(TGEC_IMASK_TX_ER)			|\
+		(TGEC_IMASK_RX_FIFO_OVFL)		|\
+		(TGEC_IMASK_RX_ECC_ER)			|\
+		(TGEC_IMASK_RX_JAB_FRM)			|\
+		(TGEC_IMASK_RX_OVRSZ_FRM)		|\
+		(TGEC_IMASK_RX_RUNT_FRM)		|\
+		(TGEC_IMASK_RX_FRAG_FRM)		|\
+		(TGEC_IMASK_RX_CRC_ER)			|\
+		(TGEC_IMASK_RX_ALIGN_ER)))
+
+/* number of pattern match registers (entries) */
+#define TGEC_NUM_OF_PADDRS          1
+
+/* Group address bit indication */
+#define GROUP_ADDRESS               0x0000010000000000LL
+
+/* Hash table size (= 32 bits*8 regs) */
+#define TGEC_HASH_TABLE_SIZE             512
+
+/* tGEC memory map */
+struct tgec_regs {
+	u32 tgec_id;		/* 0x000 Controller ID */
+	u32 reserved001[1];	/* 0x004 */
+	u32 command_config;	/* 0x008 Control and configuration */
+	u32 mac_addr_0;		/* 0x00c Lower 32 bits of the MAC adr */
+	u32 mac_addr_1;		/* 0x010 Upper 16 bits of the MAC adr */
+	u32 maxfrm;		/* 0x014 Maximum frame length */
+	u32 pause_quant;	/* 0x018 Pause quanta */
+	u32 rx_fifo_sections;	/* 0x01c  */
+	u32 tx_fifo_sections;	/* 0x020  */
+	u32 rx_fifo_almost_f_e;	/* 0x024  */
+	u32 tx_fifo_almost_f_e;	/* 0x028  */
+	u32 hashtable_ctrl;	/* 0x02c Hash table control */
+	u32 mdio_cfg_status;	/* 0x030  */
+	u32 mdio_command;	/* 0x034  */
+	u32 mdio_data;		/* 0x038  */
+	u32 mdio_regaddr;	/* 0x03c  */
+	u32 status;		/* 0x040  */
+	u32 tx_ipg_len;		/* 0x044 Transmitter inter-packet-gap */
+	u32 mac_addr_2;		/* 0x048 Lower 32 bits of 2nd MAC adr */
+	u32 mac_addr_3;		/* 0x04c Upper 16 bits of 2nd MAC adr */
+	u32 rx_fifo_ptr_rd;	/* 0x050  */
+	u32 rx_fifo_ptr_wr;	/* 0x054  */
+	u32 tx_fifo_ptr_rd;	/* 0x058  */
+	u32 tx_fifo_ptr_wr;	/* 0x05c  */
+	u32 imask;		/* 0x060 Interrupt mask */
+	u32 ievent;		/* 0x064 Interrupt event */
+	u32 udp_port;		/* 0x068 Defines a UDP Port number */
+	u32 type_1588v2;	/* 0x06c Type field for 1588v2 */
+	u32 reserved070[4];	/* 0x070 */
+	/* 10Ge Statistics Counter */
+	u32 tfrm_u;		/* 80 aFramesTransmittedOK */
+	u32 tfrm_l;		/* 84 aFramesTransmittedOK */
+	u32 rfrm_u;		/* 88 aFramesReceivedOK */
+	u32 rfrm_l;		/* 8c aFramesReceivedOK */
+	u32 rfcs_u;		/* 90 aFrameCheckSequenceErrors */
+	u32 rfcs_l;		/* 94 aFrameCheckSequenceErrors */
+	u32 raln_u;		/* 98 aAlignmentErrors */
+	u32 raln_l;		/* 9c aAlignmentErrors */
+	u32 txpf_u;		/* A0 aPAUSEMACCtrlFramesTransmitted */
+	u32 txpf_l;		/* A4 aPAUSEMACCtrlFramesTransmitted */
+	u32 rxpf_u;		/* A8 aPAUSEMACCtrlFramesReceived */
+	u32 rxpf_l;		/* Ac aPAUSEMACCtrlFramesReceived */
+	u32 rlong_u;		/* B0 aFrameTooLongErrors */
+	u32 rlong_l;		/* B4 aFrameTooLongErrors */
+	u32 rflr_u;		/* B8 aInRangeLengthErrors */
+	u32 rflr_l;		/* Bc aInRangeLengthErrors */
+	u32 tvlan_u;		/* C0 VLANTransmittedOK */
+	u32 tvlan_l;		/* C4 VLANTransmittedOK */
+	u32 rvlan_u;		/* C8 VLANReceivedOK */
+	u32 rvlan_l;		/* Cc VLANReceivedOK */
+	u32 toct_u;		/* D0 if_out_octets */
+	u32 toct_l;		/* D4 if_out_octets */
+	u32 roct_u;		/* D8 if_in_octets */
+	u32 roct_l;		/* Dc if_in_octets */
+	u32 ruca_u;		/* E0 if_in_ucast_pkts */
+	u32 ruca_l;		/* E4 if_in_ucast_pkts */
+	u32 rmca_u;		/* E8 ifInMulticastPkts */
+	u32 rmca_l;		/* Ec ifInMulticastPkts */
+	u32 rbca_u;		/* F0 ifInBroadcastPkts */
+	u32 rbca_l;		/* F4 ifInBroadcastPkts */
+	u32 terr_u;		/* F8 if_out_errors */
+	u32 terr_l;		/* Fc if_out_errors */
+	u32 reserved100[2];	/* 100-108 */
+	u32 tuca_u;		/* 108 if_out_ucast_pkts */
+	u32 tuca_l;		/* 10c if_out_ucast_pkts */
+	u32 tmca_u;		/* 110 ifOutMulticastPkts */
+	u32 tmca_l;		/* 114 ifOutMulticastPkts */
+	u32 tbca_u;		/* 118 ifOutBroadcastPkts */
+	u32 tbca_l;		/* 11c ifOutBroadcastPkts */
+	u32 rdrp_u;		/* 120 etherStatsDropEvents */
+	u32 rdrp_l;		/* 124 etherStatsDropEvents */
+	u32 reoct_u;		/* 128 etherStatsOctets */
+	u32 reoct_l;		/* 12c etherStatsOctets */
+	u32 rpkt_u;		/* 130 etherStatsPkts */
+	u32 rpkt_l;		/* 134 etherStatsPkts */
+	u32 trund_u;		/* 138 etherStatsUndersizePkts */
+	u32 trund_l;		/* 13c etherStatsUndersizePkts */
+	u32 r64_u;		/* 140 etherStatsPkts64Octets */
+	u32 r64_l;		/* 144 etherStatsPkts64Octets */
+	u32 r127_u;		/* 148 etherStatsPkts65to127Octets */
+	u32 r127_l;		/* 14c etherStatsPkts65to127Octets */
+	u32 r255_u;		/* 150 etherStatsPkts128to255Octets */
+	u32 r255_l;		/* 154 etherStatsPkts128to255Octets */
+	u32 r511_u;		/* 158 etherStatsPkts256to511Octets */
+	u32 r511_l;		/* 15c etherStatsPkts256to511Octets */
+	u32 r1023_u;		/* 160 etherStatsPkts512to1023Octets */
+	u32 r1023_l;		/* 164 etherStatsPkts512to1023Octets */
+	u32 r1518_u;		/* 168 etherStatsPkts1024to1518Octets */
+	u32 r1518_l;		/* 16c etherStatsPkts1024to1518Octets */
+	u32 r1519x_u;		/* 170 etherStatsPkts1519toX */
+	u32 r1519x_l;		/* 174 etherStatsPkts1519toX */
+	u32 trovr_u;		/* 178 etherStatsOversizePkts */
+	u32 trovr_l;		/* 17c etherStatsOversizePkts */
+	u32 trjbr_u;		/* 180 etherStatsJabbers */
+	u32 trjbr_l;		/* 184 etherStatsJabbers */
+	u32 trfrg_u;		/* 188 etherStatsFragments */
+	u32 trfrg_l;		/* 18C etherStatsFragments */
+	u32 rerr_u;		/* 190 if_in_errors */
+	u32 rerr_l;		/* 194 if_in_errors */
+};
+
+struct tgec_cfg {
+	bool rx_error_discard;
+	bool pause_ignore;
+	bool pause_forward_enable;
+	bool no_length_check_enable;
+	bool cmd_frame_enable;
+	bool send_idle_enable;
+	bool wan_mode_enable;
+	bool promiscuous_mode_enable;
+	bool tx_addr_ins_enable;
+	bool loopback_enable;
+	bool time_stamp_enable;
+	u16 max_frame_length;
+	u16 pause_quant;
+	u32 tx_ipg_length;
+};
+
+struct fman_mac {
+	/* Pointer to the memory mapped registers. */
+	struct tgec_regs __iomem *regs;
+	/* MAC address of the device */
+	u64 addr;
+	u16 max_speed;
+	void *dev_id; /* device cookie used by the exception cbs */
+	fman_mac_exception_cb *exception_cb;
+	fman_mac_exception_cb *event_cb;
+	/* pointer to driver's global address hash table  */
+	struct eth_hash_t *multicast_addr_hash;
+	/* pointer to driver's individual address hash table  */
+	struct eth_hash_t *unicast_addr_hash;
+	u8 mac_id;
+	u32 exceptions;
+	struct tgec_cfg *cfg;
+	void *fm;
+	struct fman_rev_info fm_rev_info;
+};
+
+static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+{
+	u32 tmp0, tmp1;
+
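+	/* The first four bytes of the MAC address are written to MAC_ADDR_0,
+	 * the remaining two bytes to MAC_ADDR_1.
+	 */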
+	tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
+	tmp1 = (u32)(adr[4] | adr[5] << 8);
+	iowrite32be(tmp0, &regs->mac_addr_0);
+	iowrite32be(tmp1, &regs->mac_addr_1);
+}
+
+static void set_dflts(struct tgec_cfg *cfg)
+{
+	cfg->wan_mode_enable = false;
+	cfg->promiscuous_mode_enable = false;
+	cfg->pause_forward_enable = false;
+	cfg->pause_ignore = false;
+	cfg->tx_addr_ins_enable = false;
+	cfg->loopback_enable = false;
+	cfg->cmd_frame_enable = false;
+	cfg->rx_error_discard = false;
+	cfg->send_idle_enable = false;
+	cfg->no_length_check_enable = true;
+	cfg->time_stamp_enable = false;
+	cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
+	cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
+	cfg->pause_quant = DEFAULT_PAUSE_QUANT;
+}
+
+static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
+		u32 exception_mask)
+{
+	u32 tmp;
+
+	/* Config */
+	tmp = CMD_CFG_CRC_FWD;
+	if (cfg->wan_mode_enable)
+		tmp |= CMD_CFG_WAN_MODE;
+	if (cfg->promiscuous_mode_enable)
+		tmp |= CMD_CFG_PROMIS_EN;
+	if (cfg->pause_forward_enable)
+		tmp |= CMD_CFG_PAUSE_FWD;
+	if (cfg->pause_ignore)
+		tmp |= CMD_CFG_PAUSE_IGNORE;
+	if (cfg->tx_addr_ins_enable)
+		tmp |= CMD_CFG_TX_ADDR_INS;
+	if (cfg->loopback_enable)
+		tmp |= CMD_CFG_LOOPBACK_EN;
+	if (cfg->cmd_frame_enable)
+		tmp |= CMD_CFG_CMD_FRM_EN;
+	if (cfg->rx_error_discard)
+		tmp |= CMD_CFG_RX_ER_DISC;
+	if (cfg->send_idle_enable)
+		tmp |= CMD_CFG_SEND_IDLE;
+	if (cfg->no_length_check_enable)
+		tmp |= CMD_CFG_NO_LEN_CHK;
+	if (cfg->time_stamp_enable)
+		tmp |= CMD_CFG_EN_TIMESTAMP;
+	iowrite32be(tmp, &regs->command_config);
+
+	/* Max Frame Length */
+	iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
+	/* Pause Time */
+	iowrite32be(cfg->pause_quant, &regs->pause_quant);
+
+	/* clear all pending events and set-up interrupts */
+	iowrite32be(0xffffffff, &regs->ievent);
+	iowrite32be(ioread32be(&regs->imask) | exception_mask, &regs->imask);
+
+	return 0;
+}
+
+static int check_init_parameters(struct fman_mac *tgec)
+{
+	if (tgec->max_speed < SPEED_10000) {
+		pr_err("10G MAC driver only supports 10G speed\n");
+		return -EINVAL;
+	}
+	if (tgec->addr == 0) {
+		pr_err("Ethernet 10G MAC must have a valid MAC address\n");
+		return -EINVAL;
+	}
+	if (!tgec->exception_cb) {
+		pr_err("uninitialized exception_cb\n");
+		return -EINVAL;
+	}
+	if (!tgec->event_cb) {
+		pr_err("uninitialized event_cb\n");
+		return -EINVAL;
+	}
+
+	/* FM_LEN_CHECK_ERRATA_FMAN_SW002 Errata workaround */
+	if (!tgec->cfg->no_length_check_enable) {
+		pr_warn("Length check must remain disabled (errata FMAN_SW002)\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int get_exception_flag(enum fman_mac_exceptions exception)
+{
+	u32 bit_mask;
+
+	switch (exception) {
+	case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
+		bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
+		break;
+	case FM_MAC_EX_10G_MDIO_CMD_CMPL:
+		bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
+		break;
+	case FM_MAC_EX_10G_REM_FAULT:
+		bit_mask = TGEC_IMASK_REM_FAULT;
+		break;
+	case FM_MAC_EX_10G_LOC_FAULT:
+		bit_mask = TGEC_IMASK_LOC_FAULT;
+		break;
+	case FM_MAC_EX_10G_TX_ECC_ER:
+		bit_mask = TGEC_IMASK_TX_ECC_ER;
+		break;
+	case FM_MAC_EX_10G_TX_FIFO_UNFL:
+		bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
+		break;
+	case FM_MAC_EX_10G_TX_FIFO_OVFL:
+		bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
+		break;
+	case FM_MAC_EX_10G_TX_ER:
+		bit_mask = TGEC_IMASK_TX_ER;
+		break;
+	case FM_MAC_EX_10G_RX_FIFO_OVFL:
+		bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
+		break;
+	case FM_MAC_EX_10G_RX_ECC_ER:
+		bit_mask = TGEC_IMASK_RX_ECC_ER;
+		break;
+	case FM_MAC_EX_10G_RX_JAB_FRM:
+		bit_mask = TGEC_IMASK_RX_JAB_FRM;
+		break;
+	case FM_MAC_EX_10G_RX_OVRSZ_FRM:
+		bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
+		break;
+	case FM_MAC_EX_10G_RX_RUNT_FRM:
+		bit_mask = TGEC_IMASK_RX_RUNT_FRM;
+		break;
+	case FM_MAC_EX_10G_RX_FRAG_FRM:
+		bit_mask = TGEC_IMASK_RX_FRAG_FRM;
+		break;
+	case FM_MAC_EX_10G_RX_LEN_ER:
+		bit_mask = TGEC_IMASK_RX_LEN_ER;
+		break;
+	case FM_MAC_EX_10G_RX_CRC_ER:
+		bit_mask = TGEC_IMASK_RX_CRC_ER;
+		break;
+	case FM_MAC_EX_10G_RX_ALIGN_ER:
+		bit_mask = TGEC_IMASK_RX_ALIGN_ER;
+		break;
+	default:
+		bit_mask = 0;
+		break;
+	}
+
+	return bit_mask;
+}
+
+static u32 get_mac_addr_hash_code(u64 eth_addr)
+{
+	u32 crc;
+
+	/* CRC calculation */
+	GET_MAC_ADDR_CRC(eth_addr, crc);
+
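+	/* Reverse the CRC bit order; callers take the 9 MSBs as the hash index */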
+	crc = bitrev32(crc);
+
+	return crc;
+}
+
+static void tgec_err_exception(void *handle)
+{
+	struct fman_mac *tgec = (struct fman_mac *)handle;
+	struct tgec_regs __iomem *regs = tgec->regs;
+	u32 event;
+
+	/* do not handle MDIO events */
+	event = ioread32be(&regs->ievent) &
+			   ~(TGEC_IMASK_MDIO_SCAN_EVENT |
+			   TGEC_IMASK_MDIO_CMD_CMPL);
+
+	event &= ioread32be(&regs->imask);
+
+	iowrite32be(event, &regs->ievent);
+
+	if (event & TGEC_IMASK_REM_FAULT)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
+	if (event & TGEC_IMASK_LOC_FAULT)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
+	if (event & TGEC_IMASK_TX_ECC_ER)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
+	if (event & TGEC_IMASK_TX_FIFO_UNFL)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
+	if (event & TGEC_IMASK_TX_FIFO_OVFL)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
+	if (event & TGEC_IMASK_TX_ER)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
+	if (event & TGEC_IMASK_RX_FIFO_OVFL)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
+	if (event & TGEC_IMASK_RX_ECC_ER)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
+	if (event & TGEC_IMASK_RX_JAB_FRM)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
+	if (event & TGEC_IMASK_RX_OVRSZ_FRM)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
+	if (event & TGEC_IMASK_RX_RUNT_FRM)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
+	if (event & TGEC_IMASK_RX_FRAG_FRM)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
+	if (event & TGEC_IMASK_RX_LEN_ER)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
+	if (event & TGEC_IMASK_RX_CRC_ER)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
+	if (event & TGEC_IMASK_RX_ALIGN_ER)
+		tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
+}
+
+static void free_init_resources(struct fman_mac *tgec)
+{
+	fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+			     FMAN_INTR_TYPE_ERR);
+
+	/* release the driver's group hash table */
+	free_hash_table(tgec->multicast_addr_hash);
+	tgec->multicast_addr_hash = NULL;
+
+	/* release the driver's individual hash table */
+	free_hash_table(tgec->unicast_addr_hash);
+	tgec->unicast_addr_hash = NULL;
+}
+
+static bool is_init_done(struct tgec_cfg *cfg)
+{
+	/* The config structure is freed at the end of tgec_init(), so a
+	 * NULL cfg means that initialization has completed.
+	 */
+	if (!cfg)
+		return true;
+
+	return false;
+}
+
+int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	u32 tmp;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (mode & COMM_MODE_RX)
+		tmp |= CMD_CFG_RX_EN;
+	if (mode & COMM_MODE_TX)
+		tmp |= CMD_CFG_TX_EN;
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	u32 tmp;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (mode & COMM_MODE_RX)
+		tmp &= ~CMD_CFG_RX_EN;
+	if (mode & COMM_MODE_TX)
+		tmp &= ~CMD_CFG_TX_EN;
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	u32 tmp;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (new_val)
+		tmp |= CMD_CFG_PROMIS_EN;
+	else
+		tmp &= ~CMD_CFG_PROMIS_EN;
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
+{
+	if (is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	tgec->cfg->max_frame_length = new_val;
+
+	return 0;
+}
+
+int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
+			     u16 pause_time, u16 __maybe_unused thresh_time)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	iowrite32be((u32)pause_time, &regs->pause_quant);
+
+	return 0;
+}
+
+int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	u32 tmp;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	tmp = ioread32be(&regs->command_config);
+	if (!en)
+		tmp |= CMD_CFG_PAUSE_IGNORE;
+	else
+		tmp &= ~CMD_CFG_PAUSE_IGNORE;
+	iowrite32be(tmp, &regs->command_config);
+
+	return 0;
+}
+
+int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+{
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
+	set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+
+	return 0;
+}
+
+int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	struct eth_hash_entry *hash_entry;
+	u32 crc, hash;
+	u64 addr;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	addr = ENET_ADDR_TO_UINT64(*eth_addr);
+
+	if (!(addr & GROUP_ADDRESS)) {
+		/* Unicast addresses not supported in hash */
+		pr_err("Unicast Address\n");
+		return -EINVAL;
+	}
+	/* CRC calculation */
+	crc = get_mac_addr_hash_code(addr);
+
+	/* Take 9 MSB bits */
+	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+	/* Create element to be added to the driver hash table */
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	if (!hash_entry)
+		return -ENOMEM;
+	hash_entry->addr = addr;
+	INIT_LIST_HEAD(&hash_entry->node);
+
+	list_add_tail(&hash_entry->node,
+		      &tgec->multicast_addr_hash->lsts[hash]);
+	iowrite32be((hash | TGEC_HASH_MCAST_EN), &regs->hashtable_ctrl);
+
+	return 0;
+}
+
+int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	struct eth_hash_entry *hash_entry = NULL;
+	struct list_head *pos;
+	u32 crc, hash;
+	u64 addr;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
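+	/* Convert the six address bytes into a 48-bit value (big-endian load) */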
+	addr = ((*(u64 *)eth_addr) >> 16);
+
+	/* CRC calculation */
+	crc = get_mac_addr_hash_code(addr);
+	/* Take 9 MSB bits */
+	hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+	list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
+		hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+		if (hash_entry->addr == addr) {
+			list_del_init(&hash_entry->node);
+			kfree(hash_entry);
+			break;
+		}
+	}
+	if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
+		iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
+			    &regs->hashtable_ctrl);
+
+	return 0;
+}
+
+int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	*mac_version = ioread32be(&regs->tgec_id);
+
+	return 0;
+}
+
+int tgec_set_exception(struct fman_mac *tgec,
+		       enum fman_mac_exceptions exception, bool enable)
+{
+	struct tgec_regs __iomem *regs = tgec->regs;
+	u32 bit_mask = 0;
+
+	if (!is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	bit_mask = get_exception_flag(exception);
+	if (bit_mask) {
+		if (enable)
+			tgec->exceptions |= bit_mask;
+		else
+			tgec->exceptions &= ~bit_mask;
+	} else {
+		pr_err("Undefined exception\n");
+		return -EINVAL;
+	}
+	if (enable)
+		iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask);
+	else
+		iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask);
+
+	return 0;
+}
+
+int tgec_init(struct fman_mac *tgec)
+{
+	struct tgec_cfg *cfg;
+	enet_addr_t eth_addr;
+	int err;
+
+	if (is_init_done(tgec->cfg))
+		return -EINVAL;
+
+	if (DEFAULT_RESET_ON_INIT &&
+	    (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
+		pr_err("Can't reset MAC!\n");
+		return -EINVAL;
+	}
+
+	err = check_init_parameters(tgec);
+	if (err)
+		return err;
+
+	cfg = tgec->cfg;
+
+	MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+	set_mac_address(tgec->regs, (u8 *)eth_addr);
+
+	/* interrupts */
+	/* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
+	if (tgec->fm_rev_info.major <= 2)
+		tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
+				      TGEC_IMASK_LOC_FAULT);
+
+	err = init(tgec->regs, cfg, tgec->exceptions);
+	if (err) {
+		free_init_resources(tgec);
+		pr_err("TGEC version doesn't support this i/f mode\n");
+		return err;
+	}
+
+	/* Max Frame Length */
+	err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
+				     cfg->max_frame_length);
+	if (err) {
+		pr_err("Setting max frame length FAILED\n");
+		free_init_resources(tgec);
+		return -EINVAL;
+	}
+
+	/* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
+	if (tgec->fm_rev_info.major == 2) {
+		struct tgec_regs __iomem *regs = tgec->regs;
+		u32 tmp;
+
+		/* restore the default Tx IPG length */
+		tmp = (ioread32be(&regs->tx_ipg_len) &
+		       ~TGEC_TX_IPG_LENGTH_MASK) | DEFAULT_TX_IPG_LENGTH;
+
+		iowrite32be(tmp, &regs->tx_ipg_len);
+	}
+
+	tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+	if (!tgec->multicast_addr_hash) {
+		free_init_resources(tgec);
+		pr_err("multicast hash table allocation FAILED\n");
+		return -ENOMEM;
+	}
+
+	tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
+	if (!tgec->unicast_addr_hash) {
+		free_init_resources(tgec);
+		pr_err("unicast hash table allocation FAILED\n");
+		return -ENOMEM;
+	}
+
+	fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
+			   FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
+
+	kfree(cfg);
+	tgec->cfg = NULL;
+
+	return 0;
+}
+
+int tgec_free(struct fman_mac *tgec)
+{
+	free_init_resources(tgec);
+
+	/* free the config structure if tgec_init() was never reached */
+	kfree(tgec->cfg);
+	tgec->cfg = NULL;
+
+	kfree(tgec);
+
+	return 0;
+}
+
+struct fman_mac *tgec_config(struct fman_mac_params *params)
+{
+	struct fman_mac *tgec;
+	struct tgec_cfg *cfg;
+	void __iomem *base_addr;
+
+	base_addr = params->base_addr;
+	/* allocate memory for the 10G MAC (TGEC) driver data structure. */
+	tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
+	if (!tgec)
+		return NULL;
+
+	/* allocate memory for the 10G MAC driver parameters data structure. */
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg) {
+		tgec_free(tgec);
+		return NULL;
+	}
+
+	/* Plant parameter structure pointer */
+	tgec->cfg = cfg;
+
+	set_dflts(cfg);
+
+	tgec->regs = (struct tgec_regs __iomem *)(base_addr);
+	tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+	tgec->max_speed = params->max_speed;
+	tgec->mac_id = params->mac_id;
+	tgec->exceptions = TGEC_DEFAULT_EXCEPTIONS;
+	tgec->exception_cb = params->exception_cb;
+	tgec->event_cb = params->event_cb;
+	tgec->dev_id = params->dev_id;
+	tgec->fm = params->fm;
+
+	/* Save FMan revision */
+	fman_get_revision(tgec->fm, &tgec->fm_rev_info);
+
+	return tgec;
+}
diff --git a/linux/drivers/net/ethernet/freescale/fman/fman_tgec.h b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.h
new file mode 100644
index 0000000..514bba9
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TGEC_H
+#define __TGEC_H
+
+#include "fman_mac.h"
+
+struct fman_mac *tgec_config(struct fman_mac_params *params);
+int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
+int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
+int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
+int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
+int tgec_init(struct fman_mac *tgec);
+int tgec_free(struct fman_mac *tgec);
+int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
+int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
+			     u16 pause_time, u16 thresh_time);
+int tgec_set_exception(struct fman_mac *tgec,
+		       enum fman_mac_exceptions exception, bool enable);
+int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
+int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
+int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
+
+#endif /* __TGEC_H */
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.c b/linux/drivers/net/ethernet/freescale/fman/mac.c
new file mode 100644
index 0000000..4b26211
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.c
@@ -0,0 +1,1180 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#ifdef __rtems__
+#include <sys/types.h>
+#include <net/if_dl.h>
+#include <bsp/fdt.h>
+#include "../../../../../../rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h"
+#endif /* __rtems__ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/etherdevice.h>
+
+#include "mac.h"
+#include "fman_mac.h"
+#include "fman_dtsec.h"
+#include "fman_tgec.h"
+#include "fman_memac.h"
+
+#define MAC_DESCRIPTION "FSL FMan MAC API based driver"
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_AUTHOR("Emil Medve <Emilian.Medve at Freescale.com>");
+
+MODULE_DESCRIPTION(MAC_DESCRIPTION);
+
+struct mac_priv_s {
+	struct device			*dev;
+	void __iomem			*vaddr;
+	u8				cell_index;
+	phy_interface_t			phy_if;
+	struct fman			*fman;
+	struct device_node		*phy_node;
+	/* List of multicast addresses */
+	struct list_head		mc_addr_list;
+	struct platform_device		*eth_dev;
+	struct fixed_phy_status		*fixed_link;
+	u16				speed;
+	u16				max_speed;
+
+	int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
+	int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
+};
+
+struct mac_address {
+	u8 addr[ETH_ALEN];
+	struct list_head list;
+};
+
+static void mac_exception(void *_mac_dev, enum fman_mac_exceptions ex)
+{
+	struct mac_device	*mac_dev;
+	struct mac_priv_s	*priv;
+
+	mac_dev = (struct mac_device *)_mac_dev;
+	priv = mac_dev->priv;
+
+	if (FM_MAC_EX_10G_RX_FIFO_OVFL == ex) {
+		/* don't flag RX FIFO after the first */
+		mac_dev->set_exception(mac_dev->fman_mac,
+				       FM_MAC_EX_10G_RX_FIFO_OVFL, false);
+		dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+	}
+
+#ifndef __rtems__
+	dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+		__func__, ex);
+#endif /* __rtems__ */
+}
+
+static void set_fman_mac_params(struct mac_device *mac_dev,
+				struct fman_mac_params *params)
+{
+	struct mac_priv_s *priv = mac_dev->priv;
+
+#ifndef __rtems__
+	params->base_addr = (typeof(params->base_addr))
+		devm_ioremap(priv->dev, mac_dev->res->start, 0x2000);
+#else /* __rtems__ */
+	params->base_addr = priv->vaddr;
+#endif /* __rtems__ */
+	memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
+	params->max_speed	= priv->max_speed;
+	params->phy_if		= priv->phy_if;
+	params->basex_if	= false;
+	params->mac_id		= priv->cell_index;
+	params->fm		= (void *)priv->fman;
+	params->exception_cb	= mac_exception;
+	params->event_cb	= mac_exception;
+	params->dev_id		= mac_dev;
+}
+
+static int tgec_initialization(struct mac_device *mac_dev)
+{
+	int err;
+	struct mac_priv_s	*priv;
+	struct fman_mac_params	params;
+	u32			version;
+
+	priv = mac_dev->priv;
+
+	set_fman_mac_params(mac_dev, &params);
+
+	mac_dev->fman_mac = tgec_config(&params);
+	if (!mac_dev->fman_mac) {
+		err = -EINVAL;
+		goto _return;
+	}
+
+	err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	err = tgec_init(mac_dev->fman_mac);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	/* For 10G MAC, disable Tx ECC exception */
+	err = mac_dev->set_exception(mac_dev->fman_mac,
+				     FM_MAC_EX_10G_TX_ECC_ER, false);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	err = tgec_get_version(mac_dev->fman_mac, &version);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
+
+	goto _return;
+
+_return_fm_mac_free:
+	tgec_free(mac_dev->fman_mac);
+
+_return:
+	return err;
+}
+
+static int dtsec_initialization(struct mac_device *mac_dev)
+{
+	int			err;
+	struct mac_priv_s	*priv;
+	struct fman_mac_params	params;
+	u32			version;
+
+	priv = mac_dev->priv;
+
+	set_fman_mac_params(mac_dev, &params);
+
+	mac_dev->fman_mac = dtsec_config(&params);
+	if (!mac_dev->fman_mac) {
+		err = -EINVAL;
+		goto _return;
+	}
+
+	err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	err = dtsec_init(mac_dev->fman_mac);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	/* For 1G MAC, disable the MIB counters overflow interrupt by default */
+	err = mac_dev->set_exception(mac_dev->fman_mac,
+				     FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	err = dtsec_get_version(mac_dev->fman_mac, &version);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
+
+	goto _return;
+
+_return_fm_mac_free:
+	dtsec_free(mac_dev->fman_mac);
+
+_return:
+	return err;
+}
+
+static int memac_initialization(struct mac_device *mac_dev)
+{
+	int			 err;
+	struct mac_priv_s	*priv;
+	struct fman_mac_params	 params;
+
+	priv = mac_dev->priv;
+
+	set_fman_mac_params(mac_dev, &params);
+
+	if (priv->max_speed == SPEED_10000)
+		params.phy_if = PHY_INTERFACE_MODE_XGMII;
+
+	mac_dev->fman_mac = memac_config(&params);
+	if (!mac_dev->fman_mac) {
+		err = -EINVAL;
+		goto _return;
+	}
+
+	err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+#ifndef __rtems__
+	err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
+	if (err < 0)
+		goto _return_fm_mac_free;
+#endif /* __rtems__ */
+
+	err = memac_init(mac_dev->fman_mac);
+	if (err < 0)
+		goto _return_fm_mac_free;
+
+	dev_info(priv->dev, "FMan MEMAC\n");
+
+	goto _return;
+
+_return_fm_mac_free:
+	memac_free(mac_dev->fman_mac);
+
+_return:
+	return err;
+}
+
+static int start(struct mac_device *mac_dev)
+{
+	int	 err;
+#ifndef __rtems__
+	struct phy_device *phy_dev = mac_dev->phy_dev;
+#endif /* __rtems__ */
+	struct mac_priv_s *priv = mac_dev->priv;
+
+	err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
+#ifndef __rtems__
+	if (!err && phy_dev)
+		phy_start(phy_dev);
+#endif /* __rtems__ */
+
+	return err;
+}
+
+static int stop(struct mac_device *mac_dev)
+{
+	struct mac_priv_s *priv = mac_dev->priv;
+
+#ifndef __rtems__
+	if (mac_dev->phy_dev)
+		phy_stop(mac_dev->phy_dev);
+#endif /* __rtems__ */
+
+	return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
+}
+
+static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+{
+	struct mac_priv_s	*priv;
+	struct mac_address	*old_addr, *tmp;
+#ifndef __rtems__
+	struct netdev_hw_addr	*ha;
+#endif /* __rtems__ */
+	int			err;
+	enet_addr_t		*addr;
+#ifdef __rtems__
+	struct ifnet		*ifp;
+	struct ifmultiaddr	*ifma;
+#endif /* __rtems__ */
+
+	priv = mac_dev->priv;
+
+	/* Clear previous address list */
+	list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
+		addr = (enet_addr_t *)old_addr->addr;
+		err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
+		if (err < 0)
+			return err;
+
+		list_del(&old_addr->list);
+		kfree(old_addr);
+	}
+
+	/* Add all the addresses from the new list */
+#ifndef __rtems__
+	netdev_for_each_mc_addr(ha, net_dev) {
+		addr = (enet_addr_t *)ha->addr;
+#else /* __rtems__ */
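+	/* In the RTEMS build, walk the BSD ifnet multicast list instead of
+	 * the Linux netdev multicast list.
+	 */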
+	ifp = mac_dev->net_dev.ifp;
+	if_maddr_rlock(ifp);
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		addr = (enet_addr_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+#endif /* __rtems__ */
+		err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
+		if (err < 0)
+			return err;
+
+		tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
+		if (!tmp)
+			return -ENOMEM;
+
+		ether_addr_copy(tmp->addr, *addr);
+		list_add(&tmp->list, &priv->mc_addr_list);
+	}
+#ifdef __rtems__
+	if_maddr_runlock(ifp);
+#endif /* __rtems__ */
+	return 0;
+}
+
+/* Avoid redundant calls to FMD, if the MAC driver already contains the desired
+ * active PAUSE settings. Otherwise, the new active settings should be reflected
+ * in FMan.
+ */
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
+{
+	struct fman_mac *fman_mac = mac_dev->fman_mac;
+	int err = 0;
+
+	if (rx != mac_dev->rx_pause_active) {
+		err = mac_dev->set_rx_pause(fman_mac, rx);
+		if (likely(err == 0))
+			mac_dev->rx_pause_active = rx;
+	}
+
+	if (tx != mac_dev->tx_pause_active) {
+		u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
+					 FSL_FM_PAUSE_TIME_DISABLE);
+
+		err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
+
+		if (likely(err == 0))
+			mac_dev->tx_pause_active = tx;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(fman_set_mac_active_pause);
+
+#ifndef __rtems__
+/* Determine the MAC RX/TX PAUSE frame settings based on PHY
+ * autonegotiation or the values set by ethtool.
+ */
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+			bool *tx_pause)
+{
+	struct phy_device *phy_dev = mac_dev->phy_dev;
+	u16 lcl_adv, rmt_adv;
+	u8 flowctrl;
+
+	*rx_pause = *tx_pause = false;
+
+	if (!phy_dev->duplex)
+		return;
+
+	/* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
+	 * are those set by ethtool.
+	 */
+	if (!mac_dev->autoneg_pause) {
+		*rx_pause = mac_dev->rx_pause_req;
+		*tx_pause = mac_dev->tx_pause_req;
+		return;
+	}
+
+	/* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
+	 * settings depend on the result of the link negotiation.
+	 */
+
+	/* get local capabilities */
+	lcl_adv = 0;
+	if (phy_dev->advertising & ADVERTISED_Pause)
+		lcl_adv |= ADVERTISE_PAUSE_CAP;
+	if (phy_dev->advertising & ADVERTISED_Asym_Pause)
+		lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+	/* get link partner capabilities */
+	rmt_adv = 0;
+	if (phy_dev->pause)
+		rmt_adv |= LPA_PAUSE_CAP;
+	if (phy_dev->asym_pause)
+		rmt_adv |= LPA_PAUSE_ASYM;
+
+	/* Calculate TX/RX settings based on local and peer advertised
+	 * symmetric/asymmetric PAUSE capabilities.
+	 */
+	flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+	if (flowctrl & FLOW_CTRL_RX)
+		*rx_pause = true;
+	if (flowctrl & FLOW_CTRL_TX)
+		*tx_pause = true;
+}
+EXPORT_SYMBOL(fman_get_pause_cfg);
+
+static void adjust_link_void(struct net_device *net_dev)
+{
+}
+
+static void adjust_link_dtsec(struct net_device *net_dev)
+{
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa_eth_data *eth_data = dev->platform_data;
+	struct mac_device *mac_dev = eth_data->mac_dev;
+	struct phy_device *phy_dev = mac_dev->phy_dev;
+	struct fman_mac *fman_mac;
+	bool rx_pause, tx_pause;
+	int err;
+
+	fman_mac = mac_dev->fman_mac;
+	if (!phy_dev->link) {
+		dtsec_restart_autoneg(fman_mac);
+
+		return;
+	}
+
+	dtsec_adjust_link(fman_mac, phy_dev->speed);
+	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+	if (err < 0)
+		netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+}
+
+static void adjust_link_memac(struct net_device *net_dev)
+{
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa_eth_data *eth_data = dev->platform_data;
+	struct mac_device *mac_dev = eth_data->mac_dev;
+	struct phy_device *phy_dev = mac_dev->phy_dev;
+	struct fman_mac *fman_mac;
+	bool rx_pause, tx_pause;
+	int err;
+
+	fman_mac = mac_dev->fman_mac;
+	memac_adjust_link(fman_mac, phy_dev->speed);
+
+	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+	if (err < 0)
+		netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *net_dev,
+		    struct mac_device *mac_dev,
+		    void (*adj_lnk)(struct net_device *))
+{
+	struct phy_device	*phy_dev;
+	struct mac_priv_s	*priv = mac_dev->priv;
+
+	phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
+				 priv->phy_if);
+	if (!phy_dev) {
+		netdev_err(net_dev, "Could not connect to PHY\n");
+		return -ENODEV;
+	}
+
+	/* Remove any features not supported by the controller */
+	phy_dev->supported &= mac_dev->if_support;
+	/* Enable the symmetric and asymmetric PAUSE frame advertisements,
+	 * as most of the PHY drivers do not enable them by default.
+	 */
+	phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+	phy_dev->advertising = phy_dev->supported;
+
+	mac_dev->phy_dev = phy_dev;
+
+	return 0;
+}
+
+static int dtsec_init_phy(struct net_device *net_dev,
+			  struct mac_device *mac_dev)
+{
+	return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
+}
+
+static int tgec_init_phy(struct net_device *net_dev,
+			 struct mac_device *mac_dev)
+{
+	return init_phy(net_dev, mac_dev, adjust_link_void);
+}
+
+static int memac_init_phy(struct net_device *net_dev,
+			  struct mac_device *mac_dev)
+{
+	return init_phy(net_dev, mac_dev, &adjust_link_memac);
+}
+#else /* __rtems__ */
+static void dtsec_do_adjust_link(struct mac_device *mac_dev, u16 speed)
+{
+	dtsec_adjust_link(mac_dev->fman_mac, speed);
+}
+
+static void tgec_do_adjust_link(struct mac_device *mac_dev, u16 speed)
+{
+	/* VOID */
+}
+
+static void memac_do_adjust_link(struct mac_device *mac_dev, u16 speed)
+{
+	memac_adjust_link(mac_dev->fman_mac, speed);
+}
+#endif /* __rtems__ */
+
+static void setup_dtsec(struct mac_device *mac_dev)
+{
+#ifndef __rtems__
+	mac_dev->init_phy		= dtsec_init_phy;
+#else /* __rtems__ */
+	mac_dev->adjust_link		= dtsec_do_adjust_link;
+#endif /* __rtems__ */
+	mac_dev->init			= dtsec_initialization;
+	mac_dev->set_promisc		= dtsec_set_promiscuous;
+	mac_dev->change_addr		= dtsec_modify_mac_address;
+	mac_dev->add_hash_mac_addr	= dtsec_add_hash_mac_address;
+	mac_dev->remove_hash_mac_addr	= dtsec_del_hash_mac_address;
+	mac_dev->set_tx_pause		= dtsec_set_tx_pause_frames;
+	mac_dev->set_rx_pause		= dtsec_accept_rx_pause_frames;
+	mac_dev->set_exception		= dtsec_set_exception;
+	mac_dev->set_multi		= set_multi;
+	mac_dev->start			= start;
+	mac_dev->stop			= stop;
+
+	mac_dev->priv->enable		= dtsec_enable;
+	mac_dev->priv->disable		= dtsec_disable;
+}
+
+static void setup_tgec(struct mac_device *mac_dev)
+{
+#ifndef __rtems__
+	mac_dev->init_phy		= tgec_init_phy;
+#else /* __rtems__ */
+	mac_dev->adjust_link		= tgec_do_adjust_link;
+#endif /* __rtems__ */
+	mac_dev->init			= tgec_initialization;
+	mac_dev->set_promisc		= tgec_set_promiscuous;
+	mac_dev->change_addr		= tgec_modify_mac_address;
+	mac_dev->add_hash_mac_addr	= tgec_add_hash_mac_address;
+	mac_dev->remove_hash_mac_addr	= tgec_del_hash_mac_address;
+	mac_dev->set_tx_pause		= tgec_set_tx_pause_frames;
+	mac_dev->set_rx_pause		= tgec_accept_rx_pause_frames;
+	mac_dev->set_exception		= tgec_set_exception;
+	mac_dev->set_multi		= set_multi;
+	mac_dev->start			= start;
+	mac_dev->stop			= stop;
+
+	mac_dev->priv->enable		= tgec_enable;
+	mac_dev->priv->disable		= tgec_disable;
+}
+
+static void setup_memac(struct mac_device *mac_dev)
+{
+#ifndef __rtems__
+	mac_dev->init_phy		= memac_init_phy;
+#else /* __rtems__ */
+	mac_dev->adjust_link		= memac_do_adjust_link;
+#endif /* __rtems__ */
+	mac_dev->init			= memac_initialization;
+	mac_dev->set_promisc		= memac_set_promiscuous;
+	mac_dev->change_addr		= memac_modify_mac_address;
+	mac_dev->add_hash_mac_addr	= memac_add_hash_mac_address;
+	mac_dev->remove_hash_mac_addr	= memac_del_hash_mac_address;
+	mac_dev->set_tx_pause		= memac_set_tx_pause_frames;
+	mac_dev->set_rx_pause		= memac_accept_rx_pause_frames;
+	mac_dev->set_exception		= memac_set_exception;
+	mac_dev->set_multi		= set_multi;
+	mac_dev->start			= start;
+	mac_dev->stop			= stop;
+
+	mac_dev->priv->enable		= memac_enable;
+	mac_dev->priv->disable		= memac_disable;
+}
+
+#define DTSEC_SUPPORTED \
+	(SUPPORTED_10baseT_Half \
+	| SUPPORTED_10baseT_Full \
+	| SUPPORTED_100baseT_Half \
+	| SUPPORTED_100baseT_Full \
+	| SUPPORTED_Autoneg \
+	| SUPPORTED_Pause \
+	| SUPPORTED_Asym_Pause \
+	| SUPPORTED_MII)
+
+#ifndef __rtems__
+static DEFINE_MUTEX(eth_lock);
+#endif /* __rtems__ */
+
+static const char phy_str[][11] = {
+	[PHY_INTERFACE_MODE_MII]		= "mii",
+	[PHY_INTERFACE_MODE_GMII]		= "gmii",
+	[PHY_INTERFACE_MODE_SGMII]		= "sgmii",
+	[PHY_INTERFACE_MODE_TBI]		= "tbi",
+	[PHY_INTERFACE_MODE_RMII]		= "rmii",
+	[PHY_INTERFACE_MODE_RGMII]		= "rgmii",
+	[PHY_INTERFACE_MODE_RGMII_ID]		= "rgmii-id",
+	[PHY_INTERFACE_MODE_RGMII_RXID]	= "rgmii-rxid",
+	[PHY_INTERFACE_MODE_RGMII_TXID]	= "rgmii-txid",
+	[PHY_INTERFACE_MODE_RTBI]		= "rtbi",
+	[PHY_INTERFACE_MODE_XGMII]		= "xgmii"
+};
+
+static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(phy_str); i++)
+		if (strcmp(str, phy_str[i]) == 0)
+			return (phy_interface_t)i;
+
+	return PHY_INTERFACE_MODE_MII;
+}
+
+static const u16 phy2speed[] = {
+	[PHY_INTERFACE_MODE_MII]		= SPEED_100,
+	[PHY_INTERFACE_MODE_GMII]		= SPEED_1000,
+	[PHY_INTERFACE_MODE_SGMII]		= SPEED_1000,
+	[PHY_INTERFACE_MODE_TBI]		= SPEED_1000,
+	[PHY_INTERFACE_MODE_RMII]		= SPEED_100,
+	[PHY_INTERFACE_MODE_RGMII]		= SPEED_1000,
+	[PHY_INTERFACE_MODE_RGMII_ID]		= SPEED_1000,
+	[PHY_INTERFACE_MODE_RGMII_RXID]	= SPEED_1000,
+	[PHY_INTERFACE_MODE_RGMII_TXID]	= SPEED_1000,
+	[PHY_INTERFACE_MODE_RTBI]		= SPEED_1000,
+	[PHY_INTERFACE_MODE_XGMII]		= SPEED_10000
+};
+
+static struct platform_device *dpaa_eth_add_device(int fman_id,
+						   struct mac_device *mac_dev,
+						   struct device_node *node)
+{
+	struct platform_device *pdev;
+	struct dpaa_eth_data data;
+	struct mac_priv_s	*priv;
+#ifndef __rtems__
+	static int dpaa_eth_dev_cnt;
+#endif /* __rtems__ */
+	int ret;
+
+	priv = mac_dev->priv;
+
+	data.mac_dev = mac_dev;
+	data.mac_hw_id = priv->cell_index;
+	data.fman_hw_id = fman_id;
+	data.mac_node = node;
+
+#ifndef __rtems__
+	mutex_lock(&eth_lock);
+
+	pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
+	if (!pdev) {
+		ret = -ENOMEM;
+		goto no_mem;
+	}
+
+	ret = platform_device_add_data(pdev, &data, sizeof(data));
+	if (ret)
+		goto err;
+
+	ret = platform_device_add(pdev);
+	if (ret)
+		goto err;
+
+	dpaa_eth_dev_cnt++;
+	mutex_unlock(&eth_lock);
+
+	return pdev;
+
+err:
+	platform_device_put(pdev);
+no_mem:
+	mutex_unlock(&eth_lock);
+
+	return ERR_PTR(ret);
+#else /* __rtems__ */
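+	/* In the RTEMS build the platform device is embedded in mac_dev and
+	 * the DPAA Ethernet driver is probed directly.
+	 */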
+	pdev = &mac_dev->pdev;
+	mac_dev->data = data;
+	pdev->platform_data = &mac_dev->data;
+	ret = dpaa_eth_priv_probe(pdev, mac_dev);
+	BSD_ASSERT(ret == 0);
+	return pdev;
+#endif /* __rtems__ */
+}
+
+#ifndef __rtems__
+static const struct of_device_id mac_match[] = {
+	{ .compatible	= "fsl,fman-dtsec" },
+	{ .compatible	= "fsl,fman-xgec" },
+	{ .compatible	= "fsl,fman-memac" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mac_match);
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+static int mac_probe(struct platform_device *_of_dev)
+#else /* __rtems__ */
+static int mac_probe(device_t _dev, struct platform_device *_of_dev, struct fman *fman)
+#endif /* __rtems__ */
+{
+#ifdef __rtems__
+	struct fman_mac_softc	*sc = device_get_softc(_dev);
+#endif /* __rtems__ */
+	int			 err, i, lenp;
+	struct device		*dev;
+#ifndef __rtems__
+	struct device_node	*mac_node, *dev_node, *tbi_node;
+#else /* __rtems__ */
+	struct device_node	*mac_node;
+#endif /* __rtems__ */
+	struct mac_device	*mac_dev;
+#ifndef __rtems__
+	struct platform_device	*of_dev;
+#endif /* __rtems__ */
+	struct resource		 res;
+	struct mac_priv_s	*priv;
+	const u8		*mac_addr;
+	const char		*char_prop;
+	const u32		*u32_prop;
+	u8			fman_id;
+	const phandle		*phandle_prop;
+
+	dev = &_of_dev->dev;
+	mac_node = dev->of_node;
+
+#ifndef __rtems__
+	mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
+	if (!mac_dev) {
+		err = -ENOMEM;
+		dev_err(dev, "devm_kzalloc() = %d\n", err);
+		goto _return;
+	}
+#else /* __rtems__ */
+	mac_dev = &sc->mac_dev;
+#endif /* __rtems__ */
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		err = -ENOMEM;
+		goto _return;
+	}
+
+	/* Save private information */
+	mac_dev->priv = priv;
+	priv->dev = dev;
+
+	if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
+		setup_dtsec(mac_dev);
+	} else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
+		setup_tgec(mac_dev);
+	} else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
+		setup_memac(mac_dev);
+	} else {
+#ifndef __rtems__
+		dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
+			mac_node->full_name);
+#endif /* __rtems__ */
+		err = -EINVAL;
+		goto _return;
+	}
+
+	/* Register mac_dev */
+	dev_set_drvdata(dev, mac_dev);
+
+	INIT_LIST_HEAD(&priv->mc_addr_list);
+
+	/* Get the FM node */
+#ifndef __rtems__
+	dev_node = of_get_parent(mac_node);
+	if (!dev_node) {
+		dev_err(dev, "of_get_parent(%s) failed\n",
+			mac_node->full_name);
+		err = -EINVAL;
+		goto _return_dev_set_drvdata;
+	}
+
+	of_dev = of_find_device_by_node(dev_node);
+	if (!of_dev) {
+		dev_err(dev, "of_find_device_by_node(%s) failed\n",
+			dev_node->full_name);
+		err = -EINVAL;
+		goto _return_of_node_put;
+	}
+
+	/* Get the FMan cell-index */
+	u32_prop = of_get_property(dev_node, "cell-index", &lenp);
+	if (!u32_prop) {
+		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+			dev_node->full_name);
+		err = -EINVAL;
+		goto _return_of_node_put;
+	}
+	WARN_ON(lenp != sizeof(u32));
+	fman_id = (u8)*u32_prop + 1; /* cell-index 0 => FMan id 1 */
+
+	priv->fman = fman_bind(&of_dev->dev);
+	if (!priv->fman) {
+		dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
+		err = -ENODEV;
+		goto _return_of_node_put;
+	}
+
+	of_node_put(dev_node);
+#else /* __rtems__ */
+	priv->fman = fman;
+	fman_id = (u8)device_get_unit(_dev);
+#endif /* __rtems__ */
+
+	/* Get the address of the memory mapped registers */
+	err = of_address_to_resource(mac_node, 0, &res);
+	if (err < 0) {
+		dev_err(dev, "of_address_to_resource(%s) = %d\n",
+			mac_node->full_name, err);
+		goto _return_dev_set_drvdata;
+	}
+
+#ifndef __rtems__
+	mac_dev->res = __devm_request_region(dev,
+					     fman_get_mem_region(priv->fman),
+					     res.start, res.end + 1 - res.start,
+					     "mac");
+	if (!mac_dev->res) {
+		dev_err(dev, "__devm_request_mem_region(mac) failed\n");
+		err = -EBUSY;
+		goto _return_dev_set_drvdata;
+	}
+
+	priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
+				   mac_dev->res->end + 1 - mac_dev->res->start);
+	if (!priv->vaddr) {
+		dev_err(dev, "devm_ioremap() failed\n");
+		err = -EIO;
+		goto _return_dev_set_drvdata;
+	}
+#else /* __rtems__ */
+	priv->vaddr = devm_ioremap(dev, res.start, res.end + 1 - res.start);
+#endif /* __rtems__ */
+
+#ifndef __rtems__
+#define TBIPA_OFFSET		0x1c
+#define TBIPA_DEFAULT_ADDR	5 /* override if used as external PHY addr. */
+	tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+	if (tbi_node) {
+		u32 tbiaddr = TBIPA_DEFAULT_ADDR;
+
+		u32_prop = of_get_property(tbi_node, "reg", NULL);
+		if (u32_prop)
+			tbiaddr = *u32_prop;
+		out_be32(priv->vaddr + TBIPA_OFFSET, tbiaddr);
+	}
+#endif /* __rtems__ */
+
+	if (!of_device_is_available(mac_node)) {
+#ifndef __rtems__
+		devm_iounmap(dev, priv->vaddr);
+		__devm_release_region(dev, fman_get_mem_region(priv->fman),
+				      res.start, res.end + 1 - res.start);
+		fman_unbind(priv->fman);
+		devm_kfree(dev, mac_dev);
+#endif /* __rtems__ */
+		dev_set_drvdata(dev, NULL);
+		return -ENODEV;
+	}
+
+	/* Get the cell-index */
+	u32_prop = of_get_property(mac_node, "cell-index", &lenp);
+	if (!u32_prop) {
+		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+			mac_node->full_name);
+		err = -EINVAL;
+		goto _return_dev_set_drvdata;
+	}
+	WARN_ON(lenp != sizeof(u32));
+	priv->cell_index = (u8)*u32_prop;
+
+	/* Get the MAC address */
+	mac_addr = of_get_mac_address(mac_node);
+	if (!mac_addr) {
+		dev_err(dev, "of_get_mac_address(%s) failed\n",
+			mac_node->full_name);
+		err = -EINVAL;
+		goto _return_dev_set_drvdata;
+	}
+	memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
+
+	/* Get the port handles */
+	phandle_prop = of_get_property(mac_node, "fsl,fman-ports", &lenp);
+	if (!phandle_prop) {
+		dev_err(dev, "of_get_property(%s, fsl,fman-ports) failed\n",
+			mac_node->full_name);
+		err = -EINVAL;
+		goto _return_dev_set_drvdata;
+	}
+	BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port));
+
+	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+#ifndef __rtems__
+		/* Find the port node */
+		dev_node = of_find_node_by_phandle(phandle_prop[i]);
+		if (!dev_node) {
+			dev_err(dev, "of_find_node_by_phandle() failed\n");
+			err = -EINVAL;
+			goto _return_of_node_put;
+		}
+
+		of_dev = of_find_device_by_node(dev_node);
+		if (!of_dev) {
+			dev_err(dev, "of_find_device_by_node(%s) failed\n",
+				dev_node->full_name);
+			err = -EINVAL;
+			goto _return_of_node_put;
+		}
+
+		mac_dev->port[i] = fman_port_bind(&of_dev->dev);
+		if (!mac_dev->port[i]) {
+			dev_err(dev, "dev_get_drvdata(%s) failed\n",
+				dev_node->full_name);
+			err = -EINVAL;
+			goto _return_of_node_put;
+		}
+		of_node_put(dev_node);
+#else /* __rtems__ */
+		int node;
+		struct fman_ivars *ivars;
+		device_t child;
+
+		node = fdt_node_offset_by_phandle(bsp_fdt_get(), phandle_prop[i]);
+		if (node < 0) {
+			err = -EINVAL;
+			goto _return_of_node_put;
+		}
+
+		ivars = kzalloc(sizeof(*ivars), GFP_KERNEL);
+		if (ivars == NULL) {
+			err = -ENOMEM;
+			goto _return_of_node_put;
+		}
+
+		ivars->dn.offset = node;
+		ivars->of_dev.dev.of_node = &ivars->dn;
+		ivars->of_dev.dev.base = _of_dev->dev.base;
+		ivars->fman = fman;
+
+		child = device_add_child(_dev, "fman_port", -1);
+		if (child == NULL) {
+			kfree(ivars);
+			err = -ENOMEM;
+			goto _return_of_node_put;
+		}
+
+		device_set_ivars(child, ivars);
+
+		err = device_probe_and_attach(child);
+		if (err != 0) {
+			kfree(ivars);
+			goto _return_of_node_put;
+		}
+
+		mac_dev->port[i] = dev_get_drvdata(&ivars->of_dev.dev);
+#endif /* __rtems__ */
+	}
+
+	/* Get the PHY connection type */
+	char_prop = (const char *)of_get_property(mac_node,
+						"phy-connection-type", NULL);
+	if (!char_prop) {
+		dev_warn(dev,
+			 "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+			 mac_node->full_name);
+		priv->phy_if = PHY_INTERFACE_MODE_MII;
+	} else {
+		priv->phy_if = str2phy(char_prop);
+	}
+
+	priv->speed		= phy2speed[priv->phy_if];
+	priv->max_speed		= priv->speed;
+#ifndef __rtems__
+	mac_dev->if_support	= DTSEC_SUPPORTED;
+	/* We don't support half-duplex in SGMII mode */
+	if (char_prop && strstr(char_prop, "sgmii"))
+		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
+					SUPPORTED_100baseT_Half);
+
+	/* Gigabit support (no half-duplex) */
+	if (priv->max_speed == 1000)
+		mac_dev->if_support |= SUPPORTED_1000baseT_Full;
+
+	/* The 10G interface only supports one mode */
+	if (char_prop && strstr(char_prop, "xgmii"))
+		mac_dev->if_support = SUPPORTED_10000baseT_Full;
+#endif /* __rtems__ */
+
+	/* Get the rest of the PHY information */
+#ifndef __rtems__
+	priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+	if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
+		struct phy_device *phy;
+
+		err = of_phy_register_fixed_link(mac_node);
+		if (err)
+			goto _return_dev_set_drvdata;
+
+		priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
+					   GFP_KERNEL);
+		if (!priv->fixed_link)
+			goto _return_dev_set_drvdata;
+
+		priv->phy_node = of_node_get(mac_node);
+		phy = of_phy_find_device(priv->phy_node);
+		if (!phy)
+			goto _return_dev_set_drvdata;
+
+		priv->fixed_link->link = phy->link;
+		priv->fixed_link->speed = phy->speed;
+		priv->fixed_link->duplex = phy->duplex;
+		priv->fixed_link->pause = phy->pause;
+		priv->fixed_link->asym_pause = phy->asym_pause;
+	}
+#endif /* __rtems__ */
+
+	err = mac_dev->init(mac_dev);
+	if (err < 0) {
+		dev_err(dev, "mac_dev->init() = %d\n", err);
+		of_node_put(priv->phy_node);
+		goto _return_dev_set_drvdata;
+	}
+
+	/* pause frame autonegotiation enabled */
+	mac_dev->autoneg_pause = true;
+
+	/* by initializing the values to false, force FMD to enable PAUSE frames
+	 * on RX and TX
+	 */
+	mac_dev->rx_pause_req = true;
+	mac_dev->tx_pause_req = true;
+	mac_dev->rx_pause_active = false;
+	mac_dev->tx_pause_active = false;
+	err = fman_set_mac_active_pause(mac_dev, true, true);
+	if (err < 0)
+		dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+
+	dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
+		 mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
+		 mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
+
+	priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
+	if (IS_ERR(priv->eth_dev)) {
+		dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
+			priv->cell_index);
+		priv->eth_dev = NULL;
+	}
+
+	goto _return;
+
+_return_of_node_put:
+#ifndef __rtems__
+	of_node_put(dev_node);
+#endif /* __rtems__ */
+_return_dev_set_drvdata:
+	kfree(priv->fixed_link);
+	devm_kfree(dev, priv);
+	dev_set_drvdata(dev, NULL);
+_return:
+	return err;
+}
+
+#ifndef __rtems__
+static struct platform_driver mac_driver = {
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.of_match_table	= mac_match,
+	},
+	.probe		= mac_probe,
+};
+
+builtin_platform_driver(mac_driver);
+#else /* __rtems__ */
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+
+#include <rtems/bsd/local/miibus_if.h>
+
+static int
+fman_mac_dev_probe(device_t dev)
+{
+	struct fman_ivars *ivars = device_get_ivars(dev);
+	int err;
+
+	err = mac_probe(dev, &ivars->of_dev, ivars->fman);
+	if (err == 0) {
+		device_set_desc(dev, "FMan MAC");
+		return (BUS_PROBE_SPECIFIC);
+	} else {
+		return (ENXIO);
+	}
+}
+
+static device_method_t fman_mac_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		fman_mac_dev_probe),
+	DEVMETHOD(device_attach,	fman_mac_dev_attach),
+	DEVMETHOD(device_detach,	fman_mac_dev_detach),
+	DEVMETHOD(device_suspend,	bus_generic_suspend),
+	DEVMETHOD(device_resume,	bus_generic_resume),
+	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
+
+	/* MII Interface */
+	DEVMETHOD(miibus_readreg,	fman_mac_miibus_read_reg),
+	DEVMETHOD(miibus_writereg,	fman_mac_miibus_write_reg),
+	DEVMETHOD(miibus_statchg,	fman_mac_miibus_statchg),
+
+	DEVMETHOD_END
+};
+
+driver_t fman_mac_driver = {
+	.name = "fman_mac",
+	.methods = fman_mac_methods,
+	.size = sizeof(struct fman_mac_softc)
+};
+
+static devclass_t fman_mac_devclass;
+
+DRIVER_MODULE(fman_mac, fman, fman_mac_driver, fman_mac_devclass, 0, 0);
+DRIVER_MODULE(miibus, fman_mac, miibus_driver, miibus_devclass, 0, 0);
+
+MODULE_DEPEND(fman_mac, ether, 1, 1, 1);
+MODULE_DEPEND(fman_mac, miibus, 1, 1, 1);
+#endif /* __rtems__ */
diff --git a/linux/drivers/net/ethernet/freescale/fman/mac.h b/linux/drivers/net/ethernet/freescale/fman/mac.h
new file mode 100644
index 0000000..727320e
--- /dev/null
+++ b/linux/drivers/net/ethernet/freescale/fman/mac.h
@@ -0,0 +1,147 @@
+/* Copyright 2008-2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MAC_H
+#define __MAC_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+#include <linux/list.h>
+#ifdef __rtems__
+#include <linux/netdevice.h>
+#endif /* __rtems__ */
+
+#include "fman_port.h"
+#include "fman.h"
+#include "fman_mac.h"
+
+struct fman_mac;
+struct mac_priv_s;
+
+#ifdef __rtems__
+struct dpaa_eth_data {
+	struct device_node *mac_node;
+	struct mac_device *mac_dev;
+	int mac_hw_id;
+	int fman_hw_id;
+};
+int dpaa_eth_priv_probe(struct platform_device *pdev,
+    struct mac_device *mac_dev);
+int dpa_eth_priv_start(struct net_device *net_dev);
+int dpa_eth_priv_stop(struct net_device *net_dev);
+#endif /* __rtems__ */
+struct mac_device {
+#ifndef __rtems__
+	struct resource		*res;
+#endif /* __rtems__ */
+	u8			 addr[ETH_ALEN];
+	struct fman_port	*port[2];
+#ifndef __rtems__
+	u32			 if_support;
+	struct phy_device	*phy_dev;
+#endif /* __rtems__ */
+#ifdef __rtems__
+	struct platform_device	 pdev;
+	struct dpaa_eth_data	 data;
+	struct net_device	 net_dev;
+#endif /* __rtems__ */
+
+	bool autoneg_pause;
+	bool rx_pause_req;
+	bool tx_pause_req;
+	bool rx_pause_active;
+	bool tx_pause_active;
+	bool promisc;
+
+#ifndef __rtems__
+	int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+#else /* __rtems__ */
+	void (*adjust_link)(struct mac_device *mac_dev, u16 speed);
+#endif /* __rtems__ */
+	int (*init)(struct mac_device *mac_dev);
+	int (*start)(struct mac_device *mac_dev);
+	int (*stop)(struct mac_device *mac_dev);
+	int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
+	int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+	int (*set_multi)(struct net_device *net_dev,
+			 struct mac_device *mac_dev);
+	int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
+	int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
+			    u16 pause_time, u16 thresh_time);
+	int (*set_exception)(struct fman_mac *mac_dev,
+			     enum fman_mac_exceptions exception, bool enable);
+	int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
+				 enet_addr_t *eth_addr);
+	int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
+				    enet_addr_t *eth_addr);
+
+	struct fman_mac		*fman_mac;
+	struct mac_priv_s	*priv;
+};
+
+#ifndef __rtems__
+struct dpaa_eth_data {
+	struct device_node *mac_node;
+	struct mac_device *mac_dev;
+	int mac_hw_id;
+	int fman_hw_id;
+};
+#endif /* __rtems__ */
+
+extern const char	*mac_driver_description;
+
+/**
+ * fman_set_mac_active_pause
+ * @mac_dev:	A pointer to the MAC device
+ * @rx:		Pause frame setting for RX
+ * @tx:		Pause frame setting for TX
+ *
+ * Set the MAC RX/TX PAUSE frames settings
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
+
+/**
+ * fman_get_pause_cfg
+ * @mac_dev:	A pointer to the MAC device
+ * @rx_pause:	Return value for the RX pause frame setting
+ * @tx_pause:	Return value for the TX pause frame setting
+ *
+ * Determine the MAC RX/TX PAUSE frames settings
+ *
+ * Return: None; the settings are reported through @rx_pause and @tx_pause.
+ */
+void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
+			bool *tx_pause);
+
+#endif	/* __MAC_H */
diff --git a/linux/drivers/soc/fsl/qbman/bman-debugfs.c b/linux/drivers/soc/fsl/qbman/bman-debugfs.c
new file mode 100644
index 0000000..09f5a28
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman-debugfs.c
@@ -0,0 +1,121 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/* Query Buffer Pool State */
+
+static int query_bp_state_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct bm_pool_state state;
+	int i, j;
+	u32 mask;
+
+	memset(&state, 0, sizeof(state));
+	ret = bman_query_pools(&state);
+	if (ret) {
+		seq_printf(file, "Error %d\n", ret);
+		return ret;
+	}
+
+	seq_puts(file, "bp_id  free_buffers_avail  bp_depleted\n");
+	for (i = 0; i < 2; i++) {
+		mask = 0x80000000;
+		for (j = 0; j < 32; j++) {
+			seq_printf(file,
+			 "  %-2u	   %-3s		    %-3s\n",
+			 (i * 32) + j,
+			 state.as.state.__state[i] & mask ? "no" : "yes",
+			 state.ds.state.__state[i] & mask ? "yes" : "no");
+			 mask >>= 1;
+		}
+	}
+
+	return 0;
+}
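
The two passes above encode the 64 buffer pools as two 32-bit words per state array, with pool 0 in the most significant bit of word 0; pool i*32+j is tested with mask 0x80000000 >> j. A minimal stand-alone sketch of that mapping (the helper name bp_bit is made up for illustration and is not part of the driver):

	#include <assert.h>
	#include <stdint.h>

	/* Map a buffer pool id (0..63) onto the word index and bit mask used by
	 * query_bp_state_show() when walking the __state[] arrays. */
	static void bp_bit(unsigned int bpid, unsigned int *word, uint32_t *mask)
	{
		*word = bpid / 32;			/* __state[0] or __state[1] */
		*mask = 0x80000000u >> (bpid % 32);	/* pool 0 is the MSB */
	}

	int main(void)
	{
		unsigned int word;
		uint32_t mask;

		bp_bit(0, &word, &mask);
		assert(word == 0 && mask == 0x80000000u);
		bp_bit(37, &word, &mask);
		assert(word == 1 && mask == (0x80000000u >> 5));
		return 0;
	}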
+
+static int query_bp_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, query_bp_state_show, NULL);
+}
+
+static const struct file_operations query_bp_state_fops = {
+	.owner		= THIS_MODULE,
+	.open		= query_bp_state_open,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+static int __init bman_debugfs_init(void)
+{
+	int ret = 0;
+	struct dentry *d;
+
+	dfs_root = debugfs_create_dir("bman", NULL);
+	if (dfs_root == NULL) {
+		pr_err("Cannot create dir\n");
+		return -ENOMEM;
+	}
+
+	d = debugfs_create_file("query_bp_state",
+		S_IRUGO,
+		dfs_root,
+		NULL,
+		&query_bp_state_fops);
+	if (d == NULL) {
+		ret = -ENOMEM;
+		pr_err("Cannot create query_bp_state\n");
+		goto _return;
+	}
+
+	return 0;
+
+_return:
+	debugfs_remove_recursive(dfs_root);
+
+	return ret;
+}
+
+static void __exit bman_debugfs_exit(void)
+{
+	debugfs_remove_recursive(dfs_root);
+}
+
+module_init(bman_debugfs_init);
+module_exit(bman_debugfs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/linux/drivers/soc/fsl/qbman/bman.c b/linux/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 0000000..35459bc
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,692 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright (c) 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/* Last updated for v00.79 of the BG */
+
+struct bman;
+
+/* Register offsets */
+#define REG_POOL_SWDET(n)	(0x0000 + ((n) * 0x04))
+#define REG_POOL_HWDET(n)	(0x0100 + ((n) * 0x04))
+#define REG_POOL_SWDXT(n)	(0x0200 + ((n) * 0x04))
+#define REG_POOL_HWDXT(n)	(0x0300 + ((n) * 0x04))
+#define REG_POOL_CONTENT(n)	(0x0600 + ((n) * 0x04))
+#define REG_FBPR_FPC		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FBPR_BARE		0x0c00
+#define REG_FBPR_BAR		0x0c04
+#define REG_FBPR_AR		0x0c10
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_ERR_ISR		0x0e00	/* + "enum bm_isr_reg" */
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
+#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
+#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
+#define BM_EIRQ_BSCN	0x00000001	/* Pool State Change Notification */
+
+/* BMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(BM_EIRQ_IVCI)
+
+union bman_ecir {
+	u32 ecir_raw;
+	struct {
+		u32 __reserved1:4;
+		u32 portal_num:4;
+		u32 __reserved2:12;
+		u32 numb:4;
+		u32 __reserved3:2;
+		u32 pid:6;
+	} __packed info;
+};
+
+union bman_eadr {
+	u32 eadr_raw;
+	struct {
+		u32 __reserved1:5;
+		u32 memid:3;
+		u32 __reserved2:14;
+		u32 eadr:10;
+	} __packed info;
+};
+
+struct bman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+	BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
+	BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
+	BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+	BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+	BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
+};
+#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt))
+
+struct bman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct bman_error_info_mdata error_mdata[] = {
+	BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
+	BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
+	BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
+};
+#define BMAN_ERR_MDATA_COUNT \
+	(sizeof(error_mdata)/sizeof(struct bman_error_info_mdata))
+
+/* Add this in Kconfig */
+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
+
+/**
+ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define bm_err_isr_status_read(bm)	\
+		__bm_err_isr_read(bm, bm_isr_status)
+#define bm_err_isr_status_clear(bm, m)	\
+		__bm_err_isr_write(bm, bm_isr_status, m)
+#define bm_err_isr_enable_read(bm)	\
+		__bm_err_isr_read(bm, bm_isr_enable)
+#define bm_err_isr_enable_write(bm, v)	\
+		__bm_err_isr_write(bm, bm_isr_enable, v)
+#define bm_err_isr_disable_read(bm)	\
+		__bm_err_isr_read(bm, bm_isr_disable)
+#define bm_err_isr_disable_write(bm, v)	\
+		__bm_err_isr_write(bm, bm_isr_disable, v)
+#define bm_err_isr_inhibit(bm)		\
+		__bm_err_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_err_isr_uninhibit(bm)	\
+		__bm_err_isr_write(bm, bm_isr_inhibit, 0)
+
+#ifndef __rtems__
+static u16 bman_pool_max;
+#else /* __rtems__ */
+/* FIXME */
+extern u16 bman_ip_rev;
+extern u16 bman_pool_max;
+#endif /* __rtems__ */
+
+/*
+ * TODO: unimplemented registers
+ *
+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
+ */
+
+/* Encapsulate "struct bman *" as a cast of the register space address. */
+
+static struct bman *bm_create(void *regs)
+{
+	return (struct bman *)regs;
+}
+
+static inline u32 __bm_in(struct bman *bm, u32 offset)
+{
+	return ioread32be((void *)bm + offset);
+}
+static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
+{
+	iowrite32be(val, (void *)bm + offset);
+}
+#define bm_in(reg)		__bm_in(bm, REG_##reg)
+#define bm_out(reg, val)	__bm_out(bm, REG_##reg, val)
+
+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
+{
+	return __bm_in(bm, REG_ERR_ISR + (n << 2));
+}
+
+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
+{
+	__bm_out(bm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = bm_in(IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+	u32 e = 0;	/* coefficient, exponent */
+	int oddbit = 0;
+
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	DPA_ASSERT(e < 0x10);
+	return val | (e << 8);
+}
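
The value returned above packs an 8-bit coefficient with a 4-bit power-of-two exponent in bits 8-11, rounding the coefficient up on request. A minimal decoder sketch, assuming only that layout (thresh_decode is an illustrative name, not a driver function):

	#include <assert.h>
	#include <stdint.h>

	/* Decode the coefficient/exponent form produced by __generate_thresh():
	 * bits 0-7 hold the coefficient, bits 8-11 the power-of-two exponent. */
	static uint32_t thresh_decode(uint32_t reg)
	{
		return (reg & 0xff) << ((reg >> 8) & 0xf);
	}

	int main(void)
	{
		/* __generate_thresh(1000, 0) yields 250 | (2 << 8) == 0x2fa. */
		assert(thresh_decode(0x2fa) == 1000);
		/* With roundup, __generate_thresh(1001, 1) yields 251 | (2 << 8). */
		assert(thresh_decode(0x2fb) == 1004);
		return 0;
	}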
+
+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
+			u32 hwdet, u32 hwdxt)
+{
+	DPA_ASSERT(pool < bman_pool_max);
+
+	bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
+	bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
+	bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
+	bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
+}
+
+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+{
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+			is_power_of_2(size));
+	/* choke if '[e]ba' has lower-alignment than 'size' */
+	DPA_ASSERT(!(ba & (size - 1)));
+	bm_out(FBPR_BARE, upper_32_bits(ba));
+	bm_out(FBPR_BAR, lower_32_bits(ba));
+	bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+}
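
bm_set_memory() expects a power-of-two FBPR area whose base is aligned to its size, and writes log2(size) - 1 into the low bits of FBPR_AR. For the 16 MiB region the RTEMS glue reserves further down, the arithmetic works out as in this stand-alone sketch (the base address is hypothetical, chosen only to satisfy the alignment rule):

	#include <assert.h>
	#include <stdint.h>

	/* log2 for exact powers of two, standing in for the kernel's ilog2(). */
	static unsigned int log2_exact(uint64_t size)
	{
		unsigned int exp = 0;

		while (size > 1) {
			size >>= 1;
			exp++;
		}
		return exp;
	}

	int main(void)
	{
		uint64_t size = 16777216;	/* 16 MiB FBPR region used on RTEMS */
		uint64_t base = 0x7f000000;	/* hypothetical, size-aligned base */

		assert((base & (size - 1)) == 0);	/* alignment requirement */
		assert((log2_exact(size) - 1) == 23);	/* low bits of FBPR_AR */
		return 0;
	}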
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these. */
+static struct bman *bm;
+
+/* And this state belongs to 'bm' */
+#ifndef __rtems__
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+	fbpr_a = rmem->base;
+	fbpr_sz = rmem->size;
+
+	WARN_ON(!(fbpr_a && fbpr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fbpr, 16777216);
+#define fbpr_a ((uintptr_t)&fbpr[0])
+#define fbpr_sz sizeof(fbpr)
+#endif /* __rtems__ */
+
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+	if (!bm)
+		return -ENODEV;
+	bm_set_pool(bm, bpid, thresholds[0], thresholds[1],
+		thresholds[2], thresholds[3]);
+	return 0;
+}
+EXPORT_SYMBOL(bm_pool_set);
+
+static void log_edata_bits(u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	pr_warn("ErrInt, EDATA:\n");
+	i = bit_count/32;
+	if (bit_count%32) {
+		i++;
+		mask = ~(mask << bit_count%32);
+	}
+	j = 16-i;
+	pr_warn("  0x%08x\n", bm_in(EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		pr_warn("  0x%08x\n", bm_in(EDATA(j)));
+}
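
The index and mask computed above select the tail of the 16 EDATA words: for bit_count captured bits the dump starts at word 16 - ceil(bit_count / 32), and the first word is masked down to the residual bits. A stand-alone restatement of that computation (edata_window is an illustrative name; the bit counts come from the error_mdata[] table above):

	#include <assert.h>
	#include <stdint.h>

	/* Mirror of the index/mask computation in log_edata_bits(). */
	static void edata_window(uint32_t bit_count, uint32_t *first, uint32_t *mask)
	{
		uint32_t words = bit_count / 32;

		*mask = 0xffffffff;
		if (bit_count % 32) {
			words++;
			*mask = ~(*mask << (bit_count % 32));
		}
		*first = 16 - words;
	}

	int main(void)
	{
		uint32_t first, mask;

		edata_window(192, &first, &mask);	/* stockpile memory entry */
		assert(first == 10 && mask == 0xffffffff);
		edata_window(256, &first, &mask);	/* SW portal ring memory entry */
		assert(first == 8 && mask == 0xffffffff);
		return 0;
	}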
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+	union bman_ecir ecir_val;
+	union bman_eadr eadr_val;
+
+	ecir_val.ecir_raw = bm_in(ECIR);
+	/* Is portal info valid */
+	if (ecsr_val & PORTAL_ECSR_ERR) {
+		pr_warn("ErrInt: SWP id %d, numb %d, pid %d\n",
+			ecir_val.info.portal_num, ecir_val.info.numb,
+			ecir_val.info.pid);
+	}
+	if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
+		eadr_val.eadr_raw = bm_in(EADR);
+		pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+			error_mdata[eadr_val.info.memid].txt,
+			error_mdata[eadr_val.info.memid].addr_mask
+				& eadr_val.info.eadr);
+		log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+	}
+}
+
+/* BMan interrupt handler */
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+	ier_val = bm_err_isr_enable_read(bm);
+	isr_val = bm_err_isr_status_read(bm);
+	ecsr_val = bm_in(ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+
+	for (i = 0; i < BMAN_HWE_COUNT; i++) {
+		if (bman_hwerr_txts[i].mask & isr_mask) {
+			pr_warn("ErrInt: %s\n", bman_hwerr_txts[i].txt);
+			if (bman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(isr_mask, ecsr_val);
+				/* Re-arm error capture registers */
+				bm_out(ECSR, ecsr_val);
+			}
+			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
+				pr_devel("Un-enabling error 0x%x\n",
+					bman_hwerr_txts[i].mask);
+				ier_val &= ~bman_hwerr_txts[i].mask;
+				bm_err_isr_enable_write(bm, ier_val);
+			}
+		}
+	}
+	bm_err_isr_status_clear(bm, isr_val);
+
+	return IRQ_HANDLED;
+}
+
+u32 bm_pool_free_buffers(u32 bpid)
+{
+	return bm_in(POOL_CONTENT(bpid));
+}
+EXPORT_SYMBOL(bm_pool_free_buffers);
+
+#ifndef __rtems__
+static ssize_t show_fbpr_fpc(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
+};
+
+static ssize_t show_pool_count(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	u32 data;
+	int i;
+
+	if (kstrtoint(dev_attr->attr.name, 10, &i))
+		return -EINVAL;
+	data = bm_in(POOL_CONTENT(i));
+	return snprintf(buf, PAGE_SIZE, "%d\n", data);
+};
+
+static ssize_t show_err_isr(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
+};
+
+static ssize_t show_sbec(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+		return -EINVAL;
+	return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
+
+/* Didn't use DEVICE_ATTR as 64 of these would be required.
+ * Initialize them when needed. */
+static char *name_attrs_pool_count; /* "xx" + null-terminator */
+static struct device_attribute *dev_attr_buffer_pool_count;
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *bman_dev_attributes[] = {
+	&dev_attr_fbpr_fpc.attr,
+	&dev_attr_err_isr.attr,
+	NULL
+};
+
+static struct attribute *bman_dev_ecr_attributes[] = {
+	&dev_attr_sbec_0.attr,
+	&dev_attr_sbec_1.attr,
+	NULL
+};
+
+static struct attribute **bman_dev_pool_count_attributes;
+
+/* root level */
+static const struct attribute_group bman_dev_attr_grp = {
+	.name = NULL,
+	.attrs = bman_dev_attributes
+};
+static const struct attribute_group bman_dev_ecr_grp = {
+	.name = "error_capture",
+	.attrs = bman_dev_ecr_attributes
+};
+static struct attribute_group bman_dev_pool_countent_grp = {
+	.name = "pool_count",
+};
+
+static int of_fsl_bman_remove(struct platform_device *ofdev)
+{
+	sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+	return 0;
+};
+#endif /* __rtems__ */
+
+static int of_fsl_bman_probe(struct platform_device *ofdev)
+{
+	int ret, err_irq, i;
+	struct device *dev = &ofdev->dev;
+	struct device_node *node = dev->of_node;
+	struct resource res;
+	u32 __iomem *regs;
+	u16 id;
+	u8 major, minor;
+
+	if (!of_device_is_available(node))
+		return -ENODEV;
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		dev_err(dev, "Can't get %s property 'reg'\n", node->full_name);
+		return ret;
+	}
+	regs = devm_ioremap(dev, res.start, res.end - res.start + 1);
+	if (!regs)
+		return -ENXIO;
+
+	bm = bm_create(regs);
+
+	bm_get_version(bm, &id, &major, &minor);
+	dev_info(dev, "Bman ver:%04x,%02x,%02x\n", id, major, minor);
+	if ((major == 1) && (minor == 0))
+		bman_pool_max = 64;
+	else if ((major == 2) && (minor == 0))
+		bman_pool_max = 8;
+	else if ((major == 2) && (minor == 1))
+		bman_pool_max = 64;
+	else
+		dev_warn(dev, "unknown Bman version, default to rev1.0\n");
+#ifdef __rtems__
+	bman_ip_rev = (u16)((major << 8) | minor);
+#endif /* __rtems__ */
+
+
+	bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+
+	err_irq = of_irq_to_resource(node, 0, NULL);
+	if (err_irq == NO_IRQ) {
+		dev_info(dev, "Can't get %s property 'interrupts'\n",
+			 node->full_name);
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+			       node);
+	if (ret)  {
+		dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+			ret, node->full_name);
+		return ret;
+	}
+	/* Disable Buffer Pool State Change */
+	bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
+	/* Write-to-clear any stale bits (e.g. starvation being asserted prior
+	 * to resource allocation during driver init). */
+	bm_err_isr_status_clear(bm, 0xffffffff);
+	/* Enable Error Interrupts */
+	bm_err_isr_enable_write(bm, 0xffffffff);
+
+#ifndef __rtems__
+	ret = sysfs_create_group(&dev->kobj, &bman_dev_attr_grp);
+	if (ret)
+		goto done;
+	ret = sysfs_create_group(&dev->kobj, &bman_dev_ecr_grp);
+	if (ret)
+		goto del_group_0;
+
+	name_attrs_pool_count = devm_kmalloc(dev,
+		sizeof(char) * bman_pool_max * 3, GFP_KERNEL);
+	if (!name_attrs_pool_count) {
+		ret = -ENOMEM;
+		goto del_group_1;
+	}
+
+	dev_attr_buffer_pool_count = devm_kmalloc(dev,
+		sizeof(struct device_attribute) * bman_pool_max, GFP_KERNEL);
+	if (!dev_attr_buffer_pool_count) {
+		ret = -ENOMEM;
+		goto del_group_1;
+	}
+
+	bman_dev_pool_count_attributes = devm_kmalloc(dev,
+		sizeof(struct attribute *) * (bman_pool_max + 1), GFP_KERNEL);
+	if (!bman_dev_pool_count_attributes) {
+		ret = -ENOMEM;
+		goto del_group_1;
+	}
+
+	for (i = 0; i < bman_pool_max; i++) {
+		ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
+		if (!ret)
+			goto del_group_1;
+		dev_attr_buffer_pool_count[i].attr.name =
+			(name_attrs_pool_count + i * 3);
+		dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
+		dev_attr_buffer_pool_count[i].show = show_pool_count;
+		bman_dev_pool_count_attributes[i] =
+			&dev_attr_buffer_pool_count[i].attr;
+	}
+	bman_dev_pool_count_attributes[bman_pool_max] = NULL;
+
+	bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
+
+	ret = sysfs_create_group(&dev->kobj, &bman_dev_pool_countent_grp);
+	if (ret)
+		goto del_group_1;
+
+	goto done;
+
+del_group_1:
+	sysfs_remove_group(&dev->kobj, &bman_dev_ecr_grp);
+del_group_0:
+	sysfs_remove_group(&dev->kobj, &bman_dev_attr_grp);
+done:
+	if (ret)
+		dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+#else /* __rtems__ */
+	(void)i;
+#endif /* __rtems__ */
+
+	return ret;
+};
+
+#ifndef __rtems__
+static const struct of_device_id of_fsl_bman_ids[] = {
+	{
+		.compatible = "fsl,bman",
+	},
+	{}
+};
+
+static struct platform_driver of_fsl_bman_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = of_fsl_bman_ids,
+	},
+	.probe = of_fsl_bman_probe,
+	.remove = of_fsl_bman_remove,
+};
+
+builtin_platform_driver(of_fsl_bman_driver);
+#else /* __rtems__ */
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <rtems.h>
+#include <bsp/fdt.h>
+#include <bsp/qoriq.h>
+
+static struct bm_portal_config bman_configs[NR_CPUS];
+
+u16 bman_ip_rev;
+
+u16 bman_pool_max;
+
+SYSINIT_REFERENCE(irqs);
+
+static void
+bman_sysinit(void)
+{
+	const char *fdt = bsp_fdt_get();
+	struct device_node dn;
+	struct platform_device ofdev = {
+		.dev = {
+			.of_node = &dn,
+			.base = (uintptr_t)&qoriq
+		}
+	};
+	const char *name;
+	int cpu_count = (int)rtems_get_processor_count();
+	int cpu;
+	int ret;
+	int node;
+	int parent;
+
+	qoriq_reset_qman_and_bman();
+	qoriq_clear_ce_portal(&qoriq_bman_portal[0][0],
+	    sizeof(qoriq_bman_portal[0]));
+	qoriq_clear_ci_portal(&qoriq_bman_portal[1][0],
+	    sizeof(qoriq_bman_portal[1]));
+
+	memset(&dn, 0, sizeof(dn));
+
+	name = "fsl,bman";
+	node = fdt_node_offset_by_compatible(fdt, 0, name);
+	if (node < 0)
+		panic("bman: no bman in FDT");
+
+	dn.full_name = name;
+	dn.offset = node;
+	ret = of_fsl_bman_probe(&ofdev);
+	if (ret != 0)
+		panic("bman: probe failed");
+
+	name = "fsl,bman-portal";
+	node = fdt_node_offset_by_compatible(fdt, 0, name);
+	if (node < 0)
+		panic("bman: no portals in FDT");
+	parent = fdt_parent_offset(fdt, node);
+	if (parent < 0)
+		panic("bman: no parent of portals in FDT");
+	node = fdt_first_subnode(fdt, parent);
+
+	dn.full_name = name;
+	dn.offset = node;
+
+	for (cpu = 0; cpu < cpu_count; ++cpu) {
+		struct bm_portal_config *pcfg = &bman_configs[cpu];
+		struct bman_portal *portal;
+		struct resource res;
+
+		if (node < 0)
+			panic("bman: missing portal in FDT");
+
+		ret = of_address_to_resource(&dn, 0, &res);
+		if (ret != 0)
+			panic("bman: no portal CE address");
+		pcfg->addr_virt[0] = (__iomem void *)
+		    ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
+		    (uintptr_t)&qoriq_bman_portal[0][0]);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
+		    (uintptr_t)&qoriq_bman_portal[1][0]);
+
+		ret = of_address_to_resource(&dn, 1, &res);
+		if (ret != 0)
+			panic("bman: no portal CI address");
+		pcfg->addr_virt[1] = (__iomem void *)
+		    ((uintptr_t)&qoriq_bman_portal[0][0] + (uintptr_t)res.start);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
+		    (uintptr_t)&qoriq_bman_portal[1][0]);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
+		    (uintptr_t)&qoriq_bman_portal[2][0]);
+
+		pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
+		if (pcfg->public_cfg.irq == NO_IRQ)
+			panic("bman: no portal interrupt");
+
+		pcfg->public_cfg.cpu = cpu;
+		bman_depletion_fill(&pcfg->public_cfg.mask);
+
+		portal = bman_create_affine_portal(pcfg);
+		if (portal == NULL)
+			panic("bman: cannot create portal");
+
+		bman_p_irqsource_add(portal, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+
+		node = fdt_next_subnode(fdt, node);
+		dn.offset = node;
+	}
+
+	bman_seed_bpid_range(0, bman_pool_max);
+}
+SYSINIT(bman_sysinit, SI_SUB_CPU, SI_ORDER_FIRST, bman_sysinit, NULL);
+#endif /* __rtems__ */
diff --git a/linux/drivers/soc/fsl/qbman/bman.h b/linux/drivers/soc/fsl/qbman/bman.h
new file mode 100644
index 0000000..c987938
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman.h
@@ -0,0 +1,542 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+extern u16 bman_pool_max;
+
+/***************************/
+/* Portal register assists */
+/***************************/
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x0000
+#define BM_REG_RCR_CI_CINH	0x0004
+#define BM_REG_RCR_ITR		0x0008
+#define BM_REG_CFG		0x0100
+#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
+#define BM_REG_ISR		0x0e00
+#define BM_REG_IIR		0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * would simply degrade performance. Hence the use of the __raw_*() interfaces,
+ * which only ensure that the compiler treats the portal registers as volatile
+ * (i.e. non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o)		__raw_readl((bm)->addr_ci + (o))
+#define __bm_out(bm, o, val)	__raw_writel((val), (bm)->addr_ci + (o))
+#define bm_in(reg)		__bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val)	__bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
+#define __bm_cl_in(bm, o)	__raw_readl((bm)->addr_ce + (o))
+#define __bm_cl_out(bm, o, val) \
+	do { \
+		u32 *__tmpclout = (bm)->addr_ce + (o); \
+		__raw_writel((val), __tmpclout); \
+		dcbf(__tmpclout); \
+	} while (0)
+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg)	    __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+	__bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
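
In other words, the distance is taken modulo the ring size, counting 'first' in and 'last' out. A stand-alone sketch of the same rule for the 8-entry RCR ring:

	#include <assert.h>
	#include <stdint.h>

	/* Same cyclic-difference rule as bm_cyc_diff() above, restated for a
	 * ring of size 8 (BM_RCR_SIZE): 'first' included, 'last' excluded. */
	static uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
	{
		return first <= last ? last - first : ringsize + last - first;
	}

	int main(void)
	{
		assert(cyc_diff(8, 2, 6) == 4);	/* no wrap */
		assert(cyc_diff(8, 6, 2) == 4);	/* index wrapped past 7 */
		return 0;
	}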
+
+/* Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode
+ *   Enum values use 3 letter codes. The first letter matches the portal mode,
+ *   the remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
+	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
+	bm_rcr_pce = 1,		/* PI index, cache-enabled */
+	bm_rcr_pvb = 2		/* valid-bit */
+};
+enum bm_rcr_cmode {		/* s/w-only */
+	bm_rcr_cci,		/* CI index, cache-inhibited */
+	bm_rcr_cce		/* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE		8
+
+struct bm_rcr {
+	struct bm_rcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	u32 busy;
+	enum bm_rcr_pmode pmode;
+	enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+	struct bm_mc_command *cr;
+	struct bm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum {
+		/* Can only be _mc_start()ed */
+		mc_idle,
+		/* Can only be _mc_commit()ed or _mc_abort()ed */
+		mc_user,
+		/* Can only be _mc_retry()ed */
+		mc_hw
+	} state;
+#endif
+};
+
+struct bm_addr {
+	void __iomem *addr_ce;	/* cache-enabled */
+	void __iomem *addr_ci;	/* cache-inhibited */
+};
+
+struct bm_portal {
+	struct bm_addr addr;
+	struct bm_rcr rcr;
+	struct bm_mc mc;
+	struct bm_portal_config config;
+} ____cacheline_aligned;
+
+/* --- RCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+	/* NB: this is odd-looking, but experiments show that it generates
+	 * fast code with essentially no branching overheads. We increment to
+	 * the next RCR pointer and handle overflow and 'vbit'. */
+	struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+	rcr->cursor = RCR_CARRYCLEAR(partial);
+	if (partial != rcr->cursor)
+		rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
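
RCR_CARRYCLEAR() works because the ring of BM_RCR_SIZE (8) entries of 64 bytes each occupies one naturally aligned 512-byte block, so stepping one entry past the end only sets the 0x200 'carry' bit, which the mask clears to wrap the cursor back to the ring base. A stand-alone sketch of that arithmetic (the base address is illustrative):

	#include <assert.h>
	#include <stdint.h>

	#define RING_SIZE	8	/* BM_RCR_SIZE */
	#define ENTRY_SHIFT	6	/* 64-byte ring entries */

	/* Same carry-clearing wrap as RCR_CARRYCLEAR(), on plain integers. */
	static uintptr_t carryclear(uintptr_t p)
	{
		return p & ~(uintptr_t)(RING_SIZE << ENTRY_SHIFT);
	}

	int main(void)
	{
		uintptr_t ring = 0x1000;	/* hypothetical 512-byte aligned base */
		uintptr_t last = ring + 7 * 64;	/* cursor at the final entry */

		assert(carryclear(last + 64) == ring);		/* one past the end wraps */
		assert(carryclear(ring + 64) == ring + 64);	/* interior stays put */
		return 0;
	}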
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+		__maybe_unused enum bm_rcr_cmode cmode)
+{
+	/* This use of 'register', as well as all other occurrences, is because
+	 * it has been observed to generate much faster code with gcc than is
+	 * otherwise the case. */
+	register struct bm_rcr *rcr = &portal->rcr;
+	u32 cfg;
+	u8 pi;
+
+	rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
+	rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	rcr->cursor = rcr->ring + pi;
+	rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ?  BM_RCR_VERB_VBIT : 0;
+	rcr->available = BM_RCR_SIZE - 1
+		- bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+	rcr->ithresh = bm_in(RCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+	rcr->pmode = pmode;
+	rcr->cmode = cmode;
+#endif
+	cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+	bm_out(CFG, cfg);
+	return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+	u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+	DPA_ASSERT(!rcr->busy);
+	if (pi != RCR_PTR2IDX(rcr->cursor))
+		pr_crit("losing uncommitted RCR entries\n");
+	if (ci != rcr->ci)
+		pr_crit("missing existing RCR completions\n");
+	if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+		pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(!rcr->busy);
+	if (!rcr->available)
+		return NULL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 1;
+#endif
+	dcbz_64(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(rcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+					struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
+	if (rcr->available == 1)
+		return NULL;
+	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	dcbf_64(rcr->cursor);
+	RCR_INC(rcr);
+	rcr->available--;
+	dcbz_64(rcr->cursor);
+	return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode == bm_rcr_pci);
+	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	RCR_INC(rcr);
+	rcr->available--;
+	hwsync();
+	bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+	bm_cl_invalidate(RCR_PI);
+	bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+	rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	RCR_INC(rcr);
+	rcr->available--;
+	lwsync();
+	bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	struct bm_rcr_entry *rcursor;
+
+	DPA_ASSERT(rcr->busy);
+	DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
+	lwsync();
+	rcursor = rcr->cursor;
+	rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+	dcbf_64(rcursor);
+	RCR_INC(rcr);
+	rcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+
+	DPA_ASSERT(rcr->cmode == bm_rcr_cci);
+	rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+	diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+
+	DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+	bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+	u8 diff, old_ci = rcr->ci;
+
+	DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+	rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+	bm_cl_invalidate(RCR_CI);
+	diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+	rcr->available += diff;
+	return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	rcr->ithresh = ithresh;
+	bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+	register struct bm_rcr *rcr = &portal->rcr;
+
+	return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+	register struct bm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.addr_ce + BM_CL_CR;
+	mc->rr = portal->addr.addr_ce + BM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+			BM_MCC_VERB_VBIT) ?  0 : 1;
+	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (mc->state != mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+	register struct bm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_user;
+#endif
+	dcbz_64(mc->cr);
+	return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+	__maybe_unused register struct bm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+	register struct bm_mc *mc = &portal->mc;
+	struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPA_ASSERT(mc->state == mc_user);
+	lwsync();
+	mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+	dcbf(mc->cr);
+	dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+	register struct bm_mc *mc = &portal->mc;
+	struct bm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPA_ASSERT(mc->state == mc_hw);
+	/* The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering... */
+	if (!__raw_readb(&rr->verb)) {
+		dcbit_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = mc_idle;
+#endif
+	return rr;
+}
+
+/* --- Portal interrupt register API --- */
+
+static inline int bm_isr_init(__always_unused struct bm_portal *portal)
+{
+	return 0;
+}
+
+static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
+{
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+					int enable)
+{
+	u32 val;
+
+	DPA_ASSERT(bpid < bman_pool_max);
+
+	/* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+	val = __bm_in(&portal->addr, SCN_REG(bpid));
+	if (enable)
+		val |= SCN_BIT(bpid);
+	else
+		val &= ~SCN_BIT(bpid);
+	__bm_out(&portal->addr, SCN_REG(bpid), val);
+}
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+	return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+					u32 val)
+{
+	__bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+	struct bm_mc_command *bm_cmd;
+	struct bm_mc_result *bm_res;
+	int aq_count = 0;
+	bool stop = false;
+
+	while (!stop) {
+		/* Acquire buffers until empty */
+		bm_cmd = bm_mc_start(p);
+		bm_cmd->acquire.bpid = bpid;
+		bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE |  1);
+		while (!(bm_res = bm_mc_result(p)))
+			cpu_relax();
+		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+			/* Pool is empty */
+			/* TBD: Should we do a few extra iterations in
+			 * case some other blocks keep buffers 'on deck',
+			 * which may also be problematic? */
+			stop = true;
+		} else
+			++aq_count;
+	}
+	return 0;
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_api.c b/linux/drivers/soc/fsl/qbman/bman_api.c
new file mode 100644
index 0000000..cdfcebb
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_api.c
@@ -0,0 +1,1123 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman.h"
+
+/* Compilation constants */
+#define RCR_THRESH	2	/* reread h/w CI when running out of space */
+#define IRQNAME		"BMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */
+#define FSL_DPA_PORTAL_SHARE 1  /* Allow portals to be shared */
+
+struct bman_portal {
+	struct bm_portal p;
+	/* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+	struct bman_depletion *pools;
+	int thresh_set;
+	unsigned long irq_sources;
+	u32 slowpoll;	/* only used when interrupts are off */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+	raw_spinlock_t sharing_lock; /* only used if is_shared */
+#ifndef __rtems__
+	int is_shared;
+	struct bman_portal *sharing_redirect;
+#endif /* __rtems__ */
+#endif
+	/* When the cpu-affine portal is activated, this is non-NULL */
+	const struct bm_portal_config *config;
+	/* 64-entry hash-table of pool objects that are tracking depletion
+	 * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
+	 * we're not fussy about cache-misses and so forth - whereas the above
+	 * members should all fit in one cacheline.
+	 * BTW, with 64 entries in the hash table and 64 buffer pools to track,
+	 * you'll never guess the hash-function ... */
+	struct bman_pool *cb[64];
+	char irqname[MAX_IRQNAME];
+	/* Track if the portal was alloced by the driver */
+	u8 alloced;
+};
+
+
+#ifdef FSL_DPA_PORTAL_SHARE
+/* For an explanation of the locking, redirection, or affine-portal logic,
+ * please consult the QMan driver for details. This is the same, only simpler
+ * (no fiddly QMan-specific bits.) */
+#ifndef __rtems__
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+		else \
+			local_irq_save(irqflags); \
+	} while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+						   irqflags); \
+		else \
+			local_irq_restore(irqflags); \
+	} while (0)
+#else /* __rtems__ */
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+    raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+    raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
+#endif /* __rtems__ */
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+#ifndef __rtems__
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+#endif /* __rtems__ */
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+static inline struct bman_portal *get_raw_affine_portal(void)
+{
+	return &get_cpu_var(bman_affine_portal);
+}
+#ifdef FSL_DPA_PORTAL_SHARE
+static inline struct bman_portal *get_affine_portal(void)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+
+#ifndef __rtems__
+	if (p->sharing_redirect)
+		return p->sharing_redirect;
+#endif /* __rtems__ */
+	return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(bman_affine_portal);
+}
+static inline struct bman_portal *get_poll_portal(void)
+{
+	return this_cpu_ptr(&bman_affine_portal);
+}
+#define put_poll_portal()
+
+/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals. */
+struct bman_pool {
+	struct bman_pool_params params;
+	/* Used for hash-table admin when using depletion notifications. */
+	struct bman_portal *portal;
+	struct bman_pool *next;
+	/* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
+	struct bm_buffer *sp;
+	unsigned int sp_fill;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_t in_use;
+#endif
+};
+
+/* (De)Registration of depletion notification callbacks */
+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
+{
+	__maybe_unused unsigned long irqflags;
+
+	pool->portal = portal;
+	PORTAL_IRQ_LOCK(portal, irqflags);
+	pool->next = portal->cb[pool->params.bpid];
+	portal->cb[pool->params.bpid] = pool;
+	if (!pool->next)
+		/* First object for that bpid on this portal, enable the BSCN
+		 * mask bit. */
+		bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
+	PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
+static void depletion_unlink(struct bman_pool *pool)
+{
+	struct bman_pool *it, *last = NULL;
+	struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
+	__maybe_unused unsigned long irqflags;
+
+	PORTAL_IRQ_LOCK(pool->portal, irqflags);
+	it = *base;	/* <-- gotcha, don't do this prior to the irq_save */
+	while (it != pool) {
+		last = it;
+		it = it->next;
+	}
+	if (!last)
+		*base = pool->next;
+	else
+		last->next = pool->next;
+	if (!last && !pool->next) {
+		/* Last object for that bpid on this portal, disable the BSCN
+		 * mask bit. */
+		bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
+		/* And "forget" that we last saw this pool as depleted */
+		bman_depletion_unset(&pool->portal->pools[1],
+					pool->params.bpid);
+	}
+	PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
+}
+
+/* In the case that the application's core loop calls qman_poll() and
+ * bman_poll(), we ought to balance how often we incur the overheads of the
+ * slow-path poll. We'll use two decrementer sources. The idle decrementer
+ * constant is used when the last slow-poll detected no work to do, and the busy
+ * decrementer constant when the last slow-poll had work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+	struct bman_portal *p = ptr;
+	u32 clear = p->irq_sources;
+	u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
+
+	clear |= __poll_portal_slow(p, is);
+	bm_isr_status_clear(&p->p, clear);
+	return IRQ_HANDLED;
+}
+
+
+struct bman_portal *bman_create_portal(
+				       struct bman_portal *portal,
+				       const struct bm_portal_config *config)
+{
+	struct bm_portal *__p;
+	const struct bman_depletion *pools = &config->public_cfg.mask;
+	int ret;
+	u8 bpid = 0;
+
+	if (!portal) {
+		portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+		if (!portal)
+			return portal;
+		portal->alloced = 1;
+	} else
+		portal->alloced = 0;
+
+	__p = &portal->p;
+
+	/* prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference... */
+	__p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+	__p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+	if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
+		pr_err("RCR initialisation failed\n");
+		goto fail_rcr;
+	}
+	if (bm_mc_init(__p)) {
+		pr_err("MC initialisation failed\n");
+		goto fail_mc;
+	}
+	if (bm_isr_init(__p)) {
+		pr_err("ISR initialisation failed\n");
+		goto fail_isr;
+	}
+	portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+	if (!portal->pools)
+		goto fail_pools;
+	portal->pools[0] = *pools;
+	bman_depletion_init(portal->pools + 1);
+	while (bpid < bman_pool_max) {
+		/* Default to all BPIDs disabled, we enable as required at
+		 * run-time. */
+		bm_isr_bscn_mask(__p, bpid, 0);
+		bpid++;
+	}
+	portal->slowpoll = 0;
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	portal->rcri_owned = NULL;
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+	raw_spin_lock_init(&portal->sharing_lock);
+#ifndef __rtems__
+	portal->is_shared = config->public_cfg.is_shared;
+	portal->sharing_redirect = NULL;
+#endif /* __rtems__ */
+#endif
+	memset(&portal->cb, 0, sizeof(portal->cb));
+	/* Write-to-clear any stale interrupt status bits */
+	bm_isr_disable_write(__p, 0xffffffff);
+	portal->irq_sources = 0;
+	bm_isr_enable_write(__p, portal->irq_sources);
+	bm_isr_status_clear(__p, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+	if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+			portal)) {
+		pr_err("request_irq() failed\n");
+		goto fail_irq;
+	}
+#ifndef __rtems__
+	if ((config->public_cfg.cpu != -1) &&
+			irq_can_set_affinity(config->public_cfg.irq) &&
+			irq_set_affinity(config->public_cfg.irq,
+				cpumask_of(config->public_cfg.cpu))) {
+		pr_err("irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+#endif /* __rtems__ */
+
+	/* Need RCR to be empty before continuing */
+	ret = bm_rcr_get_fill(__p);
+	if (ret) {
+		pr_err("RCR unclean\n");
+		goto fail_rcr_empty;
+	}
+	/* Success */
+	portal->config = config;
+
+	bm_isr_disable_write(__p, 0);
+	bm_isr_uninhibit(__p);
+	return portal;
+fail_rcr_empty:
+#ifndef __rtems__
+fail_affinity:
+#endif /* __rtems__ */
+	free_irq(config->public_cfg.irq, portal);
+fail_irq:
+	kfree(portal->pools);
+fail_pools:
+	bm_isr_finish(__p);
+fail_isr:
+	bm_mc_finish(__p);
+fail_mc:
+	bm_rcr_finish(__p);
+fail_rcr:
+	if (portal->alloced)
+		kfree(portal);
+	return NULL;
+}
+
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config)
+{
+	struct bman_portal *portal;
+
+	portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
+	portal = bman_create_portal(portal, config);
+#ifndef __rtems__
+	if (portal) {
+		spin_lock(&affine_mask_lock);
+		cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+		spin_unlock(&affine_mask_lock);
+	}
+#endif /* __rtems__ */
+	return portal;
+}
+
+
+#ifndef __rtems__
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+								int cpu)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+	struct bman_portal *p = &per_cpu(bman_affine_portal, cpu);
+
+	BUG_ON(p->config);
+	BUG_ON(p->is_shared);
+	BUG_ON(!redirect->config->public_cfg.is_shared);
+	p->irq_sources = 0;
+	p->sharing_redirect = redirect;
+	put_affine_portal();
+	return p;
+#else
+	BUG();
+	return NULL;
+#endif
+}
+#endif /* __rtems__ */
+
+void bman_destroy_portal(struct bman_portal *bm)
+{
+	const struct bm_portal_config *pcfg = bm->config;
+
+	bm_rcr_cce_update(&bm->p);
+	bm_rcr_cce_update(&bm->p);
+
+	free_irq(pcfg->public_cfg.irq, bm);
+
+	kfree(bm->pools);
+	bm_isr_finish(&bm->p);
+	bm_mc_finish(&bm->p);
+	bm_rcr_finish(&bm->p);
+	bm->config = NULL;
+	if (bm->alloced)
+		kfree(bm);
+}
+
+const struct bm_portal_config *bman_destroy_affine_portal(void)
+{
+	struct bman_portal *bm = get_raw_affine_portal();
+	const struct bm_portal_config *pcfg;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (bm->sharing_redirect) {
+		bm->sharing_redirect = NULL;
+		put_affine_portal();
+		return NULL;
+	}
+	bm->is_shared = 0;
+#endif /* __rtems__ */
+#endif
+	pcfg = bm->config;
+	bman_destroy_portal(bm);
+#ifndef __rtems__
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+	put_affine_portal();
+	return pcfg;
+}
+
+/* When release logic waits on available RCR space, we need a global waitqueue
+ * in the case of "affine" use (as the waits wake on different cpus which means
+ * different portals - so we can't wait on any per-portal waitqueue). */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
+{
+	struct bman_depletion tmp;
+	u32 ret = is;
+
+	/* There is a gotcha to be aware of. If we do the query before clearing
+	 * the status register, we may miss state changes that occur between the
+	 * two. If we write to clear the status register before the query, the
+	 * cache-enabled query command may overtake the status register write
+	 * unless we use a heavyweight sync (which we don't want). Instead, we
+	 * write-to-clear the status register then *read it back* before doing
+	 * the query, hence the odd while loop with the 'is' accumulation. */
+	if (is & BM_PIRQ_BSCN) {
+		struct bm_mc_result *mcr;
+		__maybe_unused unsigned long irqflags;
+		unsigned int i, j;
+		u32 __is;
+
+		bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+		while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
+			is |= __is;
+			bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+		}
+		is &= ~BM_PIRQ_BSCN;
+		PORTAL_IRQ_LOCK(p, irqflags);
+		bm_mc_start(&p->p);
+		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+		while (!(mcr = bm_mc_result(&p->p)))
+			cpu_relax();
+		tmp = mcr->query.ds.state;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		for (i = 0; i < 2; i++) {
+			int idx = i * 32;
+			/* tmp is a mask of currently-depleted pools.
+			 * pools[0] is mask of those we care about.
+			 * pools[1] is our previous view (we only want to
+			 * be told about changes). */
+			tmp.__state[i] &= p->pools[0].__state[i];
+			if (tmp.__state[i] == p->pools[1].__state[i])
+				/* fast-path, nothing to see, move along */
+				continue;
+			for (j = 0; j <= 31; j++, idx++) {
+				struct bman_pool *pool = p->cb[idx];
+				int b4 = bman_depletion_get(&p->pools[1], idx);
+				int af = bman_depletion_get(&tmp, idx);
+
+				if (b4 == af)
+					continue;
+				while (pool) {
+					pool->params.cb(p, pool,
+						pool->params.cb_ctx, af);
+					pool = pool->next;
+				}
+			}
+		}
+		p->pools[1] = tmp;
+	}
+
+	if (is & BM_PIRQ_RCRI) {
+		__maybe_unused unsigned long irqflags;
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		bm_rcr_cce_update(&p->p);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+		/* If waiting for sync, we only cancel the interrupt threshold
+		 * when the ring utilisation hits zero. */
+		if (p->rcri_owned) {
+			if (!bm_rcr_get_fill(&p->p)) {
+				p->rcri_owned = NULL;
+				bm_rcr_set_ithresh(&p->p, 0);
+			}
+		} else
+#endif
+		bm_rcr_set_ithresh(&p->p, 0);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+		bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
+		is &= ~BM_PIRQ_RCRI;
+	}
+
+	/* There should be no status register bits left undefined */
+	DPA_ASSERT(!is);
+	return ret;
+}
+
+const struct bman_portal_config *bman_get_portal_config(void)
+{
+	struct bman_portal *p = get_affine_portal();
+	const struct bman_portal_config *ret = &p->config->public_cfg;
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_get_portal_config);
+
+u32 bman_irqsource_get(void)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_get);
+
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
+{
+	__maybe_unused unsigned long irqflags;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (p->sharing_redirect)
+		return -EINVAL;
+#endif /* __rtems__ */
+#endif
+	PORTAL_IRQ_LOCK(p, irqflags);
+	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+	bm_isr_enable_write(&p->p, p->irq_sources);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	return 0;
+}
+EXPORT_SYMBOL(bman_p_irqsource_add);
+
+int bman_irqsource_add(__maybe_unused u32 bits)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	int ret = bman_p_irqsource_add(p, bits);
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_add);
+
+int bman_irqsource_remove(u32 bits)
+{
+	struct bman_portal *p = get_raw_affine_portal();
+	__maybe_unused unsigned long irqflags;
+	u32 ier;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (p->sharing_redirect) {
+		put_affine_portal();
+		return -EINVAL;
+	}
+#endif /* __rtems__ */
+#endif
+	/* Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert. */
+	PORTAL_IRQ_LOCK(p, irqflags);
+	bits &= BM_PIRQ_VISIBLE;
+	clear_bits(bits, &p->irq_sources);
+	bm_isr_enable_write(&p->p, p->irq_sources);
+	ier = bm_isr_enable_read(&p->p);
+	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering. */
+	bm_isr_status_clear(&p->p, ~ier);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(bman_irqsource_remove);
+
+#ifndef __rtems__
+const cpumask_t *bman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(bman_affine_cpus);
+#endif /* __rtems__ */
+
+u32 bman_poll_slow(void)
+{
+	struct bman_portal *p = get_poll_portal();
+	u32 ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (unlikely(p->sharing_redirect))
+		ret = (u32)-1;
+	else
+#endif /* __rtems__ */
+#endif
+	{
+		u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+
+		ret = __poll_portal_slow(p, is);
+		bm_isr_status_clear(&p->p, ret);
+	}
+	put_poll_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_poll_slow);
+
+/* Legacy wrapper */
+void bman_poll(void)
+{
+	struct bman_portal *p = get_poll_portal();
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (unlikely(p->sharing_redirect))
+		goto done;
+#endif /* __rtems__ */
+#endif
+	if (!(p->slowpoll--)) {
+		u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+		u32 active = __poll_portal_slow(p, is);
+
+		if (active)
+			p->slowpoll = SLOW_POLL_BUSY;
+		else
+			p->slowpoll = SLOW_POLL_IDLE;
+	}
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+done:
+#endif /* __rtems__ */
+#endif
+	put_poll_portal();
+}
+EXPORT_SYMBOL(bman_poll);
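+
+/*
+ * Editorial usage sketch (hypothetical, not part of the imported driver): a
+ * context that leaves portal interrupts disabled (irq_sources stays 0) would
+ * typically call bman_poll() from its main loop so that depletion state
+ * changes and RCR completions are still serviced.
+ */
+static __maybe_unused void bman_poll_example_loop(int iterations)
+{
+	while (iterations-- > 0)
+		bman_poll();
+}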
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+	struct bman_pool *pool = NULL;
+	u32 bpid;
+
+	if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+		if (bman_alloc_bpid(&bpid))
+			return NULL;
+	} else {
+		if (params->bpid >= bman_pool_max)
+			return NULL;
+		bpid = params->bpid;
+	}
+#ifdef CONFIG_FSL_BMAN
+	if (params->flags & BMAN_POOL_FLAG_THRESH) {
+		if (bm_pool_set(bpid, params->thresholds))
+			goto err;
+	}
+#else
+	if (params->flags & BMAN_POOL_FLAG_THRESH)
+		goto err;
+#endif
+	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		goto err;
+	pool->sp = NULL;
+	pool->sp_fill = 0;
+	pool->params = *params;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_set(&pool->in_use, 1);
+#endif
+	if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+		pool->params.bpid = bpid;
+	if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
+		pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
+					GFP_KERNEL);
+		if (!pool->sp)
+			goto err;
+	}
+	if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
+		struct bman_portal *p = get_affine_portal();
+
+		if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
+			pr_err("Depletion events disabled for bpid %d\n", bpid);
+			goto err;
+		}
+		depletion_link(p, pool);
+		put_affine_portal();
+	}
+	return pool;
+err:
+#ifdef CONFIG_FSL_BMAN
+	if (params->flags & BMAN_POOL_FLAG_THRESH)
+		bm_pool_set(bpid, zero_thresholds);
+#endif
+	if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+		bman_release_bpid(bpid);
+	if (pool) {
+		kfree(pool->sp);
+		kfree(pool);
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+#ifdef CONFIG_FSL_BMAN
+	if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+		bm_pool_set(pool->params.bpid, zero_thresholds);
+#endif
+	if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
+		depletion_unlink(pool);
+	if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
+		if (pool->sp_fill)
+			pr_err("Stockpile not flushed, has %u in bpid %u.\n",
+				pool->sp_fill, pool->params.bpid);
+		kfree(pool->sp);
+		pool->sp = NULL;
+		pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
+	}
+	if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+		bman_release_bpid(pool->params.bpid);
+	kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+	return &pool->params;
+}
+EXPORT_SYMBOL(bman_get_params);
+
+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
+{
+	if (avail)
+		bm_rcr_cce_prefetch(&p->p);
+	else
+		bm_rcr_cce_update(&p->p);
+}
+
+int bman_rcr_is_empty(void)
+{
+	__maybe_unused unsigned long irqflags;
+	struct bman_portal *p = get_affine_portal();
+	u8 avail;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	update_rcr_ci(p, 0);
+	avail = bm_rcr_get_fill(&p->p);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return avail == 0;
+}
+EXPORT_SYMBOL(bman_rcr_is_empty);
+
+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
+#ifdef FSL_DPA_CAN_WAIT
+					__maybe_unused struct bman_pool *pool,
+#endif
+					__maybe_unused unsigned long *irqflags,
+					__maybe_unused u32 flags)
+{
+	struct bm_rcr_entry *r;
+	u8 avail;
+
+	*p = get_affine_portal();
+	PORTAL_IRQ_LOCK(*p, (*irqflags));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+		if ((*p)->rcri_owned) {
+			PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+			put_affine_portal();
+			return NULL;
+		}
+		(*p)->rcri_owned = pool;
+	}
+#endif
+	avail = bm_rcr_get_avail(&(*p)->p);
+	if (avail < 2)
+		update_rcr_ci(*p, avail);
+	r = bm_rcr_start(&(*p)->p);
+	if (unlikely(!r)) {
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+		if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+				(flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
+			(*p)->rcri_owned = NULL;
+#endif
+		PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+		put_affine_portal();
+	}
+	return r;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
+					struct bman_pool *pool,
+					__maybe_unused unsigned long *irqflags,
+					u32 flags)
+{
+	struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
+
+	if (!rcr)
+		bm_rcr_set_ithresh(&(*p)->p, 1);
+	return rcr;
+}
+
+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
+					struct bman_pool *pool,
+					__maybe_unused unsigned long *irqflags,
+					u32 flags)
+{
+	struct bm_rcr_entry *rcr;
+#ifndef FSL_DPA_CAN_WAIT_SYNC
+	pool = NULL;
+#endif
+#ifndef __rtems__
+	if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+		wait_event_interruptible(affine_queue,
+			(rcr = __wait_rel_start(p, pool, irqflags, flags)));
+	else
+#endif /* __rtems__ */
+		wait_event(affine_queue,
+			(rcr = __wait_rel_start(p, pool, irqflags, flags)));
+	return rcr;
+}
+#endif
+
+/* To facilitate better copying of bufs into the ring without either (a) copying
+ * noise into the first byte (prematurely triggering the command), or (b) being
+ * very inefficient by copying small fields using read-modify-write. */
+struct overlay_bm_buffer {
+	u32 first;
+	u32 second;
+};
+
+static inline int __bman_release(struct bman_pool *pool,
+			const struct bm_buffer *bufs, u8 num, u32 flags)
+{
+	struct bman_portal *p;
+	struct bm_rcr_entry *r;
+	struct overlay_bm_buffer *o_dest;
+	struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0];
+	__maybe_unused unsigned long irqflags;
+	u32 i = num - 1;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & BMAN_RELEASE_FLAG_WAIT)
+		r = wait_rel_start(&p, pool, &irqflags, flags);
+	else
+		r = try_rel_start(&p, pool, &irqflags, flags);
+#else
+	r = try_rel_start(&p, &irqflags, flags);
+#endif
+	if (!r)
+		return -EBUSY;
+	/* We can copy all but the first entry, as this can trigger badness
+	 * with the valid-bit. Use the overlay to mask the verb byte. */
+	o_dest = (struct overlay_bm_buffer *)&r->bufs[0];
+	o_dest->first = (o_src->first & 0x0000ffff) |
+		(((u32)pool->params.bpid << 16) & 0x00ff0000);
+	o_dest->second = o_src->second;
+	if (i)
+		copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+			(num & BM_RCR_VERB_BUFCOUNT_MASK));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	/* if we wish to sync we need to set the threshold after h/w sees the
+	 * new ring entry. As we're mixing cache-enabled and cache-inhibited
+	 * accesses, this requires a heavy-weight sync. */
+	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+		hwsync();
+		bm_rcr_set_ithresh(&p->p, 1);
+	}
+#endif
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->rcri_owned != pool));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->rcri_owned != pool));
+	}
+#endif
+	return 0;
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+			u32 flags)
+{
+	int ret = 0;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!num || (num > 8))
+		return -EINVAL;
+	if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+		return -EINVAL;
+#endif
+	/* Without stockpile, this API is a pass-through to the h/w operation */
+	if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+		return __bman_release(pool, bufs, num, flags);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!atomic_dec_and_test(&pool->in_use)) {
+		pr_crit("Parallel attempts to enter bman_released() detected.");
+		panic("only one instance of bman_released/acquired allowed");
+	}
+#endif
+	/* This needs some explanation. Adding the given buffers may take the
+	 * stockpile over the threshold, but in fact the stockpile may already
+	 * *be* over the threshold if a previous release-to-hw attempt had
+	 * failed. So we have 3 cases to cover;
+	 *   1. we add to the stockpile and don't hit the threshold,
+	 *   2. we add to the stockpile, hit the threshold and release-to-hw,
+	 *   3. we have to release-to-hw before adding to the stockpile
+	 *	(not enough room in the stockpile for case 2).
+	 * Our constraints on thresholds guarantee that in case 3, there must be
+	 * at least 8 bufs already in the stockpile, so all release-to-hw ops
+	 * are for 8 bufs. Despite all this, the API must indicate whether the
+	 * given buffers were taken off the caller's hands, irrespective of
+	 * whether a release-to-hw was attempted. */
+	while (num) {
+		/* Add buffers to stockpile if they fit */
+		if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) {
+			copy_words(pool->sp + pool->sp_fill, bufs,
+				sizeof(struct bm_buffer) * num);
+			pool->sp_fill += num;
+			num = 0; /* --> will return success no matter what */
+		}
+		/* Do hw op if hitting the high-water threshold */
+		if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) {
+			ret = __bman_release(pool,
+				pool->sp + (pool->sp_fill - 8), 8, flags);
+			if (ret) {
+				ret = (num ? ret : 0);
+				goto release_done;
+			}
+			pool->sp_fill -= 8;
+		}
+	}
+release_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_inc(&pool->in_use);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(bman_release);
+
+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
+					u8 num)
+{
+	struct bman_portal *p = get_affine_portal();
+	struct bm_mc_command *mcc;
+	struct bm_mc_result *mcr;
+	__maybe_unused unsigned long irqflags;
+	int ret;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = bm_mc_start(&p->p);
+	mcc->acquire.bpid = pool->params.bpid;
+	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+			(num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+	while (!(mcr = bm_mc_result(&p->p)))
+		cpu_relax();
+	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+	if (bufs)
+		copy_words(&bufs[0], &mcr->acquire.bufs[0],
+				num * sizeof(bufs[0]));
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (ret != num)
+		ret = -ENOMEM;
+	return ret;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+			u32 flags)
+{
+	int ret = 0;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!num || (num > 8))
+		return -EINVAL;
+	if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+		return -EINVAL;
+#endif
+	/* Without stockpile, this API is a pass-through to the h/w operation */
+	if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+		return __bman_acquire(pool, bufs, num);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (!atomic_dec_and_test(&pool->in_use)) {
+		pr_crit("Parallel attempts to enter bman_acquire() detected.");
+		panic("only one instance of bman_released/acquired allowed");
+	}
+#endif
+	/* Only need a h/w op if we'll hit the low-water thresh */
+	if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
+			(pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
+		/* refill stockpile with max amount, but if max amount
+		 * isn't available, try amount the user wants */
+		int bufcount = 8;
+
+		ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount);
+		if (ret < 0 && bufcount != num) {
+			bufcount = num;
+			/* Maybe buffer pool has less than 8 */
+			ret = __bman_acquire(pool, pool->sp + pool->sp_fill,
+						bufcount);
+		}
+		if (ret < 0)
+			goto hw_starved;
+		DPA_ASSERT(ret == bufcount);
+		pool->sp_fill += bufcount;
+	} else {
+hw_starved:
+		if (pool->sp_fill < num) {
+			ret = -ENOMEM;
+			goto acquire_done;
+		}
+	}
+	copy_words(bufs, pool->sp + (pool->sp_fill - num),
+		sizeof(struct bm_buffer) * num);
+	pool->sp_fill -= num;
+	ret = num;
+acquire_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+	atomic_inc(&pool->in_use);
+#endif
+	return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
+{
+	u8 num;
+	int ret;
+
+	while (pool->sp_fill) {
+		num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
+		ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
+				     num, flags);
+		if (ret)
+			return ret;
+		pool->sp_fill -= num;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(bman_flush_stockpile);
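+
+/*
+ * Editorial usage sketch (hypothetical names; not part of the imported
+ * driver): typical life-cycle of a stockpile-backed pool using the release,
+ * acquire and flush calls above. Error handling is kept to a minimum and the
+ * buffer address is a placeholder only.
+ */
+static __maybe_unused int bman_stockpile_example(void)
+{
+	struct bman_pool_params pparams = {
+		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_STOCKPILE,
+	};
+	struct bman_pool *pool = bman_new_pool(&pparams);
+	struct bm_buffer buf;
+	int ret;
+
+	if (!pool)
+		return -ENOMEM;
+	bm_buffer_set64(&buf, 0x10000000ULL); /* hypothetical buffer address */
+	/* Goes to the stockpile first; spills to h/w past the high-water mark */
+	ret = bman_release(pool, &buf, 1, 0);
+	if (!ret)
+		/* Served from the stockpile when possible, else refilled from h/w */
+		ret = (bman_acquire(pool, &buf, 1, 0) == 1) ? 0 : -ENOMEM;
+	/* The stockpile must be drained back to h/w before the pool is freed */
+	if (!ret)
+		ret = bman_flush_stockpile(pool, 0);
+	bman_free_pool(pool);
+	return ret;
+}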
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+	struct bman_portal *p = get_affine_portal();
+	struct bm_mc_result *mcr;
+	__maybe_unused unsigned long irqflags;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	bm_mc_start(&p->p);
+	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+	while (!(mcr = bm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
+	*state = mcr->query;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(bman_query_pools);
+
+#ifdef CONFIG_FSL_BMAN
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+	return bm_pool_free_buffers(pool->params.bpid);
+}
+EXPORT_SYMBOL(bman_query_free_buffers);
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+	u32 bpid;
+
+	bpid = bman_get_params(pool)->bpid;
+
+	return bm_pool_set(bpid, thresholds);
+}
+EXPORT_SYMBOL(bman_update_pool_thresholds);
+#endif
+
+int bman_shutdown_pool(u32 bpid)
+{
+	struct bman_portal *p = get_affine_portal();
+	__maybe_unused unsigned long irqflags;
+	int ret;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	ret = bm_shutdown_pool(&p->p, bpid);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(bman_shutdown_pool);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+#ifndef __rtems__
+	return portal->sharing_redirect ? NULL : portal->config;
+#else /* __rtems__ */
+	return portal->config;
+#endif /* __rtems__ */
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_portal.c b/linux/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 0000000..f9fd022
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,399 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+/*
+ * Global variables for the maximum portal/pool numbers supported by this
+ * BMan version
+ */
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+u16 bman_pool_max;
+EXPORT_SYMBOL(bman_pool_max);
+
+/* After initialising cpus that own shared portal configs, we cache the
+ * resulting portals (ie. not just the configs) in this array. Then we
+ * initialise slave cpus that don't have their own portals, redirecting them to
+ * portals from this cache in a round-robin assignment. */
+static struct bman_portal *shared_portals[NR_CPUS] __initdata;
+static int num_shared_portals __initdata;
+static int shared_portals_idx __initdata;
+
+static LIST_HEAD(unused_pcfgs);
+static void *affine_bportals[NR_CPUS];
+
+#ifndef __rtems__
+static const int flags[] = {0, _PAGE_GUARDED | _PAGE_NO_CACHE};
+#else /* __rtems__ */
+static const int flags[] = {0, 0};
+#endif /* __rtems__ */
+
+static struct bm_portal_config * __init get_pcfg(struct list_head *list)
+{
+	struct bm_portal_config *pcfg;
+
+	if (list_empty(list))
+		return NULL;
+	pcfg = list_entry(list->prev, struct bm_portal_config, list);
+	list_del(&pcfg->list);
+
+	return pcfg;
+}
+
+static struct bman_portal * __init init_pcfg(struct bm_portal_config *pcfg)
+{
+	struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+	if (p) {
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+		bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+#endif
+		pr_info("Portal %sinitialised, cpu %d\n",
+			pcfg->public_cfg.is_shared ? "(shared) " : "",
+			pcfg->public_cfg.cpu);
+		affine_bportals[pcfg->public_cfg.cpu] = p;
+	} else
+		pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
+
+	return p;
+}
+
+static void __init init_slave(int cpu)
+{
+	struct bman_portal *p;
+
+	p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+	if (!p)
+		pr_err("Slave portal failure on cpu %d\n", cpu);
+	else
+		pr_info("Portal %s initialised, cpu %d\n", "(slave) ", cpu);
+	if (shared_portals_idx >= num_shared_portals)
+		shared_portals_idx = 0;
+	affine_bportals[cpu] = p;
+}
+
+/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
+ * parsing is in dpaa_sys.h. The syntax is a comma-separated list of indexes
+ * and/or ranges of indexes, with each being optionally prefixed by "s" to
+ * explicitly mark it or them for sharing.
+ *    Eg;
+ *	  bportals=s0,1-3,s4
+ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
+ * portals, and any remaining cpus share the portals that are assigned to cpus 0
+ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
+ * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu
+ * 0's portal.) */
+static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
+static struct cpumask want_shared __initdata; /* cpus requested with "s" */
+
+static int __init parse_bportals(char *str)
+{
+	return parse_portals_bootarg(str, &want_shared, &want_unshared,
+				     "bportals");
+}
+__setup("bportals=", parse_bportals);
+
+static void __cold bman_offline_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (p) {
+		pcfg = bman_get_bm_portal_config(p);
+		if (pcfg)
+			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+	}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cold bman_online_cpu(unsigned int cpu)
+{
+	struct bman_portal *p = (struct bman_portal *)affine_bportals[cpu];
+	const struct bm_portal_config *pcfg;
+
+	if (p) {
+		pcfg = bman_get_bm_portal_config(p);
+		if (pcfg)
+			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+	}
+}
+
+static int __cold bman_hotplug_cpu_callback(struct notifier_block *nfb,
+					    unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		bman_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		bman_offline_cpu(cpu);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+	.notifier_call = bman_hotplug_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static int __cold bman_portal_probe(struct platform_device *of_dev)
+{
+	struct device *dev = &of_dev->dev;
+	struct device_node *node = dev->of_node;
+	struct bm_portal_config *pcfg;
+	int i, irq, ret;
+
+	if (!of_device_is_available(node))
+		return -ENODEV;
+
+	if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
+		of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
+		bman_ip_rev = BMAN_REV10;
+		bman_pool_max = 64;
+	} else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
+		of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
+		bman_ip_rev = BMAN_REV20;
+		bman_pool_max = 8;
+	} else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0") ||
+		   of_device_is_compatible(node, "fsl,bman-portal-2.1.1") ||
+		   of_device_is_compatible(node, "fsl,bman-portal-2.1.2") ||
+		   of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
+		bman_ip_rev = BMAN_REV21;
+		bman_pool_max = 64;
+	}
+
+	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg) {
+		dev_err(dev, "Can't allocate portal config\n");
+		return -ENOMEM;
+	}
+
+	for (i = DPA_PORTAL_CE; i <= DPA_PORTAL_CI; i++) {
+		ret = of_address_to_resource(node, i, pcfg->addr_phys + i);
+		if (ret < 0) {
+			dev_err(dev, "Can't get %s property 'reg::%d'\n",
+				node->full_name, i);
+			return ret;
+		}
+		ret = devm_request_resource(dev, &iomem_resource,
+					    pcfg->addr_phys + i);
+		if (ret < 0)
+			return ret;
+		pcfg->addr_virt[i] = devm_ioremap_prot(dev,
+					pcfg->addr_phys[i].start,
+					resource_size(pcfg->addr_phys + i),
+					flags[i]);
+		if (!pcfg->addr_virt[i])
+			return -ENXIO;
+	}
+
+	pcfg->public_cfg.cpu = -1;
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq == NO_IRQ) {
+		dev_err(dev, "Can't get %s property 'interrupts'\n",
+			node->full_name);
+		return -ENXIO;
+	}
+	pcfg->public_cfg.irq = irq;
+
+	bman_depletion_fill(&pcfg->public_cfg.mask);
+
+	list_add_tail(&pcfg->list, &unused_pcfgs);
+
+	return 0;
+};
+
+static int __cold bman_portal_remove(struct platform_device *of_dev)
+{
+	return 0;
+};
+
+static const struct of_device_id bman_portal_ids[] = {
+	{
+		.compatible = "fsl,bman-portal",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = bman_portal_ids,
+	},
+	.probe = bman_portal_probe,
+	.remove = bman_portal_remove,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+	int _errno;
+	struct cpumask slave_cpus;
+	struct cpumask unshared_cpus = *cpu_none_mask;
+	struct cpumask shared_cpus = *cpu_none_mask;
+	LIST_HEAD(unshared_pcfgs);
+	LIST_HEAD(shared_pcfgs);
+	struct bm_portal_config *pcfg;
+	struct bman_portal *p;
+	int cpu;
+	struct cpumask offline_cpus;
+
+	_errno = platform_driver_register(drv);
+	if (_errno < 0)
+		return _errno;
+
+/* Initialise the BMan driver. The meat of this function deals with portals. The
+ * following describes the flow of portal-handling; the code "steps" refer to
+ * this description;
+ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
+ *    ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
+ *    bound).
+ * 2. The "want_shared" and "want_unshared" lists (as filled by the
+ *    "bportals=[...]" bootarg) are processed, allocating portals and assigning
+ *    them to cpus, placing them in the relevant list and setting ::cpu as
+ *    appropriate. If no "bportals" bootarg was present, the default is to try to
+ *    assign portals to all online cpus at the time of driver initialisation.
+ *    Any failure to allocate portals (when parsing the "want" lists or when
+ *    using default behaviour) will be silently tolerated (the "fixup" logic in
+ *    step 3 will determine what happens in this case).
+ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
+ *    sharing and sharing is required (because not all cpus have been assigned
+ *    portals), then one portal will be marked for sharing. Conversely, if no
+ *    sharing is required, any portals marked for sharing will not be shared. It
+ *    may be that sharing occurs when it wasn't expected, if portal allocation
+ *    failed to honour all the requested assignments (including the default
+ *    assignments if no bootarg is present).
+ * 4. Unshared portals are initialised on their respective cpus.
+ * 5. Shared portals are initialised on their respective cpus.
+ * 6. Each remaining cpu is initialised to slave to one of the shared portals,
+ *    which are selected in a round-robin fashion.
+ */
+	/* Step 2. */
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, &want_shared)) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+			cpumask_set_cpu(cpu, &shared_cpus);
+		}
+		if (cpumask_test_cpu(cpu, &want_unshared)) {
+			if (cpumask_test_cpu(cpu, &shared_cpus))
+				continue;
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+		/* Default, give an unshared portal to each online cpu */
+		for_each_possible_cpu(cpu) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	/* Step 3. */
+	cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+	cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+	if (cpumask_empty(&slave_cpus)) {
+		/* No sharing required */
+		if (!list_empty(&shared_pcfgs)) {
+			/* Migrate "shared" to "unshared" */
+			cpumask_or(&unshared_cpus, &unshared_cpus,
+				   &shared_cpus);
+			cpumask_clear(&shared_cpus);
+			list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+			INIT_LIST_HEAD(&shared_pcfgs);
+		}
+	} else {
+		/* Sharing required */
+		if (list_empty(&shared_pcfgs)) {
+			/* Migrate one "unshared" to "shared" */
+			pcfg = get_pcfg(&unshared_pcfgs);
+			if (!pcfg) {
+				pr_crit("No portals available!\n");
+				return 0;
+			}
+			cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+			cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+		}
+	}
+	/* Step 4. */
+	list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 0;
+		p = init_pcfg(pcfg);
+	}
+	/* Step 5. */
+	list_for_each_entry(pcfg, &shared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 1;
+		p = init_pcfg(pcfg);
+		if (p)
+			shared_portals[num_shared_portals++] = p;
+	}
+	/* Step 6. */
+	if (!cpumask_empty(&slave_cpus))
+		for_each_cpu(cpu, &slave_cpus)
+			init_slave(cpu);
+	pr_info("Portals initialised\n");
+	cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+	for_each_cpu(cpu, &offline_cpus)
+		bman_offline_cpu(cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+#endif
+
+	bman_seed_bpid_range(0, bman_pool_max);
+
+	return 0;
+}
+
+module_driver(bman_portal_driver,
+	      bman_portal_driver_register, platform_driver_unregister);
diff --git a/linux/drivers/soc/fsl/qbman/bman_priv.h b/linux/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 0000000..e87f17a
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,136 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+	bm_isr_status = 0,
+	bm_isr_enable = 1,
+	bm_isr_disable = 2,
+	bm_isr_inhibit = 3
+};
+
+#ifdef CONFIG_FSL_BMAN
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to BMan CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree). */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+#define BM_POOL_THRESH_SW_ENTER 0
+#define BM_POOL_THRESH_SW_EXIT	1
+#define BM_POOL_THRESH_HW_ENTER 2
+#define BM_POOL_THRESH_HW_EXIT	3
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+
+#endif /* CONFIG_FSL_BMAN */
+
+#if defined(CONFIG_FSL_BMAN_PORTAL) || defined(CONFIG_FSL_BMAN_PORTAL_MODULE)
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */
+
+struct bm_portal_config {
+	/* Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited. */
+	__iomem void *addr_virt[2];
+#ifndef __rtems__
+	struct resource addr_phys[2];
+	/* Allow these to be joined in lists */
+	struct list_head list;
+#endif /* __rtems__ */
+	/* User-visible portal configuration settings */
+	struct bman_portal_config public_cfg;
+};
+
+/* Hooks from bman_driver.c in to bman_high.c */
+struct bman_portal *bman_create_portal(
+				       struct bman_portal *portal,
+				       const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_portal(
+			const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+								int cpu);
+void bman_destroy_portal(struct bman_portal *bm);
+
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
+ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
+ * might fail (if the buffer pool is depleted). So this value provides some
+ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
+ * are requested at once or if h/w has been tested a couple of times without
+ * luck. The _HIGH value: when bman_release() is called and the stockpile
+ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
+ * the release ring is full). So this value provides some "stagger" so that
+ * ring-access is retried a couple of times prior to the API returning a
+ * failure. The following *must* be true;
+ *   BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
+ *     (to avoid thrashing)
+ *   BMAN_STOCKPILE_SZ >= 16
+ *     (as the release logic expects to either send 8 buffers to hw prior to
+ *     adding the given buffers to the stockpile or add the buffers to the
+ *     stockpile before sending 8 to hw, as the API must be an all-or-nothing
+ *     success/fail.)
+ */
+#define BMAN_STOCKPILE_SZ   16u /* number of bufs in per-pool cache */
+#define BMAN_STOCKPILE_LOW  2u	/* when fill is <= this, acquire from hw */
+#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
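+
+/* Worked check of the constraints above (editorial note): with these values,
+ * BMAN_STOCKPILE_HIGH - BMAN_STOCKPILE_LOW = 14 - 2 = 12 > 8 and
+ * BMAN_STOCKPILE_SZ = 16 >= 16, so a release-to-hw always moves exactly 8
+ * bufs and still leaves room for the caller's (at most 8) buffers. */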
+
+/*************************************************/
+/*   BMan s/w corenet portal, low-level i/face	 */
+/*************************************************/
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE	(BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write". */
+#define bm_isr_status_read(bm)		__bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m)	__bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm)		__bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v)	__bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm)		__bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v)	__bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm)		__bm_isr_write(bm, bm_isr_inhibit, 0)
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
+#endif	/* CONFIG_FSL_BMAN_PORTAL* */
diff --git a/linux/drivers/soc/fsl/qbman/bman_test.c b/linux/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 0000000..154b737
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,60 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+	int loop = 1;
+
+	while (loop--)
+		bman_test_api();
+#endif
+#ifdef CONFIG_FSL_BMAN_TEST_THRESH
+	bman_test_thresh();
+#endif
+	return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test.h b/linux/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 0000000..9c51c38
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,34 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+void bman_test_api(void);
+void bman_test_thresh(void);
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_api.c b/linux/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 0000000..5585bdf
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,188 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+/*************/
+/* constants */
+/*************/
+
+#define PORTAL_OPAQUE	((void *)0xf00dbeef)
+#define POOL_OPAQUE	((void *)0xdeadabba)
+#define NUM_BUFS	93
+#define LOOPS		3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
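+/* The mask above keeps only the 40 LS-bits of a buffer address; see the
+ * explanation in bufs_cmp() below. */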
+
+/***************/
+/* global vars */
+/***************/
+
+static struct bman_pool *pool;
+static int depleted;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+/* Predeclare the callback so we can instantiate pool parameters */
+static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
+
+/**********************/
+/* internal functions */
+/**********************/
+
+static void bufs_init(void)
+{
+	int i;
+
+	for (i = 0; i < NUM_BUFS; i++)
+		bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+	bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+	if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
+
+		/* On SoCs with BMan revision 2.0, BMan only respects the 40
+		 * LS-bits of buffer addresses, masking off the upper 8-bits on
+		 * release commands. The API provides for 48-bit addresses
+		 * because some SoCs support all 48-bits. When generating
+		 * garbage addresses for testing, we either need to zero the
+		 * upper 8-bits when releasing to BMan (otherwise we'll be
+		 * disappointed when the buffers we acquire back from BMan
+		 * don't match), or we need to mask the upper 8-bits off when
+		 * comparing. We do the latter.
+		 */
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
+				< (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+			return -1;
+		if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
+				> (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+			return 1;
+	} else {
+		if (bm_buffer_get64(a) < bm_buffer_get64(b))
+			return -1;
+		if (bm_buffer_get64(a) > bm_buffer_get64(b))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void bufs_confirm(void)
+{
+	int i, j;
+
+	for (i = 0; i < NUM_BUFS; i++) {
+		int matches = 0;
+
+		for (j = 0; j < NUM_BUFS; j++)
+			if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+				matches++;
+		BUG_ON(matches != 1);
+	}
+}
+
+/********/
+/* test */
+/********/
+
+static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
+			void *pool_ctx, int __depleted)
+{
+	BUG_ON(__pool != pool);
+	BUG_ON(pool_ctx != POOL_OPAQUE);
+	depleted = __depleted;
+}
+
+void bman_test_api(void)
+{
+	struct bman_pool_params pparams = {
+		.flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
+		.cb = depletion_cb,
+		.cb_ctx = POOL_OPAQUE,
+	};
+	int i, loops = LOOPS;
+
+	bufs_init();
+
+	pr_info("	--- Starting high-level test ---\n");
+
+	pool = bman_new_pool(&pparams);
+	BUG_ON(!pool);
+
+	/*******************/
+	/* Release buffers */
+	/*******************/
+do_loop:
+	i = 0;
+	while (i < NUM_BUFS) {
+		u32 flags = BMAN_RELEASE_FLAG_WAIT;
+		int num = 8;
+
+		if ((i + num) > NUM_BUFS)
+			num = NUM_BUFS - i;
+		if ((i + num) == NUM_BUFS)
+			flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
+		if (bman_release(pool, bufs_in + i, num, flags))
+			panic("bman_release() failed\n");
+		i += num;
+	}
+
+	/*******************/
+	/* Acquire buffers */
+	/*******************/
+	while (i > 0) {
+		int tmp, num = 8;
+
+		if (num > i)
+			num = i;
+		tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
+		BUG_ON(tmp != num);
+		i -= num;
+	}
+	i = bman_acquire(pool, NULL, 1, 0);
+	BUG_ON(i > 0);
+
+	bufs_confirm();
+
+	if (--loops)
+		goto do_loop;
+
+	/************/
+	/* Clean up */
+	/************/
+	bman_free_pool(pool);
+	pr_info("	--- Finished high-level test ---\n");
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_test_thresh.c b/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
new file mode 100644
index 0000000..c0f045b
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_test_thresh.c
@@ -0,0 +1,216 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+/* Test constants */
+#define TEST_NUMBUFS	129728
+#define TEST_EXIT	129536
+#define TEST_ENTRY	129024
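+/* Editorial note on the values above: TEST_NUMBUFS > TEST_EXIT > TEST_ENTRY,
+ * so seeding the pool leaves it above the SW-exit threshold (not depleted),
+ * while the drainer later empties it below the SW-entry threshold. That is
+ * why each thread expects exactly one depletion-entry callback and no
+ * depletion-exit callback (checked at the end of bman_test_thresh()). */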
+
+struct affine_test_data {
+	struct task_struct *t;
+	int cpu;
+#ifndef __rtems__
+	int expect_affinity;
+#endif /* __rtems__ */
+	int drain;
+	int num_enter;
+	int num_exit;
+	struct list_head node;
+	struct completion wakethread;
+	struct completion wakeparent;
+};
+
+static void cb_depletion(struct bman_portal *portal,
+			struct bman_pool *pool,
+			void *opaque,
+			int depleted)
+{
+	struct affine_test_data *data = opaque;
+	int c = smp_processor_id();
+
+	pr_info("%s(): bpid=%d, depleted=%d, cpu=%d, original=%d\n", __func__,
+		bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
+	/* We should be executing on the CPU of the thread that owns the pool,
+	 * provided that CPU has an affine portal (ie. it isn't slaved). */
+#ifndef __rtems__
+	BUG_ON((c != data->cpu) && data->expect_affinity);
+	BUG_ON((c == data->cpu) && !data->expect_affinity);
+#endif /* __rtems__ */
+	if (depleted)
+		data->num_enter++;
+	else
+		data->num_exit++;
+}
+
+/* Params used to set up a pool, this also dynamically allocates a BPID */
+static const struct bman_pool_params params_nocb = {
+	.flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
+	.thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
+};
+
+/* Params used to set up each cpu's pool with callbacks enabled */
+static struct bman_pool_params params_cb = {
+	.bpid = 0, /* will be replaced to match pool_nocb */
+	.flags = BMAN_POOL_FLAG_DEPLETION,
+	.cb = cb_depletion
+};
+
+static struct bman_pool *pool_nocb;
+static LIST_HEAD(threads);
+
+static int affine_test(void *__data)
+{
+	struct bman_pool *pool;
+	struct affine_test_data *data = __data;
+	struct bman_pool_params my_params = params_cb;
+
+	pr_info("Thread %d: starting\n", data->cpu);
+	/* create the pool */
+	my_params.cb_ctx = data;
+	pool = bman_new_pool(&my_params);
+	BUG_ON(!pool);
+	complete(&data->wakeparent);
+	wait_for_completion(&data->wakethread);
+	init_completion(&data->wakethread);
+
+	/* if we're the drainer, we get signalled for that */
+	if (data->drain) {
+		struct bm_buffer buf;
+		int ret;
+
+		pr_info("Thread %d: draining...\n", data->cpu);
+		do {
+			ret = bman_acquire(pool, &buf, 1, 0);
+		} while (ret > 0);
+		pr_info("Thread %d: draining done.\n", data->cpu);
+		complete(&data->wakeparent);
+		wait_for_completion(&data->wakethread);
+		init_completion(&data->wakethread);
+	}
+
+	/* cleanup */
+	bman_free_pool(pool);
+	while (!kthread_should_stop())
+		cpu_relax();
+	pr_info("Thread %d: exiting\n", data->cpu);
+	return 0;
+}
+
+static struct affine_test_data *start_affine_test(int cpu, int drain)
+{
+	struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+	if (!data)
+		return NULL;
+	data->cpu = cpu;
+#ifndef __rtems__
+	data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
+#endif /* __rtems__ */
+	data->drain = drain;
+	data->num_enter = 0;
+	data->num_exit = 0;
+	init_completion(&data->wakethread);
+	init_completion(&data->wakeparent);
+	list_add_tail(&data->node, &threads);
+	data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
+#ifndef __rtems__
+	BUG_ON(IS_ERR(data->t));
+#else /* __rtems__ */
+	BUG_ON(data->t == NULL);
+#endif /* __rtems__ */
+	kthread_bind(data->t, cpu);
+	wake_up_process(data->t);
+	return data;
+}
+
+void bman_test_thresh(void)
+{
+	int loop = TEST_NUMBUFS;
+	int ret, num_cpus = 0;
+	struct affine_test_data *data, *drainer = NULL;
+
+	pr_info("%s(): Start\n", __func__);
+
+	/* allocate a BPID and seed it */
+	pool_nocb = bman_new_pool(&params_nocb);
+	BUG_ON(!pool_nocb);
+	while (loop--) {
+		struct bm_buffer buf;
+
+		bm_buffer_set64(&buf, 0x0badbeef + loop);
+		ret = bman_release(pool_nocb, &buf, 1,
+					BMAN_RELEASE_FLAG_WAIT);
+		BUG_ON(ret);
+	}
+	while (!bman_rcr_is_empty())
+		cpu_relax();
+	pr_info("%s(): Buffers are in\n", __func__);
+
+	/* create threads and wait for them to create pools */
+	params_cb.bpid = bman_get_params(pool_nocb)->bpid;
+#ifndef __rtems__
+	for_each_cpu(loop, cpu_online_mask) {
+#else /* __rtems__ */
+	for (loop = 0; loop < rtems_get_processor_count(); ++loop) {
+#endif /* __rtems__ */
+		data = start_affine_test(loop, drainer ? 0 : 1);
+		BUG_ON(!data);
+		if (!drainer)
+			drainer = data;
+		num_cpus++;
+		wait_for_completion(&data->wakeparent);
+	}
+
+	/* signal the drainer to start draining */
+	complete(&drainer->wakethread);
+	wait_for_completion(&drainer->wakeparent);
+	init_completion(&drainer->wakeparent);
+
+	/* tear down */
+	list_for_each_entry_safe(data, drainer, &threads, node) {
+		complete(&data->wakethread);
+		ret = kthread_stop(data->t);
+		BUG_ON(ret);
+		list_del(&data->node);
+		/* check that we get the expected callbacks (and no others) */
+		BUG_ON(data->num_enter != 1);
+		BUG_ON(data->num_exit != 0);
+		kfree(data);
+	}
+	bman_free_pool(pool_nocb);
+
+	pr_info("%s(): Done\n", __func__);
+}
diff --git a/linux/drivers/soc/fsl/qbman/bman_utils.c b/linux/drivers/soc/fsl/qbman/bman_utils.c
new file mode 100644
index 0000000..c6fa0b3
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/bman_utils.c
@@ -0,0 +1,76 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* BMan APIs are front-ends to the common code */
+
+static DECLARE_DPAA_RESOURCE(bpalloc); /* BPID allocator */
+
+/* BPID allocator front-end */
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&bpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(bman_alloc_bpid_range);
+
+static int bp_cleanup(u32 bpid)
+{
+	return bman_shutdown_pool(bpid) == 0;
+}
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&bpalloc,
+						  bpid, count, bp_cleanup);
+
+	if (total_invalid)
+		pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
+			bpid, bpid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(bman_release_bpid_range);
+
+void bman_seed_bpid_range(u32 bpid, u32 count)
+{
+	dpaa_resource_seed(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_seed_bpid_range);
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+	return dpaa_resource_reserve(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_reserve_bpid_range);
diff --git a/linux/drivers/soc/fsl/qbman/dpaa_resource.c b/linux/drivers/soc/fsl/qbman/dpaa_resource.c
new file mode 100644
index 0000000..3f7b74b
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/dpaa_resource.c
@@ -0,0 +1,363 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(CONFIG_FSL_BMAN_PORTAL) ||		\
+    defined(CONFIG_FSL_BMAN_PORTAL_MODULE) ||	\
+    defined(CONFIG_FSL_QMAN_PORTAL) ||		\
+    defined(CONFIG_FSL_QMAN_PORTAL_MODULE)
+#include "dpaa_sys.h"
+
+/* The allocator is a (possibly-empty) list of these */
+struct dpaa_resource_node {
+	struct list_head list;
+	u32 base;
+	u32 num;
+	/* refcount and is_alloced are only set
+	   when the node is in the used list */
+	unsigned int refcount;
+	int is_alloced;
+};
+
+#ifdef DPAA_RESOURCE_DEBUG
+#define DPRINT pr_info
+static void DUMP(struct dpaa_resource *alloc)
+{
+	int off = 0;
+	char buf[256];
+	struct dpaa_resource_node *p;
+
+	pr_info("Free Nodes\n");
+	list_for_each_entry(p, &alloc->free, list) {
+		if (off < 255)
+			off += snprintf(buf + off, 255-off, "{%d,%d}",
+				p->base, p->base + p->num - 1);
+	}
+	pr_info("%s\n", buf);
+
+	off = 0;
+	pr_info("Used Nodes\n");
+	list_for_each_entry(p, &alloc->used, list) {
+		if (off < 255)
+			off += snprintf(buf + off, 255-off, "{%d,%d}",
+				p->base, p->base + p->num - 1);
+	}
+	pr_info("%s\n", buf);
+}
+#else
+#define DPRINT(x...)
+#define DUMP(a)
+#endif
+
+int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
+		      u32 count, u32 align, int partial)
+{
+	struct dpaa_resource_node *i = NULL, *next_best = NULL,
+		*used_node = NULL;
+	u32 base, next_best_base = 0, num = 0, next_best_num = 0;
+	struct dpaa_resource_node *margin_left, *margin_right;
+
+	*result = (u32)-1;
+	DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
+	DUMP(alloc);
+	/* If 'align' is 0, it should behave as though it was 1 */
+	if (!align)
+		align = 1;
+	margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
+	if (!margin_left)
+		goto err;
+	margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
+	if (!margin_right) {
+		kfree(margin_left);
+		goto err;
+	}
+	spin_lock_irq(&alloc->lock);
+	list_for_each_entry(i, &alloc->free, list) {
+		base = (i->base + align - 1) / align;
+		base *= align;
+		if ((base - i->base) >= i->num)
+			/* alignment is impossible, regardless of count */
+			continue;
+		num = i->num - (base - i->base);
+		if (num >= count) {
+			/* this one will do nicely */
+			num = count;
+			goto done;
+		}
+		if (num > next_best_num) {
+			next_best = i;
+			next_best_base = base;
+			next_best_num = num;
+		}
+	}
+	if (partial && next_best) {
+		i = next_best;
+		base = next_best_base;
+		num = next_best_num;
+	} else
+		i = NULL;
+done:
+	if (i) {
+		if (base != i->base) {
+			margin_left->base = i->base;
+			margin_left->num = base - i->base;
+			list_add_tail(&margin_left->list, &i->list);
+		} else
+			kfree(margin_left);
+		if ((base + num) < (i->base + i->num)) {
+			margin_right->base = base + num;
+			margin_right->num = (i->base + i->num) -
+						(base + num);
+			list_add(&margin_right->list, &i->list);
+		} else
+			kfree(margin_right);
+		list_del(&i->list);
+		kfree(i);
+		*result = base;
+	}
+	spin_unlock_irq(&alloc->lock);
+err:
+	DPRINT("returning %d\n", i ? num : -ENOMEM);
+	DUMP(alloc);
+	if (!i)
+		return -ENOMEM;
+
+	/* Add the allocation to the used list with a refcount of 1 */
+	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+	if (!used_node)
+		return -ENOMEM;
+	used_node->base = *result;
+	used_node->num = num;
+	used_node->refcount = 1;
+	used_node->is_alloced = 1;
+	list_add_tail(&used_node->list, &alloc->used);
+	return (int)num;
+}
+EXPORT_SYMBOL(dpaa_resource_new);
+
+/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
+ * forcing error-handling on to users in the deallocation path. */
+static void _dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
+				u32 count)
+{
+	struct dpaa_resource_node *i,
+		*node = kmalloc(sizeof(*node), GFP_ATOMIC);
+
+	BUG_ON(!node);
+	DPRINT("release_range(%d,%d)\n", base_id, count);
+	DUMP(alloc);
+	BUG_ON(!count);
+	spin_lock_irq(&alloc->lock);
+
+	node->base = base_id;
+	node->num = count;
+	list_for_each_entry(i, &alloc->free, list) {
+		if (i->base >= node->base) {
+			/* BUG_ON(any overlapping) */
+			BUG_ON(i->base < (node->base + node->num));
+			list_add_tail(&node->list, &i->list);
+			goto done;
+		}
+	}
+	list_add_tail(&node->list, &alloc->free);
+done:
+	/* Merge to the left */
+	i = list_entry(node->list.prev, struct dpaa_resource_node, list);
+	if (node->list.prev != &alloc->free) {
+		BUG_ON((i->base + i->num) > node->base);
+		if ((i->base + i->num) == node->base) {
+			node->base = i->base;
+			node->num += i->num;
+			list_del(&i->list);
+			kfree(i);
+		}
+	}
+	/* Merge to the right */
+	i = list_entry(node->list.next, struct dpaa_resource_node, list);
+	if (node->list.next != &alloc->free) {
+		BUG_ON((node->base + node->num) > i->base);
+		if ((node->base + node->num) == i->base) {
+			node->num += i->num;
+			list_del(&i->list);
+			kfree(i);
+		}
+	}
+	spin_unlock_irq(&alloc->lock);
+	DUMP(alloc);
+}
+
+static void dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
+			       u32 count)
+{
+	struct dpaa_resource_node *i = NULL;
+
+	spin_lock_irq(&alloc->lock);
+
+	/* First find the node in the used list and decrement its ref count */
+	list_for_each_entry(i, &alloc->used, list) {
+		if (i->base == base_id && i->num == count) {
+			--i->refcount;
+			if (i->refcount == 0) {
+				list_del(&i->list);
+				spin_unlock_irq(&alloc->lock);
+				if (i->is_alloced)
+					_dpaa_resource_free(alloc, base_id,
+							    count);
+				kfree(i);
+				return;
+			}
+			spin_unlock_irq(&alloc->lock);
+			return;
+		}
+	}
+	/* Couldn't find the allocation */
+	pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
+	       base_id, count);
+	spin_unlock_irq(&alloc->lock);
+}
+
+/* Same as free but no previous allocation checking is needed */
+void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count)
+{
+	_dpaa_resource_free(alloc, base_id, count);
+}
+EXPORT_SYMBOL(dpaa_resource_seed);
+
+/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
+ * desired range is not available, or 0 for success
+ */
+int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num)
+{
+	struct dpaa_resource_node *i = NULL, *used_node;
+
+	DPRINT("alloc_reserve(%d,%d)\n", base, num);
+	DUMP(alloc);
+
+	spin_lock_irq(&alloc->lock);
+
+	/* Check for the node in the used list.
+	   If found, increase its refcount */
+	list_for_each_entry(i, &alloc->used, list) {
+		if ((i->base == base) && (i->num == num)) {
+			++i->refcount;
+			spin_unlock_irq(&alloc->lock);
+			return 0;
+		}
+		if ((base >= i->base) && (base < (i->base + i->num))) {
+			/* This is an attempt to reserve a region that was
+			   already reserved or alloced with a different
+			   base or num */
+			pr_err("Cannot reserve %d - %d, it overlaps with"
+			       " existing reservation from %d - %d\n",
+			       base, base + num - 1, i->base,
+			       i->base + i->num - 1);
+			spin_unlock_irq(&alloc->lock);
+			return -1;
+		}
+	}
+	/* Check to make sure this ID isn't in the free list */
+	list_for_each_entry(i, &alloc->free, list) {
+		if ((base >= i->base) && (base < (i->base + i->num))) {
+			/* yep, the reservation is within this node */
+			pr_err("Cannot reserve %d - %d, it overlaps with"
+			       " free range %d - %d and must be alloced\n",
+			       base, base + num - 1,
+			       i->base, i->base + i->num - 1);
+			spin_unlock_irq(&alloc->lock);
+			return -1;
+		}
+	}
+	/* Add the allocation to the used list with a refcount of 1 */
+	used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+	if (!used_node) {
+		spin_unlock_irq(&alloc->lock);
+		return -ENOMEM;
+
+	}
+	used_node->base = base;
+	used_node->num = num;
+	used_node->refcount = 1;
+	used_node->is_alloced = 0;
+	list_add_tail(&used_node->list, &alloc->used);
+	spin_unlock_irq(&alloc->lock);
+	return 0;
+}
+EXPORT_SYMBOL(dpaa_resource_reserve);
+
+/* This is a sort-of-conditional dpaa_resource_free() routine. E.g. when
+ * releasing FQIDs (probably from user-space), it can filter out those
+ * that aren't in the OOS state (better to leak a h/w resource than to
+ * crash). This function returns the number of invalid IDs that were not
+ * released.
+ */
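+/* Worked example (illustrative only, not part of the original import):
+ * dpaa_resource_release(alloc, 10, 3, is_valid), with is_valid() rejecting
+ * ID 10 but accepting 11 and 12, skips the invalid ID, frees the sub-range
+ * [11..12] back to the allocator and returns 1 (one leaked ID). */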
+u32 dpaa_resource_release(struct dpaa_resource *alloc,
+			  u32 id, u32 count, int (*is_valid)(u32 id))
+{
+	int valid_mode = 0;
+	u32 loop = id, total_invalid = 0;
+
+	while (loop < (id + count)) {
+		int isvalid = is_valid ? is_valid(loop) : 1;
+
+		if (!valid_mode) {
+			/* We're looking for a valid ID to terminate an invalid
+			 * range */
+			if (isvalid) {
+				/* We finished a range of invalid IDs, a valid
+				 * range is now underway */
+				valid_mode = 1;
+				count -= (loop - id);
+				id = loop;
+			} else
+				total_invalid++;
+		} else {
+			/* We're looking for an invalid ID to terminate a
+			 * valid range */
+			if (!isvalid) {
+				/* Release the range of valid IDs, an invalid
+				 * range is now underway */
+				if (loop > id)
+					dpaa_resource_free(alloc, id,
+							   loop - id);
+				valid_mode = 0;
+			}
+		}
+		loop++;
+	}
+	/* Release any unterminated range of valid IDs */
+	if (valid_mode && count)
+		dpaa_resource_free(alloc, id, count);
+	return total_invalid;
+}
+EXPORT_SYMBOL(dpaa_resource_release);
+#endif	/* CONFIG_FSL_*MAN_PORTAL* */
diff --git a/linux/drivers/soc/fsl/qbman/dpaa_sys.h b/linux/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 0000000..85f8780
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,292 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#endif
+
+#include <asm/pgtable.h>
+#ifdef __rtems__
+#include <asm/cache.h>
+#include <asm/mpc85xx.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/rbtree.h>
+#include <bsp/linker-symbols.h>
+#define	DPAA_NOCACHENOLOAD_ALIGNED_REGION(designator, size) \
+    BSP_NOCACHENOLOAD_SUBSECTION(designator) __aligned(size) \
+    uint8_t designator[size]
+#endif /* __rtems__ */
+
+struct dpaa_resource {
+	struct list_head free;
+	spinlock_t lock;
+	struct list_head used;
+};
+
+#define DECLARE_DPAA_RESOURCE(name)			\
+struct dpaa_resource name = {				\
+	.free = {					\
+		.prev = &name.free,			\
+		.next = &name.free			\
+	},						\
+	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
+	.used = {					\
+		 .prev = &name.used,			\
+		 .next = &name.used			\
+	}						\
+}
+
+int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
+		      u32 count, u32 align, int partial);
+u32 dpaa_resource_release(struct dpaa_resource *alloc,
+			  u32 id, u32 count, int (*is_valid)(u32 id));
+void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count);
+int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num);
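+/* Minimal usage sketch (hypothetical; names such as "my_ids" are illustrative
+ * only, loosely mirroring how bman_utils.c in this import drives the
+ * allocator):
+ *
+ *	static DECLARE_DPAA_RESOURCE(my_ids);
+ *
+ *	dpaa_resource_seed(&my_ids, 0, 64);	    // IDs 0..63 become free
+ *	u32 base;
+ *	int n = dpaa_resource_new(&my_ids, &base, 8, 8, 0); // 8 IDs, 8-aligned
+ *	if (n > 0)
+ *		dpaa_resource_release(&my_ids, base, n, NULL); // return them
+ */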
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPA_PORTAL_CE 0
+#define DPA_PORTAL_CI 1
+
+/* Misc inline assists */
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
+#ifndef __rtems__
+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
+#else /* __rtems__ */
+  #ifdef __PPC_CPU_E6500__
+    #define lwsync() ppc_light_weight_synchronize()
+  #else
+    #define lwsync() ppc_synchronize_data()
+  #endif
+#endif /* __rtems__ */
+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
+#define dcbi(p) dcbf(p)
+#ifdef CONFIG_PPC_E500MC
+#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
+#define dcbz_64(p) dcbzl(p)
+#define dcbf_64(p) dcbf(p)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+	do { \
+		dcbi(p); \
+		dcbt_ro(p); \
+	} while (0)
+#else
+#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
+#define dcbz_64(p) \
+	do { \
+		dcbz((u32)p + 32);	\
+		dcbz(p);	\
+	} while (0)
+#define dcbf_64(p) \
+	do { \
+		dcbf((u32)p + 32); \
+		dcbf(p); \
+	} while (0)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+	do { \
+		dcbi(p); \
+		dcbi((u32)p + 32); \
+		dcbt_ro(p); \
+		dcbt_ro((u32)p + 32); \
+	} while (0)
+#endif /* CONFIG_PPC_E500MC */
+
+static inline u64 mfatb(void)
+{
+	u32 hi, lo, chk;
+
+	do {
+		hi = mfspr(SPRN_ATBU);
+		lo = mfspr(SPRN_ATBL);
+		chk = mfspr(SPRN_ATBU);
+	} while (unlikely(hi != chk));
+	return ((u64)hi << 32) | (u64)lo;
+}
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+#define DPA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPA_ASSERT(x)
+#endif
+
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+	u32 *__dest = dest;
+	const u32 *__src = src;
+	size_t __sz = sz >> 2;
+
+	BUG_ON((unsigned long)dest & 0x3);
+	BUG_ON((unsigned long)src & 0x3);
+	BUG_ON(sz & 0x3);
+	while (__sz--)
+		*(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#endif
+
+/* RB-trees */
+
+/* We encapsulate RB-trees so that it's easier to use non-linux forms in
+ * non-linux systems. This also encapsulates the extra plumbing that linux code
+ * usually provides when using RB-trees. This encapsulation assumes that the
+ * data type held by the tree is u32. */
+
+struct dpa_rbtree {
+	struct rb_root root;
+};
+#define DPA_RBTREE { .root = RB_ROOT }
+
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+	tree->root = RB_ROOT;
+}
+
+#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+	struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
+	while (*p) { \
+		u32 item; \
+		parent = *p; \
+		item = rb_entry(parent, type, node_field)->val_field; \
+		if (obj->val_field < item) \
+			p = &parent->rb_left; \
+		else if (obj->val_field > item) \
+			p = &parent->rb_right; \
+		else \
+			return -EBUSY; \
+	} \
+	rb_link_node(&obj->node_field, parent, p); \
+	rb_insert_color(&obj->node_field, &tree->root); \
+	return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+	rb_erase(&obj->node_field, &tree->root); \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+	type *ret; \
+	struct rb_node *p = tree->root.rb_node; \
+	while (p) { \
+		ret = rb_entry(p, type, node_field); \
+		if (val < ret->val_field) \
+			p = p->rb_left; \
+		else if (val > ret->val_field) \
+			p = p->rb_right; \
+		else \
+			return ret; \
+	} \
+	return NULL; \
+}
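+/* Illustrative instantiation (hypothetical names, not from this import):
+ *
+ *	struct foo { u32 id; struct rb_node node; };
+ *	IMPLEMENT_DPA_RBTREE(footree, struct foo, node, id);
+ *
+ * generates footree_push(), footree_del() and footree_find(), keyed on the
+ * u32 "id" member as described above. */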
+
+#ifndef __rtems__
+/* Bootargs */
+
+/* QMan has "qportals=" and BMan has "bportals="; they use the same syntax
+ * though: a comma-separated list of items, each item being a cpu index and/or
+ * a range of cpu indices, and each item may optionally be prefixed by "s" to
+ * indicate that the portal associated with that cpu should be shared. See
+ * bman_driver.c for more specifics. */
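+/* For illustration (a hypothetical value, not taken from this import): a
+ * bootarg such as "bportals=s0,1-3,s4" would be parsed by
+ * parse_portals_bootarg() below as a shared portal on cpu 0, unshared portals
+ * on cpus 1, 2 and 3, and a shared portal on cpu 4. */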
+static int __parse_portals_cpu(const char **s, unsigned int *cpu)
+{
+	*cpu = 0;
+	if (!isdigit(**s))
+		return -EINVAL;
+	while (isdigit(**s))
+		*cpu = *cpu * 10 + (*((*s)++) - '0');
+	return 0;
+}
+static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
+					struct cpumask *want_unshared,
+					const char *argname)
+{
+	const char *s = str;
+	unsigned int shared, cpu1, cpu2, loop;
+
+keep_going:
+	if (*s == 's') {
+		shared = 1;
+		s++;
+	} else
+		shared = 0;
+	if (__parse_portals_cpu(&s, &cpu1))
+		goto err;
+	if (*s == '-') {
+		s++;
+		if (__parse_portals_cpu(&s, &cpu2))
+			goto err;
+		if (cpu2 < cpu1)
+			goto err;
+	} else
+		cpu2 = cpu1;
+	for (loop = cpu1; loop <= cpu2; loop++)
+		cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
+	if (*s == ',') {
+		s++;
+		goto keep_going;
+	} else if ((*s == '\0') || isspace(*s))
+		return 0;
+err:
+	pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
+		(unsigned long)s - (unsigned long)str);
+	return -EINVAL;
+}
+#endif /* __rtems__ */
+#endif	/* __DPAA_SYS_H */
diff --git a/linux/drivers/soc/fsl/qbman/qman-debugfs.c b/linux/drivers/soc/fsl/qbman/qman-debugfs.c
new file mode 100644
index 0000000..ecdb6be
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman-debugfs.c
@@ -0,0 +1,1317 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2010 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "qman_priv.h"
+
+#define MAX_FQID (0x00ffffff)
+#define QM_FQD_BLOCK_SIZE     64
+#define QM_FQD_AR	      (0xC10)
+
+static u32 fqid_max;
+static u64 qman_ccsr_start;
+static u64 qman_ccsr_size;
+
+static const char * const state_txt[] = {
+	"Out of Service",
+	"Retired",
+	"Tentatively Scheduled",
+	"Truly Scheduled",
+	"Parked",
+	"Active, Active Held or Held Suspended",
+	"Unknown State 6",
+	"Unknown State 7",
+	NULL,
+};
+
+static const u8 fqd_states[] = {
+	QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
+	QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
+	QM_MCR_NP_STATE_ACTIVE};
+
+struct mask_to_text {
+	u16 mask;
+	const char *txt;
+};
+
+struct mask_filter_s {
+	u16 mask;
+	u8 filter;
+};
+
+static const struct mask_filter_s mask_filter[] = {
+	{QM_FQCTRL_PREFERINCACHE, 0},
+	{QM_FQCTRL_PREFERINCACHE, 1},
+	{QM_FQCTRL_HOLDACTIVE, 0},
+	{QM_FQCTRL_HOLDACTIVE, 1},
+	{QM_FQCTRL_AVOIDBLOCK, 0},
+	{QM_FQCTRL_AVOIDBLOCK, 1},
+	{QM_FQCTRL_FORCESFDR, 0},
+	{QM_FQCTRL_FORCESFDR, 1},
+	{QM_FQCTRL_CPCSTASH, 0},
+	{QM_FQCTRL_CPCSTASH, 1},
+	{QM_FQCTRL_CTXASTASHING, 0},
+	{QM_FQCTRL_CTXASTASHING, 1},
+	{QM_FQCTRL_ORP, 0},
+	{QM_FQCTRL_ORP, 1},
+	{QM_FQCTRL_TDE, 0},
+	{QM_FQCTRL_TDE, 1},
+	{QM_FQCTRL_CGE, 0},
+	{QM_FQCTRL_CGE, 1}
+};
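+/* Each FQ control bit appears twice in the table above: the even entry
+ * (filter = 0) is intended for the "disabled" debugfs view and the odd entry
+ * (filter = 1) for the "enabled" view handled by qman_fqd_ctrl_show();
+ * qman_fqd_prog_summary_show() below walks the table in steps of two for the
+ * same reason.  (Explanatory note, not part of the original import.) */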
+
+static const struct mask_to_text fq_ctrl_text_list[] = {
+	{
+		.mask = QM_FQCTRL_PREFERINCACHE,
+		.txt = "Prefer in cache",
+	},
+	{
+		.mask = QM_FQCTRL_HOLDACTIVE,
+		.txt =	"Hold active in portal",
+	},
+	{
+		.mask = QM_FQCTRL_AVOIDBLOCK,
+		.txt = "Avoid Blocking",
+	},
+	{
+		.mask = QM_FQCTRL_FORCESFDR,
+		.txt = "High-priority SFDRs",
+	},
+	{
+		.mask = QM_FQCTRL_CPCSTASH,
+		.txt = "CPC Stash Enable",
+	},
+	{
+		.mask = QM_FQCTRL_CTXASTASHING,
+		.txt =	"Context-A stashing",
+	},
+	{
+		.mask = QM_FQCTRL_ORP,
+		.txt =	"ORP Enable",
+	},
+	{
+		.mask = QM_FQCTRL_TDE,
+		.txt = "Tail-Drop Enable",
+	},
+	{
+		.mask = QM_FQCTRL_CGE,
+		.txt = "Congestion Group Enable",
+	},
+	{
+		.mask = 0,
+		.txt = NULL,
+	}
+};
+
+static const char *get_fqd_ctrl_text(u16 mask)
+{
+	int i = 0;
+
+	while (fq_ctrl_text_list[i].txt != NULL) {
+		if (fq_ctrl_text_list[i].mask == mask)
+			return fq_ctrl_text_list[i].txt;
+		i++;
+	}
+	return NULL;
+}
+
+static const struct mask_to_text stashing_text_list[] = {
+	{
+		.mask = QM_STASHING_EXCL_CTX,
+		.txt = "FQ Ctx Stash"
+	},
+	{
+		.mask = QM_STASHING_EXCL_DATA,
+		.txt =	"Frame Data Stash",
+	},
+	{
+		.mask = QM_STASHING_EXCL_ANNOTATION,
+		.txt = "Frame Annotation Stash",
+	},
+	{
+		.mask = 0,
+		.txt = NULL,
+	},
+};
+
+static int user_input_convert(const char __user *user_buf, size_t count,
+				unsigned long *val)
+{
+	char buf[12];
+
+	if (count > sizeof(buf) - 1)
+		return -EINVAL;
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	if (kstrtoul(buf, 0, val))
+		return -EINVAL;
+	return 0;
+}
+
+struct line_buffer_fq {
+	u32 buf[8];
+	u32 buf_cnt;
+	int line_cnt;
+};
+
+static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
+			struct seq_file *file)
+{
+	line_buf->buf[line_buf->buf_cnt] = fqid;
+	line_buf->buf_cnt++;
+	if (line_buf->buf_cnt == 8) {
+		/* Buffer is full, flush it */
+		if (line_buf->line_cnt != 0)
+			seq_puts(file, ",\n");
+		seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,"
+			"0x%06x,0x%06x,0x%06x",
+			line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
+			line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
+			line_buf->buf[6], line_buf->buf[7]);
+		line_buf->buf_cnt = 0;
+		line_buf->line_cnt++;
+	}
+}
+
+static void flush_line_buffer(struct line_buffer_fq *line_buf,
+				struct seq_file *file)
+{
+	if (line_buf->buf_cnt) {
+		int y = 0;
+
+		if (line_buf->line_cnt != 0)
+			seq_puts(file, ",\n");
+		while (y != line_buf->buf_cnt) {
+			if (y+1 == line_buf->buf_cnt)
+				seq_printf(file, "0x%06x", line_buf->buf[y]);
+			else
+				seq_printf(file, "0x%06x,", line_buf->buf[y]);
+			y++;
+		}
+		line_buf->line_cnt++;
+	}
+	if (line_buf->line_cnt)
+		seq_putc(file, '\n');
+}
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/* Query Frame Queue Non Programmable Fields */
+
+struct query_fq_np_fields_data_s {
+	u32 fqid;
+};
+static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
+	.fqid = 1,
+};
+
+static int query_fq_np_fields_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct qm_mcr_queryfq_np np;
+	struct qman_fq fq;
+
+	fq.fqid = query_fq_np_fields_data.fqid;
+	ret = qman_query_fq_np(&fq, &np);
+	if (ret)
+		return ret;
+	/* Print state */
+	seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
+			fq.fqid);
+	seq_printf(file, " force eligible pending: %s\n",
+		(np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
+	seq_printf(file, " retirement pending: %s\n",
+		(np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
+	seq_printf(file, " state: %s\n",
+		state_txt[np.state & QM_MCR_NP_STATE_MASK]);
+	seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
+	seq_printf(file, " odp_seq: %u\n", np.odp_seq);
+	seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
+	seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
+	seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
+	seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
+	seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
+	seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
+	seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
+	seq_printf(file, " is: ics_surp contains a %s\n",
+		(np.is) ? "deficit" : "surplus");
+	seq_printf(file, " ics_surp: %u\n", np.ics_surp);
+	seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
+	seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
+	seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
+	seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
+	seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
+	seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
+	seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
+	return 0;
+}
+
+static int query_fq_np_fields_open(struct inode *inode,
+					struct file *file)
+{
+	return single_open(file, query_fq_np_fields_show, NULL);
+}
+
+static ssize_t query_fq_np_fields_write(struct file *f,
+			const char __user *buf, size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	if (val > MAX_FQID)
+		return -EINVAL;
+	query_fq_np_fields_data.fqid = (u32)val;
+	return count;
+}
+
+static const struct file_operations query_fq_np_fields_fops = {
+	.owner		= THIS_MODULE,
+	.open		= query_fq_np_fields_open,
+	.read		= seq_read,
+	.write		= query_fq_np_fields_write,
+	.release	= single_release,
+};
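+/* Usage note (an assumption about the intended debugfs workflow, not text
+ * from the original import): writing a FQID to this file selects the frame
+ * queue, e.g. "echo 0x10 > qman/query_fq_np_fields" relative to a debugfs
+ * mount, and a subsequent read prints the non-programmable fields shown
+ * above. */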
+
+/* Frame Queue Programmable Fields */
+
+struct query_fq_fields_data_s {
+	u32 fqid;
+};
+
+static struct query_fq_fields_data_s query_fq_fields_data = {
+	.fqid = 1,
+};
+
+static int query_fq_fields_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct qm_fqd fqd;
+	struct qman_fq fq;
+	int i = 0;
+
+	memset(&fqd, 0, sizeof(struct qm_fqd));
+	fq.fqid = query_fq_fields_data.fqid;
+	ret = qman_query_fq(&fq, &fqd);
+	if (ret)
+		return ret;
+	seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
+			fq.fqid);
+	seq_printf(file, " orprws: %u\n", fqd.orprws);
+	seq_printf(file, " oa: %u\n", fqd.oa);
+	seq_printf(file, " olws: %u\n", fqd.olws);
+
+	seq_printf(file, " cgid: %u\n", fqd.cgid);
+
+	if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
+		seq_puts(file, " fq_ctrl: None\n");
+	else {
+		i = 0;
+		seq_puts(file, " fq_ctrl:\n");
+		while (fq_ctrl_text_list[i].txt != NULL) {
+			if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
+					fq_ctrl_text_list[i].mask)
+				seq_printf(file, "  %s\n",
+					fq_ctrl_text_list[i].txt);
+			i++;
+		}
+	}
+	seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
+	seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
+	seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
+	seq_printf(file, " td_mant: %u\n", fqd.td.mant);
+	seq_printf(file, " td_exp: %u\n", fqd.td.exp);
+
+	seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
+
+	seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
+	/* Any stashing configured */
+	if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
+		seq_puts(file, " ctx_a_stash_exclusive: None\n");
+	else {
+		seq_puts(file, " ctx_a_stash_exclusive:\n");
+		i = 0;
+		while (stashing_text_list[i].txt != NULL) {
+			if ((fqd.fq_ctrl & 0x7) & stashing_text_list[i].mask)
+				seq_printf(file, "  %s\n",
+					stashing_text_list[i].txt);
+			i++;
+		}
+	}
+	seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
+			fqd.context_a.stashing.annotation_cl);
+	seq_printf(file, " ctx_a_stash_data_cl: %u\n",
+			fqd.context_a.stashing.data_cl);
+	seq_printf(file, " ctx_a_stash_context_cl: %u\n",
+			fqd.context_a.stashing.context_cl);
+	return 0;
+}
+
+static int query_fq_fields_open(struct inode *inode,
+					struct file *file)
+{
+	return single_open(file, query_fq_fields_show, NULL);
+}
+
+static ssize_t query_fq_fields_write(struct file *f,
+			const char __user *buf, size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	if (val > MAX_FQID)
+		return -EINVAL;
+	query_fq_fields_data.fqid = (u32)val;
+	return count;
+}
+
+static const struct file_operations query_fq_fields_fops = {
+	.owner		= THIS_MODULE,
+	.open		= query_fq_fields_open,
+	.read		= seq_read,
+	.write		= query_fq_fields_write,
+	.release	= single_release,
+};
+
+/* Query WQ lengths */
+
+struct query_wq_lengths_data_s {
+	union {
+		u16 channel_wq; /* ignores wq (3 lsbits) */
+		struct {
+			u16 id:13; /* qm_channel */
+			u16 __reserved:3;
+		} __packed channel;
+	};
+};
+static struct query_wq_lengths_data_s query_wq_lengths_data;
+static int query_wq_lengths_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct qm_mcr_querywq wq;
+	int i;
+
+	memset(&wq, 0, sizeof(struct qm_mcr_querywq));
+	wq.channel.id = query_wq_lengths_data.channel.id;
+	ret = qman_query_wq(0, &wq);
+	if (ret)
+		return ret;
+	seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
+	for (i = 0; i < 8; i++)
+		/* mask out upper 4 bits since they are not part of length */
+		seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
+	return 0;
+}
+
+static int query_wq_lengths_open(struct inode *inode,
+					struct file *file)
+{
+	return single_open(file, query_wq_lengths_show, NULL);
+}
+
+static ssize_t query_wq_lengths_write(struct file *f,
+			const char __user *buf, size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	if (val > 0xfff8)
+		return -EINVAL;
+	query_wq_lengths_data.channel.id = (u16)val;
+	return count;
+}
+
+static const struct file_operations query_wq_lengths_fops = {
+	.owner		= THIS_MODULE,
+	.open		= query_wq_lengths_open,
+	.read		= seq_read,
+	.write		= query_wq_lengths_write,
+	.release	= single_release,
+};
+
+/* Query CGR */
+
+struct query_cgr_s {
+	u8 cgid;
+};
+static struct query_cgr_s query_cgr_data;
+
+static int query_cgr_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct qm_mcr_querycgr cgrd;
+	struct qman_cgr cgr;
+	int i, j;
+	u32 mask;
+
+	memset(&cgr, 0, sizeof(cgr));
+	memset(&cgrd, 0, sizeof(cgrd));
+	cgr.cgrid = query_cgr_data.cgid;
+	ret = qman_query_cgr(&cgr, &cgrd);
+	if (ret)
+		return ret;
+	seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
+	seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+		cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
+		cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
+		cgrd.cgr.wr_parm_g.Pn);
+
+	seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+		cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
+		cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
+		cgrd.cgr.wr_parm_y.Pn);
+
+	seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+		cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
+		cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
+		cgrd.cgr.wr_parm_r.Pn);
+
+	seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
+		cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
+
+	seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		seq_puts(file, " cscn_targ_dcp:\n");
+		mask = 0x80000000;
+		for (i = 0; i < 32; i++) {
+			if (cgrd.cgr.cscn_targ & mask)
+				seq_printf(file, "  send CSCN to dcp %u\n",
+								(31 - i));
+			mask >>= 1;
+		}
+
+		seq_puts(file, " cscn_targ_swp:\n");
+		for (i = 0; i < 4; i++) {
+			mask = 0x80000000;
+			for (j = 0; j < 32; j++) {
+				if (cgrd.cscn_targ_swp[i] & mask)
+					seq_printf(file, "  send CSCN to swp"
+						" %u\n", (127 - (i * 32) - j));
+				mask >>= 1;
+			}
+		}
+	} else {
+		seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
+	}
+	seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
+	seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
+
+	seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
+		cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
+
+	seq_printf(file, " mode: %s\n",
+		(cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
+		"frame count" : "byte count");
+	seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
+	seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
+
+	return 0;
+}
+
+static int query_cgr_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, query_cgr_show, NULL);
+}
+
+static ssize_t query_cgr_write(struct file *f, const char __user *buf,
+				size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	if (val > 0xff)
+		return -EINVAL;
+	query_cgr_data.cgid = (u8)val;
+	return count;
+}
+
+static const struct file_operations query_cgr_fops = {
+	.owner		= THIS_MODULE,
+	.open		= query_cgr_open,
+	.read		= seq_read,
+	.write		= query_cgr_write,
+	.release	= single_release,
+};
+
+/* Test Write CGR */
+
+struct test_write_cgr_s {
+	u64 i_bcnt;
+	u8 cgid;
+};
+static struct test_write_cgr_s test_write_cgr_data;
+
+static int testwrite_cgr_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct qm_mcr_cgrtestwrite result;
+	struct qman_cgr cgr;
+	u64 i_bcnt;
+
+	memset(&cgr, 0, sizeof(struct qman_cgr));
+	memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
+	cgr.cgrid = test_write_cgr_data.cgid;
+	i_bcnt = test_write_cgr_data.i_bcnt;
+	ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
+	if (ret)
+		return ret;
+	seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
+	seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+		result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
+		result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
+		result.cgr.wr_parm_g.Pn);
+	seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+		result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
+		result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
+		result.cgr.wr_parm_y.Pn);
+	seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+		result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
+		result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
+		result.cgr.wr_parm_r.Pn);
+	seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
+		result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
+	seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
+	seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
+	seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
+	seq_printf(file, " cs: %u\n", result.cgr.cs);
+	seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
+		result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
+
+	/* Add Mode for Si 2 */
+	seq_printf(file, " mode: %s\n",
+		(result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
+		"frame count" : "byte count");
+
+	seq_printf(file, " i_bcnt: %llu\n",
+		qm_mcr_cgrtestwrite_i_get64(&result));
+	seq_printf(file, " a_bcnt: %llu\n",
+		qm_mcr_cgrtestwrite_a_get64(&result));
+	seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
+	seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
+	seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
+	return 0;
+}
+
+static int testwrite_cgr_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, testwrite_cgr_show, NULL);
+}
+
+static const struct file_operations testwrite_cgr_fops = {
+	.owner		= THIS_MODULE,
+	.open		= testwrite_cgr_open,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+
+static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
+{
+	seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
+	return 0;
+}
+static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, testwrite_cgr_ibcnt_show, NULL);
+}
+
+static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
+				size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	test_write_cgr_data.i_bcnt = val;
+	return count;
+}
+
+static const struct file_operations teswrite_cgr_ibcnt_fops = {
+	.owner		= THIS_MODULE,
+	.open		= testwrite_cgr_ibcnt_open,
+	.read		= seq_read,
+	.write		= testwrite_cgr_ibcnt_write,
+	.release	= single_release,
+};
+
+static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
+{
+	seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
+	return 0;
+}
+static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, testwrite_cgr_cgrid_show, NULL);
+}
+
+static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
+				size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	if (val > 0xff)
+		return -EINVAL;
+	test_write_cgr_data.cgid = (u8)val;
+	return count;
+}
+
+static const struct file_operations teswrite_cgr_cgrid_fops = {
+	.owner		= THIS_MODULE,
+	.open		= testwrite_cgr_cgrid_open,
+	.read		= seq_read,
+	.write		= testwrite_cgr_cgrid_write,
+	.release	= single_release,
+};
+
+/* Query Congestion State */
+
+static int query_congestion_show(struct seq_file *file, void *offset)
+{
+	int ret;
+	struct qm_mcr_querycongestion cs;
+	int i, j, in_cong = 0;
+	u32 mask;
+
+	memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
+	ret = qman_query_congestion(&cs);
+	if (ret)
+		return ret;
+	seq_puts(file, "Query Congestion Result\n");
+	for (i = 0; i < 8; i++) {
+		mask = 0x80000000;
+		for (j = 0; j < 32; j++) {
+			if (cs.state.__state[i] & mask) {
+				in_cong = 1;
+				seq_printf(file, " cg %u: %s\n", (i*32)+j,
+					"in congestion");
+			}
+			mask >>= 1;
+		}
+	}
+	if (!in_cong)
+		seq_puts(file, " No congestion groups are congested.\n");
+	return 0;
+}
+
+static int query_congestion_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, query_congestion_show, NULL);
+}
+
+static const struct file_operations query_congestion_fops = {
+	.owner		= THIS_MODULE,
+	.open		= query_congestion_open,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+/* QMan register */
+
+struct qman_register_s {
+	u32 val;
+};
+static struct qman_register_s qman_register_data;
+
+static void init_ccsrmempeek(void)
+{
+	struct device_node *dn;
+	const u32 *regaddr_p;
+
+	dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
+	if (!dn) {
+		pr_info("No fsl,qman node\n");
+		return;
+	}
+	regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
+	if (!regaddr_p) {
+		of_node_put(dn);
+		return;
+	}
+	qman_ccsr_start = of_translate_address(dn, regaddr_p);
+	of_node_put(dn);
+}
+/* This function provides access to QMan ccsr memory map */
+static int qman_ccsrmempeek(u32 *val, u32 offset)
+{
+	void __iomem *addr;
+	u64 phys_addr;
+
+	if (!qman_ccsr_start)
+		return -EINVAL;
+
+	if (offset > qman_ccsr_size - sizeof(u32))
+		return -EINVAL;
+
+	phys_addr = qman_ccsr_start + offset;
+	addr = ioremap(phys_addr, sizeof(u32));
+	if (!addr) {
+		pr_err("%s(): ioremap() failed\n", __func__);
+		return -EINVAL;
+	}
+	*val = in_be32(addr);
+	iounmap(addr);
+	return 0;
+}
+
+static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
+{
+	u32 b;
+
+	qman_ccsrmempeek(&b, qman_register_data.val);
+	seq_printf(file, "QMan register offset = 0x%x\n",
+		   qman_register_data.val);
+	seq_printf(file, "value = 0x%08x\n", b);
+
+	return 0;
+}
+
+static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qman_ccsrmempeek_show, NULL);
+}
+
+static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
+				size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	/* multiple of 4 */
+	if (val > qman_ccsr_size - sizeof(u32)) {
+		pr_info("Input 0x%lx > 0x%llx\n",
+			val, qman_ccsr_size - sizeof(u32));
+		return -EINVAL;
+	}
+	if (val & 0x3) {
+		pr_info("Input 0x%lx not multiple of 4\n", val);
+		return -EINVAL;
+	}
+	qman_register_data.val = val;
+	return count;
+}
+
+static const struct file_operations qman_ccsrmempeek_fops = {
+	.owner		= THIS_MODULE,
+	.open		= qman_ccsrmempeek_open,
+	.read		= seq_read,
+	.write		= qman_ccsrmempeek_write,
+};
+
+/* QMan state */
+
+static int qman_fqd_state_show(struct seq_file *file, void *offset)
+{
+	struct qm_mcr_queryfq_np np;
+	struct qman_fq fq;
+	struct line_buffer_fq line_buf;
+	int ret, i;
+	u8 *state = file->private;
+	u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
+
+	memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
+	memset(&line_buf, 0, sizeof(line_buf));
+
+	seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
+
+	for (i = 1; i < fqid_max; i++) {
+		fq.fqid = i;
+		ret = qman_query_fq_np(&fq, &np);
+		if (ret)
+			return ret;
+		if (*state == (np.state & QM_MCR_NP_STATE_MASK))
+			add_to_line_buffer(&line_buf, fq.fqid, file);
+		/* Keep a summary count of all states */
+		if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
+			qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
+	}
+	flush_line_buffer(&line_buf, file);
+
+	for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
+		seq_printf(file, "%s count = %u\n", state_txt[i],
+			   qm_fq_state_cnt[i]);
+	}
+	return 0;
+}
+
+static int qman_fqd_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qman_fqd_state_show, inode->i_private);
+}
+
+static const struct file_operations qman_fqd_state_fops =  {
+	.owner		= THIS_MODULE,
+	.open		= qman_fqd_state_open,
+	.read		= seq_read,
+};
+
+static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
+{
+	struct qm_fqd fqd;
+	struct qman_fq fq;
+	u32 fq_en_cnt = 0, fq_di_cnt = 0;
+	int ret, i;
+	struct mask_filter_s *data = file->private;
+	const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
+	struct line_buffer_fq line_buf;
+
+	memset(&line_buf, 0, sizeof(line_buf));
+	seq_printf(file, "List of fq ids with: %s :%s\n",
+		ctrl_txt, (data->filter) ? "enabled" : "disabled");
+	for (i = 1; i < fqid_max; i++) {
+		fq.fqid = i;
+		memset(&fqd, 0, sizeof(struct qm_fqd));
+		ret = qman_query_fq(&fq, &fqd);
+		if (ret)
+			return ret;
+		if (data->filter) {
+			if (fqd.fq_ctrl & data->mask)
+				add_to_line_buffer(&line_buf, fq.fqid, file);
+		} else {
+			if (!(fqd.fq_ctrl & data->mask))
+				add_to_line_buffer(&line_buf, fq.fqid, file);
+		}
+		if (fqd.fq_ctrl & data->mask)
+			fq_en_cnt++;
+		else
+			fq_di_cnt++;
+	}
+	flush_line_buffer(&line_buf, file);
+
+	seq_printf(file, "Total FQD with: %s :	enabled = %u\n",
+		   ctrl_txt, fq_en_cnt);
+	seq_printf(file, "Total FQD with: %s : disabled = %u\n",
+		   ctrl_txt, fq_di_cnt);
+	return 0;
+}
+
+/* QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE */
+static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qman_fqd_ctrl_show, inode->i_private);
+}
+
+static const struct file_operations qman_fqd_ctrl_fops =  {
+	.owner		= THIS_MODULE,
+	.open		= qman_fqd_ctrl_open,
+	.read		= seq_read,
+};
+
+/* QMan ctrl summary */
+
+/* QMan summary state */
+
+static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
+{
+	struct qm_mcr_queryfq_np np;
+	struct qman_fq fq;
+	int ret, i;
+	u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
+
+	memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
+
+	for (i = 1; i < fqid_max; i++) {
+		fq.fqid = i;
+		ret = qman_query_fq_np(&fq, &np);
+		if (ret)
+			return ret;
+		/* Keep a summary count of all states */
+		if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
+			qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
+		seq_printf(file, "%s count = %u\n", state_txt[i],
+			   qm_fq_state_cnt[i]);
+	}
+	return 0;
+}
+
+static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
+{
+	struct qm_fqd fqd;
+	struct qman_fq fq;
+	int ret, i, j;
+	u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
+
+	memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
+
+	for (i = 1; i < fqid_max; i++) {
+		memset(&fqd, 0, sizeof(struct qm_fqd));
+		fq.fqid = i;
+		ret = qman_query_fq(&fq, &fqd);
+		if (ret)
+			return ret;
+		/* Keep a summary count of all states */
+		for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
+			if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
+					mask_filter[j].mask)
+				qm_prog_cnt[j/2]++;
+	}
+	for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
+		seq_printf(file, "%s count = %u\n",
+			get_fqd_ctrl_text(mask_filter[i*2].mask),
+			   qm_prog_cnt[i]);
+	}
+	return 0;
+}
+
+static int qman_fqd_summary_show(struct seq_file *file, void *offset)
+{
+	int ret;
+
+	/* Display summary of non programmable fields */
+	ret = qman_fqd_non_prog_summary_show(file, offset);
+	if (ret)
+		return ret;
+	seq_puts(file, "-----------------------------------------\n");
+	/* Display programmable fields */
+	ret = qman_fqd_prog_summary_show(file, offset);
+	return ret;
+}
+
+static int qman_fqd_summary_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qman_fqd_summary_show, NULL);
+}
+
+static const struct file_operations qman_fqd_summary_fops =  {
+	.owner		= THIS_MODULE,
+	.open		= qman_fqd_summary_open,
+	.read		= seq_read,
+};
+
+/* QMan destination work queue */
+
+struct qman_dest_wq_s {
+	u16 wq_id;
+};
+static struct qman_dest_wq_s qman_dest_wq_data = {
+	.wq_id = 0,
+};
+
+static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
+{
+	struct qm_fqd fqd;
+	struct qman_fq fq;
+	int ret, i;
+	u16 *wq, wq_id = qman_dest_wq_data.wq_id;
+	struct line_buffer_fq line_buf;
+
+	memset(&line_buf, 0, sizeof(line_buf));
+	/* use vzalloc: we need to allocate a large memory region and don't
+	 * require the memory to be physically contiguous. */
+	wq = vzalloc(sizeof(u16) * (0xFFFF+1));
+	if (!wq)
+		return -ENOMEM;
+
+	seq_printf(file, "List of fq ids with destination work queue id"
+			" = 0x%x\n", wq_id);
+
+	for (i = 1; i < fqid_max; i++) {
+		fq.fqid = i;
+		memset(&fqd, 0, sizeof(struct qm_fqd));
+		ret = qman_query_fq(&fq, &fqd);
+		if (ret) {
+			vfree(wq);
+			return ret;
+		}
+		if (wq_id == fqd.dest_wq)
+			add_to_line_buffer(&line_buf, fq.fqid, file);
+		wq[fqd.dest_wq]++;
+	}
+	flush_line_buffer(&line_buf, file);
+
+	seq_puts(file, "Summary of all FQD destination work queue values\n");
+	for (i = 0; i <= 0xFFFF; i++) {
+		if (wq[i])
+			seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
+				"count = %u\n", i >> 3, i & 0x7, i, wq[i]);
+	}
+	vfree(wq);
+	return 0;
+}
+
+static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
+				      size_t count, loff_t *off)
+{
+	int ret;
+	unsigned long val;
+
+	ret = user_input_convert(buf, count, &val);
+	if (ret)
+		return ret;
+	if (val > 0xFFFF)
+		return -EINVAL;
+	qman_dest_wq_data.wq_id = val;
+	return count;
+}
+
+static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qman_fqd_dest_wq_show, NULL);
+}
+
+static const struct file_operations qman_fqd_dest_wq_fops =  {
+	.owner		= THIS_MODULE,
+	.open		= qman_fqd_dest_wq_open,
+	.read		= seq_read,
+	.write		= qman_fqd_dest_wq_write,
+};
+
+/* QMan Intra-Class Scheduling Credit */
+
+static int qman_fqd_cred_show(struct seq_file *file, void *offset)
+{
+	struct qm_fqd fqd;
+	struct qman_fq fq;
+	int ret, i;
+	u32 fq_cnt = 0;
+	struct line_buffer_fq line_buf;
+
+	memset(&line_buf, 0, sizeof(line_buf));
+	seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
+			"\n");
+
+	for (i = 1; i < fqid_max; i++) {
+		fq.fqid = i;
+		memset(&fqd, 0, sizeof(struct qm_fqd));
+		ret = qman_query_fq(&fq, &fqd);
+		if (ret)
+			return ret;
+		if (fqd.ics_cred > 0) {
+			add_to_line_buffer(&line_buf, fq.fqid, file);
+			fq_cnt++;
+		}
+	}
+	flush_line_buffer(&line_buf, file);
+
+	seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
+	return 0;
+}
+
+static int qman_fqd_cred_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, qman_fqd_cred_show, NULL);
+}
+
+static const struct file_operations qman_fqd_cred_fops =  {
+	.owner		= THIS_MODULE,
+	.open		= qman_fqd_cred_open,
+	.read		= seq_read,
+};
+
+/* helper macros used in qman_debugfs_init */
+#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
+	do { \
+		d = debugfs_create_file(name, \
+			mode, parent, \
+			data, \
+			fops); \
+		if (d == NULL) { \
+			ret = -ENOMEM; \
+			goto _return; \
+		} \
+	} while (0)
+
+/* dfs_root as parent */
+#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
+	QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
+
+/* fqd_root as parent */
+#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
+	QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
+
+/* fqd state */
+#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
+	QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
+	(void *)&mask_filter[index], &qman_fqd_ctrl_fops)
+
+static int __init qman_debugfs_init(void)
+{
+	int ret = 0;
+	struct dentry *d, *fqd_root;
+	u32 reg;
+
+	fqid_max = 0;
+	init_ccsrmempeek();
+	if (qman_ccsr_start) {
+		if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
+			/* extract the size of the FQD window */
+			reg = reg & 0x3f;
+			/* calculate valid frame queue descriptor range */
+			fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
+		}
+	}
+
+	dfs_root = debugfs_create_dir("qman", NULL);
+	fqd_root = debugfs_create_dir("fqd", dfs_root);
+	if (dfs_root == NULL || fqd_root == NULL) {
+		pr_err("Cannot create dir\n");
+		return -ENOMEM;
+	}
+
+	if (fqid_max) {
+		QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
+				NULL, &qman_ccsrmempeek_fops);
+	}
+	QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
+		&query_fq_np_fields_data, &query_fq_np_fields_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
+		&query_fq_fields_data, &query_fq_fields_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
+		&query_wq_lengths_data, &query_wq_lengths_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
+		&query_cgr_data, &query_cgr_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
+		NULL, &query_congestion_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
+		NULL, &testwrite_cgr_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
+		NULL, &teswrite_cgr_cgrid_fops);
+
+	QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
+		NULL, &teswrite_cgr_ibcnt_fops);
+
+	/* Create files with fqd_root as parent */
+
+	QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO,
+		(void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
+		(void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
+		&qman_fqd_state_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
+		(void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
+		&qman_fqd_state_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
+		(void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
+		&qman_fqd_state_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
+		(void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
+		&qman_fqd_state_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
+		(void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
+		&qman_fqd_state_fops);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
+
+	QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
+		NULL, &qman_fqd_summary_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
+		NULL, &qman_fqd_dest_wq_fops);
+
+	QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
+		NULL, &qman_fqd_cred_fops);
+
+	return 0;
+
+_return:
+	debugfs_remove_recursive(dfs_root);
+
+	return ret;
+}
+
+static void __exit qman_debugfs_exit(void)
+{
+	debugfs_remove_recursive(dfs_root);
+}
+
+module_init(qman_debugfs_init);
+module_exit(qman_debugfs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/linux/drivers/soc/fsl/qbman/qman.c b/linux/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000..aa18888
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,1106 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#include <asm/cacheflush.h>
+
+/* Last updated for v00.800 of the BG */
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
+#define REG_DD_CFG		0x0200
+#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC		0x0400
+#define REG_PFDR_FP_HEAD	0x0404
+#define REG_PFDR_FP_TAIL	0x0408
+#define REG_PFDR_FP_LWIT	0x0410
+#define REG_PFDR_CFG		0x0414
+#define REG_SFDR_CFG		0x0500
+#define REG_SFDR_IN_USE		0x0504
+#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID	0x0630
+#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_ECIR2		0x0a0c
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_MCR			0x0b00
+#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG		0x0be0
+#define REG_HID_CFG		0x0bf0
+#define REG_IDLE_STAT		0x0bf4
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FQD_BARE		0x0c00
+#define REG_PFDR_BARE		0x0c20
+#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE		0x0c80
+#define REG_QCSP_BAR		0x0c84
+#define REG_CI_SCHED_CFG	0x0d00
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_CI_RLM_AVG		0x0d14
+#define REG_ERR_ISR		0x0e00	/* + "enum qm_isr_reg" */
+#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR		0x01000000
+#define MCR_get_rslt(v)		(u8)((v) >> 24)
+#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r)		((r) == 0xf0)
+#define MCR_rslt_eaccess(r)	((r) == 0xf8)
+#define MCR_rslt_inval(r)	((r) == 0xff)
+
+/* Corenet initiator settings. Stash request queues are 4-deep to match the
+   cores' ability to snarf. Stash priority is 3, other priorities are 2. */
+#define FSL_QMAN_CI_SCHED_CFG_SRCCIV   4
+#define FSL_QMAN_CI_SCHED_CFG_SRQ_W    3
+#define FSL_QMAN_CI_SCHED_CFG_RW_W     2
+#define FSL_QMAN_CI_SCHED_CFG_BMAN_W   2
+
+struct qman;
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+	qm_wq_portal = 0,
+	qm_wq_pool = 1,
+	qm_wq_fman0 = 2,
+	qm_wq_fman1 = 3,
+	qm_wq_caam = 4,
+	qm_wq_pme = 5,
+	qm_wq_first = qm_wq_portal,
+	qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+	qm_memory_fqd,
+	qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
+#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
+#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
+#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
+#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
+#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+				QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+				QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+			QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+			QM_EIRQ_IFSI)
+
+union qman_ecir {
+	u32 ecir_raw;
+	struct {
+		u32 __reserved:2;
+		u32 portal_type:1;
+		u32 portal_num:5;
+		u32 fqid:24;
+	} __packed info;
+};
+
+union qman_ecir2 {
+	u32 ecir2_raw;
+	struct {
+		u32 portal_type:1;
+		u32 __reserved:21;
+		u32 portal_num:10;
+	} __packed info;
+};
+
+union qman_eadr {
+	u32 eadr_raw;
+	struct {
+		u32 __reserved1:4;
+		u32 memid:4;
+		u32 __reserved2:12;
+		u32 eadr:12;
+	} __packed info;
+	struct {
+		u32 __reserved1:3;
+		u32 memid:5;
+		u32 __reserved:8;
+		u32 eadr:16;
+	} __packed info_rev3;
+};
+
+struct qman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+	QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
+	QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
+	QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
+	QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
+	QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+	QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+	QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
+	QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
+	QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
+	QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
+	QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
+	QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
+	QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
+	QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
+	QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
+	QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
+	QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
+	QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
+};
+#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
+
+struct qman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct qman_error_info_mdata error_mdata[] = {
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
+	QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
+	QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
+	QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
+	QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
+	QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
+	QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
+};
+#define QMAN_ERR_MDATA_COUNT \
+	(sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
+
+/* Add this in Kconfig */
+#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/**
+ * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
+ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define qm_err_isr_status_read(qm)	\
+		__qm_err_isr_read(qm, qm_isr_status)
+#define qm_err_isr_status_clear(qm, m)	\
+		__qm_err_isr_write(qm, qm_isr_status, m)
+#define qm_err_isr_enable_read(qm)	\
+		__qm_err_isr_read(qm, qm_isr_enable)
+#define qm_err_isr_enable_write(qm, v)	\
+		__qm_err_isr_write(qm, qm_isr_enable, v)
+#define qm_err_isr_disable_read(qm)	\
+		__qm_err_isr_read(qm, qm_isr_disable)
+#define qm_err_isr_disable_write(qm, v)	\
+		__qm_err_isr_write(qm, qm_isr_disable, v)
+#define qm_err_isr_inhibit(qm)		\
+		__qm_err_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_err_isr_uninhibit(qm)	\
+		__qm_err_isr_write(qm, qm_isr_inhibit, 0)
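+
+/*
+ * Typical usage sketch (see qman_isr() later in this file): read the status
+ * and enable registers, mask them together to find the enabled errors that
+ * are pending, then write-to-clear the handled status bits, e.g.
+ *
+ *	u32 isr = qm_err_isr_status_read(qm);
+ *	u32 ier = qm_err_isr_enable_read(qm);
+ *	u32 pending = isr & ier;
+ *	...
+ *	qm_err_isr_status_clear(qm, isr);
+ */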
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Encapsulate "struct qman *" as a cast of the register space address. */
+
+static struct qman *qm_create(void *regs)
+{
+	return (struct qman *)regs;
+}
+
+static inline u32 __qm_in(struct qman *qm, u32 offset)
+{
+	return ioread32be((void *)qm + offset);
+}
+static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
+{
+	iowrite32be(val, (void *)qm + offset);
+}
+#define qm_in(reg)		__qm_in(qm, REG_##reg)
+#define qm_out(reg, val)	__qm_out(qm, REG_##reg, val)
+
+static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
+{
+	return __qm_in(qm, REG_ERR_ISR + (n << 2));
+}
+
+static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
+{
+	__qm_out(qm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
+			int ed, u8 sernd)
+{
+	DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
+			(portal == qm_dc_portal_fman1));
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+	else
+		qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
+			u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
+			u8 csw6, u8 csw7)
+{
+	qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+		((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+		((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+		((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(struct qman *qm)
+{
+	qm_out(HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(struct qman *qm)
+{
+	qm_out(CI_SCHED_CFG,
+	       0x80000000 | /* write srcciv enable */
+	       (FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
+	       (FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
+	       (FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
+	       FSL_QMAN_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = qm_in(IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
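+
+/*
+ * Example (illustrative value): an IP_REV_1 reading of 0x0a010300 decodes
+ * with the shifts above to id = 0x0a01, major = 0x03, minor = 0x00, which
+ * fsl_qman_init() below maps to QMAN_REV30.
+ */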
+
+static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
+			int enable, int prio, int stash, u32 size)
+{
+	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+			is_power_of_2(size));
+	/* choke if 'ba' has lower-alignment than 'size' */
+	DPA_ASSERT(!(ba & (size - 1)));
+	__qm_out(qm, offset, upper_32_bits(ba));
+	__qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
+	__qm_out(qm, offset + REG_offset_AR,
+		(enable ? 0x80000000 : 0) |
+		(prio ? 0x40000000 : 0) |
+		(stash ? 0x20000000 : 0) |
+		(exp - 1));
+}
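+
+/*
+ * Worked example: the RTEMS FQD region declared below is 4194304 bytes
+ * (4 MiB), so exp = ilog2(4194304) = 22 and the AR register is programmed
+ * with the enable/priority/stash flags plus (exp - 1) = 21 in its size field.
+ */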
+
+static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
+{
+	qm_out(PFDR_FP_LWIT, th & 0xffffff);
+	qm_out(PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
+{
+	qm_out(SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
+{
+	u8 rslt = MCR_get_rslt(qm_in(MCR));
+
+	DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+	/* Make sure the command interface is 'idle' */
+	if (!MCR_rslt_idle(rslt))
+		panic("QMAN_MCR isn't idle");
+
+	/* Write the MCR command params then the verb */
+	qm_out(MCP(0), pfdr_start);
+	/* TODO: remove this - it's a workaround for a model bug that is
+	 * corrected in more recent versions. We use the workaround until
+	 * everyone has upgraded. */
+	qm_out(MCP(1), (pfdr_start + num - 16));
+	lwsync();
+	qm_out(MCR, MCR_INIT_PFDR);
+	/* Poll for the result */
+	do {
+		rslt = MCR_get_rslt(qm_in(MCR));
+	} while (!MCR_rslt_idle(rslt));
+	if (MCR_rslt_ok(rslt))
+		return 0;
+	if (MCR_rslt_eaccess(rslt))
+		return -EACCES;
+	if (MCR_rslt_inval(rslt))
+		return -EINVAL;
+	pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+	return -ENODEV;
+}
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these */
+static struct qman *qm;
+#ifndef __rtems__
+static struct device_node *qm_node;
+#endif /* __rtems__ */
+
+/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
+ * during qman_init_ccsr(). */
+#ifndef __rtems__
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+	fqd_a = rmem->base;
+	fqd_sz = rmem->size;
+
+	WARN_ON(!(fqd_a && fqd_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+#else /* __rtems__ */
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304);
+#define fqd_a ((uintptr_t)&fqd[0])
+#define fqd_sz sizeof(fqd)
+static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 33554432);
+#define pfdr_a ((uintptr_t)&pfdr[0])
+#define pfdr_sz sizeof(pfdr)
+#endif /* __rtems__ */
+
+size_t qman_fqd_size(void)
+{
+	return fqd_sz;
+}
+
+#ifndef __rtems__
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+	pfdr_a = rmem->base;
+	pfdr_sz = rmem->size;
+
+	WARN_ON(!(pfdr_a && pfdr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
+
+/* Parse the <name> property to extract the memory location and size and
+ * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default
+ * size. Also flush this memory range from the data cache so that
+ * QMan-originated transactions for this memory region can be marked
+ * non-coherent.
+ */
+static __init int parse_mem_property(struct device_node *node,
+				     dma_addr_t *addr, size_t *sz, int zero)
+{
+	int ret;
+
+	/* If using a "zero-pma", don't try to zero it, even if you asked */
+	if (zero && of_find_property(node, "zero-pma", &ret)) {
+		pr_info("  it's a 'zero-pma', not zeroing from s/w\n");
+		zero = 0;
+	}
+
+	if (zero) {
+		/* map as cacheable, non-guarded */
+		void __iomem *tmpp = ioremap_prot(*addr, *sz, 0);
+
+		memset_io(tmpp, 0, *sz);
+		flush_dcache_range((unsigned long)tmpp,
+				   (unsigned long)tmpp + *sz);
+		iounmap(tmpp);
+	}
+
+	return 0;
+}
+#else /* __rtems__ */
+#include <bsp/qoriq.h>
+#endif /* __rtems__ */
+
+/* TODO:
+ * - there is obviously no handling of errors,
+ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
+ *   both memory resources to zero.
+ */
+static int __init fsl_qman_init(struct device_node *node)
+{
+#ifndef __rtems__
+	struct resource res;
+#endif /* __rtems__ */
+	u32 __iomem *regs;
+#ifndef __rtems__
+	const char *s;
+	int ret, standby = 0;
+#endif /* __rtems__ */
+	u16 id;
+	u8 major, minor;
+
+#ifndef __rtems__
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		pr_err("Can't get %s property 'reg'\n", node->full_name);
+		return ret;
+	}
+	s = of_get_property(node, "fsl,hv-claimable", &ret);
+	if (s && !strcmp(s, "standby"))
+		standby = 1;
+	if (!standby) {
+		ret = parse_mem_property(node, &fqd_a, &fqd_sz, 1);
+		BUG_ON(ret);
+		ret = parse_mem_property(node, &pfdr_a, &pfdr_sz, 0);
+		BUG_ON(ret);
+	}
+#else /* __rtems__ */
+	memset((void *)fqd_a, 0, fqd_sz);
+#endif /* __rtems__ */
+	/* Global configuration */
+#ifndef __rtems__
+	regs = ioremap(res.start, res.end - res.start + 1);
+#else /* __rtems__ */
+	regs = (u32 __iomem *)&qoriq.qman;
+#endif /* __rtems__ */
+	qm = qm_create(regs);
+#ifndef __rtems__
+	qm_node = node;
+#endif /* __rtems__ */
+	qm_get_version(qm, &id, &major, &minor);
+	pr_info("Ver: %04x,%02x,%02x\n", id, major, minor);
+	if (!qman_ip_rev) {
+		if ((major == 1) && (minor == 0)) {
+			pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+#ifndef __rtems__
+			iounmap(regs);
+#endif /* __rtems__ */
+			return -ENODEV;
+		} else if ((major == 1) && (minor == 1))
+			qman_ip_rev = QMAN_REV11;
+		else if	((major == 1) && (minor == 2))
+			qman_ip_rev = QMAN_REV12;
+		else if ((major == 2) && (minor == 0))
+			qman_ip_rev = QMAN_REV20;
+		else if ((major == 3) && (minor == 0))
+			qman_ip_rev = QMAN_REV30;
+		else if ((major == 3) && (minor == 1))
+			qman_ip_rev = QMAN_REV31;
+		else {
+			pr_warn("Unknown version, default to rev1.1\n");
+			qman_ip_rev = QMAN_REV11;
+		}
+	}
+
+#ifndef __rtems__
+	if (standby) {
+		pr_info("  -> in standby mode\n");
+		return 0;
+	}
+#endif /* __rtems__ */
+	return 0;
+}
+
+int qman_have_ccsr(void)
+{
+	return qm ? 1 : 0;
+}
+
+#ifndef __rtems__
+__init void qman_init_early(void)
+{
+	struct device_node *dn;
+	int ret;
+
+	for_each_compatible_node(dn, NULL, "fsl,qman") {
+		if (qm)
+			pr_err("%s: only one 'fsl,qman' allowed\n",
+				dn->full_name);
+		else {
+			if (!of_device_is_available(dn))
+				continue;
+
+			ret = fsl_qman_init(dn);
+			BUG_ON(ret);
+		}
+	}
+}
+#else /* __rtems__ */
+#include <bsp/fdt.h>
+
+static void
+qman_sysinit(void)
+{
+	const char *fdt = bsp_fdt_get();
+	struct device_node dn;
+	const char *name;
+	int node;
+	int ret;
+
+	memset(&dn, 0, sizeof(dn));
+
+	name = "fsl,qman";
+	node = fdt_node_offset_by_compatible(fdt, 0, name);
+	if (node < 0)
+		panic("qman: no qman in FDT");
+
+	dn.full_name = name;
+	dn.offset = node;
+
+	ret = fsl_qman_init(&dn);
+	if (ret != 0)
+		panic("qman: init 1 failed");
+
+	ret = qman_init_ccsr(&dn);
+	if (ret != 0)
+		panic("qman: init CCSR failed");
+
+	ret = qman_init(&dn);
+	if (ret != 0)
+		panic("qman: init 2 failed");
+
+	ret = qman_resource_init();
+	if (ret != 0)
+		panic("qman: resource init failed");
+}
+SYSINIT(qman_sysinit, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL);
+#endif /* __rtems__ */
+
+static void log_edata_bits(u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	pr_warn("ErrInt, EDATA:\n");
+	i = bit_count/32;
+	if (bit_count%32) {
+		i++;
+		mask = ~(mask << bit_count%32);
+	}
+	j = 16-i;
+	pr_warn("  0x%08x\n", qm_in(EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		pr_warn("  0x%08x\n", qm_in(EDATA(j)));
+}
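+
+/*
+ * Example: for the 72-bit "WQ context memory" entry in error_mdata[] above,
+ * bit_count = 72 gives i = 3 words and mask = 0xff, so EDATA(13) is printed
+ * masked to its low 8 bits, followed by EDATA(14) and EDATA(15) in full.
+ */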
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+	union qman_ecir ecir_val;
+	union qman_eadr eadr_val;
+
+	ecir_val.ecir_raw = qm_in(ECIR);
+	/* Is portal info valid */
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		union qman_ecir2 ecir2_val;
+
+		ecir2_val.ecir2_raw = qm_in(ECIR2);
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			pr_warn("ErrInt: %s id %d\n",
+				ecir2_val.info.portal_type ? "DCP" : "SWP",
+				ecir2_val.info.portal_num);
+		}
+		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+			pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.eadr_raw = qm_in(EADR);
+			pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+				error_mdata[eadr_val.info_rev3.memid].txt,
+				error_mdata[eadr_val.info_rev3.memid].addr_mask
+					& eadr_val.info_rev3.eadr);
+			log_edata_bits(
+				error_mdata[eadr_val.info_rev3.memid].bits);
+		}
+	} else {
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			pr_warn("ErrInt: %s id %d\n",
+				ecir_val.info.portal_type ? "DCP" : "SWP",
+				ecir_val.info.portal_num);
+		}
+		if (ecsr_val & FQID_ECSR_ERR)
+			pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.eadr_raw = qm_in(EADR);
+			pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+				error_mdata[eadr_val.info.memid].txt,
+				error_mdata[eadr_val.info.memid].addr_mask
+					& eadr_val.info.eadr);
+			log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+		}
+	}
+}
+
+/* QMan interrupt handler */
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+	ier_val = qm_err_isr_enable_read(qm);
+	isr_val = qm_err_isr_status_read(qm);
+	ecsr_val = qm_in(ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+	for (i = 0; i < QMAN_HWE_COUNT; i++) {
+		if (qman_hwerr_txts[i].mask & isr_mask) {
+			pr_warn("ErrInt: %s\n", qman_hwerr_txts[i].txt);
+			if (qman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(isr_mask, ecsr_val);
+				/* Re-arm error capture registers */
+				qm_out(ECSR, ecsr_val);
+			}
+			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
+				pr_devel("Un-enabling error 0x%x\n",
+					 qman_hwerr_txts[i].mask);
+				ier_val &= ~qman_hwerr_txts[i].mask;
+				qm_err_isr_enable_write(qm, ier_val);
+			}
+		}
+	}
+	qm_err_isr_status_clear(qm, isr_val);
+	return IRQ_HANDLED;
+}
+
+static int __bind_irq(struct device_node *node)
+{
+	int ret, err_irq;
+
+	err_irq = of_irq_to_resource(node, 0, NULL);
+	if (err_irq == NO_IRQ) {
+		pr_info("Can't get %s property 'interrupts'\n",
+			node->full_name);
+		return -ENODEV;
+	}
+	ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", node);
+	if (ret)  {
+		pr_err("request_irq() failed %d for '%s'\n",
+		       ret, node->full_name);
+		return -ENODEV;
+	}
+	/* Write-to-clear any stale bits (e.g. starvation asserted prior to
+	 * resource allocation during driver init). */
+	qm_err_isr_status_clear(qm, 0xffffffff);
+	/* Enable Error Interrupts */
+	qm_err_isr_enable_write(qm, 0xffffffff);
+	return 0;
+}
+
+int qman_init_ccsr(struct device_node *node)
+{
+	int ret;
+
+#ifndef __rtems__
+	if (!qman_have_ccsr())
+		return 0;
+	if (node != qm_node)
+		return -EINVAL;
+#endif /* __rtems__ */
+	/* FQD memory */
+	qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
+	/* PFDR memory */
+	qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
+	qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
+	/* thresholds */
+	qm_set_pfdr_threshold(qm, 512, 64);
+	qm_set_sfdr_threshold(qm, 128);
+	/* clear stale PEBI bit from interrupt status register */
+	qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
+	/* corenet initiator settings */
+	qm_set_corenet_initiator(qm);
+	/* HID settings */
+	qm_set_hid(qm);
+	/* Set scheduling weights to defaults */
+	for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
+		qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
+	/* We are not prepared to accept ERNs for hardware enqueues */
+	qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
+	qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
+	/* Initialise Error Interrupt Handler */
+	ret = __bind_irq(node);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+	static int done;
+	static u32 liodn_offset;
+	u32 before, after;
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+	if (!qman_have_ccsr())
+		return;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		before = qm_in(REV3_QCSP_LIO_CFG(idx));
+	else
+		before = qm_in(QCSP_LIO_CFG(idx));
+	if (!done) {
+		liodn_offset = before & LIO_CFG_LIODN_MASK;
+		done = 1;
+		return;
+	}
+	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_out(REV3_QCSP_LIO_CFG(idx), after);
+	else
+		qm_out(QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+	u32 before, after;
+
+	if (!qman_have_ccsr())
+		return -ENODEV;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		before = qm_in(REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+		cpu_idx /= 2;
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_out(REV3_QCSP_IO_CFG(idx), after);
+	} else {
+		before = qm_in(QCSP_IO_CFG(idx));
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_out(QCSP_IO_CFG(idx), after);
+	}
+	return 0;
+}
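+
+/*
+ * Example: on rev 3.0+ hardware a request for cpu_idx = 5 is first halved
+ * to 2 (vCPUs 4 and 5 share one SDEST) and then written into bits 23:16 of
+ * the REV3_QCSP_IO_CFG register via the shift above.
+ */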
+
+#define MISC_CFG_WPM_MASK 0x00000002
+int qm_set_wpm(int wpm)
+{
+	u32 before;
+	u32 after;
+
+	if (!qman_have_ccsr())
+		return -ENODEV;
+
+	before = qm_in(MISC_CFG);
+	after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
+	qm_out(MISC_CFG, after);
+	return 0;
+}
+
+int qm_get_wpm(int *wpm)
+{
+	u32 before;
+
+	if (!qman_have_ccsr())
+		return -ENODEV;
+
+	before = qm_in(MISC_CFG);
+	*wpm = (before & MISC_CFG_WPM_MASK) >> 1;
+	return 0;
+}
+
+#ifdef CONFIG_SYSFS
+
+#define DRV_NAME	"fsl-qman"
+
+static ssize_t show_pfdr_fpc(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
+};
+
+static ssize_t show_dlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	u32 data;
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
+		return -EINVAL;
+	data = qm_in(DCP_DLM_AVG(i));
+	return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+			(data & 0x000000ff)*390625);
+};
+
+static ssize_t set_dlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+	unsigned long val;
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
+		return -EINVAL;
+	if (kstrtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	qm_out(DCP_DLM_AVG(i), val);
+	return count;
+};
+
+static ssize_t show_pfdr_cfg(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
+};
+
+static ssize_t set_pfdr_cfg(struct device *dev,
+	struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	qm_out(PFDR_CFG, val);
+	return count;
+};
+
+static ssize_t show_sfdr_in_use(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
+};
+
+static ssize_t show_idle_stat(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
+};
+
+static ssize_t show_ci_rlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	u32 data = qm_in(CI_RLM_AVG);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+			(data & 0x000000ff)*390625);
+};
+
+static ssize_t set_ci_rlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	qm_out(CI_RLM_AVG, val);
+	return count;
+};
+
+static ssize_t show_err_isr(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
+};
+
+
+static ssize_t show_sbec(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+		return -EINVAL;
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
+static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
+static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
+static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
+		show_ci_rlm_avg, set_ci_rlm_avg);
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
+
+static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *qman_dev_attributes[] = {
+	&dev_attr_pfdr_fpc.attr,
+	&dev_attr_pfdr_cfg.attr,
+	&dev_attr_idle_stat.attr,
+	&dev_attr_ci_rlm_avg.attr,
+	&dev_attr_err_isr.attr,
+	&dev_attr_dcp0_dlm_avg.attr,
+	&dev_attr_dcp1_dlm_avg.attr,
+	&dev_attr_dcp2_dlm_avg.attr,
+	&dev_attr_dcp3_dlm_avg.attr,
+	/* sfdr_in_use will be added if necessary */
+	NULL
+};
+
+static struct attribute *qman_dev_ecr_attributes[] = {
+	&dev_attr_sbec_0.attr,
+	&dev_attr_sbec_1.attr,
+	&dev_attr_sbec_2.attr,
+	&dev_attr_sbec_3.attr,
+	&dev_attr_sbec_4.attr,
+	&dev_attr_sbec_5.attr,
+	&dev_attr_sbec_6.attr,
+	&dev_attr_sbec_7.attr,
+	&dev_attr_sbec_8.attr,
+	&dev_attr_sbec_9.attr,
+	&dev_attr_sbec_10.attr,
+	&dev_attr_sbec_11.attr,
+	&dev_attr_sbec_12.attr,
+	&dev_attr_sbec_13.attr,
+	&dev_attr_sbec_14.attr,
+	NULL
+};
+
+/* root level */
+static const struct attribute_group qman_dev_attr_grp = {
+	.name = NULL,
+	.attrs = qman_dev_attributes
+};
+static const struct attribute_group qman_dev_ecr_grp = {
+	.name = "error_capture",
+	.attrs = qman_dev_ecr_attributes
+};
+
+static int of_fsl_qman_remove(struct platform_device *ofdev)
+{
+	sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+	return 0;
+};
+
+static int of_fsl_qman_probe(struct platform_device *ofdev)
+{
+	int ret;
+	struct device *dev = &ofdev->dev;
+
+	ret = sysfs_create_group(&dev->kobj, &qman_dev_attr_grp);
+	if (ret)
+		goto done;
+	ret = sysfs_add_file_to_group(&dev->kobj,
+		&dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
+	if (ret)
+		goto del_group_0;
+	ret = sysfs_create_group(&dev->kobj, &qman_dev_ecr_grp);
+	if (ret)
+		goto del_group_0;
+
+	goto done;
+
+del_group_0:
+	sysfs_remove_group(&dev->kobj, &qman_dev_attr_grp);
+done:
+	if (ret)
+		dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+	return ret;
+};
+
+static const struct of_device_id of_fsl_qman_ids[] = {
+	{
+		.compatible = "fsl,qman",
+	},
+	{}
+};
+
+static struct platform_driver of_fsl_qman_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_fsl_qman_ids,
+	},
+	.probe = of_fsl_qman_probe,
+	.remove	= of_fsl_qman_remove,
+};
+
+builtin_platform_driver(of_fsl_qman_driver);
+
+#endif /* CONFIG_SYSFS */
diff --git a/linux/drivers/soc/fsl/qbman/qman.h b/linux/drivers/soc/fsl/qbman/qman.h
new file mode 100644
index 0000000..331db7c
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman.h
@@ -0,0 +1,1133 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x0000
+#define QM_REG_EQCR_CI_CINH	0x0004
+#define QM_REG_EQCR_ITR		0x0008
+#define QM_REG_DQRR_PI_CINH	0x0040
+#define QM_REG_DQRR_CI_CINH	0x0044
+#define QM_REG_DQRR_ITR		0x0048
+#define QM_REG_DQRR_DCAP	0x0050
+#define QM_REG_DQRR_SDQCR	0x0054
+#define QM_REG_DQRR_VDQCR	0x0058
+#define QM_REG_DQRR_PDQCR	0x005c
+#define QM_REG_MR_PI_CINH	0x0080
+#define QM_REG_MR_CI_CINH	0x0084
+#define QM_REG_MR_ITR		0x0088
+#define QM_REG_CFG		0x0100
+#define QM_REG_ISR		0x0e00
+#define QM_REG_IIR		0x0e0c
+#define QM_REG_ITPR		0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3100
+#define QM_CL_DQRR_PI_CENA	0x3200
+#define QM_CL_DQRR_CI_CENA	0x3300
+#define QM_CL_MR_PI_CENA	0x3400
+#define QM_CL_MR_CI_CENA	0x3500
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrade performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o)		__raw_readl((qm)->addr_ci + (o))
+#define __qm_out(qm, o, val)	__raw_writel((val), (qm)->addr_ci + (o))
+#define qm_in(reg)		__qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val)	__qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
+#define __qm_cl_in(qm, o)	__raw_readl((qm)->addr_ce + (o))
+#define __qm_cl_out(qm, o, val) \
+	do { \
+		u32 *__tmpclout = (qm)->addr_ce + (o); \
+		__raw_writel((val), __tmpclout); \
+		dcbf(__tmpclout); \
+	} while (0)
+#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg)	    __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+	__qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
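+
+/*
+ * Example: with ringsize = 8, first = 6 and last = 2 the index has wrapped,
+ * so the distance is 8 + 2 - 6 = 4 entries; with first = 2 and last = 6 it
+ * is simply 6 - 2 = 4.
+ */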
+
+/* Portal modes.
+ *   Enum types;
+ *     pmode == production mode
+ *     cmode == consumption mode,
+ *     dmode == h/w dequeue mode.
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate;
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
+	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
+	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
+	qm_eqcr_pvb = 2		/* valid-bit */
+};
+enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
+	qm_dqrr_dpush = 0,	/* SDQCR  + VDQCR */
+	qm_dqrr_dpull = 1	/* PDQCR */
+};
+enum qm_dqrr_pmode {		/* s/w-only */
+	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
+	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
+	qm_dqrr_pvb		/* reads valid-bit */
+};
+enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
+	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
+	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
+	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode {		/* s/w-only */
+	qm_mr_pci,		/* reads MR_PI_CINH */
+	qm_mr_pce,		/* reads MR_PI_CENA */
+	qm_mr_pvb		/* reads valid-bit */
+};
+enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
+	qm_mr_cci = 0,		/* CI index, cache-inhibited */
+	qm_mr_cce = 1		/* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE		8
+#define QM_DQRR_SIZE		16
+#define QM_MR_SIZE		8
+
+struct qm_eqcr {
+	struct qm_eqcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	u32 busy;
+	enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+	const struct qm_dqrr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum qm_dqrr_dmode dmode;
+	enum qm_dqrr_pmode pmode;
+	enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+	const struct qm_mr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum qm_mr_pmode pmode;
+	enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+	struct qm_mc_command *cr;
+	struct qm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum {
+		/* Can be _mc_start()ed */
+		qman_mc_idle,
+		/* Can be _mc_commit()ed or _mc_abort()ed */
+		qman_mc_user,
+		/* Can only be _mc_retry()ed */
+		qman_mc_hw
+	} state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+	void __iomem *addr_ce;	/* cache-enabled */
+	void __iomem *addr_ci;	/* cache-inhibited */
+};
+
+struct qm_portal {
+	/* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ... */
+	struct qm_addr addr;
+	struct qm_eqcr eqcr;
+	struct qm_dqrr dqrr;
+	struct qm_mr mr;
+	struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* --- EQCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
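+
+/*
+ * Illustration: EQCR entries are 64 bytes (hence the "<< 6" shifts), so with
+ * QM_EQCR_SIZE == 8 the ring spans 8 * 64 = 512 bytes. Advancing a cursor
+ * past the last entry sets the 0x200 "carry" bit; EQCR_CARRYCLEAR() masks
+ * that bit off, wrapping the pointer back to the ring base, while
+ * EQCR_PTR2IDX() recovers the 0..7 index from bits 8:6 of the address.
+ */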
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+	/* NB: this is odd-looking, but experiments show that it generates fast
+	 * code with essentially no branching overheads. We increment to the
+	 * next EQCR pointer and handle overflow and 'vbit'. */
+	struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+	eqcr->cursor = EQCR_CARRYCLEAR(partial);
+	if (partial != eqcr->cursor)
+		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+				enum qm_eqcr_pmode pmode,
+				unsigned int eq_stash_thresh,
+				int eq_stash_prio)
+{
+	/* This use of 'register', as well as all other occurrences, is because
+	 * it has been observed to generate much faster code with gcc than is
+	 * otherwise the case. */
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u32 cfg;
+	u8 pi;
+
+	eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
+	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(EQCR_CI);
+	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	eqcr->cursor = eqcr->ring + pi;
+	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+			QM_EQCR_VERB_VBIT : 0;
+	eqcr->available = QM_EQCR_SIZE - 1 -
+			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+	eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+	eqcr->pmode = pmode;
+#endif
+	cfg = (qm_in(CFG) & 0x00ffffff) |
+		(eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+		(eq_stash_prio << 26)	| /* QCSP_CFG: EP */
+		((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
+	qm_out(CFG, cfg);
+	return 0;
+}
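+
+/*
+ * Note on the CFG composition above (derived from the shifts and masks
+ * used): the low 24 bits of QCSP_CFG are preserved, the stash threshold
+ * (EST) is placed in bits 30:28, the stash priority (EP) in bit 26 and the
+ * production mode (EPM) in bits 25:24.
+ */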
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+	return (qm_in(CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+	DPA_ASSERT(!eqcr->busy);
+	if (pi != EQCR_PTR2IDX(eqcr->cursor))
+		pr_crit("losing uncommitted EQCR entries\n");
+	if (ci != eqcr->ci)
+		pr_crit("missing existing EQCR completions\n");
+	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+		pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+								 *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(!eqcr->busy);
+	if (!eqcr->available)
+		return NULL;
+
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dcbz_64(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+								*portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci;
+
+	DPA_ASSERT(!eqcr->busy);
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return NULL;
+	}
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dcbz_64(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(eqcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+					struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(eqcr->busy);
+	DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+	if (eqcr->available == 1)
+		return NULL;
+	eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	dcbf(eqcr->cursor);
+	EQCR_INC(eqcr);
+	eqcr->available--;
+	dcbz_64(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+	DPA_ASSERT(eqcr->busy); \
+	DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+	DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	EQCR_COMMIT_CHECKS(eqcr);
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+	eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	EQCR_INC(eqcr);
+	eqcr->available--;
+	dcbf(eqcr->cursor);
+	hwsync();
+	qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+	qm_cl_invalidate(EQCR_PI);
+	qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	EQCR_COMMIT_CHECKS(eqcr);
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+	eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	EQCR_INC(eqcr);
+	eqcr->available--;
+	dcbf(eqcr->cursor);
+	lwsync();
+	qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eqcursor;
+
+	EQCR_COMMIT_CHECKS(eqcr);
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+	lwsync();
+	eqcursor = eqcr->cursor;
+	eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	dcbf(eqcursor);
+	EQCR_INC(eqcr);
+	eqcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(EQCR_CI);
+	diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	eqcr->ithresh = ithresh;
+	qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+/* FIXME: many possible improvements;
+ * - look at changing the API to use pointer rather than index parameters now
+ *   that 'cursor' is a pointer,
+ * - consider moving other parameters to pointer if it could help (ci)
+ */
+
+#define DQRR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline const struct qm_dqrr_entry *DQRR_INC(
+						const struct qm_dqrr_entry *e)
+{
+	return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+	qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+		((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+				const struct qm_portal_config *config,
+				enum qm_dqrr_dmode dmode,
+				__maybe_unused enum qm_dqrr_pmode pmode,
+				enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	u32 cfg;
+
+	/* Make sure the DQRR will be idle when we enable */
+	qm_out(DQRR_SDQCR, 0);
+	qm_out(DQRR_VDQCR, 0);
+	qm_out(DQRR_PDQCR, 0);
+	dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
+	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->cursor = dqrr->ring + dqrr->ci;
+	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+			QM_DQRR_VERB_VBIT : 0;
+	dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	dqrr->dmode = dmode;
+	dqrr->pmode = pmode;
+	dqrr->cmode = cmode;
+#endif
+	/* Invalidate every ring entry before beginning */
+	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+		dcbi(qm_cl(dqrr->ring, cfg));
+	cfg = (qm_in(CFG) & 0xff000f00) |
+		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+		((dmode & 1) << 18) |			/* DP */
+		((cmode & 3) << 16) |			/* DCM */
+#ifndef __rtems__
+		0xa0 |					/* RE+SE */
+#endif /* __rtems__ */
+		(0 ? 0x40 : 0) |			/* Ignore RP */
+		(0 ? 0x10 : 0);				/* Ignore SP */
+	qm_out(CFG, cfg);
+	qm_dqrr_set_maxfill(portal, max_fill);
+	return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if ((dqrr->cmode != qm_dqrr_cdc) &&
+			(dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+		pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+						struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (!dqrr->fill)
+		return NULL;
+	return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->fill);
+	dqrr->cursor = DQRR_INC(dqrr->cursor);
+	return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	u8 diff, old_pi = dqrr->pi;
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+	dqrr->fill += diff;
+	return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+	qm_cl_invalidate(DQRR_PI);
+	qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	u8 diff, old_pi = dqrr->pi;
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+	dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+	diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+	dqrr->fill += diff;
+	return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+	/* when accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads". */
+#ifdef __rtems__
+	dcbi(res);
+#endif /* __rtems__ */
+	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+		if (!dqrr->pi)
+			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+		dqrr->fill++;
+	}
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+	dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+	qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+	dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+	qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+	qm_cl_invalidate(DQRR_CI);
+	qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+	dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+	qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+	dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+	qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+					int park)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(DQRR_DCAP, (0 << 8) |	/* S */
+		((park ? 1 : 0) << 6) |	/* PK */
+		idx);			/* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+					const struct qm_dqrr_entry *dq,
+					int park)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+	u8 idx = DQRR_PTR2IDX(dq);
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPA_ASSERT((dqrr->ring + idx) == dq);
+	DPA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(DQRR_DCAP, (0 << 8) |		/* DQRR_DCAP::S */
+		((park ? 1 : 0) << 6) |		/* DQRR_DCAP::PK */
+		idx);				/* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_out(DQRR_DCAP, (1 << 8) |		/* DQRR_DCAP::S */
+		((u32)bitmask << 16));		/* DQRR_DCAP::DCAP_CI */
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_cl_invalidate(DQRR_CI);
+	qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+	return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+	qm_out(DQRR_DCAP, (0 << 8) |		/* S */
+		(1 << 6) |			/* PK */
+		(idx & (QM_DQRR_SIZE - 1)));	/* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+	qm_out(DQRR_DCAP, (0 << 8) |		/* S */
+		(1 << 6) |			/* PK */
+		DQRR_PTR2IDX(dqrr->cursor));	/* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+	qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+	return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+	qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+	return qm_in(DQRR_VDQCR);
+}
+
+static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
+{
+	qm_out(DQRR_PDQCR, pdqcr);
+}
+
+static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
+{
+	return qm_in(DQRR_PDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+	return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+	return MR_CARRYCLEAR(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+		enum qm_mr_cmode cmode)
+{
+	register struct qm_mr *mr = &portal->mr;
+	u32 cfg;
+
+	mr->ring = portal->addr.addr_ce + QM_CL_MR;
+	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+	mr->cursor = mr->ring + mr->ci;
+	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+	mr->ithresh = qm_in(MR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mr->pmode = pmode;
+	mr->cmode = cmode;
+#endif
+	cfg = (qm_in(CFG) & 0xfffff0ff) |
+		((cmode & 1) << 8);		/* QCSP_CFG:MM */
+	qm_out(CFG, cfg);
+	return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	if (mr->ci != MR_PTR2IDX(mr->cursor))
+		pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	if (!mr->fill)
+		return NULL;
+	return mr->cursor;
+}
+
+static inline u8 qm_mr_cursor(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	return MR_PTR2IDX(mr->cursor);
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->fill);
+	mr->cursor = MR_INC(mr->cursor);
+	return --mr->fill;
+}
+
+static inline u8 qm_mr_pci_update(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+	u8 diff, old_pi = mr->pi;
+
+	DPA_ASSERT(mr->pmode == qm_mr_pci);
+	mr->pi = qm_in(MR_PI_CINH);
+	diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+	mr->fill += diff;
+	return diff;
+}
+
+static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->pmode == qm_mr_pce);
+	qm_cl_invalidate(MR_PI);
+	qm_cl_touch_ro(MR_PI);
+}
+
+static inline u8 qm_mr_pce_update(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+	u8 diff, old_pi = mr->pi;
+
+	DPA_ASSERT(mr->pmode == qm_mr_pce);
+	mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
+	diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+	mr->fill += diff;
+	return diff;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+	DPA_ASSERT(mr->pmode == qm_mr_pvb);
+	/* when accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads". */
+	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+		if (!mr->pi)
+			mr->vbit ^= QM_MR_VERB_VBIT;
+		mr->fill++;
+		res = MR_INC(res);
+	}
+	dcbit_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = MR_PTR2IDX(mr->cursor);
+	qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cce);
+	qm_cl_invalidate(MR_CI);
+	qm_cl_touch_rw(MR_CI);
+}
+
+static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cce);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cce);
+	mr->ci = MR_PTR2IDX(mr->cursor);
+	qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline u8 qm_mr_get_ci(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	return mr->ci;
+}
+
+static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	return mr->ithresh;
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+	register struct qm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.addr_ce + QM_CL_CR;
+	mc->rr = portal->addr.addr_ce + QM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+			QM_MCC_VERB_VBIT) ?  0 : 1;
+	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (mc->state != qman_mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+	register struct qm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_user;
+#endif
+	dcbz_64(mc->cr);
+	return mc->cr;
+}
+
+static inline void qm_mc_abort(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == qman_mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_mc *mc = &portal->mc;
+	struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPA_ASSERT(mc->state == qman_mc_user);
+	lwsync();
+	mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+	dcbf(mc->cr);
+	dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+	register struct qm_mc *mc = &portal->mc;
+	struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPA_ASSERT(mc->state == qman_mc_hw);
+	/* The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering... */
+	if (!__raw_readb(&rr->verb)) {
+		dcbit_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return rr;
+}
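+/* Usage sketch: this is how the portal code later in this import drives the
+ * management command interface (see e.g. the QUERYFQ handling in
+ * qman_create_fq()) - fill in the command returned by qm_mc_start(), commit
+ * it with the desired verb, then poll qm_mc_result() until the response
+ * appears:
+ *
+ *	struct qm_mc_command *mcc = qm_mc_start(&p->p);
+ *	struct qm_mc_result *mcr;
+ *
+ *	mcc->queryfq.fqid = fqid;
+ *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ *	while (!(mcr = qm_mc_result(&p->p)))
+ *		cpu_relax();
+ */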
+
+/* --- Portal interrupt register API --- */
+
+static inline int qm_isr_init(__always_unused struct qm_portal *portal)
+{
+	return 0;
+}
+
+static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
+{
+}
+
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+	qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+	return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+					u32 val)
+{
+	__qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_api.c b/linux/drivers/soc/fsl/qbman/qman_api.c
new file mode 100644
index 0000000..e838d08
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_api.c
@@ -0,0 +1,3026 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman.h"
+
+/* Compilation constants */
+#define DQRR_MAXFILL	15
+#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
+#define IRQNAME		"QMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
+/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
+ * positive, and rounding to the closest value if it's zero. NB, this macro
+ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
+ * that are compatible with this. NB, these arguments should not be expressions
+ * unless it is safe for them to be evaluated multiple times. Eg. do not pass
+ * in "some_value++" as a parameter to the macro! */
+#define ROUNDING(n, d, r) \
+	(((r) < 0) ? div64_u64((n), (d)) : \
+	(((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
+	div64_u64(((n) + ((d) / 2)), (d))))
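+/* Worked example (illustrative only): ROUNDING(7, 2, -1) == 3 (round down),
+ * ROUNDING(7, 2, 1) == 4 (round up) and ROUNDING(7, 2, 0) == 4, since
+ * 7/2 = 3.5 rounds to the nearest value. */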
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...). */
+#define FQLOCK(fq) \
+	do { \
+		struct qman_fq *__fq478 = (fq); \
+		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+			spin_lock(&__fq478->fqlock); \
+	} while (0)
+#define FQUNLOCK(fq) \
+	do { \
+		struct qman_fq *__fq478 = (fq); \
+		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+			spin_unlock(&__fq478->fqlock); \
+	} while (0)
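+/* Typical pattern (see e.g. clear_vdqcr() further down): the per-FQ lock only
+ * brackets flag/state updates, with interrupts already masked by the caller
+ * or by interrupt context:
+ *
+ *	FQLOCK(fq);
+ *	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ *	FQUNLOCK(fq);
+ */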
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+	set_bits(mask, &fq->flags);
+}
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+	clear_bits(mask, &fq->flags);
+}
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+	return fq->flags & mask;
+}
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+	return !(fq->flags & mask);
+}
+
+struct qman_portal {
+	struct qm_portal p;
+	unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
+	unsigned long irq_sources;
+	u32 use_eqcr_ci_stashing;
+	u32 slowpoll;	/* only used when interrupts are off */
+	struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+	raw_spinlock_t sharing_lock; /* only used if is_shared */
+#ifndef __rtems__
+	int is_shared;
+	struct qman_portal *sharing_redirect;
+#endif /* __rtems__ */
+#endif
+	u32 sdqcr;
+	int dqrr_disable_ref;
+	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
+	 * handler is called instead. */
+	qman_cb_dc_ern cb_dc_ern;
+	/* When the cpu-affine portal is activated, this is non-NULL */
+	const struct qm_portal_config *config;
+#ifndef __rtems__
+	/* This is needed for providing a non-NULL device to dma_map_***() */
+	struct platform_device *pdev;
+#endif /* __rtems__ */
+	struct dpa_rbtree retire_table;
+	char irqname[MAX_IRQNAME];
+	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+	struct qman_cgrs *cgrs;
+	/* linked-list of CSCN handlers. */
+	struct list_head cgr_cbs;
+	/* list lock */
+	spinlock_t cgr_lock;
+	/* track if memory was allocated by the driver */
+	u8 alloced;
+};
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+		else \
+			local_irq_save(irqflags); \
+	} while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+						   irqflags); \
+		else \
+			local_irq_restore(irqflags); \
+	} while (0)
+#else /* __rtems__ */
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+    raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+    raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
+#endif /* __rtems__ */
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
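+/* Callers follow the same pattern throughout this file (see e.g.
+ * qman_stop_dequeues_ex() below): declare the flags word locally and bracket
+ * the portal access with the lock/unlock pair:
+ *
+ *	unsigned long irqflags __maybe_unused;
+ *
+ *	PORTAL_IRQ_LOCK(p, irqflags);
+ *	... touch portal state ...
+ *	PORTAL_IRQ_UNLOCK(p, irqflags);
+ */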
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler. */
+static qman_cb_dc_ern cb_dc_ern;
+
+#ifndef __rtems__
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+#endif /* __rtems__ */
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+#ifndef __rtems__
+void *affine_portals[NR_CPUS];
+#endif /* __rtems__ */
+
+/* "raw" gets the cpu-local struct whether it's a redirect or not. */
+static inline struct qman_portal *get_raw_affine_portal(void)
+{
+	return &get_cpu_var(qman_affine_portal);
+}
+/* For ops that can redirect, this obtains the portal to use */
+#ifdef FSL_DPA_PORTAL_SHARE
+static inline struct qman_portal *get_affine_portal(void)
+{
+	struct qman_portal *p = get_raw_affine_portal();
+
+#ifndef __rtems__
+	if (p->sharing_redirect)
+		return p->sharing_redirect;
+#endif /* __rtems__ */
+	return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+/* For every "get", there must be a "put" */
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(qman_affine_portal);
+}
+/* Exception: poll functions assume the caller is cpu-affine and at no risk of
+ * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
+ * semantics - ie. to disable pre-emption. Some use-cases expect the execution
+ * context to remain as non-atomic during poll-triggered callbacks as it was
+ * when the poll API was first called (eg. NAPI), so we go out of our way in
+ * this case to not disable pre-emption. */
+static inline struct qman_portal *get_poll_portal(void)
+{
+	return this_cpu_ptr(&qman_affine_portal);
+}
+#define put_poll_portal()
+
+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table. */
+IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
+
+/* This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on. */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+	int ret = fqtree_push(&p->retire_table, fq);
+
+	if (ret)
+		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+	return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+	fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+	return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+	num_entries++;
+	/* Allocate 1 more entry since the first entry is not used */
+	qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
+	if (!qman_fq_lookup_table)
+		return -ENOMEM;
+	qman_fq_lookup_table_size = num_entries;
+	pr_info("Allocated lookup table at %p, entry count %lu\n",
+		qman_fq_lookup_table, (unsigned long)qman_fq_lookup_table_size);
+	return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+	u32 i;
+
+	spin_lock(&fq_hash_table_lock);
+	/* Can't use index zero because it has a special meaning
+	 * in the context_b field. */
+	for (i = 1; i < qman_fq_lookup_table_size; i++) {
+		if (qman_fq_lookup_table[i] == NULL) {
+			*entry = i;
+			qman_fq_lookup_table[i] = fq;
+			spin_unlock(&fq_hash_table_lock);
+			return 0;
+		}
+	}
+	spin_unlock(&fq_hash_table_lock);
+	return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+	spin_lock(&fq_hash_table_lock);
+	BUG_ON(entry >= qman_fq_lookup_table_size);
+	qman_fq_lookup_table[entry] = NULL;
+	spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+	BUG_ON(entry >= qman_fq_lookup_table_size);
+	return qman_fq_lookup_table[entry];
+}
+#endif
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do. */
+#define SLOW_POLL_IDLE	 1000
+#define SLOW_POLL_BUSY	 10
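+/* The balancing itself lives in qman_p_poll() further down; in sketch form:
+ *
+ *	if (!(p->slowpoll--)) {
+ *		... one slow poll; reload p->slowpoll with SLOW_POLL_BUSY if
+ *		    it found work, SLOW_POLL_IDLE otherwise ...
+ *	}
+ *	... then fast-poll up to QMAN_POLL_LIMIT DQRR entries ...
+ */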
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+	struct qman_portal *p = ptr;
+	/*
+	 * The CSCI source is cleared inside __poll_portal_slow(), because
+	 * it could race against a Query Congestion State command also given
+	 * as part of the handling of this interrupt source. We mustn't
+	 * clear it a second time in this top-level function.
+	 */
+	u32 clear = QM_DQAVAIL_MASK;
+	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+	/* DQRR-handling if it's interrupt-driven */
+	if (is & QM_PIRQ_DQRI) {
+		clear |= QM_PIRQ_DQRI;
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+	}
+	/* Handling of anything else that's interrupt-driven */
+	clear |= __poll_portal_slow(p, is);
+	qm_isr_status_clear(&p->p, clear);
+	return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues(). */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+	unsigned long irqflags __maybe_unused;
+	PORTAL_IRQ_LOCK(p, irqflags);
+	if (!(p->dqrr_disable_ref++))
+		qm_dqrr_set_maxfill(&p->p, 0);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+	const struct qm_mr_entry *msg;
+loop:
+	msg = qm_mr_current(p);
+	if (!msg) {
+		/* if MR was full and h/w had other FQRNI entries to produce, we
+		 * need to allow it time to produce those entries once the
+		 * existing entries are consumed. A worst-case situation
+		 * (fully-loaded system) means h/w sequencers may have to do 3-4
+		 * other things before servicing the portal's MR pump, each of
+		 * which (if slow) may take ~50 qman cycles (which is ~200
+		 * processor cycles). So rounding up and then multiplying this
+		 * worst-case estimate by a factor of 10, just to be
+		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+		 * one entry at a time, so h/w has an opportunity to produce new
+		 * entries well before the ring has been fully consumed, so
+		 * we're being *really* paranoid here. */
+		u64 now, then = mfatb();
+
+		do {
+			now = mfatb();
+		} while ((then + 10000) > now);
+		msg = qm_mr_current(p);
+		if (!msg)
+			return 0;
+	}
+	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+		/* We aren't draining anything but FQRNIs */
+		pr_err("Found verb 0x%x in MR\n", msg->verb);
+		return -1;
+	}
+	qm_mr_next(p);
+	qm_mr_cci_consume(p, 1);
+	goto loop;
+}
+
+struct qman_portal *qman_create_portal(
+			struct qman_portal *portal,
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs)
+{
+	struct qm_portal *__p;
+#ifndef __rtems__
+	char buf[16];
+#endif /* __rtems__ */
+	int ret;
+	u32 isdr;
+
+	if (!portal) {
+		portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+		if (!portal)
+			return portal;
+		portal->alloced = 1;
+	} else
+		portal->alloced = 0;
+
+	__p = &portal->p;
+
+#ifndef __rtems__
+	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
+								1 : 0);
+#else /* __rtems__ */
+	portal->use_eqcr_ci_stashing = 0;
+#endif /* __rtems__ */
+
+	/* prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference... */
+	__p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+	__p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+	 */
+	if (qm_eqcr_init(__p, qm_eqcr_pvb,
+			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+		pr_err("EQCR initialisation failed\n");
+		goto fail_eqcr;
+	}
+	if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
+			qm_dqrr_cdc, DQRR_MAXFILL)) {
+		pr_err("DQRR initialisation failed\n");
+		goto fail_dqrr;
+	}
+	if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
+		pr_err("MR initialisation failed\n");
+		goto fail_mr;
+	}
+	if (qm_mc_init(__p)) {
+		pr_err("MC initialisation failed\n");
+		goto fail_mc;
+	}
+	if (qm_isr_init(__p)) {
+		pr_err("ISR initialisation failed\n");
+		goto fail_isr;
+	}
+	/* static interrupt-gating controls */
+	qm_dqrr_set_ithresh(__p, QMAN_PIRQ_DQRR_ITHRESH);
+	qm_mr_set_ithresh(__p, QMAN_PIRQ_MR_ITHRESH);
+	qm_isr_set_iperiod(__p, QMAN_PIRQ_IPERIOD);
+	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+	if (!portal->cgrs)
+		goto fail_cgrs;
+	/* initial snapshot is no-depletion */
+	qman_cgrs_init(&portal->cgrs[1]);
+	if (cgrs)
+		portal->cgrs[0] = *cgrs;
+	else
+		/* if the given mask is NULL, assume all CGRs can be seen */
+		qman_cgrs_fill(&portal->cgrs[0]);
+	INIT_LIST_HEAD(&portal->cgr_cbs);
+	spin_lock_init(&portal->cgr_lock);
+	portal->bits = 0;
+	portal->slowpoll = 0;
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	portal->eqci_owned = NULL;
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+	raw_spin_lock_init(&portal->sharing_lock);
+#ifndef __rtems__
+	portal->is_shared = config->public_cfg.is_shared;
+	portal->sharing_redirect = NULL;
+#endif /* __rtems__ */
+#endif
+	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+	portal->dqrr_disable_ref = 0;
+	portal->cb_dc_ern = NULL;
+#ifndef __rtems__
+	sprintf(buf, "qportal-%d", config->public_cfg.channel);
+	portal->pdev = platform_device_alloc(buf, -1);
+	if (!portal->pdev)
+		goto fail_devalloc;
+	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+		goto fail_devadd;
+	ret = platform_device_add(portal->pdev);
+	if (ret)
+		goto fail_devadd;
+#endif /* __rtems__ */
+	dpa_rbtree_init(&portal->retire_table);
+	isdr = 0xffffffff;
+	qm_isr_disable_write(__p, isdr);
+	portal->irq_sources = 0;
+	qm_isr_enable_write(__p, portal->irq_sources);
+	qm_isr_status_clear(__p, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+	if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+				portal)) {
+		pr_err("request_irq() failed\n");
+		goto fail_irq;
+	}
+#ifndef __rtems__
+	if ((config->public_cfg.cpu != -1) &&
+			irq_can_set_affinity(config->public_cfg.irq) &&
+			irq_set_affinity(config->public_cfg.irq,
+				cpumask_of(config->public_cfg.cpu))) {
+		pr_err("irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+#endif /* __rtems__ */
+
+	/* Need EQCR to be empty before continuing */
+	isdr ^= QM_PIRQ_EQCI;
+	qm_isr_disable_write(__p, isdr);
+	ret = qm_eqcr_get_fill(__p);
+	if (ret) {
+		pr_err("EQCR unclean\n");
+		goto fail_eqcr_empty;
+	}
+	isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
+	qm_isr_disable_write(__p, isdr);
+	if (qm_dqrr_current(__p) != NULL) {
+		pr_err("DQRR unclean\n");
+		qm_dqrr_cdc_consume_n(__p, 0xffff);
+	}
+	if (qm_mr_current(__p) != NULL) {
+		/* special handling, drain just in case it's a few FQRNIs */
+		if (drain_mr_fqrni(__p)) {
+			const struct qm_mr_entry *e = qm_mr_current(__p);
+
+			pr_err("MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
+			       e->verb, e->ern.rc, e->ern.fd.addr_lo);
+			goto fail_dqrr_mr_empty;
+		}
+	}
+	/* Success */
+	portal->config = config;
+	qm_isr_disable_write(__p, 0);
+	qm_isr_uninhibit(__p);
+	/* Write a sane SDQCR */
+	qm_dqrr_sdqcr_set(__p, portal->sdqcr);
+	return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+#ifndef __rtems__
+fail_affinity:
+#endif /* __rtems__ */
+	free_irq(config->public_cfg.irq, portal);
+fail_irq:
+#ifndef __rtems__
+	platform_device_del(portal->pdev);
+fail_devadd:
+	platform_device_put(portal->pdev);
+fail_devalloc:
+#endif /* __rtems__ */
+	kfree(portal->cgrs);
+fail_cgrs:
+	qm_isr_finish(__p);
+fail_isr:
+	qm_mc_finish(__p);
+fail_mc:
+	qm_mr_finish(__p);
+fail_mr:
+	qm_dqrr_finish(__p);
+fail_dqrr:
+	qm_eqcr_finish(__p);
+fail_eqcr:
+	return NULL;
+}
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs)
+{
+	struct qman_portal *res;
+	struct qman_portal *portal;
+
+	portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
+	res = qman_create_portal(portal, config, cgrs);
+	if (res) {
+#ifndef __rtems__
+		spin_lock(&affine_mask_lock);
+		cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+		affine_channels[config->public_cfg.cpu] =
+			config->public_cfg.channel;
+		affine_portals[config->public_cfg.cpu] = portal;
+		spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+	}
+	return res;
+}
+
+#ifndef __rtems__
+/* These checks are BUG_ON()s because the driver is already supposed to avoid
+ * these cases. */
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+								int cpu)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+	struct qman_portal *p = &per_cpu(qman_affine_portal, cpu);
+
+	/* Check that we don't already have our own portal */
+	BUG_ON(p->config);
+	/* Check that we aren't already slaving to another portal */
+	BUG_ON(p->is_shared);
+	/* Check that 'redirect' is prepared to have us */
+	BUG_ON(!redirect->config->public_cfg.is_shared);
+	/* These are the only elements to initialise when redirecting */
+	p->irq_sources = 0;
+	p->sharing_redirect = redirect;
+	affine_portals[cpu] = p;
+	return p;
+#else
+	BUG();
+	return NULL;
+#endif
+}
+#endif /* __rtems__ */
+
+void qman_destroy_portal(struct qman_portal *qm)
+{
+	const struct qm_portal_config *pcfg;
+
+	/* Stop dequeues on the portal */
+	qm_dqrr_sdqcr_set(&qm->p, 0);
+
+	/* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+	 * something related to QM_PIRQ_EQCI, this may need fixing.
+	 * Also, due to the prefetching model used for CI updates in the enqueue
+	 * path, this update will only invalidate the CI cacheline *after*
+	 * working on it, so we need to call this twice to ensure a full update
+	 * irrespective of where the enqueue processing was at when the teardown
+	 * began. */
+	qm_eqcr_cce_update(&qm->p);
+	qm_eqcr_cce_update(&qm->p);
+	pcfg = qm->config;
+
+	free_irq(pcfg->public_cfg.irq, qm);
+
+	kfree(qm->cgrs);
+	qm_isr_finish(&qm->p);
+	qm_mc_finish(&qm->p);
+	qm_mr_finish(&qm->p);
+	qm_dqrr_finish(&qm->p);
+	qm_eqcr_finish(&qm->p);
+
+#ifndef __rtems__
+	platform_device_del(qm->pdev);
+	platform_device_put(qm->pdev);
+#endif /* __rtems__ */
+
+	qm->config = NULL;
+	if (qm->alloced)
+		kfree(qm);
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+	/* We don't want to redirect if we're a slave, use "raw" */
+	struct qman_portal *qm = get_raw_affine_portal();
+	const struct qm_portal_config *pcfg;
+#ifndef __rtems__
+	int cpu;
+#endif /* __rtems__ */
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (qm->sharing_redirect) {
+		qm->sharing_redirect = NULL;
+		put_affine_portal();
+		return NULL;
+	}
+	qm->is_shared = 0;
+#endif /* __rtems__ */
+#endif
+	pcfg = qm->config;
+#ifndef __rtems__
+	cpu = pcfg->public_cfg.cpu;
+#endif /* __rtems__ */
+
+	qman_destroy_portal(qm);
+
+#ifndef __rtems__
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+#endif /* __rtems__ */
+	put_affine_portal();
+	return pcfg;
+}
+
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
+{
+	return &p->config->public_cfg;
+}
+EXPORT_SYMBOL(qman_p_get_portal_config);
+
+const struct qman_portal_config *qman_get_portal_config(void)
+{
+	struct qman_portal *p = get_affine_portal();
+	const struct qman_portal_config *ret = qman_p_get_portal_config(p);
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_get_portal_config);
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_mr_entry *msg, u8 verb)
+{
+	FQLOCK(fq);
+	switch (verb) {
+	case QM_MR_VERB_FQRL:
+		DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+		fq_clear(fq, QMAN_FQ_STATE_ORL);
+		table_del_fq(p, fq);
+		break;
+	case QM_MR_VERB_FQRN:
+		DPA_ASSERT((fq->state == qman_fq_state_parked) ||
+			(fq->state == qman_fq_state_sched));
+		DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		else
+			table_del_fq(p, fq);
+		fq->state = qman_fq_state_retired;
+		break;
+	case QM_MR_VERB_FQPN:
+		DPA_ASSERT(fq->state == qman_fq_state_sched);
+		DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+		fq->state = qman_fq_state_parked;
+	}
+	FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+	const struct qm_mr_entry *msg;
+
+	if (is & QM_PIRQ_CSCI) {
+		struct qman_cgrs rr, c;
+		struct qm_mc_result *mcr;
+		struct qman_cgr *cgr;
+		unsigned long irqflags __maybe_unused;
+
+		spin_lock_irqsave(&p->cgr_lock, irqflags);
+		/*
+		 * The CSCI bit must be cleared _before_ issuing the
+		 * Query Congestion State command, to ensure that a long
+		 * CGR State Change callback cannot miss an intervening
+		 * state change.
+		 */
+		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+		qm_mc_start(&p->p);
+		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+		while (!(mcr = qm_mc_result(&p->p)))
+			cpu_relax();
+		/* mask out the ones I'm not interested in */
+		qman_cgrs_and(&rr, (const struct qman_cgrs *)
+			&mcr->querycongestion.state, &p->cgrs[0]);
+		/* check previous snapshot for delta, enter/exit congestion */
+		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+		/* update snapshot */
+		qman_cgrs_cp(&p->cgrs[1], &rr);
+		/* Invoke callback */
+		list_for_each_entry(cgr, &p->cgr_cbs, node)
+			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+		spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	}
+
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (is & QM_PIRQ_EQCI) {
+		unsigned long irqflags;
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		p->eqci_owned = NULL;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+	}
+#endif
+
+	if (is & QM_PIRQ_EQRI) {
+		unsigned long irqflags __maybe_unused;
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		struct qman_fq *fq;
+		u8 verb, num = 0;
+mr_loop:
+		qm_mr_pvb_update(&p->p);
+		msg = qm_mr_current(&p->p);
+		if (!msg)
+			goto mr_done;
+		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+		if (verb & 0x20) {
+			switch (verb) {
+			case QM_MR_VERB_FQRNI:
+				/* nada, we drop FQRNIs on the floor */
+				break;
+			case QM_MR_VERB_FQRN:
+			case QM_MR_VERB_FQRL:
+				/* Lookup in the retirement table */
+				fq = table_find_fq(p, msg->fq.fqid);
+				BUG_ON(!fq);
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_FQPN:
+				/* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+				fq = get_fq_table_entry(msg->fq.contextB);
+#else
+				fq = (void *)(uintptr_t)msg->fq.contextB;
+#endif
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				if (p->cb_dc_ern)
+					p->cb_dc_ern(p, msg);
+				else if (cb_dc_ern)
+					cb_dc_ern(p, msg);
+				else
+					pr_crit_once("Leaking DCP ERNs!\n");
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+			fq = get_fq_table_entry(msg->ern.tag);
+#else
+			fq = (void *)(uintptr_t)msg->ern.tag;
+#endif
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+		goto mr_loop;
+mr_done:
+		qm_mr_cci_consume(&p->p, num);
+	}
+	/*
+	 * QM_PIRQ_CSCI has already been cleared, as part of its specific
+	 * processing. If that interrupt source has meanwhile been re-asserted,
+	 * we mustn't clear it here (or in the top-level interrupt handler).
+	 */
+	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/* remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined. */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	FQLOCK(fq);
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	FQUNLOCK(fq);
+	wake_up(&affine_queue);
+}
+
+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
+ * that would conflict with other things if they ran at the same time on the
+ * same cpu are;
+ *
+ *   (i) setting/clearing vdqcr_owned, and
+ *  (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe. Because;
+ *
+ *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *	 vdqcr_owned field (which it does before setting VDQCR), and
+ *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *	 done so that we can't interfere.
+ *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *	 with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API *except* qman_poll() (as that's the
+ * sole API that could be invoking the callback through this function).
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+
+loop:
+	qm_dqrr_pvb_update(&p->p);
+	dq = qm_dqrr_current(&p->p);
+	if (!dq)
+		goto done;
+	if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+		/* VDQCR: don't trust contextB as the FQ may have been
+		 * configured for h/w consumption and we're draining it
+		 * post-retirement. */
+		fq = p->vdqcr_owned;
+		/* We only set QMAN_FQ_STATE_NE when retiring, so we only need
+		 * to check for clearing it when doing volatile dequeues. It's
+		 * one less thing to check in the critical path (SDQCR). */
+		if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+			fq_clear(fq, QMAN_FQ_STATE_NE);
+		/* this is duplicated from the SDQCR code, but we have stuff to
+		 * do before *and* after this callback, and we don't want
+		 * multiple if()s in the critical path (SDQCR). */
+		res = fq->cb.dqrr(p, fq, dq);
+		if (res == qman_cb_dqrr_stop)
+			goto done;
+		/* Check for VDQCR completion */
+		if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+			clear_vdqcr(p, fq);
+	} else {
+		/* SDQCR: contextB points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		fq = get_fq_table_entry(dq->contextB);
+#else
+		fq = (void *)(uintptr_t)dq->contextB;
+#endif
+		/* Now let the callback do its stuff */
+		res = fq->cb.dqrr(p, fq, dq);
+		/* The callback can request that we exit without consuming this
+		 * entry or advancing. */
+		if (res == qman_cb_dqrr_stop)
+			goto done;
+	}
+	/* Interpret 'dq' from a driver perspective. */
+	/* Parking isn't possible unless HELDACTIVE was set. NB,
+	 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+	 * check for HELDACTIVE to cover both. */
+	DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+		(res != qman_cb_dqrr_park));
+	/* Defer just means "skip it, I'll consume it myself later on" */
+	if (res != qman_cb_dqrr_defer)
+		qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
+	/* Move forward */
+	qm_dqrr_next(&p->p);
+	/* Entry processed and consumed, increment our counter. The callback can
+	 * request that we exit after consuming the entry, and we also exit if
+	 * we reach our processing limit, so loop back only if neither of these
+	 * conditions is met. */
+	if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
+		goto loop;
+done:
+	return limit;
+}
+
+u32 qman_irqsource_get(void)
+{
+	/* "irqsource" and "poll" APIs mustn't redirect when sharing, they
+	 * should shut the user out if they are not the primary CPU hosting the
+	 * portal. That's why we use the "raw" interface. */
+	struct qman_portal *p = get_raw_affine_portal();
+	u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_get);
+
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
+{
+	__maybe_unused unsigned long irqflags;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (p->sharing_redirect)
+		return -EINVAL;
+#endif /* __rtems__ */
+#endif
+	PORTAL_IRQ_LOCK(p, irqflags);
+	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+	qm_isr_enable_write(&p->p, p->irq_sources);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+int qman_irqsource_add(u32 bits __maybe_unused)
+{
+	struct qman_portal *p = get_raw_affine_portal();
+	int ret;
+
+	ret = qman_p_irqsource_add(p, bits);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_add);
+
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+	__maybe_unused unsigned long irqflags;
+	u32 ier;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (p->sharing_redirect) {
+		put_affine_portal();
+		return -EINVAL;
+	}
+#endif /* __rtems__ */
+#endif
+	/* Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert. */
+	PORTAL_IRQ_LOCK(p, irqflags);
+	bits &= QM_PIRQ_VISIBLE;
+	clear_bits(bits, &p->irq_sources);
+	qm_isr_enable_write(&p->p, p->irq_sources);
+	ier = qm_isr_enable_read(&p->p);
+	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering. */
+	qm_isr_status_clear(&p->p, ~ier);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+int qman_irqsource_remove(u32 bits)
+{
+	struct qman_portal *p = get_raw_affine_portal();
+	int ret;
+
+	ret = qman_p_irqsource_remove(p, bits);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_remove);
+
+#ifndef __rtems__
+const cpumask_t *qman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+	if (cpu < 0) {
+		struct qman_portal *portal = get_raw_affine_portal();
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+		BUG_ON(portal->sharing_redirect);
+#endif /* __rtems__ */
+#endif
+		cpu = portal->config->public_cfg.cpu;
+		put_affine_portal();
+	}
+	BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
+	return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+#endif /* __rtems__ */
+
+void *qman_get_affine_portal(int cpu)
+{
+#ifndef __rtems__
+	return affine_portals[cpu];
+#else /* __rtems__ */
+	return &per_cpu(qman_affine_portal, cpu);
+#endif /* __rtems__ */
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+	int ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (unlikely(p->sharing_redirect))
+		ret = -EINVAL;
+	else
+#endif /* __rtems__ */
+#endif
+	{
+		BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
+		ret = __poll_portal_fast(p, limit);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+int qman_poll_dqrr(unsigned int limit)
+{
+	struct qman_portal *p = get_poll_portal();
+	int ret;
+
+	ret = qman_p_poll_dqrr(p, limit);
+	put_poll_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_poll_dqrr);
+
+u32 qman_p_poll_slow(struct qman_portal *p)
+{
+	u32 ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (unlikely(p->sharing_redirect))
+		ret = (u32)-1;
+	else
+#endif /* __rtems__ */
+#endif
+	{
+		u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+
+		ret = __poll_portal_slow(p, is);
+		qm_isr_status_clear(&p->p, ret);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_slow);
+
+u32 qman_poll_slow(void)
+{
+	struct qman_portal *p = get_poll_portal();
+	u32 ret;
+
+	ret = qman_p_poll_slow(p);
+	put_poll_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_poll_slow);
+
+/* Legacy wrapper */
+void qman_p_poll(struct qman_portal *p)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+#ifndef __rtems__
+	if (unlikely(p->sharing_redirect))
+		return;
+#endif /* __rtems__ */
+#endif
+	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+		if (!(p->slowpoll--)) {
+			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+			u32 active = __poll_portal_slow(p, is);
+
+			if (active) {
+				qm_isr_status_clear(&p->p, active);
+				p->slowpoll = SLOW_POLL_BUSY;
+			} else
+				p->slowpoll = SLOW_POLL_IDLE;
+		}
+	}
+	if ((~p->irq_sources) & QM_PIRQ_DQRI)
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+}
+EXPORT_SYMBOL(qman_p_poll);
+
+void qman_poll(void)
+{
+	struct qman_portal *p = get_poll_portal();
+
+	qman_p_poll(p);
+	put_poll_portal();
+}
+EXPORT_SYMBOL(qman_poll);
+
+void qman_p_stop_dequeues(struct qman_portal *p)
+{
+	qman_stop_dequeues_ex(p);
+}
+EXPORT_SYMBOL(qman_p_stop_dequeues);
+
+void qman_stop_dequeues(void)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_stop_dequeues(p);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_stop_dequeues);
+
+void qman_p_start_dequeues(struct qman_portal *p)
+{
+	unsigned long irqflags __maybe_unused;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	DPA_ASSERT(p->dqrr_disable_ref > 0);
+	if (!(--p->dqrr_disable_ref))
+		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_start_dequeues);
+
+void qman_start_dequeues(void)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_start_dequeues(p);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_start_dequeues);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags __maybe_unused;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	pools &= p->config->public_cfg.pools;
+	p->sdqcr |= pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+void qman_static_dequeue_add(u32 pools)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_static_dequeue_add(p, pools);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_add);
+
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags __maybe_unused;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	pools &= p->config->public_cfg.pools;
+	p->sdqcr &= ~pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_del);
+
+void qman_static_dequeue_del(u32 pools)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_static_dequeue_del(p, pools);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_del);
+
+u32 qman_p_static_dequeue_get(struct qman_portal *p)
+{
+	return p->sdqcr;
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_get);
+
+u32 qman_static_dequeue_get(void)
+{
+	struct qman_portal *p = get_affine_portal();
+	u32 ret = qman_p_static_dequeue_get(p);
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_static_dequeue_get);
+
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+						int park_request)
+{
+	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+EXPORT_SYMBOL(qman_p_dca);
+
+void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_dca(p, dq, park_request);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_dca);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+	switch (result) {
+	case QM_MCR_RESULT_NULL:
+		return "QM_MCR_RESULT_NULL";
+	case QM_MCR_RESULT_OK:
+		return "QM_MCR_RESULT_OK";
+	case QM_MCR_RESULT_ERR_FQID:
+		return "QM_MCR_RESULT_ERR_FQID";
+	case QM_MCR_RESULT_ERR_FQSTATE:
+		return "QM_MCR_RESULT_ERR_FQSTATE";
+	case QM_MCR_RESULT_ERR_NOTEMPTY:
+		return "QM_MCR_RESULT_ERR_NOTEMPTY";
+	case QM_MCR_RESULT_PENDING:
+		return "QM_MCR_RESULT_PENDING";
+	case QM_MCR_RESULT_ERR_BADCOMMAND:
+		return "QM_MCR_RESULT_ERR_BADCOMMAND";
+	}
+	return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+	struct qm_fqd fqd;
+	struct qm_mcr_queryfq_np np;
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+		int ret = qman_alloc_fqid(&fqid);
+
+		if (ret)
+			return ret;
+	}
+	spin_lock_init(&fq->fqlock);
+	fq->fqid = fqid;
+	fq->flags = flags;
+	fq->state = qman_fq_state_oos;
+	fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
+		return -ENOMEM;
+#endif
+	if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+		return 0;
+	/* Everything else is AS_IS support */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+		goto err;
+	}
+	fqd = mcr->queryfq.fqd;
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq_np.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+		goto err;
+	}
+	np = mcr->queryfq_np;
+	/* Phew, have queryfq and queryfq_np results, stitch together
+	 * the FQ object from those. */
+	fq->cgr_groupid = fqd.cgid;
+	switch (np.state & QM_MCR_NP_STATE_MASK) {
+	case QM_MCR_NP_STATE_OOS:
+		break;
+	case QM_MCR_NP_STATE_RETIRED:
+		fq->state = qman_fq_state_retired;
+		if (np.frm_cnt)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		break;
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+		fq->state = qman_fq_state_sched;
+		if (np.state & QM_MCR_NP_STATE_R)
+			fq_set(fq, QMAN_FQ_STATE_CHANGING);
+		break;
+	case QM_MCR_NP_STATE_PARKED:
+		fq->state = qman_fq_state_parked;
+		break;
+	default:
+		DPA_ASSERT(NULL == "invalid FQ state");
+	}
+	if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+		fq->state |= QMAN_FQ_STATE_CGR_EN;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+err:
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+		qman_release_fqid(fqid);
+	return -EIO;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+
+	/* We don't need to lock the FQ as it is a pre-condition that the FQ be
+	 * quiesced. Instead, run some checks. */
+	switch (fq->state) {
+	case qman_fq_state_parked:
+		DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
+	case qman_fq_state_oos:
+		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+			qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		clear_fq_table_entry(fq->key);
+#endif
+		return;
+	default:
+		break;
+	}
+	DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+	return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+	if (state)
+		*state = fq->state;
+	if (flags)
+		*flags = fq->flags;
+}
+EXPORT_SYMBOL(qman_fq_state);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+		QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+	if ((fq->state != qman_fq_state_oos) &&
+			(fq->state != qman_fq_state_parked))
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+		/* OAC can't be set at the same time as TDTHRESH */
+		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+			return -EINVAL;
+	}
+	/* Issue an INITFQ_[PARKED|SCHED] management command */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			((fq->state != qman_fq_state_oos) &&
+				(fq->state != qman_fq_state_parked)))) {
+		FQUNLOCK(fq);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+		return -EBUSY;
+	}
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initfq = *opts;
+	mcc->initfq.fqid = fq->fqid;
+	mcc->initfq.count = 0;
+	/* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * demux pointer. Otherwise, the caller-provided value is allowed to
+	 * stand, don't overwrite it. */
+	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+		dma_addr_t phys_fq;
+
+		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		mcc->initfq.fqd.context_b = fq->key;
+#else
+		mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+		/* and the physical address - NB, if the user wasn't trying to
+		 * set CONTEXTA, clear the stashing settings. */
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+			memset(&mcc->initfq.fqd.context_a, 0,
+				sizeof(mcc->initfq.fqd.context_a));
+		} else {
+#ifndef __rtems__
+			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+						DMA_TO_DEVICE);
+#else /* __rtems__ */
+			phys_fq = (dma_addr_t)fq;
+#endif /* __rtems__ */
+			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+		}
+	}
+	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+		mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+			mcc->initfq.fqd.dest.wq = 4;
+		}
+	}
+	qm_mc_commit(&p->p, myverb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		FQUNLOCK(fq);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+		return -EIO;
+	}
+	if (opts) {
+		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+			else
+				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+		}
+		if (opts->we_mask & QM_INITFQ_WE_CGID)
+			fq->cgr_groupid = opts->fqd.cgid;
+	}
+	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+			qman_fq_state_sched : qman_fq_state_parked;
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(qman_init_fq);
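+
+/*
+ * Illustrative sketch only (not part of the imported sources): a minimal
+ * caller-side FQ lifecycle using the API above might look roughly like the
+ * following (callback set-up in fq.cb, blocking flags and error handling are
+ * omitted for brevity):
+ *
+ *	struct qman_fq fq;
+ *	struct qm_fd fd;
+ *
+ *	memset(&fq, 0, sizeof(fq));
+ *	qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
+ *	qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+ *	... fill in 'fd' ...
+ *	qman_enqueue(&fq, &fd, 0);
+ *	...
+ *	qman_retire_fq(&fq, NULL);
+ *	qman_oos_fq(&fq);
+ *	qman_destroy_fq(&fq, 0);
+ */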
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret = 0;
+	u8 res;
+
+	if (fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	/* Issue an ALTERFQ_SCHED management command */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			(fq->state != qman_fq_state_parked))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_sched;
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
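+/* Retire a parked or scheduled FQ. Returns 0 if retirement completed
+ * immediately, 1 if it is pending (QMAN_FQ_STATE_CHANGING is set and the FQRN
+ * message will complete the transition), or a negative errno on failure. */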
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int rval;
+	u8 res;
+
+	if ((fq->state != qman_fq_state_parked) &&
+			(fq->state != qman_fq_state_sched))
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			(fq->state == qman_fq_state_retired) ||
+				(fq->state == qman_fq_state_oos))) {
+		rval = -EBUSY;
+		goto out;
+	}
+	rval = table_push_fq(p, fq);
+	if (rval)
+		goto out;
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+	res = mcr->result;
+	/* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
+	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
+	 * friendly, otherwise the caller doesn't necessarily have a fully
+	 * "retired" FQ on return even if the retirement was immediate. However
+	 * this does mean some code duplication between here and
+	 * fq_state_change(). */
+	if (likely(res == QM_MCR_RESULT_OK)) {
+		rval = 0;
+		/* Process 'fq' right away, we'll ignore FQRNI */
+		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		else
+			table_del_fq(p, fq);
+		if (flags)
+			*flags = fq->flags;
+		fq->state = qman_fq_state_retired;
+		if (fq->cb.fqs) {
+			/* Another issue with supporting "immediate" retirement
+			 * is that we're forced to drop FQRNIs, because by the
+			 * time they're seen it may already be "too late" (the
+			 * fq may have been OOS'd and free()'d already). But if
+			 * the upper layer wants a callback whether it's
+			 * immediate or not, we have to fake a "MR" entry to
+			 * look like an FQRNI... */
+			struct qm_mr_entry msg;
+
+			msg.verb = QM_MR_VERB_FQRNI;
+			msg.fq.fqs = mcr->alterfq.fqs;
+			msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+			msg.fq.contextB = fq->key;
+#else
+			msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+			fq->cb.fqs(p, fq, &msg);
+		}
+	} else if (res == QM_MCR_RESULT_PENDING) {
+		rval = 1;
+		fq_set(fq, QMAN_FQ_STATE_CHANGING);
+	} else {
+		rval = -EIO;
+		table_del_fq(p, fq);
+	}
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return rval;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret = 0;
+	u8 res;
+
+	if (fq->state != qman_fq_state_retired)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+			(fq->state != qman_fq_state_retired))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_oos;
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret = 0;
+	u8 res;
+	u8 myverb;
+
+	if ((fq->state == qman_fq_state_oos) ||
+		(fq->state == qman_fq_state_retired) ||
+		(fq->state == qman_fq_state_parked))
+		return -EINVAL;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			(fq->state == qman_fq_state_parked) ||
+			(fq->state == qman_fq_state_oos) ||
+			(fq->state == qman_fq_state_retired))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	mcc->alterfq.count = 0;
+	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+	qm_mc_commit(&p->p, myverb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_fq_flow_control);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*fqd = mcr->queryfq.fqd;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK)
+		return -EIO;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_fq);
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*np = mcr->queryfq_np;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res == QM_MCR_RESULT_ERR_FQID)
+		return -ERANGE;
+	else if (res != QM_MCR_RESULT_OK)
+		return -EIO;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res, myverb;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+				 QM_MCR_VERB_QUERYWQ;
+	mcc = qm_mc_start(&p->p);
+	mcc->querywq.channel.id = wq->channel.id;
+	qm_mc_commit(&p->p, myverb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*wq = mcr->querywq;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_wq);
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+			struct qm_mcr_cgrtestwrite *result)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->cgrtestwrite.cgid = cgr->cgrid;
+	mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+	mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+	qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*result = mcr->cgrtestwrite;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_testwrite_cgr);
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->querycgr.cgid = cgr->cgrid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*cgrd = mcr->querycgr;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr);
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	qm_mc_start(&p->p);
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			QM_MCC_VERB_QUERYCONGESTION);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*congestion = mcr->querycongestion;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_congestion);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+	unsigned long irqflags __maybe_unused;
+	int ret = -EBUSY;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	if (!p->vdqcr_owned) {
+		FQLOCK(fq);
+		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+			goto escape;
+		fq_set(fq, QMAN_FQ_STATE_VDQCR);
+		FQUNLOCK(fq);
+		p->vdqcr_owned = fq;
+		ret = 0;
+	}
+escape:
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	if (!ret)
+		qm_dqrr_vdqcr_set(&p->p, vdqcr);
+	return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+	int ret;
+
+	*p = get_affine_portal();
+	ret = set_p_vdqcr(*p, fq, vdqcr);
+	put_affine_portal();
+	return ret;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+#ifndef __rtems__
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!(ret = set_p_vdqcr(p, fq, vdqcr)));
+	else
+#endif /* __rtems__ */
+		wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
+	return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+#ifndef __rtems__
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!(ret = set_vdqcr(p, fq, vdqcr)));
+	else
+#endif /* __rtems__ */
+		wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
+	return ret;
+}
+#endif
+
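+/* Issue a volatile dequeue command (VDQCR) for a parked or retired FQ on the
+ * given portal. The FQID field of 'vdqcr' must be zero; it is filled in from
+ * the FQ object. With the WAIT/FINISH flags the call may block until the
+ * command has been accepted and/or has completed. */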
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+					u32 flags __maybe_unused, u32 vdqcr)
+{
+	int ret;
+
+	if ((fq->state != qman_fq_state_parked) &&
+			(fq->state != qman_fq_state_retired))
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
+	else
+#endif
+		ret = set_p_vdqcr(p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+#ifndef __rtems__
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/* NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue. */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_volatile_dequeue);
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+				u32 vdqcr)
+{
+	struct qman_portal *p;
+	int ret;
+
+	if ((fq->state != qman_fq_state_parked) &&
+			(fq->state != qman_fq_state_retired))
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+	else
+#endif
+		ret = set_vdqcr(&p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+#ifndef __rtems__
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/* NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue. */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+	if (avail)
+		qm_eqcr_cce_prefetch(&p->p);
+	else
+		qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qman_portal *p = get_affine_portal();
+	u8 avail;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	update_eqcr_ci(p, 0);
+	avail = qm_eqcr_get_fill(&p->p);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return avail == 0;
+}
+EXPORT_SYMBOL(qman_eqcr_is_empty);
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+	if (affine) {
+		unsigned long irqflags __maybe_unused;
+		struct qman_portal *p = get_affine_portal();
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		p->cb_dc_ern = handler;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+	} else
+		cb_dc_ern = handler;
+}
+EXPORT_SYMBOL(qman_set_dc_ern);
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+	u8 avail;
+
+	PORTAL_IRQ_LOCK(p, (*irqflags));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (p->eqci_owned) {
+			PORTAL_IRQ_UNLOCK(p, (*irqflags));
+			return NULL;
+		}
+		p->eqci_owned = fq;
+	}
+#endif
+	if (p->use_eqcr_ci_stashing) {
+		/*
+		 * The stashing case is easy, only update if we need to in
+		 * order to try and liberate ring entries.
+		 */
+		eq = qm_eqcr_start_stash(&p->p);
+	} else {
+		/*
+		 * The non-stashing case is harder, need to prefetch ahead of
+		 * time.
+		 */
+		avail = qm_eqcr_get_avail(&p->p);
+		if (avail < 2)
+			update_eqcr_ci(p, avail);
+		eq = qm_eqcr_start_no_stash(&p->p);
+	}
+
+	if (unlikely(!eq)) {
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+		if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+				(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
+			p->eqci_owned = NULL;
+#endif
+		PORTAL_IRQ_UNLOCK(p, (*irqflags));
+		return NULL;
+	}
+	if (flags & QMAN_ENQUEUE_FLAG_DCA)
+		eq->dca = QM_EQCR_DCA_ENABLE |
+			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+					QM_EQCR_DCA_PARK : 0) |
+			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+	eq->fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	eq->tag = fq->key;
+#else
+	eq->tag = (u32)(uintptr_t)fq;
+#endif
+	eq->fd = *fd;
+	return eq;
+}
+
+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+
+	*p = get_affine_portal();
+	eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
+	if (!eq)
+		put_affine_portal();
+	return eq;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
+
+	if (!eq)
+		qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
+	return eq;
+}
+static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+
+#ifndef __rtems__
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+		wait_event_interruptible(affine_queue,
+			(eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+	else
+#endif /* __rtems__ */
+		wait_event(affine_queue,
+			(eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+	return eq;
+}
+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
+
+	if (!eq)
+		qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
+	return eq;
+}
+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+
+#ifndef __rtems__
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+		wait_event_interruptible(affine_queue,
+			(eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+	else
+#endif /* __rtems__ */
+		wait_event(affine_queue,
+			(eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+	return eq;
+}
+#endif
+
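+/* Enqueue a frame descriptor to 'fq' via the given portal: claim an EQCR
+ * entry (optionally waiting for ring space), fill it in from 'fd' and commit
+ * it. Returns -EBUSY if no EQCR entry could be obtained. */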
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue);
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				struct qman_fq *orp, u16 orp_seqnum)
+{
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Process ORP-specifics here */
+	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+	else {
+		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+		if (flags & QMAN_ENQUEUE_FLAG_NESN)
+			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+		else
+			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+	}
+	eq->seqnum = orp_seqnum;
+	eq->orp = orp->fqid;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_orp);
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+			struct qman_fq *orp, u16 orp_seqnum)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Process ORP-specifics here */
+	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+	else {
+		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+		if (flags & QMAN_ENQUEUE_FLAG_NESN)
+			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+		else
+			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+	}
+	eq->seqnum = orp_seqnum;
+	eq->orp = orp->fqid;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_orp);
+
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				qman_cb_precommit cb, void *cb_arg)
+{
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* invoke user supplied callback function before writing commit verb */
+	if (cb(cb_arg)) {
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		return -EINVAL;
+	}
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_precommit);
+
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+		u32 flags, qman_cb_precommit cb, void *cb_arg)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* invoke user supplied callback function before writing commit verb */
+	if (cb(cb_arg)) {
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+		return -EINVAL;
+	}
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+#ifndef __rtems__
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+#endif /* __rtems__ */
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_precommit);
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+	u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initcgr = *opts;
+	mcc->initcgr.cgid = cgr->cgrid;
+	if (flags & QMAN_CGR_FLAG_USE_INIT)
+		verb = QM_MCC_VERB_INITCGR;
+	qm_mc_commit(&p->p, verb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+	res = mcr->result;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(qman_modify_cgr);
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
+					QM_CHANNEL_SWPORTAL0))
+#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
+
+static u8 qman_cgr_cpus[__CGR_NUM];
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret;
+	struct qman_portal *p;
+
+	/* We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC. */
+	if (cgr->cgrid >= __CGR_NUM)
+		return -EINVAL;
+
+	preempt_disable();
+	p = get_affine_portal();
+	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+	preempt_enable();
+
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	cgr->chan = p->config->public_cfg.channel;
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+
+	/* if no opts specified, just add it to the list */
+	if (!opts)
+		goto add_list;
+
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)
+		goto release_lock;
+	if (opts)
+		local_opts = *opts;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl =
+			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+	else
+		/* Overwrite TARG */
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+							TARG_MASK(p);
+	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+	/* send init if flags indicate so */
+	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+	else
+		ret = qman_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		goto release_lock;
+add_list:
+	list_add(&cgr->node, &p->cgr_cbs);
+
+	/* Determine if newly added object requires its callback to be called */
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret) {
+		/* we can't go back, so proceed and return success, but scream
+		 * and wail to the log file */
+		pr_crit("CGR HW state partially modified\n");
+		ret = 0;
+		goto release_lock;
+	}
+	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+							cgr->cgrid))
+		cgr->cb(p, cgr, 1);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
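+
+/*
+ * Illustrative sketch only (not part of the imported sources): registering a
+ * congestion group with a congestion state change callback might look roughly
+ * like the following; 'my_cscn_cb' and MY_CGRID are hypothetical, and the
+ * callback signature is inferred from the cgr->cb(p, cgr, 1) call above:
+ *
+ *	static void my_cscn_cb(struct qman_portal *p, struct qman_cgr *c,
+ *			       int congested)
+ *	{
+ *		... react to entering/leaving congestion ...
+ *	}
+ *
+ *	struct qman_cgr cgr = { .cgrid = MY_CGRID, .cb = my_cscn_cb };
+ *	struct qm_mcc_initcgr opts;
+ *
+ *	memset(&opts, 0, sizeof(opts));
+ *	qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */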
+
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+					struct qm_mcc_initcgr *opts)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcc_initcgr local_opts;
+	int ret;
+
+	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+		pr_warn("This version doesn't support sending CSCN to a DCP portal\n");
+		return -EINVAL;
+	}
+	/* We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC.
+	 */
+	if (cgr->cgrid >= __CGR_NUM)
+		return -EINVAL;
+
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	if (opts)
+		local_opts = *opts;
+
+	local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+	/* send init if flags indicate so */
+	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+							&local_opts);
+	else
+		ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr_to_dcp);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret = 0;
+	struct qman_cgr *i;
+	struct qman_portal *p = get_affine_portal();
+
+	if (cgr->chan != p->config->public_cfg.channel) {
+		pr_crit("Attempting to delete cgr from different portal "
+			"than it was created on: create 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->public_cfg.channel);
+		ret = -EINVAL;
+		goto put_portal;
+	}
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	list_del(&cgr->node);
+	/*
+	 * If there are no other CGR objects for this CGRID in the list, update
+	 * CSCN_TARG accordingly
+	 */
+	list_for_each_entry(i, &p->cgr_cbs, node)
+		if ((i->cgrid == cgr->cgrid) && i->cb)
+			goto release_lock;
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)  {
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+		goto release_lock;
+	}
+	/* Overwrite TARG */
+	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+	else
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+							 ~(TARG_MASK(p));
+	ret = qman_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+#ifndef __rtems__
+struct cgr_comp {
+	struct qman_cgr *cgr;
+	struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+	struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+	int res;
+
+	res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr);
+	complete(&cgr_comp->completion);
+
+	return res;
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+	struct task_struct *thread;
+	struct cgr_comp cgr_comp;
+
+	preempt_disable();
+	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+		init_completion(&cgr_comp.completion);
+		cgr_comp.cgr = cgr;
+		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+					"cgr_del");
+
+		if (likely(!IS_ERR(thread))) {
+			kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+			wake_up_process(thread);
+			wait_for_completion(&cgr_comp.completion);
+			preempt_enable();
+			return;
+		}
+	}
+	qman_delete_cgr(cgr);
+	preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+#endif /* __rtems__ */
+
+int qman_set_wpm(int wpm_enable)
+{
+	return qm_set_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_set_wpm);
+
+int qman_get_wpm(int *wpm_enable)
+{
+	return qm_get_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_get_wpm);
+
+
+/* Cleanup FQs */
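+/* Force a frame queue out of service regardless of its current state: retire
+ * it (draining via SDQCR/volatile dequeues where necessary and waiting for
+ * the ORL to empty), then issue the ALTER_OOS command. */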
+static int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
+				 u32 fqid)
+{
+
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	u8 state;
+	int orl_empty, fq_empty, i, drain = 0;
+	u32 result;
+	u32 channel, wq;
+
+	/* Determine the state of the FQID */
+	mcc = qm_mc_start(portal[0]);
+	mcc->queryfq_np.fqid = fqid;
+	qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(portal[0])))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+	if (state == QM_MCR_NP_STATE_OOS)
+		return 0; /* Already OOS, no need to do any more checks */
+
+	/* Query which channel the FQ is using */
+	mcc = qm_mc_start(portal[0]);
+	mcc->queryfq.fqid = fqid;
+	qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
+	while (!(mcr = qm_mc_result(portal[0])))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+	/* Need to store these since the MCR gets reused */
+	channel = mcr->queryfq.fqd.dest.channel;
+	wq = mcr->queryfq.fqd.dest.wq;
+
+	switch (state) {
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+	case QM_MCR_NP_STATE_PARKED:
+		orl_empty = 0;
+		mcc = qm_mc_start(portal[0]);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
+		while (!(mcr = qm_mc_result(portal[0])))
+			cpu_relax();
+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			   QM_MCR_VERB_ALTER_RETIRE);
+		result = mcr->result; /* Make a copy as we reuse MCR below */
+
+		if (result == QM_MCR_RESULT_PENDING) {
+			/* Need to wait for the FQRN in the message ring, which
+			   will only occur once the FQ has been drained. In
+			   order for the FQ to drain, the portal needs to be set
+			   to dequeue from the channel the FQ is scheduled on */
+			const struct qm_mr_entry *msg;
+			const struct qm_dqrr_entry *dqrr = NULL;
+			int found_fqrn = 0;
+			u16 dequeue_wq = 0;
+
+			/* Flag that we need to drain FQ */
+			drain = 1;
+
+			if (channel >= qm_channel_pool1 &&
+			    channel < (qm_channel_pool1 + 15)) {
+				/* Pool channel, enable the bit in the portal */
+				dequeue_wq = (channel -
+					      qm_channel_pool1 + 1)<<4 | wq;
+			} else if (channel < qm_channel_pool1) {
+				/* Dedicated channel */
+				dequeue_wq = wq;
+			} else {
+				pr_info("Cannot recover FQ 0x%x, it is "
+					"scheduled on channel 0x%x\n",
+					fqid, channel);
+				return -EBUSY;
+			}
+			/* Set the sdqcr to drain this channel */
+			if (channel < qm_channel_pool1)
+				for (i = 0; i < portal_count; i++)
+					qm_dqrr_sdqcr_set(portal[i],
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_DEDICATED);
+			else
+				for (i = 0; i < portal_count; i++)
+					qm_dqrr_sdqcr_set(
+						portal[i],
+						QM_SDQCR_TYPE_ACTIVE |
+						QM_SDQCR_CHANNELS_POOL_CONV
+						(channel));
+			while (!found_fqrn) {
+				/* Keep draining DQRR while checking the MR */
+				for (i = 0; i < portal_count; i++) {
+					qm_dqrr_pvb_update(portal[i]);
+					dqrr = qm_dqrr_current(portal[i]);
+					while (dqrr) {
+						qm_dqrr_cdc_consume_1ptr(
+							portal[i], dqrr, 0);
+						qm_dqrr_pvb_update(portal[i]);
+						qm_dqrr_next(portal[i]);
+						dqrr = qm_dqrr_current(
+							portal[i]);
+					}
+					/* Process message ring too */
+					qm_mr_pvb_update(portal[i]);
+					msg = qm_mr_current(portal[i]);
+					while (msg) {
+						if ((msg->verb &
+						     QM_MR_VERB_TYPE_MASK)
+						    == QM_MR_VERB_FQRN)
+							found_fqrn = 1;
+						qm_mr_next(portal[i]);
+						qm_mr_cci_consume_to_current(
+							portal[i]);
+						qm_mr_pvb_update(portal[i]);
+						msg = qm_mr_current(portal[i]);
+					}
+					cpu_relax();
+				}
+			}
+		}
+		if (result != QM_MCR_RESULT_OK &&
+		    result !=  QM_MCR_RESULT_PENDING) {
+			/* error */
+			pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
+			       fqid, result);
+			return -1;
+		}
+		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+			/* ORL had no entries, no need to wait until the
+			   ERNs come in */
+			orl_empty = 1;
+		}
+		/* Retirement succeeded, check to see if FQ needs
+		   to be drained */
+		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+			/* FQ is Not Empty, drain using volatile DQ commands */
+			fq_empty = 0;
+			do {
+				const struct qm_dqrr_entry *dqrr = NULL;
+				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+				qm_dqrr_vdqcr_set(portal[0], vdqcr);
+
+				/* Wait for a dequeue to occur */
+				while (dqrr == NULL) {
+					qm_dqrr_pvb_update(portal[0]);
+					dqrr = qm_dqrr_current(portal[0]);
+					if (!dqrr)
+						cpu_relax();
+				}
+				/* Process the dequeues, making sure to
+				   empty the ring completely */
+				while (dqrr) {
+					if (dqrr->fqid == fqid &&
+					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+						fq_empty = 1;
+					qm_dqrr_cdc_consume_1ptr(portal[0],
+								 dqrr, 0);
+					qm_dqrr_pvb_update(portal[0]);
+					qm_dqrr_next(portal[0]);
+					dqrr = qm_dqrr_current(portal[0]);
+				}
+			} while (fq_empty == 0);
+		}
+		for (i = 0; i < portal_count; i++)
+			qm_dqrr_sdqcr_set(portal[i], 0);
+
+		/* Wait for the ORL to have been completely drained */
+		while (orl_empty == 0) {
+			const struct qm_mr_entry *msg;
+
+			qm_mr_pvb_update(portal[0]);
+			msg = qm_mr_current(portal[0]);
+			while (msg) {
+				if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+				    QM_MR_VERB_FQRL)
+					orl_empty = 1;
+				qm_mr_next(portal[0]);
+				qm_mr_cci_consume_to_current(portal[0]);
+				qm_mr_pvb_update(portal[0]);
+				msg = qm_mr_current(portal[0]);
+			}
+			cpu_relax();
+		}
+		mcc = qm_mc_start(portal[0]);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+		while (!(mcr = qm_mc_result(portal[0])))
+			cpu_relax();
+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			   QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result != QM_MCR_RESULT_OK) {
+			pr_err("OOS after drain failed on FQID 0x%x, result 0x%x\n",
+			       fqid, mcr->result);
+			return -1;
+		}
+		return 0;
+	case QM_MCR_NP_STATE_RETIRED:
+		/* Send OOS Command */
+		mcc = qm_mc_start(portal[0]);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+		while (!(mcr = qm_mc_result(portal[0])))
+			cpu_relax();
+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			   QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result) {
+			pr_err("OOS failed on FQID 0x%x\n", fqid);
+			return -1;
+		}
+		return 0;
+	case QM_MCR_NP_STATE_OOS:
+		/*  Done */
+		return 0;
+	}
+	return -1;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret;
+	struct qm_portal *low_p;
+
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	low_p = &p->p;
+	ret = qm_shutdown_fq(&low_p, 1, fqid);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal)
+{
+#ifndef __rtems__
+	return portal->sharing_redirect ? NULL : portal->config;
+#else /* __rtems__ */
+	return portal->config;
+#endif /* __rtems__ */
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_driver.c b/linux/drivers/soc/fsl/qbman/qman_driver.c
new file mode 100644
index 0000000..6923504
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_driver.c
@@ -0,0 +1,87 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2013 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#include <linux/time.h>
+
+static int __init early_qman_init(void)
+{
+	struct device_node *dn;
+	u32 is_portal_available;
+
+	qman_init();
+
+	is_portal_available = 0;
+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+		if (of_device_is_available(dn)) {
+			is_portal_available = 1;
+			break;
+		}
+	}
+
+	if (!qman_have_ccsr() && is_portal_available) {
+		struct qman_fq fq = {.fqid = 1};
+		struct qm_mcr_queryfq_np np;
+		int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
+		struct timespec nowts, diffts, startts = current_kernel_time();
+
+		/* Loop until querying the given fqid succeeds or we time out */
+		while (1) {
+			err = qman_query_fq_np(&fq, &np);
+			if (!err) {
+				/* success, control-plane has configured QMan */
+				break;
+			} else if (err != -ERANGE) {
+				pr_err("I/O error, continuing anyway\n");
+				break;
+			}
+			nowts = current_kernel_time();
+			diffts = timespec_sub(nowts, startts);
+			if (diffts.tv_sec > 0) {
+				if (!retry--) {
+					pr_err("Time out, control-plane dead?\n");
+					break;
+				}
+				pr_warn("Polling for the control-plane (%d)\n",
+					retry);
+			}
+		}
+	}
+
+	qman_resource_init();
+
+	return 0;
+}
+subsys_initcall(early_qman_init);
diff --git a/linux/drivers/soc/fsl/qbman/qman_portal.c b/linux/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000..c74059b
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,796 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+#ifdef __rtems__
+#include <bsp/qoriq.h>
+#endif /* __rtems__ */
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW  1
+#define CONFIG_FSL_DPA_PIRQ_FAST  1
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available) */
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+EXPORT_SYMBOL(qm_channel_pme);
+u16 qm_channel_dce = QMAN_CHANNEL_DCE;
+EXPORT_SYMBOL(qm_channel_dce);
+u16 qman_portal_max;
+EXPORT_SYMBOL(qman_portal_max);
+
+#ifndef __rtems__
+/* For these variables, and the portal-initialisation logic, the
+ * comments in bman_driver.c apply here, so they won't be repeated. */
+static struct qman_portal *shared_portals[NR_CPUS];
+static int num_shared_portals;
+static int shared_portals_idx;
+static LIST_HEAD(unused_pcfgs);
+#endif /* __rtems__ */
+
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 pools_sdqcr;
+
+#define STR_ERR_NOPROP	    "No '%s' property in node %s\n"
+#define STR_ERR_CELL	    "'%s' is not a %d-cell range in node %s\n"
+#define STR_FQID_RANGE	    "fsl,fqid-range"
+#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
+#define STR_CGRID_RANGE	     "fsl,cgrid-range"
+
+/* A "fsl,fqid-range" node; release the given range to the allocator */
+static __init int fsl_fqid_range_init(struct device_node *node)
+{
+	int ret;
+	const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
+
+	if (!range) {
+		pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	qman_seed_fqid_range(range[0], range[1]);
+	pr_info("FQID allocator includes range %d:%d\n",
+		range[0], range[1]);
+	return 0;
+}
+
+/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
+static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
+{
+	int ret;
+	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+
+	if (!chanid) {
+		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	for (ret = 0; ret < chanid[1]; ret++)
+		pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
+	return 0;
+}
+
+/* A "fsl,pool-channel-range" node; release the given range to the allocator */
+static __init int fsl_pool_channel_range_init(struct device_node *node)
+{
+	int ret;
+	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+
+	if (!chanid) {
+		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	qman_seed_pool_range(chanid[0], chanid[1]);
+	pr_info("Pool channel allocator includes range %d:%d\n",
+		chanid[0], chanid[1]);
+	return 0;
+}
+
+/* A "fsl,cgrid-range" node; release the given range to the allocator */
+static __init int fsl_cgrid_range_init(struct device_node *node)
+{
+	struct qman_cgr cgr;
+	int ret, errors = 0;
+	const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
+
+	if (!range) {
+		pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	qman_seed_cgrid_range(range[0], range[1]);
+	pr_info("CGRID allocator includes range %d:%d\n",
+		range[0], range[1]);
+	for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
+		ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
+		if (ret)
+			errors++;
+	}
+	if (errors)
+		pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
+			errors, (errors > 1) ? "s" : "", range[0], range[1]);
+	return 0;
+}
+
+static void qman_get_ip_revision(struct device_node *dn)
+{
+#ifdef __rtems__
+	struct device_node of_dns;
+#endif /* __rtems__ */
+	u16 ip_rev = 0;
+
+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+		if (!of_device_is_available(dn))
+			continue;
+		if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
+			pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+			BUG_ON(1);
+		} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
+			ip_rev = QMAN_REV11;
+			qman_portal_max = 10;
+		} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
+			ip_rev = QMAN_REV12;
+			qman_portal_max = 10;
+		} else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
+			ip_rev = QMAN_REV20;
+			qman_portal_max = 3;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.0.0")) {
+			ip_rev = QMAN_REV30;
+			qman_portal_max = 50;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.0.1")) {
+			ip_rev = QMAN_REV30;
+			qman_portal_max = 25;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.0")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 50;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.1")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 25;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.2")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 18;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.3")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 10;
+		} else {
+			pr_warn("Unknown version in portal node, default to rev1.1\n");
+			ip_rev = QMAN_REV11;
+			qman_portal_max = 10;
+		}
+
+		if (!qman_ip_rev) {
+			if (ip_rev) {
+				qman_ip_rev = ip_rev;
+			} else {
+				pr_warn("Unknown version, default to rev1.1\n");
+				qman_ip_rev = QMAN_REV11;
+			}
+		} else if (ip_rev && (qman_ip_rev != ip_rev))
+			pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n",
+				qman_ip_rev, dn->full_name, ip_rev);
+		if (qman_ip_rev == ip_rev)
+			break;
+	}
+}
+
+#ifndef __rtems__
+/* Parse a portal node, perform generic mapping duties and return the config. It
+ * is not known at this stage for what purpose (or even if) the portal will be
+ * used. */
+static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
+{
+	struct qm_portal_config *pcfg;
+	const u32 *channel;
+	int irq, ret;
+	struct resource res;
+
+	pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return NULL;
+
+	/*
+	 * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
+	 * 'struct device' in order to get the PAMU stashing setup and the QMan
+	 * portal [driver] won't function at all without ring stashing
+	 *
+	 * Making the QMan portal driver nice and proper is part of the
+	 * upstreaming effort
+	 */
+	pcfg->dev.bus = &platform_bus_type;
+	pcfg->dev.of_node = node;
+#ifdef CONFIG_IOMMU_API
+	pcfg->dev.archdata.iommu_domain = NULL;
+#endif
+
+	ret = of_address_to_resource(node, DPA_PORTAL_CE,
+				&pcfg->addr_phys[DPA_PORTAL_CE]);
+	if (ret) {
+		pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
+		goto err;
+	}
+	ret = of_address_to_resource(node, DPA_PORTAL_CI,
+				&pcfg->addr_phys[DPA_PORTAL_CI]);
+	if (ret) {
+		pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
+		goto err;
+	}
+
+	channel = of_get_property(node, "fsl,qman-channel-id", &ret);
+	if (!channel || (ret != 4)) {
+		pr_err("Can't get %s property 'fsl,qman-channel-id'\n",
+		       node->full_name);
+		goto err;
+	}
+	pcfg->public_cfg.channel = *channel;
+	pcfg->public_cfg.cpu = -1;
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq == NO_IRQ) {
+		pr_err("Can't get %s property 'interrupts'\n", node->full_name);
+		goto err;
+	}
+	pcfg->public_cfg.irq = irq;
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	/* We need the same LIODN offset for all portals */
+	qman_liodn_fixup(pcfg->public_cfg.channel);
+#endif
+
+	pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
+				pcfg->addr_phys[DPA_PORTAL_CE].start,
+				resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
+				0);
+	pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
+				pcfg->addr_phys[DPA_PORTAL_CI].start,
+				resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
+				_PAGE_GUARDED | _PAGE_NO_CACHE);
+
+	return pcfg;
+err:
+	kfree(pcfg);
+	return NULL;
+}
+
+static struct qm_portal_config *get_pcfg(struct list_head *list)
+{
+	struct qm_portal_config *pcfg;
+
+	if (list_empty(list))
+		return NULL;
+	pcfg = list_entry(list->prev, struct qm_portal_config, list);
+	list_del(&pcfg->list);
+	return pcfg;
+}
+#endif /* __rtems__ */
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+	int ret;
+	int window_count = 1;
+	struct iommu_domain_geometry geom_attr;
+	struct pamu_stash_attribute stash_attr;
+
+	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+	if (!pcfg->iommu_domain) {
+		pr_err("%s(): iommu_domain_alloc() failed", __func__);
+		goto _no_iommu;
+	}
+	geom_attr.aperture_start = 0;
+	geom_attr.aperture_end =
+		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+	geom_attr.force_aperture = true;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+				    &geom_attr);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+				    &window_count);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
+		goto _iommu_domain_free;
+	}
+	stash_attr.cpu = cpu;
+	stash_attr.cache = PAMU_ATTR_CACHE_L1;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_STASH,
+				    &stash_attr);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d",
+			   __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+					 IOMMU_READ | IOMMU_WRITE);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_window_enable() = %d",
+			   __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+	if (ret < 0) {
+		pr_err("%s(): iommu_device_attach() = %d",
+			   __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
+				    &window_count);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d",
+			   __func__, ret);
+		goto _iommu_detach_device;
+	}
+
+_no_iommu:
+#endif
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+		pr_warn("Failed to set the stash request queue\n");
+
+	return;
+
+#ifdef CONFIG_FSL_PAMU
+_iommu_detach_device:
+	iommu_detach_device(pcfg->iommu_domain, NULL);
+_iommu_domain_free:
+	iommu_domain_free(pcfg->iommu_domain);
+	pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+	struct qman_portal *p;
+
+#ifndef __rtems__
+	pcfg->iommu_domain = NULL;
+#endif /* __rtems__ */
+	portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
+	p = qman_create_affine_portal(pcfg, NULL);
+	if (p) {
+		u32 irq_sources = 0;
+		/* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+		irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+			       QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+		irq_sources |= QM_PIRQ_DQRI;
+#endif
+		qman_p_irqsource_add(p, irq_sources);
+		pr_info("Portal %sinitialised, cpu %d\n",
+#ifndef __rtems__
+			pcfg->public_cfg.is_shared ? "(shared) " : "",
+#else /* __rtems__ */
+			"",
+#endif /* __rtems__ */
+			pcfg->public_cfg.cpu);
+	} else
+		pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
+	return p;
+}
+
+#ifndef __rtems__
+static void init_slave(int cpu)
+{
+	struct qman_portal *p;
+	struct cpumask oldmask = *tsk_cpus_allowed(current);
+
+	set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
+	p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+	if (!p)
+		pr_err("Slave portal failure on cpu %d\n", cpu);
+	else
+		pr_info("Portal (slave) initialised, cpu %d\n", cpu);
+	set_cpus_allowed_ptr(current, &oldmask);
+	if (shared_portals_idx >= num_shared_portals)
+		shared_portals_idx = 0;
+}
+
+static struct cpumask want_unshared __initdata;
+static struct cpumask want_shared __initdata;
+
+static int __init parse_qportals(char *str)
+{
+	return parse_portals_bootarg(str, &want_shared, &want_unshared,
+				     "qportals");
+}
+__setup("qportals=", parse_qportals);
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+							unsigned int cpu)
+{
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	if (pcfg->iommu_domain) {
+		stash_attr.cpu = cpu;
+		stash_attr.cache = PAMU_ATTR_CACHE_L1;
+		ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+		if (ret < 0) {
+			pr_err("Failed to update pamu stash setting\n");
+			return;
+		}
+	}
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+		pr_warn("Failed to update portal's stash request queue\n");
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = (struct qman_portal *)affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+			qman_portal_update_sdest(pcfg, 0);
+		}
+	}
+}
+#endif /* __rtems__ */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void qman_online_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = (struct qman_portal *)affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+			qman_portal_update_sdest(pcfg, cpu);
+		}
+	}
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+				     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		qman_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		qman_offline_cpu(cpu);
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+	.notifier_call = qman_hotplug_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef __rtems__
+#include <bsp/fdt.h>
+static struct qm_portal_config qman_configs[NR_CPUS];
+static void
+qman_init_portals(void)
+{
+	const char *fdt = bsp_fdt_get();
+	struct device_node dn;
+	const char *name;
+	int cpu_count = (int)rtems_get_processor_count();
+	int cpu;
+	int ret;
+	int node;
+	int parent;
+
+	memset(&dn, 0, sizeof(dn));
+
+	name = "fsl,qman-portal";
+	node = fdt_node_offset_by_compatible(fdt, 0, name);
+	if (node < 0)
+		panic("qman: no portals in FDT");
+	parent = fdt_parent_offset(fdt, node);
+	if (parent < 0)
+		panic("qman: no parent of portals in FDT");
+	node = fdt_first_subnode(fdt, parent);
+
+	dn.full_name = name;
+	dn.offset = node;
+
+	qoriq_clear_ce_portal(&qoriq_qman_portal[0][0],
+	    sizeof(qoriq_qman_portal[0]));
+	qoriq_clear_ci_portal(&qoriq_qman_portal[1][0],
+	    sizeof(qoriq_qman_portal[1]));
+
+	for (cpu = 0; cpu < cpu_count; ++cpu) {
+		struct qm_portal_config *pcfg = &qman_configs[cpu];
+		struct qman_portal *portal;
+		struct resource res;
+		const u32 *channel;
+
+		if (node < 0)
+			panic("qman: missing portal in FDT");
+
+		ret = of_address_to_resource(&dn, 0, &res);
+		if (ret != 0)
+			panic("qman: no portal CE address");
+		pcfg->addr_virt[0] = (__iomem void *)
+		    ((uintptr_t)&qoriq_qman_portal[0][0] + (uintptr_t)res.start);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] >=
+		    (uintptr_t)&qoriq_qman_portal[0][0]);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[0] <
+		    (uintptr_t)&qoriq_qman_portal[1][0]);
+
+		ret = of_address_to_resource(&dn, 1, &res);
+		if (ret != 0)
+			panic("qman: no portal CI address");
+		pcfg->addr_virt[1] = (__iomem void *)
+		    ((uintptr_t)&qoriq_qman_portal[0][0] + (uintptr_t)res.start);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] >=
+		    (uintptr_t)&qoriq_qman_portal[1][0]);
+		BSD_ASSERT((uintptr_t)pcfg->addr_virt[1] <
+		    (uintptr_t)&qoriq_qman_portal[2][0]);
+
+		pcfg->public_cfg.irq = of_irq_to_resource(&dn, 0, NULL);
+		if (pcfg->public_cfg.irq == NO_IRQ)
+			panic("qman: no portal interrupt");
+
+		channel = of_get_property(&dn, "fsl,qman-channel-id", &ret);
+		if (channel == NULL || ret != 4)
+			panic("qman: no portal channel ID");
+		pcfg->public_cfg.channel = *channel;
+
+		pcfg->public_cfg.cpu = cpu;
+		pcfg->public_cfg.pools = pools_sdqcr;
+
+		portal = init_pcfg(pcfg);
+		if (portal == NULL)
+			panic("qman: cannot create portal");
+
+		node = fdt_next_subnode(fdt, node);
+		dn.offset = node;
+	}
+}
+#endif /* __rtems__ */
+#ifndef __rtems__
+__init int qman_init(void)
+{
+	struct cpumask slave_cpus;
+	struct cpumask unshared_cpus = *cpu_none_mask;
+	struct cpumask shared_cpus = *cpu_none_mask;
+	LIST_HEAD(unshared_pcfgs);
+	LIST_HEAD(shared_pcfgs);
+	struct device_node *dn;
+	struct qm_portal_config *pcfg;
+	struct qman_portal *p;
+	int cpu, ret;
+	struct cpumask offline_cpus;
+
+	/* Initialise the QMan (CCSR) device */
+	for_each_compatible_node(dn, NULL, "fsl,qman") {
+		if (!qman_init_ccsr(dn))
+			pr_info("Err interrupt handler present\n");
+		else
+			pr_err("CCSR setup failed\n");
+	}
+#else /* __rtems__ */
+int
+qman_init(struct device_node *dn)
+{
+	struct device_node of_dns;
+	int ret;
+#endif /* __rtems__ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	/* Setup lookup table for FQ demux */
+	ret = qman_setup_fq_lookup_table(qman_fqd_size()/64);
+	if (ret)
+		return ret;
+#endif
+
+	/* Get qman ip revision */
+	qman_get_ip_revision(dn);
+	if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+		qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+	}
+
+	/* Parse pool channels into the SDQCR mask. (Must happen before portals
+	 * are initialised.) */
+	for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+		ret = fsl_pool_channel_range_sdqcr(dn);
+		if (ret)
+			return ret;
+	}
+
+#ifndef __rtems__
+	memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
+	/* Initialise portals. See bman_driver.c for comments */
+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+		if (!of_device_is_available(dn))
+			continue;
+		pcfg = parse_pcfg(dn);
+		if (pcfg) {
+			pcfg->public_cfg.pools = pools_sdqcr;
+			list_add_tail(&pcfg->list, &unused_pcfgs);
+		}
+	}
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, &want_shared)) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+			cpumask_set_cpu(cpu, &shared_cpus);
+		}
+		if (cpumask_test_cpu(cpu, &want_unshared)) {
+			if (cpumask_test_cpu(cpu, &shared_cpus))
+				continue;
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+		for_each_possible_cpu(cpu) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+	cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+	if (cpumask_empty(&slave_cpus)) {
+		if (!list_empty(&shared_pcfgs)) {
+			cpumask_or(&unshared_cpus, &unshared_cpus,
+				   &shared_cpus);
+			cpumask_clear(&shared_cpus);
+			list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+			INIT_LIST_HEAD(&shared_pcfgs);
+		}
+	} else {
+		if (list_empty(&shared_pcfgs)) {
+			pcfg = get_pcfg(&unshared_pcfgs);
+			if (!pcfg) {
+				pr_crit("No portals available!\n");
+				return 0;
+			}
+			cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+			cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+		}
+	}
+	list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 0;
+		p = init_pcfg(pcfg);
+	}
+	list_for_each_entry(pcfg, &shared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 1;
+		p = init_pcfg(pcfg);
+		if (p)
+			shared_portals[num_shared_portals++] = p;
+	}
+	if (!cpumask_empty(&slave_cpus))
+		for_each_cpu(cpu, &slave_cpus)
+			init_slave(cpu);
+#else /* __rtems__ */
+	qman_init_portals();
+#endif /* __rtems__ */
+	pr_info("Portals initialised\n");
+#ifndef __rtems__
+	cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+	for_each_cpu(cpu, &offline_cpus)
+		qman_offline_cpu(cpu);
+#endif /* __rtems__ */
+#ifdef CONFIG_HOTPLUG_CPU
+	register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+#endif
+	return 0;
+}
+
+__init int qman_resource_init(void)
+{
+#ifdef __rtems__
+	struct device_node of_dns;
+#endif /* __rtems__ */
+	struct device_node *dn;
+	int ret;
+
+	/* Initialise FQID allocation ranges */
+	for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
+		ret = fsl_fqid_range_init(dn);
+		if (ret)
+			return ret;
+	}
+	/* Initialise CGRID allocation ranges */
+	for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
+		ret = fsl_cgrid_range_init(dn);
+		if (ret)
+			return ret;
+	}
+	/* Parse pool channels into the allocator. (Must happen after portals
+	 * are initialised.) */
+	for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+		ret = fsl_pool_channel_range_init(dn);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_priv.h b/linux/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000..f04bd47
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,293 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+#include <asm/fsl_pamu_stash.h>
+
+/* Congestion Groups */
+
+/* This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+struct qman_cgrs {
+	struct __qm_mcr_querycongestion q;
+};
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+	memset(c, 0, sizeof(*c));
+}
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+	memset(c, 0xff, sizeof(*c));
+}
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+	return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+	c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+	c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+	while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
+		;
+	return num;
+}
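+/*
+ * Illustrative usage of qman_cgrs_next() (example only): walking every group
+ * that is set in a snapshot 'c', e.g. one filled in by a congestion query:
+ *
+ *	int i;
+ *
+ *	for (i = qman_cgrs_next(&c, -1); i < __CGR_NUM;
+ *	     i = qman_cgrs_next(&c, i))
+ *		handle_congested_group(i);
+ *
+ * where handle_congested_group() is a hypothetical consumer.
+ */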
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+				const struct qman_cgrs *src)
+{
+	*dest = *src;
+}
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int ret;
+	u32 *_d = dest->q.__state;
+	const u32 *_a = a->q.__state;
+	const u32 *_b = b->q.__state;
+
+	for (ret = 0; ret < 8; ret++)
+		*(_d++) = *(_a++) & *(_b++);
+}
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int ret;
+	u32 *_d = dest->q.__state;
+	const u32 *_a = a->q.__state;
+	const u32 *_b = b->q.__state;
+
+	for (ret = 0; ret < 8; ret++)
+		*(_d++) = *(_a++) ^ *(_b++);
+}
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+	qm_isr_status = 0,
+	qm_isr_enable = 1,
+	qm_isr_disable = 2,
+	qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+	/* Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited. */
+	__iomem void *addr_virt[2];
+#ifndef __rtems__
+	struct resource addr_phys[2];
+	struct device dev;
+	struct iommu_domain *iommu_domain;
+	/* Allow these to be joined in lists */
+	struct list_head list;
+#endif /* __rtems__ */
+	/* User-visible portal configuration settings */
+	struct qman_portal_config public_cfg;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+extern u16 qman_portal_max;
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+/* Hooks from qman_driver.c to qman_config.c */
+int qman_init_ccsr(struct device_node *node);
+void qman_liodn_fixup(u16 channel);
+int qman_set_sdest(u16 channel, unsigned int cpu_idx);
+size_t qman_fqd_size(void);
+#endif
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+/* Hooks from qman_driver.c in to qman_high.c */
+struct qman_portal *qman_create_portal(
+			struct qman_portal *portal,
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+								int cpu);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+void qman_destroy_portal(struct qman_portal *qm);
+
+/* This CGR feature is supported by h/w and required by unit-tests and the
+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
+ * corruption of h/w fields by s/w that are usually incorruptible (because the
+ * counters are usually maintained entirely within h/w). As such, we declare
+ * this API internally. */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+	struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is greater than the size of context_b field,
+ * then a lookup table is required. */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+
+/*************************************************/
+/*   QMan s/w corenet portal, low-level i/face	 */
+/*************************************************/
+
+/* Note: most functions are only used by the high-level interface, so are
+ * inlined from qman.h. The stuff below is for use by other parts of the
+ * driver. */
+
+/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *   priority.
+ * If SOURCE == SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS	0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
+#define QM_SDQCR_COUNT_EXACT1		0x0
+#define QM_SDQCR_COUNT_UPTO3		0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_SDQCR_TYPE_MASK		0x03000000
+#define QM_SDQCR_TYPE_NULL		0x0
+#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_SDQCR_TYPE_ACTIVE		0x03000000
+#define QM_SDQCR_TOKEN_MASK		0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
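+/*
+ * Illustrative composition following the recipe above (values are examples;
+ * QM_SDQCR_CHANNELS_POOL() is assumed to come from the public qman.h header,
+ * as used by qman_test_api.c later in this patch):
+ *
+ *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS |
+ *		    QM_SDQCR_COUNT_UPTO3 |
+ *		    QM_SDQCR_TYPE_PRIO_QOS |
+ *		    QM_SDQCR_TOKEN_SET(0x98) |
+ *		    QM_SDQCR_CHANNELS_DEDICATED |
+ *		    QM_SDQCR_CHANNELS_POOL(2);
+ */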
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK		0x00ffffff
+#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)
+
+/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
+ * If MODE==SCHEDULED
+ *   Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
+ *   If CHANNELS,
+ *     Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
+ *     You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *     priority.
+ *   If SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ * If MODE==UNSCHEDULED
+ *     Choose FQID().
+ */
+#define QM_PDQCR_MODE_SCHEDULED		0x0
+#define QM_PDQCR_MODE_UNSCHEDULED	0x80000000
+#define QM_PDQCR_SCHEDULED_CHANNELS	0x0
+#define QM_PDQCR_SCHEDULED_SPECIFICWQ	0x40000000
+#define QM_PDQCR_COUNT_EXACT1		0x0
+#define QM_PDQCR_COUNT_UPTO3		0x20000000
+#define QM_PDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_PDQCR_TYPE_MASK		0x03000000
+#define QM_PDQCR_TYPE_NULL		0x0
+#define QM_PDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_PDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_PDQCR_TYPE_ACTIVE		0x03000000
+#define QM_PDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_PDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
+#define QM_PDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_PDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_PDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_PDQCR_SPECIFICWQ_WQ(n)	(n)
+#define QM_PDQCR_FQID(n)		((n) & 0xffffff)
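+/*
+ * Illustrative composition (example only): an unscheduled pull of exactly one
+ * frame from FQID 0x42 would be encoded as
+ *
+ *	u32 pdqcr = QM_PDQCR_MODE_UNSCHEDULED |
+ *		    QM_PDQCR_COUNT_EXACT1 |
+ *		    QM_PDQCR_FQID(0x42);
+ */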
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL	0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
+#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK		0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write". */
+#define qm_isr_status_read(qm)		__qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m)	__qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm)		__qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v)	__qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm)		__qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v)	__qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm)		__qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm)		__qm_isr_write(qm, qm_isr_inhibit, 0)
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+int qman_have_ccsr(void);
+#else
+#define qman_have_ccsr	0
+#endif
+
+#ifndef __rtems__
+__init int qman_init(void);
+#else /* __rtems__ */
+int qman_init(struct device_node *dn);
+#endif /* __rtems__ */
+__init int qman_resource_init(void);
+
+extern void *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test.c b/linux/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 0000000..18c0448
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,61 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+	int loop = 1;
+
+	while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+		qman_test_stash();
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+		qman_test_api();
+#endif
+	}
+	return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test.h b/linux/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 0000000..0b34a67
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,44 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+
+#include <soc/fsl/qman.h>
+
+void qman_test_stash(void);
+void qman_test_api(void);
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_api.c b/linux/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 0000000..63a6d11
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,222 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+/*************/
+/* constants */
+/*************/
+
+#define CGR_ID		27
+#define POOL_ID		2
+#define FQ_FLAGS	QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES	10
+#define NUM_PARTIAL	4
+#define PORTAL_SDQCR	(QM_SDQCR_SOURCE_CHANNELS | \
+			QM_SDQCR_TYPE_PRIO_QOS | \
+			QM_SDQCR_TOKEN_SET(0x98) | \
+			QM_SDQCR_CHANNELS_DEDICATED | \
+			QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE	((void *)0xf00dbeef)
+#define VDQCR_FLAGS	(QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+/*************************************/
+/* Predeclarations (eg. for fq_base) */
+/*************************************/
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+					struct qman_fq *,
+					const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+			const struct qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+			const struct qm_mr_entry *);
+
+/***************/
+/* global vars */
+/***************/
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+	.cb.dqrr = cb_dqrr,
+	.cb.ern = cb_ern,
+	.cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/**********************/
+/* internal functions */
+/**********************/
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *__fd)
+{
+	qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
+	__fd->format = qm_fd_contig_big;
+	__fd->length29 = 0x0000ffff;
+	__fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *__fd)
+{
+	u64 t = qm_fd_addr_get64(__fd);
+	int z = t >> 40;
+
+	t <<= 1;
+	if (z)
+		t |= 1;
+	qm_fd_addr_set64(__fd, t);
+	__fd->length29--;
+	__fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+	int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+	if (!r)
+		r = a->format - b->format;
+	if (!r)
+		r = a->opaque - b->opaque;
+	if (!r)
+		r = a->cmd - b->cmd;
+	return r;
+}
+
+/********/
+/* test */
+/********/
+
+static void do_enqueues(struct qman_fq *fq)
+{
+	unsigned int loop;
+
+	for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+		if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
+				(((loop + 1) == NUM_ENQUEUES) ?
+				QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
+			panic("qman_enqueue() failed\n");
+		fd_inc(&fd);
+	}
+}
+
+void qman_test_api(void)
+{
+	u32 flags;
+	int res;
+	struct qman_fq *fq = &fq_base;
+
+	pr_info("%s(): Starting\n", __func__);
+	fd_init(&fd);
+	fd_init(&fd_dq);
+
+	/* Initialise (parked) FQ */
+	if (qman_create_fq(0, FQ_FLAGS, fq))
+		panic("qman_create_fq() failed\n");
+	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
+		panic("qman_init_fq() failed\n");
+
+	/* Do enqueues + VDQCR, twice. (Parked FQ) */
+	do_enqueues(fq);
+	pr_info("VDQCR (till-empty);\n");
+	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+			QM_VDQCR_NUMFRAMES_TILLEMPTY))
+		panic("qman_volatile_dequeue() failed\n");
+	do_enqueues(fq);
+	pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+			QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
+		panic("qman_volatile_dequeue() failed\n");
+	pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+					NUM_ENQUEUES);
+	if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+			QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
+		panic("qman_volatile_dequeue() failed\n");
+
+	do_enqueues(fq);
+	pr_info("scheduled dequeue (till-empty)\n");
+	if (qman_schedule_fq(fq))
+		panic("qman_schedule_fq() failed\n");
+	wait_event(waitqueue, sdqcr_complete);
+
+	/* Retire and OOS the FQ */
+	res = qman_retire_fq(fq, &flags);
+	if (res < 0)
+		panic("qman_retire_fq() failed\n");
+	wait_event(waitqueue, retire_complete);
+	if (flags & QMAN_FQ_STATE_BLOCKOOS)
+		panic("leaking frames\n");
+	if (qman_oos_fq(fq))
+		panic("qman_oos_fq() failed\n");
+	qman_destroy_fq(fq, 0);
+	pr_info("%s(): Finished\n", __func__);
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dq)
+{
+	if (fd_cmp(&fd_dq, &dq->fd)) {
+		pr_err("BADNESS: dequeued frame doesn't match;\n");
+		BUG();
+	}
+	fd_inc(&fd_dq);
+	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+		sdqcr_complete = 1;
+		wake_up(&waitqueue);
+	}
+	return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+			const struct qm_mr_entry *msg)
+{
+	panic("cb_ern() unimplemented");
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+			const struct qm_mr_entry *msg)
+{
+	u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+	if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
+		panic("unexpected FQS message");
+#ifndef __rtems__
+	pr_info("Retirement message received\n");
+#endif /* __rtems__ */
+	retire_complete = 1;
+	wake_up(&waitqueue);
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_test_stash.c b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 0000000..a3ca660
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,540 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#ifdef __rtems__
+#include <rtems/malloc.h>
+#undef msleep
+#define	msleep(x) usleep((x) * 1000)
+#define	L1_CACHE_BYTES 64
+#endif /* __rtems__ */
+
+/* Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ *    handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ *    and advance the iterator for the next loop. This includes a final fixup,
+ *    which connects the last handler to the first (and which is why phase 2
+ *    and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    initialise FQ objects and advance the iterator for the next loop.
+ *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ *    initialisation targets the correct cpu.
+ */
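+/* Worked example of the mixer scheme above (illustration only): if the
+ * original LFSR word is W and handler A forwards to handler B, A transmits
+ * W ^ A.tx_mixer; on receipt B XORs in B.rx_mixer, and because phase 2 wires
+ * B.rx_mixer == A.tx_mixer this recovers W for validation before B mixes in
+ * its own tx_mixer and forwards again.
+ */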
+
+/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive). */
+struct bstrap {
+	void (*fn)(void);
+	atomic_t started;
+};
+static int bstrap_fn(void *__bstrap)
+{
+	struct bstrap *bstrap = __bstrap;
+
+	atomic_inc(&bstrap->started);
+	bstrap->fn();
+	while (!kthread_should_stop())
+		msleep(1);
+	return 0;
+}
+static int on_all_cpus(void (*fn)(void))
+{
+	int cpu;
+
+#ifndef __rtems__
+	for_each_cpu(cpu, cpu_online_mask) {
+#else /* __rtems__ */
+	for (cpu = 0; cpu < (int)rtems_get_processor_count(); ++cpu) {
+#endif /* __rtems__ */
+		struct bstrap bstrap = {
+			.fn = fn,
+			.started = ATOMIC_INIT(0)
+		};
+		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+			"hotpotato%d", cpu);
+		int ret;
+
+#ifndef __rtems__
+		if (IS_ERR(k))
+#else /* __rtems__ */
+		if (k == NULL)
+#endif /* __rtems__ */
+			return -ENOMEM;
+		kthread_bind(k, cpu);
+		wake_up_process(k);
+		/* If we call kthread_stop() before the "wake up" has had an
+		 * effect, then the thread may exit with -EINTR without ever
+		 * running the function. So poll until it's started before
+		 * requesting it to stop. */
+		while (!atomic_read(&bstrap.started))
+			msleep(10);
+		ret = kthread_stop(k);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+struct hp_handler {
+
+	/* The following data is stashed when 'rx' is dequeued; */
+	/* -------------- */
+	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
+	struct qman_fq rx;
+	/* The Tx FQ we should forward to */
+	struct qman_fq tx;
+	/* The value we XOR post-dequeue, prior to validating */
+	u32 rx_mixer;
+	/* The value we XOR pre-enqueue, after validating */
+	u32 tx_mixer;
+	/* what the hotpotato address should be on dequeue */
+	dma_addr_t addr;
+	u32 *frame_ptr;
+
+	/* The following data isn't (necessarily) stashed on dequeue; */
+	/* -------------- */
+	u32 fqid_rx, fqid_tx;
+	/* list node for linking us into 'hp_cpu' */
+	struct list_head node;
+	/* Just to check ... */
+	unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+	/* identify the cpu we run on; */
+	unsigned int processor_id;
+	/* root node for the per-cpu list of handlers */
+	struct list_head handlers;
+	/* list node for linking us into 'hp_cpu_list' */
+	struct list_head node;
+	/* when repeatedly scanning 'hp_list', each time linking the n'th
+	 * handlers together, this is used as per-cpu iterator state */
+	struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+#ifndef __rtems__
+static void *__frame_ptr;
+#endif /* __rtems__ */
+static u32 *frame_ptr;
+#ifndef __rtems__
+static dma_addr_t frame_dma;
+#endif /* __rtems__ */
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU	2
+#define HP_LOOPS	8
+/* 80 words (320 bytes) of frame data, spanning several cachelines */
+#define HP_NUM_WORDS	80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD	0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
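+/* The tap mask 0xd0000001 appears to correspond to the maximal-length
+ * polynomial x^32 + x^31 + x^29 + x + 1, so the generated word sequence
+ * should only repeat after 2^32 - 1 steps (illustrative note).
+ */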
+
+static void allocate_frame_data(void)
+{
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+#ifndef __rtems__
+	struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+	if (!pdev)
+		panic("platform_device_alloc() failed");
+	if (platform_device_add(pdev))
+		panic("platform_device_add() failed");
+	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+	if (!__frame_ptr)
+		panic("kmalloc() failed");
+	frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
+				~(unsigned long)63);
+#else /* __rtems__ */
+	frame_ptr = rtems_heap_allocate_aligned_with_boundary(4 * HP_NUM_WORDS, 64, 0);
+	if (frame_ptr == NULL)
+		panic("rtems_heap_allocate_aligned_with_boundary() failed");
+#endif /* __rtems__ */
+	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+		frame_ptr[loop] = lfsr;
+		lfsr = do_lfsr(lfsr);
+	}
+#ifndef __rtems__
+	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+					DMA_BIDIRECTIONAL);
+	platform_device_del(pdev);
+	platform_device_put(pdev);
+#endif /* __rtems__ */
+}
+
+static void deallocate_frame_data(void)
+{
+#ifndef __rtems__
+	kfree(__frame_ptr);
+#endif /* __rtems__ */
+}
+
+static inline void process_frame_data(struct hp_handler *handler,
+				const struct qm_fd *fd)
+{
+	u32 *p = handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+
+	if (qm_fd_addr_get64(fd) != handler->addr)
+		panic("bad frame address");
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		*p ^= handler->rx_mixer;
+		if (*p != lfsr)
+			panic("corrupt frame data");
+		*p ^= handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	process_frame_data(handler, &dqrr->fd);
+	if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
+		panic("qman_enqueue() failed");
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr)
+{
+	struct hp_handler *handler = (struct hp_handler *)fq;
+
+	process_frame_data(handler, &dqrr->fd);
+	if (++loop_counter < HP_LOOPS) {
+		if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
+			panic("qman_enqueue() failed");
+	} else {
+		pr_info("Received final (%dth) frame\n", loop_counter);
+		wake_up(&queue);
+	}
+	return qman_cb_dqrr_consume;
+}
+
+static void create_per_cpu_handlers(void)
+{
+	struct hp_handler *handler;
+	int loop;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	hp_cpu->processor_id = smp_processor_id();
+	spin_lock(&hp_lock);
+	list_add_tail(&hp_cpu->node, &hp_cpu_list);
+	hp_cpu_list_length++;
+	spin_unlock(&hp_lock);
+	INIT_LIST_HEAD(&hp_cpu->handlers);
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+		if (!handler)
+			panic("kmem_cache_alloc() failed");
+		handler->processor_id = hp_cpu->processor_id;
+#ifndef __rtems__
+		handler->addr = frame_dma;
+#else /* __rtems__ */
+		handler->addr = (dma_addr_t)frame_ptr;
+#endif /* __rtems__ */
+		handler->frame_ptr = frame_ptr;
+		list_add_tail(&handler->node, &hp_cpu->handlers);
+	}
+}
+
+static void destroy_per_cpu_handlers(void)
+{
+	struct list_head *loop, *tmp;
+	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+	spin_lock(&hp_lock);
+	list_del(&hp_cpu->node);
+	spin_unlock(&hp_lock);
+	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+		u32 flags;
+		struct hp_handler *handler = list_entry(loop, struct hp_handler,
+							node);
+		if (qman_retire_fq(&handler->rx, &flags))
+			panic("qman_retire_fq(rx) failed");
+		BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+		if (qman_oos_fq(&handler->rx))
+			panic("qman_oos_fq(rx) failed");
+		qman_destroy_fq(&handler->rx, 0);
+		qman_destroy_fq(&handler->tx, 0);
+		qman_release_fqid(handler->fqid_rx);
+		list_del(&handler->node);
+		kmem_cache_free(hp_handler_slab, handler);
+	}
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+	u8 res = (offset + (L1_CACHE_BYTES - 1))
+			 / (L1_CACHE_BYTES);
+	if (res > 3)
+		return 3;
+	return res;
+}
+#define STASH_DATA_CL \
+	num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+	num_cachelines(offsetof(struct hp_handler, fqid_rx))
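+/*
+ * Worked numbers (illustration only, assuming 64-byte cachelines): the frame
+ * data is HP_NUM_WORDS * 4 = 320 bytes, i.e. 5 cachelines, so STASH_DATA_CL
+ * is capped at 3 by num_cachelines(); STASH_CTX_CL covers just the leading
+ * part of struct hp_handler, up to but excluding fqid_rx, which matches the
+ * "stashed on dequeue" fields documented in the struct above.
+ */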
+
+static void init_handler(void *__handler)
+{
+	struct qm_mcc_initfq opts;
+	struct hp_handler *handler = __handler;
+
+	BUG_ON(handler->processor_id != smp_processor_id());
+	/* Set up rx */
+	memset(&handler->rx, 0, sizeof(handler->rx));
+	if (handler == special_handler)
+		handler->rx.cb.dqrr = special_dqrr;
+	else
+		handler->rx.cb.dqrr = normal_dqrr;
+	if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
+		panic("qman_create_fq(rx) failed");
+	memset(&opts, 0, sizeof(opts));
+	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+	opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
+	opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
+	if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+				QMAN_INITFQ_FLAG_LOCAL, &opts))
+		panic("qman_init_fq(rx) failed");
+	/* Set up tx */
+	memset(&handler->tx, 0, sizeof(handler->tx));
+	if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+				&handler->tx))
+		panic("qman_create_fq(tx) failed");
+}
+
+static void init_phase2(void)
+{
+	int loop;
+	u32 fqid = 0;
+	u32 lfsr = 0xdeadbeef;
+	struct hp_cpu *hp_cpu;
+	struct hp_handler *handler;
+
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+			int ret;
+
+			if (!loop)
+				hp_cpu->iterator = list_first_entry(
+						&hp_cpu->handlers,
+						struct hp_handler, node);
+			else
+				hp_cpu->iterator = list_entry(
+						hp_cpu->iterator->node.next,
+						struct hp_handler, node);
+			/* Rx FQID is the previous handler's Tx FQID */
+			hp_cpu->iterator->fqid_rx = fqid;
+			/* Allocate new FQID for Tx */
+			ret = qman_alloc_fqid(&fqid);
+			if (ret)
+				panic("qman_alloc_fqid() failed");
+			hp_cpu->iterator->fqid_tx = fqid;
+			/* Rx mixer is the previous handler's Tx mixer */
+			hp_cpu->iterator->rx_mixer = lfsr;
+			/* Get new mixer for Tx */
+			lfsr = do_lfsr(lfsr);
+			hp_cpu->iterator->tx_mixer = lfsr;
+		}
+	}
+	/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+	BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
+	handler->fqid_rx = fqid;
+	handler->rx_mixer = lfsr;
+	/* and tag it as our "special" handler */
+	special_handler = handler;
+}
+
+static void init_phase3(void)
+{
+	int loop;
+	struct hp_cpu *hp_cpu;
+
+	for (loop = 0; loop < HP_PER_CPU; loop++) {
+		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+			if (!loop)
+				hp_cpu->iterator = list_first_entry(
+						&hp_cpu->handlers,
+						struct hp_handler, node);
+			else
+				hp_cpu->iterator = list_entry(
+						hp_cpu->iterator->node.next,
+						struct hp_handler, node);
+			preempt_disable();
+			if (hp_cpu->processor_id == smp_processor_id())
+				init_handler(hp_cpu->iterator);
+			else
+				smp_call_function_single(hp_cpu->processor_id,
+					init_handler, hp_cpu->iterator, 1);
+			preempt_enable();
+		}
+	}
+}
+
+static void send_first_frame(void *ignore)
+{
+	u32 *p = special_handler->frame_ptr;
+	u32 lfsr = HP_FIRST_WORD;
+	int loop;
+	struct qm_fd fd;
+
+	BUG_ON(special_handler->processor_id != smp_processor_id());
+	memset(&fd, 0, sizeof(fd));
+	qm_fd_addr_set64(&fd, special_handler->addr);
+	fd.format = qm_fd_contig_big;
+	fd.length29 = HP_NUM_WORDS * 4;
+	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+		if (*p != lfsr)
+			panic("corrupt frame data");
+		*p ^= special_handler->tx_mixer;
+		lfsr = do_lfsr(lfsr);
+	}
+	pr_info("Sending first frame\n");
+	if (qman_enqueue(&special_handler->tx, &fd, 0))
+		panic("qman_enqueue() failed");
+}
+
+void qman_test_stash(void)
+{
+#ifndef __rtems__
+	if (cpumask_weight(cpu_online_mask) < 2) {
+		pr_info("%s(): skip - only 1 CPU\n", __func__);
+		return;
+	}
+#endif /* __rtems__ */
+
+	pr_info("%s(): Starting\n", __func__);
+
+	hp_cpu_list_length = 0;
+	loop_counter = 0;
+	hp_handler_slab = kmem_cache_create("hp_handler_slab",
+			sizeof(struct hp_handler), L1_CACHE_BYTES,
+			SLAB_HWCACHE_ALIGN, NULL);
+	if (!hp_handler_slab)
+		panic("kmem_cache_create() failed");
+
+	allocate_frame_data();
+
+	/* Init phase 1 */
+	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+	if (on_all_cpus(create_per_cpu_handlers))
+		panic("on_all_cpus() failed");
+	pr_info("Number of cpus: %d, total of %d handlers\n",
+		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+	init_phase2();
+
+	init_phase3();
+
+	preempt_disable();
+	if (special_handler->processor_id == smp_processor_id())
+		send_first_frame(NULL);
+	else
+		smp_call_function_single(special_handler->processor_id,
+			send_first_frame, NULL, 1);
+	preempt_enable();
+
+	wait_event(queue, loop_counter == HP_LOOPS);
+	deallocate_frame_data();
+	if (on_all_cpus(destroy_per_cpu_handlers))
+		panic("on_all_cpus() failed");
+	kmem_cache_destroy(hp_handler_slab);
+	pr_info("%s(): Finished\n", __func__);
+}
diff --git a/linux/drivers/soc/fsl/qbman/qman_utils.c b/linux/drivers/soc/fsl/qbman/qman_utils.c
new file mode 100644
index 0000000..5b85f03
--- /dev/null
+++ b/linux/drivers/soc/fsl/qbman/qman_utils.c
@@ -0,0 +1,309 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* --- FQID Pool --- */
+
+struct qman_fqid_pool {
+	/* Base and size of the FQID range */
+	u32 fqid_base;
+	u32 total;
+	/* Number of FQIDs currently "allocated" */
+	u32 used;
+	/* Allocation optimisation. When 'used<total', it is the index of an
+	 * available FQID. Otherwise there are no available FQIDs, and this
+	 * will be set when the next deallocation occurs. */
+	u32 next;
+	/* A bit-field representation of the FQID range. */
+	unsigned long *bits;
+};
+
+#define QLONG_BYTES	sizeof(unsigned long)
+#define QLONG_BITS	(QLONG_BYTES * 8)
+/* Number of 'longs' required for the given number of bits */
+#define QNUM_LONGS(b)	(((b) + QLONG_BITS - 1) / QLONG_BITS)
+/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
+#define QNUM_BYTES(b)	(QNUM_LONGS(b) * QLONG_BYTES)
+/* And in bits */
+#define QNUM_BITS(b)	(QNUM_LONGS(b) * QLONG_BITS)
+
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
+{
+	struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	unsigned int i;
+
+	BUG_ON(!num);
+	if (!pool)
+		return NULL;
+	pool->fqid_base = fqid_start;
+	pool->total = num;
+	pool->used = 0;
+	pool->next = 0;
+	pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
+	if (!pool->bits) {
+		kfree(pool);
+		return NULL;
+	}
+	/* If num is not an even multiple of QLONG_BITS (or even 8, for
+	 * byte-oriented searching) then we fill the trailing bits with 1, to
+	 * make them look allocated (permanently). */
+	for (i = num + 1; i < QNUM_BITS(num); i++)
+		set_bit(i, pool->bits);
+	return pool;
+}
+EXPORT_SYMBOL(qman_fqid_pool_create);
+
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
+{
+	int ret = pool->used;
+
+	kfree(pool->bits);
+	kfree(pool);
+	return ret;
+}
+EXPORT_SYMBOL(qman_fqid_pool_destroy);
+
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
+{
+	int ret;
+
+	if (pool->used == pool->total)
+		return -ENOMEM;
+	*fqid = pool->fqid_base + pool->next;
+	ret = test_and_set_bit(pool->next, pool->bits);
+	BUG_ON(ret);
+	if (++pool->used == pool->total)
+		return 0;
+	pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
+	if (pool->next >= pool->total)
+		pool->next = find_first_zero_bit(pool->bits, pool->total);
+	BUG_ON(pool->next >= pool->total);
+	return 0;
+}
+EXPORT_SYMBOL(qman_fqid_pool_alloc);
+
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
+{
+	int ret;
+
+	fqid -= pool->fqid_base;
+	ret = test_and_clear_bit(fqid, pool->bits);
+	BUG_ON(!ret);
+	if (pool->used-- == pool->total)
+		pool->next = fqid;
+}
+EXPORT_SYMBOL(qman_fqid_pool_free);
+
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
+{
+	return pool->used;
+}
+EXPORT_SYMBOL(qman_fqid_pool_used);
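Taken together, the pool API above follows a create/alloc/free/destroy cycle; a minimal sketch, assuming a hypothetical base FQID and pool size:

	struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 64);
	u32 fqid;

	if (pool && qman_fqid_pool_alloc(pool, &fqid) == 0) {
		/* ... use 'fqid' for a frame queue ... */
		qman_fqid_pool_free(pool, fqid);
	}
	if (pool)
		qman_fqid_pool_destroy(pool);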
+
+static DECLARE_DPAA_RESOURCE(fqalloc); /* FQID allocator */
+static DECLARE_DPAA_RESOURCE(qpalloc); /* pool-channel allocator */
+static DECLARE_DPAA_RESOURCE(cgralloc); /* CGR ID allocator */
+
+/* FQID allocator front-end */
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&fqalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+static int fq_cleanup(u32 fqid)
+{
+	return qman_shutdown_fq(fqid) == 0;
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&fqalloc,
+						  fqid, count, fq_cleanup);
+
+	if (total_invalid)
+		pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
+			fqid, fqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_fqid_range);
+
+int qman_reserve_fqid_range(u32 fqid, u32 count)
+{
+	return dpaa_resource_reserve(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_reserve_fqid_range);
+
+void qman_seed_fqid_range(u32 fqid, u32 count)
+{
+	dpaa_resource_seed(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_seed_fqid_range);
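The range front-ends above follow the usual allocate-then-release pattern; a small sketch, with an arbitrary request size and assuming the return value is the number of FQIDs obtained (the convention documented for the analogous BPID allocator in bman.h below):

	u32 base;
	int ret = qman_alloc_fqid_range(&base, 8, 0, 0);

	if (ret > 0) {
		/* ... configure frame queues base .. base + ret - 1 ... */
		qman_release_fqid_range(base, ret);
	}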
+
+/* Pool-channel allocator front-end */
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&qpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+static int qpool_cleanup(u32 qp)
+{
+	/* We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
+	 * channel is the pool-channel being released. When a non-OOS FQD is
+	 * found we attempt to clean it up. */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 1;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			BUG_ON(err);
+			if (fqd.dest.channel == qp) {
+				/* The channel is the FQ's target, clean it */
+				if (qman_shutdown_fq(fq.fqid) != 0)
+					/* Couldn't shut down the FQ
+					   so the pool must be leaked */
+					return 0;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+void qman_release_pool_range(u32 qp, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&qpalloc,
+						  qp, count, qpool_cleanup);
+
+	if (total_invalid) {
+		/* Pool channels are almost always used individually */
+		if (count == 1)
+			pr_err("Pool channel 0x%x had %d leaks\n",
+				qp, total_invalid);
+		else
+			pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
+				qp, qp + count - 1, count, total_invalid);
+	}
+}
+EXPORT_SYMBOL(qman_release_pool_range);
+
+void qman_seed_pool_range(u32 poolid, u32 count)
+{
+	dpaa_resource_seed(&qpalloc, poolid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_pool_range);
+
+int qman_reserve_pool_range(u32 poolid, u32 count)
+{
+	return dpaa_resource_reserve(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_reserve_pool_range);
+
+
+/* CGR ID allocator front-end */
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&cgralloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+static int cqr_cleanup(u32 cgrid)
+{
+	/* We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose CGR is the
+	 * CGR being released.
+	 */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 1;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			BUG_ON(err);
+			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+			    (fqd.cgid == cgrid)) {
+				pr_err("CGRID 0x%x is being used by FQID 0x%x,"
+				       " CGR will be leaked\n",
+				       cgrid, fq.fqid);
+				return 1;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&cgralloc,
+						  cgrid, count, cqr_cleanup);
+	if (total_invalid)
+		pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
+			cgrid, cgrid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_cgrid_range);
+
+void qman_seed_cgrid_range(u32 cgrid, u32 count)
+{
+	dpaa_resource_seed(&cgralloc, cgrid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_cgrid_range);
diff --git a/linux/include/soc/fsl/bman.h b/linux/include/soc/fsl/bman.h
new file mode 100644
index 0000000..16f4efa
--- /dev/null
+++ b/linux/include/soc/fsl/bman.h
@@ -0,0 +1,524 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Enable blocking waits */
+#define FSL_DPA_CAN_WAIT       1
+#define FSL_DPA_CAN_WAIT_SYNC  1
+
+/* Last updated for v00.79 of the BG */
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
+#define BM_PIRQ_BSCN	0x00000001	/* Buffer depletion State Change */
+
+/* This wrapper represents a bit-array for the depletion state of the 64 BMan
+ * buffer pools. */
+struct bman_depletion {
+	u32 __state[2];
+};
+#define BMAN_DEPLETION_EMPTY { { 0x00000000, 0x00000000 } }
+#define BMAN_DEPLETION_FULL { { 0xffffffff, 0xffffffff } }
+#define __bmdep_word(x) ((x) >> 5)
+#define __bmdep_shift(x) ((x) & 0x1f)
+#define __bmdep_bit(x) (0x80000000 >> __bmdep_shift(x))
+static inline void bman_depletion_init(struct bman_depletion *c)
+{
+	c->__state[0] = c->__state[1] = 0;
+}
+static inline void bman_depletion_fill(struct bman_depletion *c)
+{
+	c->__state[0] = c->__state[1] = ~0;
+}
+static inline int bman_depletion_get(const struct bman_depletion *c, u8 bpid)
+{
+	return c->__state[__bmdep_word(bpid)] & __bmdep_bit(bpid);
+}
+static inline void bman_depletion_set(struct bman_depletion *c, u8 bpid)
+{
+	c->__state[__bmdep_word(bpid)] |= __bmdep_bit(bpid);
+}
+static inline void bman_depletion_unset(struct bman_depletion *c, u8 bpid)
+{
+	c->__state[__bmdep_word(bpid)] &= ~__bmdep_bit(bpid);
+}
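A quick illustration of the helpers above, using an arbitrary buffer pool ID:

	struct bman_depletion mask = BMAN_DEPLETION_EMPTY;

	bman_depletion_set(&mask, 3);		/* mark BPID 3 in the mask */
	if (bman_depletion_get(&mask, 3))
		bman_depletion_unset(&mask, 3);	/* and clear it again */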
+
+/* --- BMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct bm_rcr_entry;	/* RCR (Release Command Ring) entries */
+struct bm_mc_command;	/* MC (Management Command) command */
+struct bm_mc_result;	/* MC result */
+
+/* Code-reduction, define a wrapper for 48-bit buffers. In cases where a buffer
+ * pool id specific to this buffer is needed (BM_RCR_VERB_CMD_BPID_MULTI,
+ * BM_MCC_VERB_ACQUIRE), the 'bpid' field is used. */
+struct bm_buffer {
+	union {
+		struct {
+			u8 __reserved1;
+			u8 bpid;
+			u16 hi; /* High 16-bits of 48-bit address */
+			u32 lo; /* Low 32-bits of 48-bit address */
+		};
+		struct {
+			u64 __notaddress:16;
+			u64 addr:48;
+		};
+	};
+} __aligned(8);
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+	return buf->addr;
+}
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+	return (dma_addr_t)buf->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define bm_buffer_set64(buf, v) \
+	do { \
+		struct bm_buffer *__buf931 = (buf); \
+		__buf931->hi = upper_32_bits(v); \
+		__buf931->lo = lower_32_bits(v); \
+	} while (0)
+
+/* See 1.5.3.5.4: "Release Command" */
+struct bm_rcr_entry {
+	union {
+		struct {
+			u8 __dont_write_directly__verb;
+			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+			u8 __reserved1[62];
+		};
+		struct bm_buffer bufs[8];
+	};
+} __packed;
+#define BM_RCR_VERB_VBIT		0x80
+#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
+
+/* See 1.5.3.1: "Acquire Command" */
+/* See 1.5.3.2: "Query Command" */
+struct bm_mcc_acquire {
+	u8 bpid;
+	u8 __reserved1[62];
+} __packed;
+struct bm_mcc_query {
+	u8 __reserved2[63];
+} __packed;
+struct bm_mc_command {
+	u8 __dont_write_directly__verb;
+	union {
+		struct bm_mcc_acquire acquire;
+		struct bm_mcc_query query;
+	};
+} __packed;
+#define BM_MCC_VERB_VBIT		0x80
+#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE		0x10
+#define BM_MCC_VERB_CMD_QUERY		0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */
+
+/* See 1.5.3.3: "Acquire Response" */
+/* See 1.5.3.4: "Query Response" */
+struct bm_pool_state {
+	u8 __reserved1[32];
+	/* "availability state" and "depletion state" */
+	struct {
+		u8 __reserved1[8];
+		/* Access using bman_depletion_***() */
+		struct bman_depletion state;
+	} as, ds;
+};
+struct bm_mc_result {
+	union {
+		struct {
+			u8 verb;
+			u8 __reserved1[63];
+		};
+		union {
+			struct {
+				u8 __reserved1;
+				u8 bpid;
+				u8 __reserved2[62];
+			};
+			struct bm_buffer bufs[8];
+		} acquire;
+		struct bm_pool_state query;
+	};
+} __packed;
+#define BM_MCR_VERB_VBIT		0x80
+#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
+#define BM_MCR_VERB_CMD_ERR_ECC		0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+/* Determine the "availability state" of pool 'p' from a query result 'r' */
+#define BM_MCR_QUERY_AVAILABILITY(r, p)	\
+		bman_depletion_get(&r->query.as.state, p)
+/* Determine the "depletion state" of pool 'p' from a query result 'r' */
+#define BM_MCR_QUERY_DEPLETION(r, p)	\
+		bman_depletion_get(&r->query.ds.state, p)
+
+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
+
+/* Portal and Buffer Pools */
+
+/* Represents a managed portal */
+struct bman_portal;
+
+/* This object type represents BMan buffer pools. */
+struct bman_pool;
+
+struct bman_portal_config {
+	/* This is used for any "core-affine" portals, ie. default portals
+	 * associated to the corresponding cpu. -1 implies that there is no core
+	 * affinity configured. */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+#ifndef __rtems__
+	/* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs.) */
+	int is_shared;
+#endif /* __rtems__ */
+	/* These are the buffer pool IDs that may be used via this portal. */
+	struct bman_depletion mask;
+};
+
+/* This callback type is used when handling pool depletion entry/exit. The
+ * 'cb_ctx' value is the opaque value associated with the pool object in
+ * bman_new_pool(). 'depleted' is non-zero on depletion-entry, and zero on
+ * depletion-exit. */
+typedef void (*bman_cb_depletion)(struct bman_portal *bm,
+			struct bman_pool *pool, void *cb_ctx, int depleted);
+
+/* This struct specifies parameters for a bman_pool object. */
+struct bman_pool_params {
+	/* index of the buffer pool to encapsulate (0-63), ignored if
+	 * BMAN_POOL_FLAG_DYNAMIC_BPID is set. */
+	u32 bpid;
+	/* bit-mask of BMAN_POOL_FLAG_*** options */
+	u32 flags;
+	/* depletion-entry/exit callback, if BMAN_POOL_FLAG_DEPLETION is set */
+	bman_cb_depletion cb;
+	/* opaque user value passed as a parameter to 'cb' */
+	void *cb_ctx;
+	/* depletion-entry/exit thresholds, if BMAN_POOL_FLAG_THRESH is set. NB:
+	 * this is only allowed if BMAN_POOL_FLAG_DYNAMIC_BPID is used *and*
+	 * when run in the control plane (which controls BMan CCSR). This array
+	 * matches the definition of bm_pool_set(). */
+	u32 thresholds[4];
+};
+
+/* Flags to bman_new_pool() */
+#define BMAN_POOL_FLAG_NO_RELEASE    0x00000001 /* can't release to pool */
+#define BMAN_POOL_FLAG_ONLY_RELEASE  0x00000002 /* can only release to pool */
+#define BMAN_POOL_FLAG_DEPLETION     0x00000004 /* track depletion entry/exit */
+#define BMAN_POOL_FLAG_DYNAMIC_BPID  0x00000008 /* (de)allocate bpid */
+#define BMAN_POOL_FLAG_THRESH	     0x00000010 /* set depletion thresholds */
+#define BMAN_POOL_FLAG_STOCKPILE     0x00000020 /* stockpile to reduce hw ops */
+
+/* Flags to bman_release() */
+#ifdef FSL_DPA_CAN_WAIT
+#define BMAN_RELEASE_FLAG_WAIT	     0x00000001 /* wait if RCR is full */
+#ifndef __rtems__
+#define BMAN_RELEASE_FLAG_WAIT_INT   0x00000002 /* if we wait, interruptible? */
+#endif /* __rtems__ */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+#define BMAN_RELEASE_FLAG_WAIT_SYNC  0x00000004 /* if wait, until consumed? */
+#endif
+#endif
+#define BMAN_RELEASE_FLAG_NOW	     0x00000008 /* issue immediate release */
+
+/* Flags to bman_acquire() */
+#define BMAN_ACQUIRE_FLAG_STOCKPILE  0x00000001 /* no hw op, stockpile only */
+
+/* Portal Management */
+
+/**
+ * bman_get_portal_config - get portal configuration settings
+ *
+ * This returns a read-only view of the current cpu's affine portal settings.
+ */
+const struct bman_portal_config *bman_get_portal_config(void);
+
+/**
+ * bman_irqsource_get - return the portal work that is interrupt-driven
+ *
+ * Returns a bitmask of BM_PIRQ_**I processing sources that are currently
+ * enabled for interrupt handling on the current cpu's affine portal. These
+ * sources will trigger the portal interrupt and the interrupt handler (or a
+ * tasklet/bottom-half it defers to) will perform the corresponding processing
+ * work. The bman_poll_***() functions will only process sources that are not in
+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
+ * this always returns zero.
+ */
+u32 bman_irqsource_get(void);
+
+/**
+ * bman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of BM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via bman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
+int bman_irqsource_add(u32 bits);
+
+/**
+ * bman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of BM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via bman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU. */
+int bman_irqsource_remove(u32 bits);
+
+#ifndef __rtems__
+/**
+ * bman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *bman_affine_cpus(void);
+#endif /* __rtems__ */
+
+/**
+ * bman_poll_slow - process anything that isn't interrupt-driven.
+ *
+ * This function does any portal processing that isn't interrupt-driven. If the
+ * current CPU is sharing a portal hosted on another CPU, this function will
+ * return -EINVAL, otherwise the return value is a bitmask of BM_PIRQ_* sources
+ * indicating what interrupt sources were actually processed by the call.
+ *
+ * NB, unlike the legacy wrapper bman_poll(), this function will
+ * deterministically check for the presence of portal processing work and do it,
+ * which implies some latency even if there's nothing to do. The bman_poll()
+ * wrapper on the other hand (like the qman_poll() wrapper) attenuates this by
+ * checking for (and doing) portal processing infrequently. Ie. such that
+ * qman_poll() and bman_poll() can be called from core-processing loops. Use
+ * bman_poll_slow() when you yourself are deciding when to incur the overhead of
+ * processing.
+ */
+u32 bman_poll_slow(void);
+
+/**
+ * bman_poll - process anything that isn't interrupt-driven.
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. This function does whatever processing is not triggered by
+ * interrupts. This is a legacy wrapper that can be used in core-processing
+ * loops but mitigates the performance overhead of portal processing by
+ * adaptively bypassing true portal processing most of the time. (Processing is
+ * done once every 10 calls if the previous processing revealed that work needed
+ * to be done, or once every 1000 calls if the previous processing revealed no
+ * work needed doing.) If you wish to control this yourself, call
+ * bman_poll_slow() instead, which always checks for portal processing work.
+ */
+void bman_poll(void);
+
+/**
+ * bman_rcr_is_empty - Determine if portal's RCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * releases for the local portal have been processed by BMan but can't use the
+ * BMAN_RELEASE_FLAG_WAIT_SYNC flag to do this from the final bman_release().
+ * The function forces tracking of RCR consumption (which normally doesn't
+ * happen until release processing needs to find space to put new release
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int bman_rcr_is_empty(void);
+
+/**
+ * bman_alloc_bpid_range - Allocate a contiguous range of BPIDs
+ * @result: is set by the API to the base BPID of the allocated range
+ * @count: the number of BPIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count BPIDs
+ *
+ * Returns the number of BPIDs allocated, or a negative error code. If
+ * @partial is non-zero, the allocation request may return a smaller range of
+ * BPIDs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be @count or negative.
+ */
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int bman_alloc_bpid(u32 *result)
+{
+	int ret = bman_alloc_bpid_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * bman_release_bpid_range - Release the specified range of buffer pool IDs
+ * @bpid: the base BPID of the range to deallocate
+ * @count: the number of BPIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of BPIDs
+ * that it can subsequently allocate from.
+ */
+void bman_release_bpid_range(u32 bpid, u32 count);
+static inline void bman_release_bpid(u32 bpid)
+{
+	bman_release_bpid_range(bpid, 1);
+}
+
+int bman_reserve_bpid_range(u32 bpid, u32 count);
+static inline int bman_reserve_bpid(u32 bpid)
+{
+	return bman_reserve_bpid_range(bpid, 1);
+}
+
+void bman_seed_bpid_range(u32 bpid, u32 count);
+
+
+int bman_shutdown_pool(u32 bpid);
+
+/* Pool management */
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ * @params: parameters specifying the buffer pool ID and behaviour
+ *
+ * Creates a pool object for the given @params. A portal and the depletion
+ * callback field of @params are only used if the BMAN_POOL_FLAG_DEPLETION flag
+ * is set. NB, the fields from @params are copied into the new pool object, so
+ * the structure provided by the caller can be released or reused after the
+ * function returns.
+ */
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ *
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_params - Returns a pool object's parameters.
+ * @pool: the pool object
+ *
+ * The returned pointer refers to state within the pool object so must not be
+ * modified and can no longer be read once the pool object is destroyed.
+ */
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ * Adds the given buffers to RCR entries. If the portal @p was created with the
+ * "COMPACT" flag, then it will be using a compaction algorithm to improve
+ * utilisation of RCR. As such, these buffers may join an existing ring entry
+ * and/or it may not be issued right away so as to allow future releases to join
+ * the same ring entry. Use the BMAN_RELEASE_FLAG_NOW flag to override this
+ * behaviour by committing the RCR entry (or entries) right away. If the RCR
+ * ring is full, the function will return -EBUSY unless BMAN_RELEASE_FLAG_WAIT
+ * is selected, in which case it will sleep waiting for space to become
+ * available in RCR. If the function receives a signal before such time (and
+ * BMAN_RELEASE_FLAG_WAIT_INT is set), the function returns -EINTR. Otherwise,
+ * it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+			u32 flags);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+			u32 flags);
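A minimal release/acquire round trip based on the declarations above, written as a self-contained helper; the pool object and physical buffer address are placeholders supplied by the caller:

	/* Sketch: give one buffer to a pool, then take one back. */
	static int example_round_trip(struct bman_pool *pool, u64 phys_addr)
	{
		struct bm_buffer buf;
		int ret;

		bm_buffer_set64(&buf, phys_addr);	/* 48-bit address wrapper */
		ret = bman_release(pool, &buf, 1, 0);
		if (ret)
			return ret;
		return bman_acquire(pool, &buf, 1, 0);
	}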
+
+/**
+ * bman_flush_stockpile - Flush stockpile buffer(s) to the buffer pool
+ * @pool: the buffer pool object the stockpile belongs to
+ * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
+ *
+ * Adds stockpile buffers to RCR entries until the stockpile is empty.
+ * The return value will be a negative error code if a h/w error occurred.
+ * If BMAN_RELEASE_FLAG_NOW flag is passed and RCR ring is full,
+ * -EAGAIN will be returned.
+ */
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags);
+
+/**
+ * bman_query_pools - Query all buffer pool states
+ * @state: storage for the queried availability and depletion states
+ */
+int bman_query_pools(struct bm_pool_state *state);
+
+#ifdef CONFIG_FSL_BMAN
+/**
+ * bman_query_free_buffers - Query how many free buffers are in buffer pool
+ * @pool: the buffer pool object to query
+ *
+ * Returns the number of free buffers in the pool.
+ */
+u32 bman_query_free_buffers(struct bman_pool *pool);
+
+/**
+ * bman_update_pool_thresholds - Change the buffer pool's depletion thresholds
+ * @pool: the buffer pool object to which the thresholds will be set
+ * @thresholds: the new thresholds
+ */
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds);
+#endif
+
+/**
+ * The bman_p_***() variant below may be called in situations where the cpu
+ * to which the portal is affine is not yet online.
+ * @p: specifies which portal the API will use
+ */
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits);
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* __FSL_BMAN_H */
diff --git a/linux/include/soc/fsl/qman.h b/linux/include/soc/fsl/qman.h
new file mode 100644
index 0000000..f63feb8
--- /dev/null
+++ b/linux/include/soc/fsl/qman.h
@@ -0,0 +1,1986 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/bitops.h>
+#include <linux/rbtree.h>
+
+/* Extra lookup is needed on 64 bit machines */
+#if (BITS_PER_LONG == 64)
+#define CONFIG_FSL_QMAN_FQ_LOOKUP 1
+#endif
+
+/* Enable blocking waits */
+#define FSL_DPA_CAN_WAIT       1
+#define FSL_DPA_CAN_WAIT_SYNC  1
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+#define QMAN_CHANNEL_DCE 0x8a0
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+extern u16 qm_channel_dce;
+enum qm_dc_portal {
+	qm_dc_portal_fman0 = 0,
+	qm_dc_portal_fman1 = 1,
+	qm_dc_portal_caam = 2,
+	qm_dc_portal_pme = 3,
+	qm_dc_portal_rman = 4,
+	qm_dc_portal_dce = 5
+};
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
+#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
+#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
+/* This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing. */
+#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+			 QM_PIRQ_MRI)
+
+/* --- Clock speed --- */
+/* A qman driver instance may or may not know the current qman clock speed.
+ * However, certain CEETM calculations may not be possible if this is not known.
+ * The 'set' function will only succeed (return zero) if the driver did not
+ * already know the clock speed. Likewise, the 'get' function will only succeed
+ * if the driver does know the clock speed (either because it knew when booting,
+ * or was told via 'set'). In cases where software is running on a driver
+ * instance that does not know the clock speed (eg. on a hypervised data-plane),
+ * and the user can obtain the current qman clock speed by other means (eg. from
+ * a message sent from the control-plane), then the 'set' function can be used
+ * to enable rate-calculations in a driver where it would otherwise not be
+ * possible. */
+int qm_get_clock(u64 *clock_hz);
+int qm_set_clock(u64 clock_hz);
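For example, a data-plane instance that learns the QMan clock out of band could seed the driver as sketched here (the 500 MHz figure is purely illustrative):

	u64 qman_hz;

	if (qm_get_clock(&qman_hz) != 0)	/* clock speed not known yet */
		qm_set_clock(500000000);	/* value learned from the control-plane */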
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID. */
+#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
+#define QM_VDQCR_EXACT			0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)
+
+
+/* ------------------------------------------------------- */
+/* --- QMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry;	/* MR (Message Ring) entries */
+struct qm_mc_command;	/* MC (Management Command) command */
+struct qm_mc_result;	/* MC result */
+
+/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
+#define QM_FD_FORMAT_SG		0x4
+#define QM_FD_FORMAT_LONG	0x2
+#define QM_FD_FORMAT_COMPOUND	0x1
+enum qm_fd_format {
+	/* 'contig' implies a contiguous buffer, whereas 'sg' implies a
+	 * scatter-gather table. 'big' implies a 29-bit length with no offset
+	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+	 * implies a s/g-like table, where each entry itself represents a frame
+	 * (contiguous or scatter-gather) and the 29-bit "length" is
+	 * interpreted purely for congestion calculations, ie. a "congestion
+	 * weight". */
+	qm_fd_contig = 0,
+	qm_fd_contig_big = QM_FD_FORMAT_LONG,
+	qm_fd_sg = QM_FD_FORMAT_SG,
+	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+	qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG	0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG	QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG	(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND	QM_FD_FORMAT_COMPOUND
+
+/* See 1.5.1.1: "Frame Descriptor (FD)" */
+struct qm_fd {
+	union {
+		struct {
+			u8 dd:2;	/* dynamic debug */
+			u8 liodn_offset:6;
+			u8 bpid:8;	/* Buffer Pool ID */
+			u8 eliodn_offset:4;
+			u8 __reserved:4;
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u32 addr_lo;	/* low 32-bits of 40-bit address */
+		};
+		struct {
+			u64 __notaddress:24;
+			/* More efficient address accessor */
+			u64 addr:40;
+		};
+		u64 opaque_addr;
+	};
+	/* The 'format' field indicates the interpretation of the remaining 29
+	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
+	 * other union elements. Note, union'd structs are difficult to use with
+	 * static initialisation under gcc, in which case use the "opaque" form
+	 * with one of the macros. */
+	union {
+		/* For easier/faster copying of this part of the fd (eg. from a
+		 * DQRR entry to an EQCR entry) copy 'opaque' */
+		u32 opaque;
+		/* If 'format' is _contig or _sg, 20b length and 9b offset */
+		struct {
+			enum qm_fd_format format:3;
+			u16 offset:9;
+			u32 length20:20;
+		};
+		/* If 'format' is _contig_big or _sg_big, 29b length */
+		struct {
+			enum qm_fd_format _format1:3;
+			u32 length29:29;
+		};
+		/* If 'format' is _compound, 29b "congestion weight" */
+		struct {
+			enum qm_fd_format _format2:3;
+			u32 cong_weight:29;
+		};
+	};
+	union {
+		u32 cmd;
+		u32 status;
+	};
+} __aligned(8);
+#define QM_FD_DD_NULL		0x00
+#define QM_FD_PID_MASK		0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+	return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+	return (dma_addr_t)fd->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+	do { \
+		struct qm_fd *__fd931 = (fd); \
+		__fd931->addr = v; \
+	} while (0)
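The stash test imported earlier in this commit builds frame descriptors with exactly these accessors; condensed, the pattern looks like this (address and length are placeholders):

	struct qm_fd fd;

	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, 0x10000000ULL);	/* 40-bit buffer address */
	fd.format = qm_fd_contig_big;		/* contiguous, 29-bit length */
	fd.length29 = 256;			/* frame length in bytes */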
+
+/* For static initialisation of FDs (which is complicated by the use of unions
+ * in "struct qm_fd"), use the following macros. Note that;
+ * - 'dd', 'pid' and 'bpid' are ignored (because there's no static
+ *   initialisation use-case),
+ * - use capitalised QM_FD_*** formats for static initialisation.
+ */
+#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
+	{ 0, 0, 0, 0, 0, addr_hi, addr_lo, \
+	{ (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
+	{ cmd } }
+#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
+	{ 0, 0, 0, 0, 0, addr_hi, addr_lo, \
+	{ (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
+	{ cmd } }
+
+/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
+struct qm_sg_entry {
+	union {
+		struct {
+			u8 __reserved1[3];
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u32 addr_lo;	/* low 32-bits of 40-bit address */
+		};
+		struct {
+			u64 __notaddress:24;
+			u64 addr:40;
+		};
+	};
+	u32 extension:1;	/* Extension bit */
+	u32 final:1;		/* Final bit */
+	u32 length:30;
+	u8 __reserved2;
+	u8 bpid;
+	u16 __reserved3:3;
+	u16 offset:13;
+} __packed;
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+	return sg->addr;
+}
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+	return (dma_addr_t)sg->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+	do { \
+		struct qm_sg_entry *__sg931 = (sg); \
+		__sg931->addr = v; \
+	} while (0)
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct qm_eqcr_entry {
+	u8 __dont_write_directly__verb;
+	u8 dca;
+	u16 seqnum;
+	u32 orp;	/* 24-bit */
+	u32 fqid;	/* 24-bit */
+	u32 tag;
+	struct qm_fd fd;
+	u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT		0x80
+#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
+#define QM_EQCR_VERB_COLOUR_MASK	0x18	/* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN	0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW	0x08
+#define QM_EQCR_VERB_COLOUR_RED		0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE	0x18
+#define QM_EQCR_VERB_INTERRUPT		0x04	/* on command consumption */
+#define QM_EQCR_VERB_ORP		0x02	/* enable order restoration */
+#define QM_EQCR_DCA_ENABLE		0x80
+#define QM_EQCR_DCA_PARK		0x40
+#define QM_EQCR_DCA_IDXMASK		0x0f	/* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
+#define QM_EQCR_FQID_NULL		0	/* eg. for an ORP seqnum hole */
+
+/* See 1.5.8.2: "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+	u8 verb;
+	u8 stat;
+	u16 seqnum;	/* 15-bit */
+	u8 tok;
+	u8 __reserved2[3];
+	u32 fqid;	/* 24-bit */
+	u32 contextB;
+	struct qm_fd fd;
+	u8 __reserved4[32];
+};
+#define QM_DQRR_VERB_VBIT		0x80
+#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */
+
+/* See 1.5.8.3: "ERN Message Response" */
+/* See 1.5.8.4: "FQ State Change Notification" */
+struct qm_mr_entry {
+	u8 verb;
+	union {
+		struct {
+			u8 dca;
+			u16 seqnum;
+			u8 rc;		/* Rejection Code */
+			u32 orp:24;
+			u32 fqid;	/* 24-bit */
+			u32 tag;
+			struct qm_fd fd;
+		} __packed ern;
+		struct {
+			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
+			u8 __reserved1:3;
+			enum qm_dc_portal portal:3;
+			u16 __reserved2;
+			u8 rc;		/* Rejection Code */
+			u32 __reserved3:24;
+			u32 fqid;	/* 24-bit */
+			u32 tag;
+			struct qm_fd fd;
+		} __packed dcern;
+		struct {
+			u8 fqs;		/* Frame Queue Status */
+			u8 __reserved1[6];
+			u32 fqid;	/* 24-bit */
+			u32 contextB;
+			u8 __reserved2[16];
+		} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
+	};
+	u8 __reserved2[32];
+} __packed;
+#define QM_MR_VERB_VBIT			0x80
+/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
+ * originating from direct-connect portals ("dcern") use 0x20 as a verb which
+ * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
+ * the other MR types by noting if the 0x20 bit is unset. */
+#define QM_MR_VERB_TYPE_MASK		0x27
+#define QM_MR_VERB_DC_ERN		0x20
+#define QM_MR_VERB_FQRN			0x21
+#define QM_MR_VERB_FQRNI		0x22
+#define QM_MR_VERB_FQRL			0x23
+#define QM_MR_VERB_FQPN			0x24
+#define QM_MR_RC_MASK			0xf0	/* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP		0x00
+#define QM_MR_RC_WRED			0x10
+#define QM_MR_RC_ERROR			0x20
+#define QM_MR_RC_ORPWINDOW_EARLY	0x30
+#define QM_MR_RC_ORPWINDOW_LATE		0x40
+#define QM_MR_RC_FQ_TAILDROP		0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
+#define QM_MR_RC_ORP_ZERO		0x70
+#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN	0x00
+#define QM_MR_DCERN_COLOUR_YELLOW	0x01
+#define QM_MR_DCERN_COLOUR_RED		0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE	0x03
+
+/* An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation. */
+struct qm_fqd_stashing {
+	/* See QM_STASHING_EXCL_<...> */
+	u8 exclusive;
+	u8 __reserved1:2;
+	/* Numbers of cachelines */
+	u8 annotation_cl:2;
+	u8 data_cl:2;
+	u8 context_cl:2;
+} __packed;
+struct qm_fqd_taildrop {
+	u16 __reserved1:3;
+	u16 mant:8;
+	u16 exp:5;
+} __packed;
+struct qm_fqd_oac {
+	/* See QM_OAC_<...> */
+	u8 oac:2; /* "Overhead Accounting Control" */
+	u8 __reserved1:6;
+	/* Two's-complement value (-128 to +127) */
+	signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+	union {
+		u8 orpc;
+		struct {
+			u8 __reserved1:2;
+			u8 orprws:3;
+			u8 oa:1;
+			u8 olws:2;
+		} __packed;
+	};
+	u8 cgid;
+	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
+	union {
+		u16 dest_wq;
+		struct {
+			u16 channel:13; /* qm_channel */
+			u16 wq:3;
+		} __packed dest;
+	};
+	u16 __reserved2:1;
+	u16 ics_cred:15;
+	/* For "Initialize Frame Queue" commands, the write-enable mask
+	 * determines whether 'td' or 'oac_init' is observed. For query
+	 * commands, this field is always 'td', and 'oac_query' (below) reflects
+	 * the Overhead ACcounting values. */
+	union {
+		struct qm_fqd_taildrop td;
+		struct qm_fqd_oac oac_init;
+	};
+	u32 context_b;
+	union {
+		/* Treat it as 64-bit opaque */
+		u64 opaque;
+		struct {
+			u32 hi;
+			u32 lo;
+		};
+		/* Treat it as s/w portal stashing config */
+		/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+		struct {
+			struct qm_fqd_stashing stashing;
+			/* 48-bit address of FQ context to
+			 * stash, must be cacheline-aligned */
+			u16 context_hi;
+			u32 context_lo;
+		} __packed;
+	} context_a;
+	struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+	return ((u64)fqd->context_a.context_hi << 32) |
+		(u64)fqd->context_a.context_lo;
+}
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+	return ((u64)fqd->context_a.hi << 32) |
+		(u64)fqd->context_a.lo;
+}
+/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
+#define qm_fqd_stashing_set64(fqd, v) \
+	do { \
+		struct qm_fqd *__fqd931 = (fqd); \
+		__fqd931->context_a.context_hi = upper_32_bits(v); \
+		__fqd931->context_a.context_lo = lower_32_bits(v); \
+	} while (0)
+#define qm_fqd_context_a_set64(fqd, v) \
+	do { \
+		struct qm_fqd *__fqd931 = (fqd); \
+		__fqd931->context_a.hi = upper_32_bits(v); \
+		__fqd931->context_a.lo = lower_32_bits(v); \
+	} while (0)
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+					int roundup)
+{
+	u32 e = 0;
+	int oddbit = 0;
+
+	if (val > 0xe0000000)
+		return -ERANGE;
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	td->exp = e;
+	td->mant = val;
+	return 0;
+}
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+	return (u32)td->mant << td->exp;
+}
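As a worked example of the mant+exp encoding above: requesting a threshold of 300000 with rounding up yields mant = 147 and exp = 11, and reading it back gives 147 << 11 = 301056, the smallest representable value not below the request:

	struct qm_fqd_taildrop td;

	qm_fqd_taildrop_set(&td, 300000, 1);	/* round up */
	/* qm_fqd_taildrop_get(&td) == 301056 */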
+
+/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
+#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
+#define QM_FQCTRL_ORP		0x0100	/* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION	0x04
+#define QM_STASHING_EXCL_DATA		0x02
+#define QM_STASHING_EXCL_CTX		0x01
+
+/* See 1.5.5.3: "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */
+
+/* See 1.5.8.4: "FQ State Change Notification" */
+/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ *   MaxTH = MA * (2 ^ Mn)
+ *   Slope = SA / (2 ^ Sn)
+ *    MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+	union {
+		u32 word;
+		struct {
+			u32 MA:8;
+			u32 Mn:5;
+			u32 SA:7; /* must be between 64-127 */
+			u32 Sn:6;
+			u32 Pn:6;
+		} __packed;
+	};
+} __packed;
+/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ *   CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+	u16 __reserved:3;
+	u16 TA:8;
+	u16 Tn:5;
+} __packed;
+/* This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct. */
+struct __qm_mc_cgr {
+	struct qm_cgr_wr_parm wr_parm_g;
+	struct qm_cgr_wr_parm wr_parm_y;
+	struct qm_cgr_wr_parm wr_parm_r;
+	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
+	u8 cscn_en;	/* boolean, use QM_CGR_EN */
+	union {
+		struct {
+			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+			u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
+		};
+		u32 cscn_targ;	/* use QM_CGR_TARG_* */
+	};
+	u8 cstd_en;	/* boolean, use QM_CGR_EN */
+	u8 cs;		/* boolean, only used in query response */
+	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+	return (u64)th->TA << th->Tn;
+}
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+					int roundup)
+{
+	u32 e = 0;
+	int oddbit = 0;
+
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	th->Tn = e;
+	th->TA = val;
+	return 0;
+}
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+	u8 __reserved1;
+	u16 we_mask;	/* Write Enable Mask */
+	u32 fqid;	/* 24-bit */
+	u16 count;	/* Initialises 'count+1' FQDs */
+	struct qm_fqd fqd; /* the FQD fields go here */
+	u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2;
+	u8 count;	/* number of consecutive FQID */
+	u8 __reserved3[10];
+	u32 context_b;	/* frame queue context b */
+	u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+	u8 __reserved1;
+	u16 we_mask;	/* Write Enable Mask */
+	struct __qm_mc_cgr cgr;	/* CGR fields */
+	u8 __reserved2[2];
+	u8 cgid;
+	u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+	u8 __reserved1[2];
+	u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u8 __reserved2[23];
+	u8 cgid;
+	u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+	u8 __reserved1[30];
+	u8 cgid;
+	u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+	u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+	u8 __reserved;
+	/* select channel if verb != QUERYWQ_DEDICATED */
+	union {
+		u16 channel_wq; /* ignores wq (3 lsbits) */
+		struct {
+			u16 id:13; /* qm_channel */
+			u16 __reserved1:3;
+		} __packed channel;
+	};
+	u8 __reserved2[60];
+} __packed;
+
+struct qm_mc_command {
+	u8 __dont_write_directly__verb;
+	union {
+		struct qm_mcc_initfq initfq;
+		struct qm_mcc_queryfq queryfq;
+		struct qm_mcc_queryfq_np queryfq_np;
+		struct qm_mcc_alterfq alterfq;
+		struct qm_mcc_initcgr initcgr;
+		struct qm_mcc_cgrtestwrite cgrtestwrite;
+		struct qm_mcc_querycgr querycgr;
+		struct qm_mcc_querycongestion querycongestion;
+		struct qm_mcc_querywq querywq;
+	};
+} __packed;
+#define QM_MCC_VERB_VBIT		0x80
+#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED	0x40
+#define QM_MCC_VERB_INITFQ_SCHED	0x41
+#define QM_MCC_VERB_QUERYFQ		0x44
+#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ		0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
+#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
+#define QM_MCC_VERB_INITCGR		0x50
+#define QM_MCC_VERB_MODIFYCGR		0x51
+#define QM_MCC_VERB_CGRTESTWRITE	0x52
+#define QM_MCC_VERB_QUERYCGR		0x58
+#define QM_MCC_VERB_QUERYCONGESTION	0x59
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC		0x0100
+#define QM_INITFQ_WE_ORPC		0x0080
+#define QM_INITFQ_WE_CGID		0x0040
+#define QM_INITFQ_WE_FQCTRL		0x0020
+#define QM_INITFQ_WE_DESTWQ		0x0010
+#define QM_INITFQ_WE_ICSCRED		0x0008
+#define QM_INITFQ_WE_TDTHRESH		0x0004
+#define QM_INITFQ_WE_CONTEXTB		0x0002
+#define QM_INITFQ_WE_CONTEXTA		0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G		0x0400
+#define QM_CGR_WE_WR_PARM_Y		0x0200
+#define QM_CGR_WE_WR_PARM_R		0x0100
+#define QM_CGR_WE_WR_EN_G		0x0080
+#define QM_CGR_WE_WR_EN_Y		0x0040
+#define QM_CGR_WE_WR_EN_R		0x0020
+#define QM_CGR_WE_CSCN_EN		0x0010
+#define QM_CGR_WE_CSCN_TARG		0x0008
+#define QM_CGR_WE_CSTD_EN		0x0004
+#define QM_CGR_WE_CS_THRES		0x0002
+#define QM_CGR_WE_MODE			0x0001
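Tying several of these definitions together, the stash test imported earlier in this commit initialises an FQ with context-A stashing roughly as follows (the cacheline counts are placeholders; qman_init_fq() itself belongs to the portal API later in this header):

	struct qm_mcc_initfq opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
	opts.fqd.context_a.stashing.data_cl = 1;	/* cachelines of frame data to stash */
	opts.fqd.context_a.stashing.context_cl = 1;	/* cachelines of FQ context to stash */
	/* then: qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, &opts); */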
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcr_initfq {
+	u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+	u8 __reserved1[8];
+	struct qm_fqd fqd;	/* the FQD fields are here */
+	u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+	u8 __reserved1;
+	u8 state;	/* QM_MCR_NP_STATE_*** */
+	u8 __reserved2;
+	u32 fqd_link:24;
+	u16 __reserved3:2;
+	u16 odp_seq:14;
+	u16 __reserved4:2;
+	u16 orp_nesn:14;
+	u16 __reserved5:1;
+	u16 orp_ea_hseq:15;
+	u16 __reserved6:1;
+	u16 orp_ea_tseq:15;
+	u8 __reserved7;
+	u32 orp_ea_hptr:24;
+	u8 __reserved8;
+	u32 orp_ea_tptr:24;
+	u8 __reserved9;
+	u32 pfdr_hptr:24;
+	u8 __reserved10;
+	u32 pfdr_tptr:24;
+	u8 __reserved11[5];
+	u8 __reserved12:7;
+	u8 is:1;
+	u16 ics_surp;
+	u32 byte_cnt;
+	u8 __reserved13;
+	u32 frm_cnt:24;
+	u32 __reserved14;
+	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
+	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
+	u16 __reserved15;
+	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
+	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
+	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
+} __packed;
+struct qm_mcr_alterfq {
+	u8 fqs;		/* Frame Queue Status */
+	u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+	u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[3];
+	u32 __reserved3:24;
+	u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u32 __reserved4:24;
+	u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	u16 lgt;	/* Last Group Tick */
+	u16 wr_prob_g;
+	u16 wr_prob_y;
+	u16 wr_prob_r;
+	u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[3];
+	u32 __reserved3:24;
+	u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u32 __reserved4:24;
+	u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	union {
+		u32 cscn_targ_swp[4];
+		u8 __reserved5[16];
+	};
+} __packed;
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+static inline u64 qm_mcr_cgrtestwrite_i_get64(
+					const struct qm_mcr_cgrtestwrite *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_cgrtestwrite_a_get64(
+					const struct qm_mcr_cgrtestwrite *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_mcr_querycgr_i_set64(q, v) \
+	do { \
+		struct qm_mcr_querycgr *__q931 = (q); \
+		__q931->i_bcnt_hi = upper_32_bits(v); \
+		__q931->i_bcnt_lo = lower_32_bits(v); \
+	} while (0)
+#define qm_mcr_querycgr_a_set64(q, v) \
+	do { \
+		struct qm_mcr_querycgr *__q931 = (q); \
+		__q931->a_bcnt_hi = upper_32_bits(v); \
+		__q931->a_bcnt_lo = lower_32_bits(v); \
+	} while (0)
+struct __qm_mcr_querycongestion {
+	u32 __state[8];
+};
+struct qm_mcr_querycongestion {
+	u8 __reserved[30];
+	/* Access this struct using QM_MCR_QUERYCONGESTION() */
+	struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+	union {
+		u16 channel_wq; /* ignores wq (3 lsbits) */
+		struct {
+			u16 id:13; /* qm_channel */
+			u16 __reserved:3;
+		} __packed channel;
+	};
+	u8 __reserved[28];
+	u32 wq_len[8];
+} __packed;
+
+struct qm_mc_result {
+	u8 verb;
+	u8 result;
+	union {
+		struct qm_mcr_initfq initfq;
+		struct qm_mcr_queryfq queryfq;
+		struct qm_mcr_queryfq_np queryfq_np;
+		struct qm_mcr_alterfq alterfq;
+		struct qm_mcr_initcgr initcgr;
+		struct qm_mcr_cgrtestwrite cgrtestwrite;
+		struct qm_mcr_querycgr querycgr;
+		struct qm_mcr_querycongestion querycongestion;
+		struct qm_mcr_querywq querywq;
+	};
+} __packed;
+
+#define QM_MCR_VERB_RRID		0x80
+#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL		0x00
+#define QM_MCR_RESULT_OK		0xf0
+#define QM_MCR_RESULT_ERR_FQID		0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
+#define QM_MCR_RESULT_PENDING		0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
+#define QM_MCR_NP_STATE_FE		0x10
+#define QM_MCR_NP_STATE_R		0x08
+#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS		0x00
+#define QM_MCR_NP_STATE_RETIRED		0x01
+#define QM_MCR_NP_STATE_TEN_SCHED	0x02
+#define QM_MCR_NP_STATE_TRU_SCHED	0x03
+#define QM_MCR_NP_STATE_PARKED		0x04
+#define QM_MCR_NP_STATE_ACTIVE		0x05
+#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ *   u8 cgr = [...];
+ *   struct qm_mc_result *res = [...];
+ *   printf("congestion group %d congestion state: %d\n", cgr,
+ *	 QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num)		((num) >> 5)
+#define __CGR_SHIFT(num)	((num) & 0x1f)
+#define __CGR_NUM		(sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+					u8 cgr)
+{
+	return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+
+/*********************/
+/* Utility interface */
+/*********************/
+
+/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
+ * spinlock them yourself if needed. */
+struct qman_fqid_pool;
+
+/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
+ * always succeeds, but returns non-zero if there were "leaked" FQID
+ * allocations. */
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
+/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
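+
+/* Example sketch of FQID pool usage; locking (see the note above) and error
+ * handling are omitted, and 'base_fqid' is a placeholder for a platform-chosen
+ * base:
+ *
+ *     struct qman_fqid_pool *pool = qman_fqid_pool_create(base_fqid, 64);
+ *     u32 fqid;
+ *
+ *     if (pool && !qman_fqid_pool_alloc(pool, &fqid)) {
+ *	   [... use 'fqid' for a frame queue ...]
+ *	   qman_fqid_pool_free(pool, fqid);
+ *     }
+ *     qman_fqid_pool_destroy(pool);
+ */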
+
+/*******************************************************************/
+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
+/*******************************************************************/
+
+	/* Portal and Frame Queues */
+	/* ----------------------- */
+/* Represents a managed portal */
+struct qman_portal;
+
+/* This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down. */
+struct qman_fq;
+
+/* This object type represents a QMan congestion group, it is defined further
+ * down. */
+struct qman_cgr;
+
+struct qman_portal_config {
+	/* If the caller enables DQRR stashing (and thus wishes to operate the
+	 * portal from only one cpu), this is the logical CPU that the portal
+	 * will stash to. Whether stashing is enabled or not, this setting is
+	 * also used for any "core-affine" portals, ie. default portals
+	 * associated to the corresponding cpu. -1 implies that there is no core
+	 * affinity configured. */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+#ifndef __rtems__
+	/* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs.) */
+	int is_shared;
+#endif /* __rtems__ */
+	/* The portal's dedicated channel id, use this value for initialising
+	 * frame queues to target this portal when scheduled. */
+	u16 channel;
+	/* A mask of which pool channels this portal has dequeue access to
+	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
+	u32 pools;
+};
+
+/* This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
+enum qman_cb_dqrr_result {
+	/* DQRR entry can be consumed */
+	qman_cb_dqrr_consume,
+	/* Like _consume, but requests parking - FQ must be held-active */
+	qman_cb_dqrr_park,
+	/* Does not consume, for DCA mode only. This allows out-of-order
+	 * consumes by explicit calls to qman_dca() and/or the use of implicit
+	 * DCA via EQCR entries. */
+	qman_cb_dqrr_defer,
+	/* Stop processing without consuming this ring entry. Exits the current
+	 * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
+	 * interrupt handler, the callback would typically call
+	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+	 * otherwise the interrupt will reassert immediately. */
+	qman_cb_dqrr_stop,
+	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
+	qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr);
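+
+/* Example sketch of a DQRR callback; 'my_dqrr_cb' is a placeholder name and
+ * the processing of the dequeued frame is elided:
+ *
+ *     static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
+ *		struct qman_fq *fq, const struct qm_dqrr_entry *dqrr)
+ *     {
+ *	   [... process the frame carried by the DQRR entry ...]
+ *	   return qman_cb_dqrr_consume;
+ *     }
+ */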
+
+/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns. */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+				const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+				const struct qm_mr_entry *msg);
+
+/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". A state such as "retired" is
+ * not reported until the transition completes (ie. QMAN_FQ_STATE_CHANGING is
+ * set until then, to indicate the change is in progress and to gate attempts
+ * to retry the retire command). Note, park commands do not set
+ * QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked". */
+enum qman_fq_state {
+	qman_fq_state_oos,
+	qman_fq_state_parked,
+	qman_fq_state_sched,
+	qman_fq_state_retired
+};
+
+/* Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ *     // myfq is allocated and driver_fq callbacks filled in;
+ *     struct my_fq {
+ *	   struct qman_fq base;
+ *	   int an_extra_field;
+ *	   [ ... add other fields to be associated with each FQ ...]
+ *     } *myfq = some_my_fq_allocator();
+ *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ *     // in a dequeue callback, access extra fields from 'fq' via a cast;
+ *     struct my_fq *myfq = (struct my_fq *)fq;
+ *     do_something_with(myfq->an_extra_field);
+ *     [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ *     many cachelines are required to stash 'struct my_fq', to accelerate not
+ *     only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+	qman_cb_dqrr dqrr;	/* for dequeued frames */
+	qman_cb_mr ern;		/* for s/w ERNs */
+	qman_cb_mr fqs;		/* frame-queue state changes*/
+};
+
+struct qman_fq {
+	/* Caller of qman_create_fq() provides these demux callbacks */
+	struct qman_fq_cb cb;
+	/* These are internal to the driver, don't touch. In particular, they
+	 * may change, be removed, or extended (so you shouldn't rely on
+	 * sizeof(qman_fq) being a constant). */
+	spinlock_t fqlock;
+	u32 fqid;
+	volatile unsigned long flags;
+	enum qman_fq_state state;
+	int cgr_groupid;
+	struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	u32 key;
+#endif
+};
+
+/* This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+			struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+	/* Set these prior to qman_create_cgr() */
+	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+	qman_cb_cgr cb;
+	/* These are private to the driver */
+	u16 chan; /* portal channel this object is created on */
+	struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE	     0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY	     0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED	     0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS	     0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED	     0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING	     0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE	     0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL	     0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS	     0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN	     0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR	     0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED	     0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL	     0x00000004 /* set dest portal */
+
+/* Flags to qman_volatile_dequeue() */
+#ifdef FSL_DPA_CAN_WAIT
+#define QMAN_VOLATILE_FLAG_WAIT	     0x00000001 /* wait if VDQCR is in use */
+#ifndef __rtems__
+#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
+#endif /* __rtems__ */
+#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */
+#endif
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.) */
+#ifdef FSL_DPA_CAN_WAIT
+#define QMAN_ENQUEUE_FLAG_WAIT	     0x00010000 /* wait if EQCR is full */
+#ifndef __rtems__
+#define QMAN_ENQUEUE_FLAG_WAIT_INT   0x00020000 /* if wait, interruptible? */
+#endif /* __rtems__ */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+#define QMAN_ENQUEUE_FLAG_WAIT_SYNC  0x00000004 /* if wait, until consumed? */
+#endif
+#endif
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR  0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA	     0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK   0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p)		/* If DCA, p is DQRR entry */ \
+		(((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN    0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW   0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED	     0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ *   of a frame. */
+#define QMAN_ENQUEUE_FLAG_NLIS	     0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ *   would otherwise block it (eg. if a frame has been dropped). */
+#define QMAN_ENQUEUE_FLAG_HOLE	     0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ *   number. */
+#define QMAN_ENQUEUE_FLAG_NESN	     0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT	     0x00000001
+#define QMAN_CGR_MODE_FRAME	     0x00000001
+
+	/* Portal Management */
+	/* ----------------- */
+/**
+ * qman_get_portal_config - get portal configuration settings
+ *
+ * This returns a read-only view of the current cpu's affine portal settings.
+ */
+const struct qman_portal_config *qman_get_portal_config(void);
+
+/**
+ * qman_irqsource_get - return the portal work that is interrupt-driven
+ *
+ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
+ * enabled for interrupt handling on the current cpu's affine portal. These
+ * sources will trigger the portal interrupt and the interrupt handler (or a
+ * tasklet/bottom-half it defers to) will perform the corresponding processing
+ * work. The qman_poll_***() functions will only process sources that are not in
+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
+ * this always returns zero.
+ */
+u32 qman_irqsource_get(void);
+
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
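+
+/* Example sketch: move DQRR processing to interrupt-driven operation and back
+ * again on the current cpu's affine portal (return values not checked):
+ *
+ *     qman_irqsource_add(QM_PIRQ_DQRI);
+ *     [... DQRR entries are now handled from the portal interrupt ...]
+ *     qman_irqsource_remove(QM_PIRQ_DQRI);
+ *     [... DQRR entries must now be handled via qman_poll_dqrr() ...]
+ */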
+
+#ifndef __rtems__
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+#endif /* __rtems__ */
+
+/**
+ * qman_affine_channel - return the channel ID of a cpu's affine portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ */
+void *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+int qman_poll_dqrr(unsigned int limit);
+
+/**
+ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
+ *
+ * This function does any portal processing that isn't interrupt-driven. If the
+ * current CPU is sharing a portal hosted on another CPU, this function will
+ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
+ * indicating what interrupt sources were actually processed by the call.
+ */
+u32 qman_poll_slow(void);
+
+/**
+ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done. Applications can tune this
+ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
+ * rather than going via this wrapper.
+ */
+void qman_poll(void);
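+
+/* Example sketch of a dispatch loop that services the affine portal when its
+ * processing is not interrupt-driven; the DQRR budget of 16 entries per
+ * iteration is an arbitrary choice:
+ *
+ *     for (;;) {
+ *	   qman_poll_dqrr(16);
+ *	   qman_poll_slow();
+ *	   [... other application work ...]
+ *     }
+ */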
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_static_dequeue_add(u32 pools);
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(void);
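+
+/* Example sketch: grant the affine portal static dequeue access to pool
+ * channel 3 and later remove it again (the portal must have dequeue access to
+ * that pool, see the 'pools' field of struct qman_portal_config):
+ *
+ *     qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(3));
+ *     [... frames from pool channel 3 now arrive via the DQRR callbacks ...]
+ *     qman_static_dequeue_del(QM_SDQCR_CHANNELS_POOL(3));
+ */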
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgment
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active @fq should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by QMan but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to QMan (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+	/* FQ management */
+	/* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with; it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used on the cpu's affine portal to query state for frame queue
+ * @fqid and construct a frame queue object based on that, rather than
+ * assuming/requiring that it be
+ * Out of Service.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ *     initialised to a value used by the driver for demux.
+ *   - if context_b is initialised for demux, so is context_a in case stashing
+ *     is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
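+
+/* Example sketch: create a frame queue object for an already-allocated FQID
+ * and schedule it to the local portal. 'my_fqid' is a placeholder, 'my_dqrr_cb'
+ * is a placeholder dequeue callback, the ern/fqs callbacks and error handling
+ * are omitted, and the object must remain valid while the FQ is in use:
+ *
+ *     static struct qman_fq fq = {
+ *	   .cb.dqrr = my_dqrr_cb,
+ *     };
+ *
+ *     qman_create_fq(my_fqid, 0, &fq);
+ *     qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL);
+ */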
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked, which takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
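+
+/* Example sketch of the teardown sequence for a frame queue (error handling
+ * and the asynchronous-retirement case are elided for brevity):
+ *
+ *     u32 flags;
+ *
+ *     qman_retire_fq(fq, &flags);
+ *     [... if retirement was asynchronous, wait for the FQRN callback ...]
+ *     qman_oos_fq(fq);
+ *     qman_destroy_fq(fq, 0);
+ */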
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state, must not be in the
+ * 'oos', 'retired' or 'parked' state
+ * @xon: boolean to set fq in XON or XOFF state
+ *
+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
+ * state,
+ * otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query the lengths of the WQs in the channel
+ *		dedicated to this software portal. Otherwise, query the lengths
+ *		of the WQs in the channel specified in @wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel to
+ *	query if query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
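+
+/* Example sketch: issue a volatile dequeue and wait for it to complete; the
+ * 'vdqcr' word must be built from the QM_VDQCR_*** options mentioned above
+ * (its construction is omitted here), and the FINISH flag is only available
+ * where FSL_DPA_CAN_WAIT is defined:
+ *
+ *     qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_FINISH, vdqcr);
+ */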
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the EQCR of the cpu's affine portal to enqueue the frame
+ * described by @fd. The descriptor details are copied from @fd to the EQCR
+ * entry, the 'pid' field is ignored. The return value is non-zero on error,
+ * such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when QMan consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, QMan will
+ * perform an implied "discrete consumption acknowledgment" on the dequeue
+ * ring's (DQRR) entry, at the ring index specified by the FLAG_DCA_IDX(x)
+ * macro. (As an alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until QMan consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgment should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
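+
+/* Example sketch: enqueue a prepared frame descriptor 'fd' to 'fq', simply
+ * retrying while the command is rejected (eg. EQCR full). Real users would
+ * bound the retries, check for congestion, or use QMAN_ENQUEUE_FLAG_WAIT
+ * where available:
+ *
+ *     while (qman_enqueue(fq, &fd, 0) != 0)
+ *	   ;
+ */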
+
+typedef int (*qman_cb_precommit) (void *arg);
+/**
+ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @cb: user supplied callback function to invoke before writing commit verb.
+ * @cb_arg: callback function argument
+ *
+ * This is similar to qman_enqueue except that it will invoke a user supplied
+ * callback function just before writing the commit verb. This is useful
+ * when the user wants to do something *just before* enqueuing the request,
+ * at a point where the enqueue can no longer fail.
+ */
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+		u32 flags, qman_cb_precommit cb, void *cb_arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number, this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+			struct qman_fq *orp, u16 orp_seqnum);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+	int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, u32 count);
+static inline void qman_release_fqid(u32 fqid)
+{
+	qman_release_fqid_range(fqid, 1);
+}
+
+void qman_seed_fqid_range(u32 fqid, u32 count);
+
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to reserve
+ * @count: the number of FQIDs in the range
+ */
+int qman_reserve_fqid_range(u32 fqid, u32 count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+	return qman_reserve_fqid_range(fqid, 1);
+}
+
+	/* Pool-channel management */
+	/* ----------------------- */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+	int ret = qman_alloc_pool_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, u32 count);
+static inline void qman_release_pool(u32 id)
+{
+	qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, u32 count);
+static inline int qman_reserve_pool(u32 id)
+{
+	return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, u32 count);
+
+	/* CGR management */
+	/* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts);
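+
+/* Example sketch: register a congestion callback for CGR id 'my_cgrid' on the
+ * current cpu's affine portal without touching the hardware CGR state
+ * ('my_cgrid' and 'my_cgr_cb' are placeholders, my_cgr_cb being of type
+ * qman_cb_cgr):
+ *
+ *     static struct qman_cgr cgr;
+ *
+ *     cgr.cgrid = my_cgrid;
+ *     cgr.cb = my_cgr_cb;
+ *     qman_create_cgr(&cgr, 0, NULL);
+ */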
+
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ *
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+				struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw command (which
+ * only modifies the specified parameters).
+ */
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_query_cgr - Queries CGR fields
+ * @cgr: the 'cgr' object to query
+ * @result: storage for the queried congestion group record
+ */
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+void qman_release_cgrid_range(u32 id, u32 count);
+static inline void qman_release_cgrid(u32 id)
+{
+	qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR IDs
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, u32 count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+	return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, u32 count);
+
+
+	/* Helpers */
+	/* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fqid: the FQID that will be initialised by other s/w
+ *
+ * In many situations, a FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object,
+ * eg. via
+ *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * and only call qman_enqueue() on it (no FQ initialisation, scheduling, etc).
+ * However, data cannot be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+	struct qm_mcr_queryfq_np np;
+	int err;
+
+	err = qman_query_fq_np(fq, &np);
+	if (err)
+		return err;
+	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+		return 1;
+	return 0;
+}
+
+/**
+ * qman_set_wpm - Set waterfall power management
+ *
+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
+ *
+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_set_wpm(int wpm_enable);
+
+/**
+ * qman_get_wpm - Query the waterfall power management setting
+ *
+ * @wpm_enable: pointer that receives the current setting: 1 = wpm enabled,
+ * 0 = wpm disabled.
+ *
+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_get_wpm(int *wpm_enable);
+
+/* The below qman_p_***() variants might be called in a migration situation
+ * (e.g. cpu hotplug). They are used to continue accessing the portal that
+ * execution was affine to prior to migration.
+ * @p specifies which portal the APIs will use.
+ */
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
+									 *p);
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+u32 qman_p_poll_slow(struct qman_portal *p);
+void qman_p_poll(struct qman_portal *p);
+void qman_p_stop_dequeues(struct qman_portal *p);
+void qman_p_start_dequeues(struct qman_portal *p);
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
+u32 qman_p_static_dequeue_get(struct qman_portal *p);
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+						int park_request);
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+				u32 flags __maybe_unused, u32 vdqcr);
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+					const struct qm_fd *fd, u32 flags);
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				struct qman_fq *orp, u16 orp_seqnum);
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				qman_cb_precommit cb, void *cb_arg);
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* __FSL_QMAN_H */
diff --git a/rtemsbsd/include/bsp/nexus-devices.h b/rtemsbsd/include/bsp/nexus-devices.h
index 082363a..59e9d7f 100644
--- a/rtemsbsd/include/bsp/nexus-devices.h
+++ b/rtemsbsd/include/bsp/nexus-devices.h
@@ -138,7 +138,18 @@ RTEMS_BSD_DRIVER_REPHY;
 
 #elif defined(LIBBSP_POWERPC_QORIQ_BSP_H)
 
-#if !QORIQ_CHIP_IS_T_VARIANT(QORIQ_CHIP_VARIANT)
+#if QORIQ_CHIP_IS_T_VARIANT(QORIQ_CHIP_VARIANT)
+
+#include <bsp/irq.h>
+
+RTEMS_BSD_DEFINE_NEXUS_DEVICE(fman, 0, 0, NULL);
+RTEMS_BSD_DEFINE_NEXUS_DEVICE(fman, 1, 0, NULL);
+
+SYSINIT_DRIVER_REFERENCE(fman_mac, fman);
+SYSINIT_DRIVER_REFERENCE(fman_port, fman_mac);
+SYSINIT_DRIVER_REFERENCE(ukphy, miibus);
+
+#else /* QORIQ_CHIP_IS_T_VARIANT(QORIQ_CHIP_VARIANT) */
 
 #include <bsp/irq.h>
 
@@ -147,7 +158,7 @@ SYSINIT_DRIVER_REFERENCE(simplebus, ofwbus);
 SYSINIT_DRIVER_REFERENCE(tsec, simplebus);
 SYSINIT_DRIVER_REFERENCE(ukphy, miibus);
 
-#endif /* !QORIQ_CHIP_IS_T_VARIANT(QORIQ_CHIP_VARIANT) */
+#endif /* QORIQ_CHIP_IS_T_VARIANT(QORIQ_CHIP_VARIANT) */
 
 #endif
 
diff --git a/rtemsbsd/include/rtems/bsd/local/opt_dpaa.h b/rtemsbsd/include/rtems/bsd/local/opt_dpaa.h
new file mode 100644
index 0000000..2543a5d
--- /dev/null
+++ b/rtemsbsd/include/rtems/bsd/local/opt_dpaa.h
@@ -0,0 +1,12 @@
+#undef __ppc_generic
+#define __ppc_generic
+#define KBUILD_MODNAME "dpaa"
+#define CONFIG_FSL_BMAN
+#define CONFIG_FSL_BMAN_PORTAL
+#define CONFIG_FSL_BMAN_TEST_API
+#define CONFIG_FSL_BMAN_TEST_THRESH
+#define CONFIG_FSL_QMAN
+#define CONFIG_FSL_QMAN_CONFIG
+#define CONFIG_FSL_QMAN_PORTAL
+#define CONFIG_FSL_QMAN_TEST_API
+#define CONFIG_FSL_QMAN_TEST_STASH
diff --git a/rtemsbsd/powerpc/include/asm/atomic.h b/rtemsbsd/powerpc/include/asm/atomic.h
new file mode 100644
index 0000000..0db2756
--- /dev/null
+++ b/rtemsbsd/powerpc/include/asm/atomic.h
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_ASM_ATOMIC_H_
+#define	_ASM_ATOMIC_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <machine/atomic.h>
+
+typedef struct {
+	volatile u_int counter;
+} atomic_t;
+
+#define	ATOMIC_INIT(v) { (v) }
+
+#define	atomic_add(i, v)		atomic_add_return((i), (v))
+#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
+#define	atomic_inc_return(v)		atomic_add_return(1, (v))
+#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
+#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
+#define atomic_dec_return(v)             atomic_sub_return(1, (v))
+
+static inline int
+atomic_add_return(int i, atomic_t *v)
+{
+	return i + atomic_fetchadd_int(&v->counter, i);
+}
+
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_fetchadd_int(&v->counter, -i) - i;
+}
+
+static inline void
+atomic_set(atomic_t *v, int i)
+{
+	atomic_store_rel_int(&v->counter, i);
+}
+
+static inline int
+atomic_read(atomic_t *v)
+{
+	return atomic_load_acq_int(&v->counter);
+}
+
+static inline int
+atomic_inc(atomic_t *v)
+{
+	return atomic_fetchadd_int(&v->counter, 1) + 1;
+}
+
+static inline int
+atomic_dec(atomic_t *v)
+{
+	return atomic_fetchadd_int(&v->counter, -1) - 1;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		/* atomic_cmpset_int() returns non-zero on success */
+		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
+			break;
+		c = atomic_read(v);
+	}
+	return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#endif	/* _ASM_ATOMIC_H_ */
diff --git a/rtemsbsd/powerpc/include/asm/byteorder.h b/rtemsbsd/powerpc/include/asm/byteorder.h
new file mode 100644
index 0000000..7168e49
--- /dev/null
+++ b/rtemsbsd/powerpc/include/asm/byteorder.h
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_ASM_BYTEORDER_H_
+#define	_ASM_BYTEORDER_H_
+
+#include <sys/types.h>
+#include <sys/endian.h>
+#include <asm/types.h>
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define	__LITTLE_ENDIAN
+#else
+#define	__BIG_ENDIAN
+#endif
+
+#define	cpu_to_le64	htole64
+#define	le64_to_cpu	le64toh
+#define	cpu_to_le32	htole32
+#define	le32_to_cpu	le32toh
+#define	cpu_to_le16	htole16
+#define	le16_to_cpu	le16toh
+#define	cpu_to_be64	htobe64
+#define	be64_to_cpu	be64toh
+#define	cpu_to_be32	htobe32
+#define	be32_to_cpu	be32toh
+#define	cpu_to_be16	htobe16
+#define	be16_to_cpu	be16toh
+#define	__be16_to_cpu	be16toh
+
+#define	cpu_to_le64p(x)	htole64(*((uint64_t *)x))
+#define	le64_to_cpup(x)	le64toh(*((uint64_t *)x))
+#define	cpu_to_le32p(x)	htole32(*((uint32_t *)x))
+#define	le32_to_cpup(x)	le32toh(*((uint32_t *)x))
+#define	cpu_to_le16p(x)	htole16(*((uint16_t *)x))
+#define	le16_to_cpup(x)	le16toh(*((uint16_t *)x))
+#define	cpu_to_be64p(x)	htobe64(*((uint64_t *)x))
+#define	be64_to_cpup(x)	be64toh(*((uint64_t *)x))
+#define	cpu_to_be32p(x)	htobe32(*((uint32_t *)x))
+#define	be32_to_cpup(x)	be32toh(*((uint32_t *)x))
+#define	cpu_to_be16p(x)	htobe16(*((uint16_t *)x))
+#define	be16_to_cpup(x)	be16toh(*((uint16_t *)x))
+
+#define	cpu_to_le64s(x)	do { *((uint64_t *)x) = cpu_to_le64p((x)); } while (0)
+#define	le64_to_cpus(x)	do { *((uint64_t *)x) = le64_to_cpup((x)); } while (0)
+#define	cpu_to_le32s(x)	do { *((uint32_t *)x) = cpu_to_le32p((x)); } while (0)
+#define	le32_to_cpus(x)	do { *((uint32_t *)x) = le32_to_cpup((x)); } while (0)
+#define	cpu_to_le16s(x)	do { *((uint16_t *)x) = cpu_to_le16p((x)); } while (0)
+#define	le16_to_cpus(x)	do { *((uint16_t *)x) = le16_to_cpup((x)); } while (0)
+#define	cpu_to_be64s(x)	do { *((uint64_t *)x) = cpu_to_be64p((x)); } while (0)
+#define	be64_to_cpus(x)	do { *((uint64_t *)x) = be64_to_cpup((x)); } while (0)
+#define	cpu_to_be32s(x)	do { *((uint32_t *)x) = cpu_to_be32p((x)); } while (0)
+#define	be32_to_cpus(x)	do { *((uint32_t *)x) = be32_to_cpup((x)); } while (0)
+#define	cpu_to_be16s(x)	do { *((uint16_t *)x) = cpu_to_be16p((x)); } while (0)
+#define	be16_to_cpus(x)	do { *((uint16_t *)x) = be16_to_cpup((x)); } while (0)
+
+#define	swab16	bswap16
+#define	swab32	bswap32
+#define	swab64	bswap64
+
+static inline void
+be16_add_cpu(u16 *var, u16 val)
+{ 
+	*var = cpu_to_be16(be16_to_cpu(*var) + val);
+}
+
+#endif	/* _ASM_BYTEORDER_H_ */
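The conversion macros above simply map the Linux names onto <sys/endian.h>; a small illustrative helper (hypothetical, not part of the import) that stores a host value as a big-endian 32-bit field:

#include <string.h>
#include <asm/byteorder.h>

static inline void
put_be32(void *buf, uint32_t host_val)
{
	uint32_t wire = cpu_to_be32(host_val);	/* no-op on big-endian PowerPC */

	memcpy(buf, &wire, sizeof(wire));
}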
diff --git a/rtemsbsd/powerpc/include/asm/cache.h b/rtemsbsd/powerpc/include/asm/cache.h
new file mode 100644
index 0000000..9dd32cb
--- /dev/null
+++ b/rtemsbsd/powerpc/include/asm/cache.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ASM_CACHE_H
+#define __ASM_CACHE_H
+
+#include <rtems/score/cpu.h>
+
+#ifdef PPC_DEFAULT_CACHE_LINE_SIZE
+#define __cacheline_aligned __attribute__((__aligned__(PPC_DEFAULT_CACHE_LINE_SIZE)))
+#define ____cacheline_aligned __cacheline_aligned
+#endif
+
+#endif /* __ASM_CACHE_H */
diff --git a/rtemsbsd/powerpc/include/asm/cacheflush.h b/rtemsbsd/powerpc/include/asm/cacheflush.h
new file mode 100644
index 0000000..5c5bcef
--- /dev/null
+++ b/rtemsbsd/powerpc/include/asm/cacheflush.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+#include <asm/cache.h>
+
+#endif /* __ASM_CACHEFLUSH_H */
diff --git a/rtemsbsd/powerpc/include/asm/fsl_pamu_stash.h b/rtemsbsd/powerpc/include/asm/fsl_pamu_stash.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/asm/mpc85xx.h b/rtemsbsd/powerpc/include/asm/mpc85xx.h
new file mode 100644
index 0000000..5490b2d
--- /dev/null
+++ b/rtemsbsd/powerpc/include/asm/mpc85xx.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_MPC85XX_H
+#define	_ASM_MPC85XX_H
+
+#include <libcpu/powerpc-utility.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define	SVR_SOC_VER(svr) (((svr) >> 8) & 0xfff7ff)
+#define	SVR_REV(svr)	((svr) & 0xff)
+#define	SVR_MAJ(svr)	(((svr) >> 4) & 0xf)
+#define	SVR_MIN(svr)	((svr) & 0xf)
+
+#define	SVR_B4860	0x868000
+
+#define	SPRN_ATBL	FSL_EIS_ATBL
+#define	SPRN_ATBU	FSL_EIS_ATBU
+#define	SPRN_SVR	FSL_EIS_SVR
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _ASM_MPC85XX_H */
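A sketch of how the SVR helpers above would typically be used (hypothetical; the SVR value itself would be read from SPRN_SVR with whatever SPR accessor the BSP provides). SVR_SOC_VER() masks out the revision bits:

#include <stdint.h>
#include <asm/mpc85xx.h>

static inline int
soc_is_b4860(uint32_t svr)
{
	return SVR_SOC_VER(svr) == SVR_B4860;
}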
diff --git a/rtemsbsd/powerpc/include/asm/pgtable.h b/rtemsbsd/powerpc/include/asm/pgtable.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/asm/types.h b/rtemsbsd/powerpc/include/asm/types.h
new file mode 100644
index 0000000..fb2fd56
--- /dev/null
+++ b/rtemsbsd/powerpc/include/asm/types.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_ASM_TYPES_H_
+#define	_ASM_TYPES_H_
+
+#ifdef _KERNEL
+
+typedef uint8_t u8;
+typedef uint8_t __u8;
+typedef uint16_t u16;
+typedef uint16_t __u16;
+typedef uint32_t u32;
+typedef uint32_t __u32;
+typedef uint64_t u64;
+typedef uint64_t __u64;
+
+typedef int8_t s8;
+typedef int8_t __s8;
+typedef int16_t s16;
+typedef int16_t __s16;
+typedef int32_t s32;
+typedef int32_t __s32;
+typedef int64_t s64;
+typedef int64_t __s64;
+
+/* DMA addresses come in generic and 64-bit flavours.  */
+typedef vm_paddr_t dma_addr_t;
+typedef vm_paddr_t dma64_addr_t;
+
+typedef unsigned short umode_t;
+
+#endif	/* _KERNEL */
+
+#endif	/* _ASM_TYPES_H_ */
diff --git a/rtemsbsd/powerpc/include/fdt_phy.h b/rtemsbsd/powerpc/include/fdt_phy.h
new file mode 100644
index 0000000..8d98e9c
--- /dev/null
+++ b/rtemsbsd/powerpc/include/fdt_phy.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FDT_PHY
+#define	_FDT_PHY
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+struct fdt_mdio_device {
+	int (*read)(struct fdt_mdio_device *dev, int phy, int reg);
+	int (*write)(struct fdt_mdio_device *dev, int phy, int reg, int val);
+};
+
+struct fdt_phy_device {
+	int phy;
+	struct fdt_mdio_device *mdio_dev;
+};
+
+struct fdt_phy_device *fdt_phy_obtain(int device_node);
+
+void fdt_phy_release(struct fdt_phy_device *phy_dev);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _FDT_PHY */
+
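Illustrative use of the PHY lookup API declared above (hypothetical helper, not part of the import): read the two MII PHY identifier registers of the PHY referenced by an FDT node.

#include <stddef.h>
#include <stdint.h>
#include <fdt_phy.h>

static int
phy_read_id(int device_node, uint32_t *id)
{
	struct fdt_phy_device *phy = fdt_phy_obtain(device_node);
	int hi, lo;

	if (phy == NULL)
		return -1;
	hi = phy->mdio_dev->read(phy->mdio_dev, phy->phy, 2);
	lo = phy->mdio_dev->read(phy->mdio_dev, phy->phy, 3);
	fdt_phy_release(phy);
	if (hi < 0 || lo < 0)
		return -1;
	*id = ((uint32_t)hi << 16) | (uint32_t)lo;
	return 0;
}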
diff --git a/rtemsbsd/powerpc/include/linux/bitops.h b/rtemsbsd/powerpc/include/linux/bitops.h
new file mode 100644
index 0000000..bd712c9
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/bitops.h
@@ -0,0 +1,481 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_BITOPS_H_
+#define	_LINUX_BITOPS_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#define	BIT(nr)			(1UL << (nr))
+#ifdef __LP64__
+#define	BITS_PER_LONG		64
+#else
+#define	BITS_PER_LONG		32
+#endif
+#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
+#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
+#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
+#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define	GENMASK(lo, hi)		(((2UL << ((hi) - (lo))) - 1UL) << (lo))
+#define BITS_PER_BYTE           8
+
+static inline int
+__ffs(int mask)
+{
+	return (ffs(mask) - 1);
+}
+
+static inline int
+__fls(int mask)
+{
+	return (fls(mask) - 1);
+}
+
+static inline int
+__ffsl(long mask)
+{
+	return (ffsl(mask) - 1);
+}
+
+static inline int
+__flsl(long mask)
+{
+	return (flsl(mask) - 1);
+}
+
+
+#define	ffz(mask)	__ffs(~(mask))
+
+static inline int get_count_order(unsigned int count)
+{
+        int order;
+
+        order = fls(count) - 1;
+        if (count & (count - 1))
+                order++;
+        return order;
+}
+
+static inline unsigned long
+find_first_bit(unsigned long *addr, unsigned long size)
+{
+	long mask;
+	int bit;
+
+	for (bit = 0; size >= BITS_PER_LONG;
+	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+		if (*addr == 0)
+			continue;
+		return (bit + __ffsl(*addr));
+	}
+	if (size) {
+		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
+		if (mask)
+			bit += __ffsl(mask);
+		else
+			bit += size;
+	}
+	return (bit);
+}
+
+static inline unsigned long
+find_first_zero_bit(unsigned long *addr, unsigned long size)
+{
+	long mask;
+	int bit;
+
+	for (bit = 0; size >= BITS_PER_LONG;
+	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+		if (~(*addr) == 0)
+			continue;
+		return (bit + __ffsl(~(*addr)));
+	}
+	if (size) {
+		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
+		if (mask)
+			bit += __ffsl(mask);
+		else
+			bit += size;
+	}
+	return (bit);
+}
+
+static inline unsigned long
+find_last_bit(unsigned long *addr, unsigned long size)
+{
+	long mask;
+	int offs;
+	int bit;
+	int pos;
+
+	pos = size / BITS_PER_LONG;
+	offs = size % BITS_PER_LONG;
+	bit = BITS_PER_LONG * pos;
+	addr += pos;
+	if (offs) {
+		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
+		if (mask)
+			return (bit + __flsl(mask));
+	}
+	while (pos--) {
+		addr--;
+		bit -= BITS_PER_LONG;
+		if (*addr)
+			return (bit + __flsl(*addr));
+	}
+	return (size);
+}
+
+static inline unsigned long
+find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+{
+	long mask;
+	int offs;
+	int bit;
+	int pos;
+
+	if (offset >= size)
+		return (size);
+	pos = offset / BITS_PER_LONG;
+	offs = offset % BITS_PER_LONG;
+	bit = BITS_PER_LONG * pos;
+	addr += pos;
+	if (offs) {
+		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
+		if (mask)
+			return (bit + __ffsl(mask));
+		if (size - bit <= BITS_PER_LONG)
+			return (size);
+		bit += BITS_PER_LONG;
+		addr++;
+	}
+	for (size -= bit; size >= BITS_PER_LONG;
+	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+		if (*addr == 0)
+			continue;
+		return (bit + __ffsl(*addr));
+	}
+	if (size) {
+		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
+		if (mask)
+			bit += __ffsl(mask);
+		else
+			bit += size;
+	}
+	return (bit);
+}
+
+static inline unsigned long
+find_next_zero_bit(unsigned long *addr, unsigned long size,
+    unsigned long offset)
+{
+	long mask;
+	int offs;
+	int bit;
+	int pos;
+
+	if (offset >= size)
+		return (size);
+	pos = offset / BITS_PER_LONG;
+	offs = offset % BITS_PER_LONG;
+	bit = BITS_PER_LONG * pos;
+	addr += pos;
+	if (offs) {
+		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
+		if (mask)
+			return (bit + __ffsl(mask));
+		if (size - bit <= BITS_PER_LONG)
+			return (size);
+		bit += BITS_PER_LONG;
+		addr++;
+	}
+	for (size -= bit; size >= BITS_PER_LONG;
+	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
+		if (~(*addr) == 0)
+			continue;
+		return (bit + __ffsl(~(*addr)));
+	}
+	if (size) {
+		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
+		if (mask)
+			bit += __ffsl(mask);
+		else
+			bit += size;
+	}
+	return (bit);
+}
+
+static inline void
+bitmap_zero(unsigned long *addr, int size)
+{
+	int len;
+
+	len = BITS_TO_LONGS(size) * sizeof(long);
+	memset(addr, 0, len);
+}
+
+static inline void
+bitmap_fill(unsigned long *addr, int size)
+{
+	int tail;
+	int len;
+
+	len = (size / BITS_PER_LONG) * sizeof(long);
+	memset(addr, 0xff, len);
+	tail = size & (BITS_PER_LONG - 1);
+	if (tail) 
+		addr[size / BITS_PER_LONG] = BITMAP_LAST_WORD_MASK(tail);
+}
+
+static inline int
+bitmap_full(unsigned long *addr, int size)
+{
+	unsigned long mask;
+	int tail;
+	int len;
+	int i;
+
+	len = size / BITS_PER_LONG;
+	for (i = 0; i < len; i++)
+		if (addr[i] != ~0UL)
+			return (0);
+	tail = size & (BITS_PER_LONG - 1);
+	if (tail) {
+		mask = BITMAP_LAST_WORD_MASK(tail);
+		if ((addr[i] & mask) != mask)
+			return (0);
+	}
+	return (1);
+}
+
+static inline int
+bitmap_empty(unsigned long *addr, int size)
+{
+	unsigned long mask;
+	int tail;
+	int len;
+	int i;
+
+	len = size / BITS_PER_LONG;
+	for (i = 0; i < len; i++)
+		if (addr[i] != 0)
+			return (0);
+	tail = size & (BITS_PER_LONG - 1);
+	if (tail) {
+		mask = BITMAP_LAST_WORD_MASK(tail);
+		if ((addr[i] & mask) != 0)
+			return (0);
+	}
+	return (1);
+}
+
+#define	set_bits(m, a)							\
+    atomic_set_long((volatile long *)(a), (long)m)
+
+#define	__set_bit(i, a)							\
+    set_bits(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define	set_bit(i, a)							\
+    set_bits(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define	clear_bits(m, a)						\
+    atomic_clear_long((volatile long *)(a), m)
+
+#define	__clear_bit(i, a)						\
+    clear_bits(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define	clear_bit(i, a)							\
+    clear_bits(&((volatile long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define	test_bit(i, a)							\
+    !!(atomic_load_acq_long(&((volatile long *)(a))[BIT_WORD(i)]) &	\
+    BIT_MASK(i))
+
+static inline long
+test_and_clear_bit(long bit, long *var)
+{
+	long val;
+
+	var += BIT_WORD(bit);
+	bit %= BITS_PER_LONG;
+	bit = (1UL << bit);
+	do {
+		val = *(volatile long *)var;
+	} while (atomic_cmpset_long(var, val, val & ~bit) == 0);
+
+	return !!(val & bit);
+}
+
+static inline long
+test_and_set_bit(long bit, long *var)
+{
+	long val;
+
+	var += BIT_WORD(bit);
+	bit %= BITS_PER_LONG;
+	bit = (1UL << bit);
+	do {
+		val = *(volatile long *)var;
+	} while (atomic_cmpset_long(var, val, val | bit) == 0);
+
+	return !!(val & bit);
+}
+
+static inline void
+bitmap_set(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		nr -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static inline void
+bitmap_clear(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		nr -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+enum {
+        REG_OP_ISFREE,
+        REG_OP_ALLOC,
+        REG_OP_RELEASE,
+};
+
+static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+        int nbits_reg;
+        int index;
+        int offset;
+        int nlongs_reg;
+        int nbitsinlong;
+        unsigned long mask;
+        int i;
+        int ret = 0;
+
+        nbits_reg = 1 << order;
+        index = pos / BITS_PER_LONG;
+        offset = pos - (index * BITS_PER_LONG);
+        nlongs_reg = BITS_TO_LONGS(nbits_reg);
+        nbitsinlong = min(nbits_reg,  BITS_PER_LONG);
+
+        mask = (1UL << (nbitsinlong - 1));
+        mask += mask - 1;
+        mask <<= offset;
+
+        switch (reg_op) {
+        case REG_OP_ISFREE:
+                for (i = 0; i < nlongs_reg; i++) {
+                        if (bitmap[index + i] & mask)
+                                goto done;
+                }
+                ret = 1;
+                break;
+
+        case REG_OP_ALLOC:
+                for (i = 0; i < nlongs_reg; i++)
+                        bitmap[index + i] |= mask;
+                break;
+
+        case REG_OP_RELEASE:
+                for (i = 0; i < nlongs_reg; i++)
+                        bitmap[index + i] &= ~mask;
+                break;
+        }
+done:
+        return ret;
+}
+
+static inline int 
+bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+        int pos;
+        int end;
+
+        for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
+                if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+                        continue;
+                __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+                return pos;
+        }
+        return -ENOMEM;
+}
+
+static inline int
+bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+        if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+                return -EBUSY;
+        __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+        return 0;
+}
+
+static inline void 
+bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+        __reg_op(bitmap, pos, order, REG_OP_RELEASE);
+}
+
+
+#define for_each_set_bit(bit, addr, size) \
+	for ((bit) = find_first_bit((addr), (size));		\
+	     (bit) < (size);					\
+	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#endif	/* _LINUX_BITOPS_H_ */
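A short sketch of the region allocator defined above (names and sizes are made up for illustration): manage 64 slots and hand them out four at a time (order 2).

#include <linux/bitops.h>

static unsigned long slot_map[BITS_TO_LONGS(64)];

static int
alloc_four_slots(void)
{
	/* Returns the first slot index, or a negative errno if none are free. */
	return bitmap_find_free_region(slot_map, 64, 2);
}

static void
free_four_slots(int pos)
{
	bitmap_release_region(slot_map, pos, 2);
}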
diff --git a/rtemsbsd/powerpc/include/linux/bitrev.h b/rtemsbsd/powerpc/include/linux/bitrev.h
new file mode 100644
index 0000000..489eb0d
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/bitrev.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_BITREV_H
+#define	_LINUX_BITREV_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+extern const uint8_t bitrev_nibbles[16];
+
+static inline uint8_t
+bitrev8(uint8_t x)
+{
+
+	return ((bitrev_nibbles[x & 15] << 4) | bitrev_nibbles[x >> 4]);
+}
+
+static inline uint16_t
+bitrev16(uint16_t x)
+{
+
+	return (uint16_t)((bitrev8((uint8_t)x) << 8) |
+	    bitrev8((uint8_t)(x >> 8)));
+}
+
+static inline uint32_t
+bitrev32(uint32_t x)
+{
+
+	return (((uint32_t)bitrev16((uint16_t)x) << 16) |
+	    (uint32_t)bitrev16((uint16_t)(x >> 16)));
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_BITREV_H */
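Illustrative sanity checks for the bit-reversal helpers above (assumes the bitrev_nibbles table is provided by the accompanying support code):

#include <assert.h>
#include <linux/bitrev.h>

static void
bitrev_selftest(void)
{
	assert(bitrev8(0x01) == 0x80);
	assert(bitrev16(0x0001) == 0x8000);
	assert(bitrev32(0x00000001) == 0x80000000);
}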
diff --git a/rtemsbsd/powerpc/include/linux/clk.h b/rtemsbsd/powerpc/include/linux/clk.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/compiler.h b/rtemsbsd/powerpc/include/linux/compiler.h
new file mode 100644
index 0000000..e1d11a4
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/compiler.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_COMPILER_H_
+#define	_LINUX_COMPILER_H_
+
+#include <sys/cdefs.h>
+
+#define __user
+#define __kernel
+#define __safe
+#define __force
+#define __nocast
+#define __iomem
+#define __chk_user_ptr(x)		0
+#define __chk_io_ptr(x)			0
+#define __builtin_warning(x, y...)	(1)
+#define __acquires(x)
+#define __releases(x)
+#define __acquire(x)			0
+#define __release(x)			0
+#define __cond_lock(x,c)		(c)
+#define	__bitwise
+#define __devinitdata
+#define __init
+#define	__initdata
+#define	__devinit
+#define	__devexit
+#define	__percpu
+#define __exit
+#define	__stringify(x)			#x
+#define	__attribute_const__		__attribute__((__const__))
+#undef __always_inline
+#define	__always_inline			inline
+#define	noinline			__noinline
+
+#define	likely(x)			__builtin_expect(!!(x), 1)
+#define	unlikely(x)			__builtin_expect(!!(x), 0)
+#define typeof(x)			__typeof(x)
+
+#define	uninitialized_var(x)		x = x
+#define	__read_mostly __attribute__((__section__(".data.read_mostly")))
+#define	__maybe_unused			__unused
+#define	__always_unused			__unused
+#define	__must_check			__result_use_check
+
+#define	__printf(a,b)			__printflike(a,b)
+
+#define	barrier()			__asm__ __volatile__("": : :"memory")
+
+#endif	/* _LINUX_COMPILER_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/completion.h b/rtemsbsd/powerpc/include/linux/completion.h
new file mode 100644
index 0000000..7cfb10d
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/completion.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_COMPLETION_H_
+#define	_LINUX_COMPLETION_H_
+
+#include <linux/errno.h>
+
+struct completion {
+	unsigned int done;
+};
+
+#define	INIT_COMPLETION(c) \
+	((c).done = 0)
+#define	init_completion(c) \
+	((c)->done = 0)
+#define	complete(c)				\
+	linux_complete_common((c), 0)
+#define	complete_all(c)				\
+	linux_complete_common((c), 1)
+#define	wait_for_completion(c)			\
+	linux_wait_for_common((c), 0)
+#define	wait_for_completion_interruptible(c)	\
+	linux_wait_for_common((c), 1)
+#define	wait_for_completion_timeout(c, timeout)	\
+	linux_wait_for_timeout_common((c), (timeout), 0)
+#define	wait_for_completion_interruptible_timeout(c, timeout)	\
+	linux_wait_for_timeout_common((c), (timeout), 1)
+#define	try_wait_for_completion(c) \
+	linux_try_wait_for_completion(c)
+#define	completion_done(c) \
+	linux_completion_done(c)
+
+extern void linux_complete_common(struct completion *, int);
+extern long linux_wait_for_common(struct completion *, int);
+extern long linux_wait_for_timeout_common(struct completion *, long, int);
+extern int linux_try_wait_for_completion(struct completion *);
+extern int linux_completion_done(struct completion *);
+
+#endif					/* _LINUX_COMPLETION_H_ */
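A minimal hand-off sketch using the completion mapping above (illustrative; the linux_*() back ends are supplied elsewhere in the RTEMS support code):

#include <linux/completion.h>

static struct completion done_evt;

static void
consumer(void)
{
	init_completion(&done_evt);
	/* ... start the work ... */
	wait_for_completion(&done_evt);
}

static void
producer(void)
{
	/* ... work finished ... */
	complete(&done_evt);
}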
diff --git a/rtemsbsd/powerpc/include/linux/ctype.h b/rtemsbsd/powerpc/include/linux/ctype.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/debugfs.h b/rtemsbsd/powerpc/include/linux/debugfs.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/delay.h b/rtemsbsd/powerpc/include/linux/delay.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/device.h b/rtemsbsd/powerpc/include/linux/device.h
new file mode 100644
index 0000000..5ba628a
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/device.h
@@ -0,0 +1,501 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_DEVICE_H_
+#define	_LINUX_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/kdev_t.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+
+#include <sys/bus.h>
+
+enum irqreturn	{ IRQ_NONE = 0, IRQ_HANDLED, IRQ_WAKE_THREAD, };
+typedef enum irqreturn	irqreturn_t;
+
+#include <stdio.h>
+
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+struct device {
+	struct device_node *of_node;
+	uintptr_t base;
+	void *driver_data;
+};
+
+static inline void *
+devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+
+	(void)dev;
+	return (kmalloc(size, gfp));
+}
+
+static inline void *
+devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+
+	(void)dev;
+	return (kzalloc(size, gfp));
+}
+
+static inline void __iomem *
+devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
+{
+
+	return (void __iomem *)(dev->base + (uintptr_t)offset);
+}
+
+#define	devm_alloc_percpu(dev, type) \
+    devm_kzalloc(dev, sizeof(type) * rtems_get_processor_count(), GFP_KERNEL)
+
+#define	dev_err(dev, fmt, ...) \
+    do { (void)dev; fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
+
+#define	dev_warn(dev, fmt, ...) \
+    do { (void)dev; fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
+
+#define	dev_info(dev, fmt, ...) \
+    do { (void)dev; fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
+
+static inline struct device *
+get_device(struct device *dev)
+{
+
+	return (dev);
+}
+
+static inline void
+put_device(struct device *dev)
+{
+
+	(void)dev;
+}
+
+static inline void *
+dev_get_drvdata(const struct device *dev)
+{
+
+	return (dev->driver_data);
+}
+
+static inline void
+dev_set_drvdata(struct device *dev, void *drvdata)
+{
+
+	dev->driver_data = drvdata;
+}
+
+#ifndef __rtems__
+struct class {
+	const char	*name;
+	struct module	*owner;
+	struct kobject	kobj;
+	devclass_t	bsdclass;
+	void		(*class_release)(struct class *class);
+	void		(*dev_release)(struct device *dev);
+	char *		(*devnode)(struct device *dev, umode_t *mode);
+};
+
+struct device {
+	struct device	*parent;
+	struct list_head irqents;
+	device_t	bsddev;
+	dev_t		devt;
+	struct class	*class;
+	void		(*release)(struct device *dev);
+	struct kobject	kobj;
+	uint64_t	*dma_mask;
+	void		*driver_data;
+	unsigned int	irq;
+	unsigned int	msix;
+	unsigned int	msix_max;
+};
+
+extern struct device linux_rootdev;
+extern struct kobject class_root;
+
+struct class_attribute {
+        struct attribute attr;
+        ssize_t (*show)(struct class *, struct class_attribute *, char *);
+        ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t);
+        const void *(*namespace)(struct class *, const struct class_attribute *);
+};
+
+#define	CLASS_ATTR(_name, _mode, _show, _store)				\
+	struct class_attribute class_attr_##_name =			\
+	    { { #_name, NULL, _mode }, _show, _store }
+
+struct device_attribute {
+	struct attribute	attr;
+	ssize_t			(*show)(struct device *,
+					struct device_attribute *, char *);
+	ssize_t			(*store)(struct device *,
+					struct device_attribute *, const char *,
+					size_t);
+};
+
+#define	DEVICE_ATTR(_name, _mode, _show, _store)			\
+	struct device_attribute dev_attr_##_name =			\
+	    { { #_name, NULL, _mode }, _show, _store }
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+	struct class_attribute attr;
+	char *str;
+};
+
+static inline ssize_t
+show_class_attr_string(struct class *class,
+				struct class_attribute *attr, char *buf)
+{
+	struct class_attribute_string *cs;
+	cs = container_of(attr, struct class_attribute_string, attr);
+	return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
+}
+
+/* Currently read-only only */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+	{ __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+	struct class_attribute_string class_attr_##_name = \
+		_CLASS_ATTR_STRING(_name, _mode, _str)
+
+#define	dev_err(dev, fmt, ...)	device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define	dev_warn(dev, fmt, ...)	device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define	dev_info(dev, fmt, ...)	device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+#define	dev_printk(lvl, dev, fmt, ...)					\
+	    device_printf((dev)->bsddev, fmt, ##__VA_ARGS__)
+
+static inline void *
+dev_get_drvdata(struct device *dev)
+{
+
+	return dev->driver_data;
+}
+
+static inline void
+dev_set_drvdata(struct device *dev, void *data)
+{
+
+	dev->driver_data = data;
+}
+
+static inline struct device *
+get_device(struct device *dev)
+{
+
+	if (dev)
+		kobject_get(&dev->kobj);
+
+	return (dev);
+}
+
+static inline char *
+dev_name(const struct device *dev)
+{
+
+ 	return kobject_name(&dev->kobj);
+}
+
+#define	dev_set_name(_dev, _fmt, ...)					\
+	kobject_set_name(&(_dev)->kobj, (_fmt), ##__VA_ARGS__)
+
+static inline void
+put_device(struct device *dev)
+{
+
+	if (dev)
+		kobject_put(&dev->kobj);
+}
+
+static inline ssize_t
+class_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct class_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct class_attribute, attr);
+	error = -EIO;
+	if (dattr->show)
+		error = dattr->show(container_of(kobj, struct class, kobj),
+		    dattr, buf);
+	return (error);
+}
+
+static inline ssize_t
+class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+    size_t count)
+{
+	struct class_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct class_attribute, attr);
+	error = -EIO;
+	if (dattr->store)
+		error = dattr->store(container_of(kobj, struct class, kobj),
+		    dattr, buf, count);
+	return (error);
+}
+
+static inline void
+class_release(struct kobject *kobj)
+{
+	struct class *class;
+
+	class = container_of(kobj, struct class, kobj);
+	if (class->class_release)
+		class->class_release(class);
+}
+
+static struct sysfs_ops class_sysfs = {
+	.show  = class_show,
+	.store = class_store,
+};
+static struct kobj_type class_ktype = {
+	.release = class_release,
+	.sysfs_ops = &class_sysfs
+};
+
+static inline int
+class_register(struct class *class)
+{
+
+	class->bsdclass = devclass_create(class->name);
+	kobject_init(&class->kobj, &class_ktype);
+	kobject_set_name(&class->kobj, class->name);
+	kobject_add(&class->kobj, &class_root, class->name);
+
+	return (0);
+}
+
+static inline void
+class_unregister(struct class *class)
+{
+
+	kobject_put(&class->kobj);
+}
+
+static inline void
+device_release(struct kobject *kobj)
+{
+	struct device *dev;
+
+	dev = container_of(kobj, struct device, kobj);
+	/* This is the precedence defined by linux. */
+	if (dev->release)
+		dev->release(dev);
+	else if (dev->class && dev->class->dev_release)
+		dev->class->dev_release(dev);
+}
+
+static inline ssize_t
+dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	struct device_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct device_attribute, attr);
+	error = -EIO;
+	if (dattr->show)
+		error = dattr->show(container_of(kobj, struct device, kobj),
+		    dattr, buf);
+	return (error);
+}
+
+static inline ssize_t
+dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
+    size_t count)
+{
+	struct device_attribute *dattr;
+	ssize_t error;
+
+	dattr = container_of(attr, struct device_attribute, attr);
+	error = -EIO;
+	if (dattr->store)
+		error = dattr->store(container_of(kobj, struct device, kobj),
+		    dattr, buf, count);
+	return (error);
+}
+
+static struct sysfs_ops dev_sysfs = { .show  = dev_show, .store = dev_store, };
+static struct kobj_type dev_ktype = {
+	.release = device_release,
+	.sysfs_ops = &dev_sysfs
+};
+
+/*
+ * Devices are registered and created for exporting to sysfs.  create
+ * implies register and register assumes the device fields have been
+ * setup appropriately before being called.
+ */
+static inline int
+device_register(struct device *dev)
+{
+	device_t bsddev;
+	int unit;
+
+	bsddev = NULL;
+	if (dev->devt) {
+		unit = MINOR(dev->devt);
+		bsddev = devclass_get_device(dev->class->bsdclass, unit);
+	} else
+		unit = -1;
+	if (bsddev == NULL)
+		bsddev = device_add_child(dev->parent->bsddev,
+		    dev->class->kobj.name, unit);
+	if (bsddev) {
+		if (dev->devt == 0)
+			dev->devt = makedev(0, device_get_unit(bsddev));
+		device_set_softc(bsddev, dev);
+	}
+	dev->bsddev = bsddev;
+	kobject_init(&dev->kobj, &dev_ktype);
+	kobject_add(&dev->kobj, &dev->class->kobj, dev_name(dev));
+
+	return (0);
+}
+
+static inline void
+device_unregister(struct device *dev)
+{
+	device_t bsddev;
+
+	bsddev = dev->bsddev;
+	mtx_lock(&Giant);
+	if (bsddev)
+		device_delete_child(device_get_parent(bsddev), bsddev);
+	mtx_unlock(&Giant);
+	put_device(dev);
+}
+
+struct device *device_create(struct class *class, struct device *parent,
+	    dev_t devt, void *drvdata, const char *fmt, ...);
+
+static inline void
+device_destroy(struct class *class, dev_t devt)
+{
+	device_t bsddev;
+	int unit;
+
+	unit = MINOR(devt);
+	bsddev = devclass_get_device(class->bsdclass, unit);
+	if (bsddev)
+		device_unregister(device_get_softc(bsddev));
+}
+
+static inline void
+class_kfree(struct class *class)
+{
+
+	kfree(class);
+}
+
+static inline struct class *
+class_create(struct module *owner, const char *name)
+{
+	struct class *class;
+	int error;
+
+	class = kzalloc(sizeof(*class), M_WAITOK);
+	class->owner = owner;
+	class->name = name;
+	class->class_release = class_kfree;
+	error = class_register(class);
+	if (error) {
+		kfree(class);
+		return (NULL);
+	}
+
+	return (class);
+}
+
+static inline void
+class_destroy(struct class *class)
+{
+
+	if (class == NULL)
+		return;
+	class_unregister(class);
+}
+
+static inline int
+device_create_file(struct device *dev, const struct device_attribute *attr)
+{
+
+	if (dev)
+		return sysfs_create_file(&dev->kobj, &attr->attr);
+	return -EINVAL;
+}
+
+static inline void
+device_remove_file(struct device *dev, const struct device_attribute *attr)
+{
+
+	if (dev)
+		sysfs_remove_file(&dev->kobj, &attr->attr);
+}
+
+static inline int
+class_create_file(struct class *class, const struct class_attribute *attr)
+{
+
+	if (class)
+		return sysfs_create_file(&class->kobj, &attr->attr);
+	return -EINVAL;
+}
+
+static inline void
+class_remove_file(struct class *class, const struct class_attribute *attr)
+{
+
+	if (class)
+		sysfs_remove_file(&class->kobj, &attr->attr);
+}
+
+static inline int dev_to_node(struct device *dev)
+{
+	return -1;
+}
+
+char *kvasprintf(gfp_t, const char *, va_list);
+char *kasprintf(gfp_t, const char *, ...);
+#endif /* __rtems__ */
+
+#endif	/* _LINUX_DEVICE_H_ */
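Against the RTEMS-side struct device above (of_node/base/driver_data), a typical probe would allocate a softc and attach it via dev_set_drvdata(); the structure and function names below are hypothetical:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct my_softc {
	int unit;
};

static int
my_probe(struct device *dev)
{
	struct my_softc *sc = devm_kzalloc(dev, sizeof(*sc), GFP_KERNEL);

	if (sc == NULL)
		return -ENOMEM;
	dev_set_drvdata(dev, sc);
	dev_info(dev, "probed\n");
	return 0;
}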
diff --git a/rtemsbsd/powerpc/include/linux/dma-mapping.h b/rtemsbsd/powerpc/include/linux/dma-mapping.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/err.h b/rtemsbsd/powerpc/include/linux/err.h
new file mode 100644
index 0000000..edb648f
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/err.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_ERR_H
+#define	_LINUX_ERR_H
+
+#include <linux/compiler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define IS_ERR_VALUE(e) unlikely((e) >= (unsigned long)-4095)
+
+static inline void *
+__must_check ERR_PTR(long e)
+{
+
+	return ((void *)e);
+}
+
+static inline long __must_check
+PTR_ERR(__force const void *p)
+{
+
+	return ((long)p);
+}
+
+static inline bool __must_check
+IS_ERR(__force const void *p)
+{
+
+	return (IS_ERR_VALUE((unsigned long)p));
+}
+
+static inline bool __must_check
+IS_ERR_OR_NULL(__force const void *p)
+{
+
+	return (p == NULL || IS_ERR_VALUE((unsigned long)p));
+}
+
+static inline void * __must_check
+ERR_CAST(__force const void *p)
+{
+
+	return (__DECONST(void *, p));
+}
+
+static inline int __must_check
+PTR_ERR_OR_ZERO(__force const void *p)
+{
+
+	return (IS_ERR(p) ? (int)PTR_ERR(p) : 0);
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_ERR_H */
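The usual Linux error-pointer pattern these helpers support, as an illustrative sketch (function names are made up):

#include <stddef.h>
#include <linux/err.h>
#include <linux/errno.h>

static void *
lookup_thing(int key)
{
	if (key < 0)
		return ERR_PTR(-EINVAL);
	/* ... real lookup elided ... */
	return NULL;
}

static int
use_thing(int key)
{
	void *p = lookup_thing(key);

	if (IS_ERR(p))
		return (int)PTR_ERR(p);
	return 0;
}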
diff --git a/rtemsbsd/powerpc/include/linux/errno.h b/rtemsbsd/powerpc/include/linux/errno.h
new file mode 100644
index 0000000..aff6e26
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/errno.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_ERRNO_H_
+#define	_LINUX_ERRNO_H_
+
+#include <sys/errno.h>
+
+#define	ECHRNG		EDOM
+#ifndef __rtems__
+#define	ETIME		ETIMEDOUT
+#endif /* __rtems__ */
+#define	ECOMM           ESTALE
+#ifndef __rtems__
+#define	ENODATA         ECONNREFUSED
+#endif /* __rtems__ */
+#define	ENOIOCTLCMD     ENOIOCTL
+#define	ERESTARTSYS     ERESTART
+#define	ENOTSUPP        EOPNOTSUPP
+#define	ENONET          EHOSTDOWN
+
+#endif					/* _LINUX_ERRNO_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/etherdevice.h b/rtemsbsd/powerpc/include/linux/etherdevice.h
new file mode 100644
index 0000000..79c0644
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/etherdevice.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_ETHERDEVICE_H
+#define	_LINUX_ETHERDEVICE_H
+
+#include <string.h>
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+static inline void
+ether_addr_copy(u8 dst[6], const u8 src[6])
+{
+
+	memcpy(dst, src, 6);
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/rtemsbsd/powerpc/include/linux/gfp.h b/rtemsbsd/powerpc/include/linux/gfp.h
new file mode 100644
index 0000000..2b57bca
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/gfp.h
@@ -0,0 +1,169 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_GFP_H_
+#define	_LINUX_GFP_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <linux/page.h>
+
+#include <vm/vm_param.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+
+#define	__GFP_NOWARN	0
+#define	__GFP_HIGHMEM	0
+#define	__GFP_ZERO	M_ZERO
+
+#define	GFP_NOWAIT	M_NOWAIT
+#define	GFP_ATOMIC	(M_NOWAIT | M_USE_RESERVE)
+#define	GFP_KERNEL	M_WAITOK
+#define	GFP_USER	M_WAITOK
+#define	GFP_HIGHUSER	M_WAITOK
+#define	GFP_HIGHUSER_MOVABLE	M_WAITOK
+#define	GFP_IOFS	M_NOWAIT
+
+#ifndef __rtems__
+static inline void *
+page_address(struct page *page)
+{
+
+	if (page->object != kmem_object && page->object != kernel_object)
+		return (NULL);
+	return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
+	    IDX_TO_OFF(page->pindex)));
+}
+
+static inline unsigned long
+_get_page(gfp_t mask)
+{
+
+	return kmem_malloc(kmem_arena, PAGE_SIZE, mask);
+}
+
+#define	get_zeroed_page(mask)	_get_page((mask) | M_ZERO)
+#define	alloc_page(mask)	virt_to_page(_get_page((mask)))
+#define	__get_free_page(mask)	_get_page((mask))
+
+static inline void
+free_page(unsigned long page)
+{
+
+	if (page == 0)
+		return;
+	kmem_free(kmem_arena, page, PAGE_SIZE);
+}
+
+static inline void
+__free_page(struct page *m)
+{
+
+	if (m->object != kmem_object)
+		panic("__free_page:  Freed page %p not allocated via wrappers.",
+		    m);
+	kmem_free(kmem_arena, (vm_offset_t)page_address(m), PAGE_SIZE);
+}
+
+static inline void
+__free_pages(struct page *m, unsigned int order)
+{
+	size_t size;
+
+	if (m == NULL)
+		return;
+	size = PAGE_SIZE << order;
+	kmem_free(kmem_arena, (vm_offset_t)page_address(m), size);
+}
+
+static inline void free_pages(uintptr_t addr, unsigned int order)
+{
+	if (addr == 0)
+		return;
+	__free_pages(virt_to_page((void *)addr), order);
+}
+
+/*
+ * Alloc pages allocates directly from the buddy allocator on linux so
+ * order specifies a power of two bucket of pages and the results
+ * are expected to be aligned on the size as well.
+ */
+static inline struct page *
+alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+	unsigned long page;
+	size_t size;
+
+	size = PAGE_SIZE << order;
+	page = kmem_alloc_contig(kmem_arena, size, gfp_mask, 0, -1,
+	    size, 0, VM_MEMATTR_DEFAULT);
+	if (page == 0)
+		return (NULL);
+        return (virt_to_page(page));
+}
+
+static inline uintptr_t __get_free_pages(gfp_t gfp_mask, unsigned int order)
+{
+	struct page *page;
+
+	page = alloc_pages(gfp_mask, order);
+	if (page == NULL)
+		return (0);
+	return ((uintptr_t)page_address(page));
+}
+
+#define alloc_pages_node(node, mask, order)     alloc_pages(mask, order)
+
+#define kmalloc_node(chunk, mask, node)         kmalloc(chunk, mask)
+#else /* __rtems__ */
+#include <machine/rtems-bsd-page.h>
+static inline unsigned long
+get_zeroed_page(gfp_t mask)
+{
+	void *p = rtems_bsd_page_alloc(PAGE_SIZE, (mask & M_WAITOK) != 0);
+
+	if (p != NULL)
+		memset(p, 0, PAGE_SIZE);
+	return ((unsigned long)p);
+}
+
+static inline void
+free_page(unsigned long page)
+{
+
+	rtems_bsd_page_free((void *)page);
+}
+#endif /* __rtems__ */
+
+#endif	/* _LINUX_GFP_H_ */
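
As an illustration (not part of the imported files): with the definitions
above, Linux-style page allocations reduce to kmem_malloc()/kmem_free() on
the non-RTEMS path and to rtems_bsd_page_alloc()/rtems_bsd_page_free() on
RTEMS.  A minimal consumer sketch, assuming the <linux/errno.h> shim
provides ENOMEM:

    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Allocate one zeroed page, use it, and give it back. */
    static int
    example_use_page(void)
    {
        unsigned long page;

        page = get_zeroed_page(GFP_KERNEL);
        if (page == 0)
            return (-ENOMEM);
        /* ... fill the page ... */
        free_page(page);
        return (0);
    }
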
diff --git a/rtemsbsd/powerpc/include/linux/if_ether.h b/rtemsbsd/powerpc/include/linux/if_ether.h
new file mode 100644
index 0000000..2d68cd3
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/if_ether.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_IF_ETHER_H
+#define	_LINUX_IF_ETHER_H
+
+#include <net/ethernet.h>
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define	ETH_ALEN	ETHER_ADDR_LEN
+#define	ETH_FCS_LEN	ETHER_CRC_LEN
+#define	ETH_DATA_LEN	(ETHER_MAX_LEN - ETHER_HDR_LEN)
+#define	VLAN_ETH_HLEN	(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_IF_ETHER_H */
+
diff --git a/rtemsbsd/powerpc/include/linux/init.h b/rtemsbsd/powerpc/include/linux/init.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/interrupt.h b/rtemsbsd/powerpc/include/linux/interrupt.h
new file mode 100644
index 0000000..9e3aa48
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/interrupt.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_INTERRUPT_H_
+#define	_LINUX_INTERRUPT_H_
+
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <rtems/irq-extension.h>
+
+typedef	irqreturn_t	(*irq_handler_t)(int, void *);
+
+#define	IRQ_RETVAL(x)	((x) != IRQ_NONE)
+
+#define	IRQF_SHARED	RTEMS_INTERRUPT_SHARED
+#define	IRQF_NO_SUSPEND	0
+
+#define	NO_IRQ (-1U)
+
+int __must_check request_irq(unsigned int irq, irq_handler_t handler,
+    unsigned long flags, const char *name, void *arg);
+
+static inline void
+free_irq(unsigned int irq, void *device)
+{
+
+	panic("free_irq()");
+}
+
+static inline int
+__must_check devm_request_irq(struct device *dev, unsigned int irq,
+    irq_handler_t handler, unsigned long flags, const char *name,
+    void *arg)
+{
+
+	(void)dev;
+	return (request_irq(irq, handler, flags, name, arg));
+}
+
+#endif	/* _LINUX_INTERRUPT_H_ */
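
For illustration only (not from the imported sources): request_irq() is
declared here and IRQF_SHARED maps onto RTEMS_INTERRUPT_SHARED from the
RTEMS interrupt extension API, while devm_request_irq() simply discards the
device argument.  A registration sketch, assuming the compatibility layer
defines IRQ_HANDLED elsewhere (it is not defined in this header):

    #include <linux/interrupt.h>

    /* Hypothetical handler: acknowledge the device and report it handled. */
    static irqreturn_t
    example_isr(int irq, void *arg)
    {
        (void)irq;
        (void)arg;
        /* ... acknowledge the hardware here ... */
        return (IRQ_HANDLED);
    }

    static int
    example_attach(struct device *dev, unsigned int irq)
    {
        return (devm_request_irq(dev, irq, example_isr, IRQF_SHARED,
            "example", NULL));
    }
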
diff --git a/rtemsbsd/powerpc/include/linux/io.h b/rtemsbsd/powerpc/include/linux/io.h
new file mode 100644
index 0000000..43f9c51
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/io.h
@@ -0,0 +1,171 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_IO_H_
+#define	_LINUX_IO_H_
+
+#include <machine/vm.h>
+#include <sys/endian.h>
+#include <linux/types.h>
+
+static inline uint8_t
+__raw_readb(const volatile void *addr)
+{
+	return *(const volatile uint8_t *)addr;
+}
+
+static inline void
+__raw_writeb(uint8_t b, volatile void *addr)
+{
+	*(volatile uint8_t *)addr = b;
+}
+
+static inline uint32_t
+__raw_readl(const volatile void *addr)
+{
+	return *(const volatile uint32_t *)addr;
+}
+
+static inline void
+__raw_writel(uint32_t b, volatile void *addr)
+{
+	*(volatile uint32_t *)addr = b;
+}
+
+static inline uint64_t
+__raw_readq(const volatile void *addr)
+{
+	return *(const volatile uint64_t *)addr;
+}
+
+static inline void
+__raw_writeq(uint64_t b, volatile void *addr)
+{
+	*(volatile uint64_t *)addr = b;
+}
+
+/*
+ * XXX This is all x86 specific.  It should be bus space access.
+ */
+#define mmiowb()
+
+#undef writel
+static inline void
+writel(uint32_t b, void *addr)
+{
+        *(volatile uint32_t *)addr = b;
+}
+
+#undef writeq
+static inline void
+writeq(uint64_t b, void *addr)
+{
+        *(volatile uint64_t *)addr = b;
+}
+
+#undef writeb
+static inline void
+writeb(uint8_t b, void *addr)
+{
+        *(volatile uint8_t *)addr = b;
+}
+
+#undef writew
+static inline void
+writew(uint16_t b, void *addr)
+{
+        *(volatile uint16_t *)addr = b;
+}
+
+#undef ioread32be
+static inline uint32_t
+ioread32be(const volatile void *addr)
+{
+	return be32toh(*(const volatile uint32_t *)addr);
+}
+
+#define	in_be32(x) ioread32be(x)
+
+#undef iowrite32be
+static inline void
+iowrite32be(uint32_t v, volatile void *addr)
+{
+	*(volatile uint32_t *)addr = htobe32(v);
+}
+
+#define	out_be32(x, y) iowrite32be(y, x)
+
+void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
+#define	ioremap_nocache(addr, size)					\
+    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
+#define	ioremap_wc(addr, size)						\
+    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
+#define	ioremap	ioremap_nocache
+void iounmap(void *addr);
+
+#define	memset_io(a, b, c)	memset((a), (b), (c))
+#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
+#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
+
+static inline void
+__iowrite64_copy(void *to, void *from, size_t count)
+{
+#ifdef __LP64__
+	uint64_t *src;
+	uint64_t *dst;
+	int i;
+
+	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
+		__raw_writeq(*src, dst);
+#else
+	uint32_t *src;
+	uint32_t *dst;
+	int i;
+
+	count *= 2;
+	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
+		__raw_writel(*src, dst);
+#endif
+}
+
+static inline unsigned long
+virt_to_phys(volatile void *address)
+{
+
+	return ((unsigned long)address);
+}
+
+static inline void *
+phys_to_virt(unsigned long address)
+{
+	return ((void *)address);
+}
+
+#endif	/* _LINUX_IO_H_ */
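
A usage sketch (illustrative, not part of the imported diff): the accessors
above provide big-endian MMIO helpers for PowerPC device registers, with
in_be32()/out_be32() defined as aliases for ioread32be()/iowrite32be():

    #include <linux/io.h>

    /* Read a big-endian register, set bit 0, and write it back. */
    static uint32_t
    example_set_enable(volatile void *reg)
    {
        uint32_t v;

        v = in_be32(reg);
        out_be32(reg, v | 0x1);
        return (v);
    }
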
diff --git a/rtemsbsd/powerpc/include/linux/iommu.h b/rtemsbsd/powerpc/include/linux/iommu.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/ioport.h b/rtemsbsd/powerpc/include/linux/ioport.h
new file mode 100644
index 0000000..a47b975
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/ioport.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_IOPORT_H
+#define	_LINUX_IOPORT_H
+
+#include <linux/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+struct resource {
+	resource_size_t start;
+	resource_size_t end;
+};
+
+#define	IORESOURCE_IRQ	0x00000400
+#define	IORESOURCE_MEM	0x00000420
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_IOPORT_H */
diff --git a/rtemsbsd/powerpc/include/linux/jiffies.h b/rtemsbsd/powerpc/include/linux/jiffies.h
new file mode 100644
index 0000000..f7bc529
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/jiffies.h
@@ -0,0 +1,98 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_JIFFIES_H_
+#define	_LINUX_JIFFIES_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+
+#include <sys/time.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+
+static inline int
+msecs_to_jiffies(int msec)
+{
+	struct timeval tv;
+
+	tv.tv_sec = msec / 1000;
+	tv.tv_usec = (msec % 1000) * 1000;
+	return (tvtohz(&tv) - 1);
+}
+
+#define jiffies                 ticks
+#define	jiffies_64		ticks
+#define jiffies_to_msecs(x)     (((int64_t)(x)) * 1000 / hz)
+
+#define	MAX_JIFFY_OFFSET	((INT_MAX >> 1) - 1)
+
+#define	time_after(a, b)	((int)((b) - (a)) < 0)
+#define	time_before(a, b)	time_after(b,a)
+#define	time_after_eq(a, b)	((int)((a) - (b)) >= 0)
+#define	time_before_eq(a, b)	time_after_eq(b, a)
+#define	time_in_range(a,b,c)	\
+	(time_after_eq(a,b) && time_before_eq(a,c))
+
+#define	HZ	hz
+
+static inline int
+timespec_to_jiffies(const struct timespec *ts)
+{
+	u64 result;
+
+	result = ((u64)hz * ts->tv_sec) +
+	    (((u64)hz * ts->tv_nsec + NSEC_PER_SEC - 1) / NSEC_PER_SEC);
+	if (result > MAX_JIFFY_OFFSET)
+		result = MAX_JIFFY_OFFSET;
+
+	return ((int)result);
+}
+
+static inline int
+usecs_to_jiffies(const unsigned int u)
+{
+	u64 result;
+
+	result = ((u64)u * hz + 1000000 - 1) / 1000000;
+	if (result > MAX_JIFFY_OFFSET)
+		result = MAX_JIFFY_OFFSET;
+
+	return ((int)result);
+}
+
+static inline u64
+get_jiffies_64(void)
+{
+	return ((u64)(unsigned)ticks);
+}
+
+#endif	/* _LINUX_JIFFIES_H_ */
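
As an illustration (not part of the imported files): jiffies maps onto the
FreeBSD ticks counter and HZ onto hz, so the usual Linux timeout idioms
carry over directly.  A polling-loop sketch:

    #include <linux/jiffies.h>

    /* Poll a ready bit for at most 100 ms. */
    static int
    example_wait_ready(volatile uint32_t *status)
    {
        int timeout = jiffies + msecs_to_jiffies(100);

        while ((*status & 0x1) == 0) {
            if (time_after(jiffies, timeout))
                return (-1);
            cpu_relax();
        }
        return (0);
    }
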
diff --git a/rtemsbsd/powerpc/include/linux/kdev_t.h b/rtemsbsd/powerpc/include/linux/kdev_t.h
new file mode 100644
index 0000000..8dea1ab
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/kdev_t.h
@@ -0,0 +1,38 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_KDEV_T_H_
+#define	_LINUX_KDEV_T_H_
+
+#define MAJOR(dev)      major((dev))
+#define MINOR(dev)      minor((dev))
+#define MKDEV(ma, mi)   makedev((ma), (mi))
+
+#endif	/* _LINUX_KDEV_T_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/kernel.h b/rtemsbsd/powerpc/include/linux/kernel.h
new file mode 100644
index 0000000..6fb6d8f
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/kernel.h
@@ -0,0 +1,236 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2014-2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_KERNEL_H_
+#define	_LINUX_KERNEL_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/libkern.h>
+#include <sys/stat.h>
+#include <sys/smp.h>
+#include <sys/stddef.h>
+#include <sys/syslog.h>
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/log2.h> 
+#include <asm/byteorder.h>
+
+#define KERN_CONT       ""
+#define	KERN_EMERG	"<0>"
+#define	KERN_ALERT	"<1>"
+#define	KERN_CRIT	"<2>"
+#define	KERN_ERR	"<3>"
+#define	KERN_WARNING	"<4>"
+#define	KERN_NOTICE	"<5>"
+#define	KERN_INFO	"<6>"
+#define	KERN_DEBUG	"<7>"
+
+#define	BUILD_BUG_ON(x)		CTASSERT(!(x))
+
+#define BUG()			panic("BUG")
+#define BUG_ON(condition)	do { if (__predict_false(condition)) BUG(); } while(0)
+#define	WARN_ON(condition)	({ int _warn_on = !!(condition); \
+    __predict_false(_warn_on); })
+
+#undef	ALIGN
+#define	ALIGN(x, y)		roundup2((x), (y))
+#undef PTR_ALIGN
+#define	PTR_ALIGN(p, a)		((__typeof(p))ALIGN((uintptr_t)(p), (a)))
+#define	DIV_ROUND_UP(x, n)	howmany(x, n)
+#define	DIV_ROUND_UP_ULL(x, n)	DIV_ROUND_UP((unsigned long long)(x), (n))
+#define	FIELD_SIZEOF(t, f)	sizeof(((t *)0)->f)
+
+#define	printk(X...)		printf(X)
+
+/*
+ * The "pr_debug()" and "pr_devel()" macros should produce zero code
+ * unless DEBUG is defined:
+ */
+#ifdef DEBUG
+#define pr_debug(fmt, ...) \
+        log(LOG_DEBUG, fmt, ##__VA_ARGS__)
+#define pr_devel(fmt, ...) \
+	log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) \
+        ({ if (0) log(LOG_DEBUG, fmt, ##__VA_ARGS__); 0; })
+#define pr_devel(fmt, ...) \
+	({ if (0) log(LOG_DEBUG, pr_fmt(fmt), ##__VA_ARGS__); 0; })
+#endif
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define printk_once(...) do {			\
+	static bool __print_once;		\
+						\
+	if (!__print_once) {			\
+		__print_once = true;		\
+		printk(__VA_ARGS__);		\
+	}					\
+} while (0)
+
+/*
+ * Log a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define log_once(level,...) do {		\
+	static bool __log_once;			\
+						\
+	if (!__log_once) {			\
+		__log_once = true;		\
+		log(level, __VA_ARGS__);	\
+	}					\
+} while (0)
+
+#define pr_emerg(fmt, ...) \
+	log(LOG_EMERG, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+	log(LOG_ALERT, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+	log(LOG_CRIT, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_once(fmt, ...) \
+	log_once(LOG_CRIT, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+	log(LOG_ERR, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+	log(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_warn_once(fmt, ...) \
+	log_once(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice(fmt, ...) \
+	log(LOG_NOTICE, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+	log(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_once(fmt, ...) \
+	log_once(LOG_INFO, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont(fmt, ...) \
+	printk(KERN_CONT fmt, ##__VA_ARGS__)
+
+#ifndef WARN
+#define WARN(condition, format...) ({                                   \
+        int __ret_warn_on = !!(condition);                              \
+        if (unlikely(__ret_warn_on))                                    \
+                pr_warning(format);                                     \
+        unlikely(__ret_warn_on);                                        \
+})
+#endif
+
+#define container_of(ptr, type, member)				\
+({								\
+	__typeof(((type *)0)->member) *_p = (ptr);		\
+	(type *)((char *)_p - offsetof(type, member));		\
+})
+  
+#define	ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
+
+#define	simple_strtoul	strtoul
+#define	simple_strtol	strtol
+#define kstrtol(a,b,c) ({*(c) = strtol(a,0,b);})
+int __must_check kstrtoint(const char *, unsigned int, int *);
+
+#define min(x, y)	((x) < (y) ? (x) : (y))
+#define max(x, y)	((x) > (y) ? (x) : (y))
+
+#define min3(a, b, c)	min(a, min(b,c))
+#define max3(a, b, c)	max(a, max(b,c))
+
+#define min_t(type, _x, _y)	((type)(_x) < (type)(_y) ? (type)(_x) : (type)(_y))
+#define max_t(type, _x, _y)	((type)(_x) > (type)(_y) ? (type)(_x) : (type)(_y))
+
+#define clamp_t(type, _x, min, max)	min_t(type, max_t(type, _x, min), max)
+#define clamp(x, lo, hi)		min( max(x,lo), hi)
+
+#define upper_32_bits(n)	((u32)(((n) >> 16) >> 16))
+
+#define lower_32_bits(n)	((u32)(n))
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define	num_possible_cpus()	mp_ncpus
+#define	num_online_cpus()	mp_ncpus
+
+typedef struct pm_message {
+        int event;
+} pm_message_t;
+
+/* Swap values of a and b */
+#define swap(a, b) do {			\
+	typeof(a) _swap_tmp = a;	\
+	a = b;				\
+	b = _swap_tmp;			\
+} while (0)
+
+#define	DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))
+
+static inline uintmax_t
+mult_frac(uintmax_t x, uintmax_t multiplier, uintmax_t divisor)
+{
+	uintmax_t q = (x / divisor);
+	uintmax_t r = (x % divisor);
+
+	return ((q * multiplier) + ((r * multiplier) / divisor));
+}
+
+static inline int64_t
+abs64(int64_t x)
+{
+	return (x < 0 ? -x : x);
+}
+
+#define	cpu_relax() RTEMS_COMPILER_MEMORY_BARRIER()
+
+#define	udelay(x) DELAY(x)
+
+#define usleep_range(x, y) usleep(x)
+
+#endif	/* _LINUX_KERNEL_H_ */
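
For illustration only (not from the imported sources): container_of() above
is the usual pointer-arithmetic definition, so the derived-structure idiom
used throughout the imported drivers works unchanged:

    #include <linux/kernel.h>

    struct example_ctx {
        int unit;
        int stats;
    };

    /* Recover the enclosing example_ctx from a pointer to its stats member. */
    static struct example_ctx *
    example_ctx_from_stats(int *statsp)
    {
        return (container_of(statsp, struct example_ctx, stats));
    }
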
diff --git a/rtemsbsd/powerpc/include/linux/kobject.h b/rtemsbsd/powerpc/include/linux/kobject.h
new file mode 100644
index 0000000..794d509
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/kobject.h
@@ -0,0 +1,173 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_KOBJECT_H_
+#define	_LINUX_KOBJECT_H_
+
+#include <machine/stdarg.h>
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+
+#if 0
+struct kobject;
+struct sysctl_oid;
+
+struct kobj_type {
+	void (*release)(struct kobject *kobj);
+	const struct sysfs_ops *sysfs_ops;
+	struct attribute **default_attrs;
+};
+
+extern struct kobj_type kfree_type;
+
+struct kobject {
+	struct kobject		*parent;
+	char			*name;
+	struct kref		kref;
+	struct kobj_type	*ktype;
+	struct list_head	entry;
+	struct sysctl_oid	*oidp;
+};
+
+extern struct kobject *mm_kobj;
+
+static inline void
+kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+{
+
+	kref_init(&kobj->kref);
+	INIT_LIST_HEAD(&kobj->entry);
+	kobj->ktype = ktype;
+	kobj->oidp = NULL;
+}
+
+static inline void kobject_put(struct kobject *kobj);
+void kobject_release(struct kref *kref);
+
+static inline void
+kobject_put(struct kobject *kobj)
+{
+
+	if (kobj)
+		kref_put(&kobj->kref, kobject_release);
+}
+
+static inline struct kobject *
+kobject_get(struct kobject *kobj)
+{
+
+	if (kobj)
+		kref_get(&kobj->kref);
+	return kobj;
+}
+
+static inline int
+kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
+{
+	char *old;
+	char *name;
+
+	old = kobj->name;
+
+	if (old && !fmt)
+		return 0;
+
+	name = kzalloc(MAXPATHLEN, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+	vsnprintf(name, MAXPATHLEN, fmt, args);
+	kobj->name = name;
+	kfree(old);
+	for (; *name != '\0'; name++)
+		if (*name == '/')
+			*name = '!';
+	return (0);
+}
+
+int	kobject_add(struct kobject *kobj, struct kobject *parent,
+	    const char *fmt, ...);
+
+static inline struct kobject *
+kobject_create(void)
+{
+	struct kobject *kobj;
+
+	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+	if (kobj == NULL)
+		return (NULL);
+	kobject_init(kobj, &kfree_type);
+
+	return (kobj);
+}
+
+static inline struct kobject *
+kobject_create_and_add(const char *name, struct kobject *parent)
+{
+	struct kobject *kobj;
+
+	kobj = kobject_create();
+	if (kobj == NULL)
+		return (NULL);
+	if (kobject_add(kobj, parent, "%s", name) == 0)
+		return (kobj);
+	kobject_put(kobj);
+
+	return (NULL);
+}
+
+
+static inline char *
+kobject_name(const struct kobject *kobj)
+{
+
+	return kobj->name;
+}
+
+int	kobject_set_name(struct kobject *kobj, const char *fmt, ...);
+int	kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+	    struct kobject *parent, const char *fmt, ...);
+
+/* sysfs.h calls for 'kobject', which is defined here, so we need to add
+ * the include only after the 'kobject' definition.

+ */
+#include <linux/sysfs.h>
+
+struct kobj_attribute {
+        struct attribute attr;
+        ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+                        char *buf);
+        ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+                         const char *buf, size_t count);
+};
+#endif
+
+#endif /* _LINUX_KOBJECT_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/kref.h b/rtemsbsd/powerpc/include/linux/kref.h
new file mode 100644
index 0000000..de5ddaa
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/kref.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _LINUX_KREF_H_
+#define _LINUX_KREF_H_
+
+#include <sys/types.h>
+#include <sys/refcount.h>
+
+#include <asm/atomic.h>
+
+struct kref {
+	atomic_t refcount;
+};
+
+static inline void
+kref_init(struct kref *kref)
+{
+
+	refcount_init(&kref->refcount.counter, 1);
+}
+
+static inline void
+kref_get(struct kref *kref)
+{
+
+	refcount_acquire(&kref->refcount.counter);
+}
+
+static inline int
+kref_put(struct kref *kref, void (*rel)(struct kref *kref))
+{
+
+	if (refcount_release(&kref->refcount.counter)) {
+		rel(kref);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int
+kref_sub(struct kref *kref, unsigned int count,
+    void (*rel)(struct kref *kref))
+{
+
+	while (count--) {
+		if (refcount_release(&kref->refcount.counter)) {
+			rel(kref);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static inline int __must_check
+kref_get_unless_zero(struct kref *kref)
+{
+
+	return atomic_add_unless(&kref->refcount, 1, 0);
+}
+
+#endif /* _LINUX_KREF_H_ */
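
A usage sketch (illustrative, not part of the imported diff): struct kref
wraps the FreeBSD refcount(9) primitives, so the standard Linux
release-callback pattern applies.  The sketch assumes the <linux/slab.h>
shim for kzalloc()/kfree() and <linux/kernel.h> for container_of():

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_obj {
        struct kref refcount;
        /* ... payload ... */
    };

    /* Called by the final kref_put(). */
    static void
    example_release(struct kref *kref)
    {
        struct example_obj *obj =
            container_of(kref, struct example_obj, refcount);

        kfree(obj);
    }

    /* Usage: kref_init(&obj->refcount) once after kzalloc(), kref_get() per
     * additional reference, kref_put(&obj->refcount, example_release) to
     * drop a reference. */
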
diff --git a/rtemsbsd/powerpc/include/linux/kthread.h b/rtemsbsd/powerpc/include/linux/kthread.h
new file mode 100644
index 0000000..db289df
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/kthread.h
@@ -0,0 +1,137 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_KTHREAD_H_
+#define	_LINUX_KTHREAD_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/sleepqueue.h>
+
+#include <machine/rtems-bsd-thread.h>
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#define	KTHREAD_LOCK(task) mtx_lock(&(task)->lock)
+#define	KTHREAD_UNLOCK(task) mtx_unlock(&(task)->lock)
+
+static inline void
+_kthread_fn(void *arg)
+{
+	struct task_struct *task;
+	struct thread *c;
+
+	task = arg;
+	task_struct_set(curthread, task);
+	c = task->task_thread;
+	sleepq_lock(c);
+	while (task->state == TASK_DORMANT)
+		sleepq_wait(c, 0);
+	sleepq_release(c);
+	if (task->should_stop == 0)
+		task->task_ret = task->task_fn(task->task_data);
+	KTHREAD_LOCK(task);
+	task->should_stop = TASK_STOPPED;
+	wakeup(task);
+	KTHREAD_UNLOCK(task);
+	kthread_exit();
+}
+
+static inline struct task_struct *
+_kthread_create(int (*threadfn)(void *data), void *data)
+{
+	struct task_struct *task;
+
+	task = kzalloc(sizeof(*task), GFP_KERNEL);
+	task->task_fn = threadfn;
+	task->task_data = data;
+	mtx_init(&task->lock, "kthread", NULL, MTX_DEF);
+
+	return (task);
+}
+
+#define	kthread_create(fn, data, fmt, ...)					\
+({									\
+	struct task_struct *_task;					\
+									\
+	_task = _kthread_create((fn), (data));				\
+	if (kthread_add(_kthread_fn, _task, NULL, &_task->task_thread,	\
+	    0, 0, fmt, ## __VA_ARGS__)) {				\
+		kfree(_task);						\
+		_task = NULL;						\
+	} else								\
+		task_struct_set(_task->task_thread, _task);		\
+	_task;								\
+})
+
+#define	kthread_should_stop()	current->should_stop
+
+static inline int
+kthread_stop(struct task_struct *task)
+{
+
+	KTHREAD_LOCK(task);
+	task->should_stop = TASK_SHOULD_STOP;
+	wake_up_process(task);
+	while (task->should_stop != TASK_STOPPED)
+		msleep(task, &task->lock, PWAIT, "kstop", hz);
+	KTHREAD_UNLOCK(task);
+	return task->task_ret;
+}
+
+static inline void
+kthread_bind(struct task_struct *task, unsigned int cpu)
+{
+	/* FIXME */
+	rtems_id task_id = rtems_bsd_get_task_id(task->task_thread);
+	rtems_id sched_id;
+	rtems_status_code sc;
+	rtems_task_priority prio;
+
+	sc = rtems_scheduler_ident(cpu, &sched_id);
+	if (sc != RTEMS_SUCCESSFUL)
+		panic("kthread_bind: scheduler ident");
+
+	sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
+	if (sc != RTEMS_SUCCESSFUL)
+		panic("kthread_bind: get priority");
+
+	sc = rtems_task_set_scheduler(task_id, sched_id, prio);
+	if (sc != RTEMS_SUCCESSFUL)
+		panic("kthread_bind: set scheduler");
+}
+
+#endif	/* _LINUX_KTHREAD_H_ */
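
As an illustration (not part of the imported files): kthread_create()
builds a task_struct plus a FreeBSD kthread that parks until woken, and
kthread_stop() joins it.  A sketch, assuming wake_up_process() from the
<linux/sched.h> shim:

    #include <linux/kthread.h>

    static int
    example_thread(void *arg)
    {
        (void)arg;
        while (!kthread_should_stop()) {
            /* ... periodic work ... */
        }
        return (0);
    }

    static struct task_struct *example_task;

    static void
    example_start(void)
    {
        example_task = kthread_create(example_thread, NULL, "example");
        if (example_task != NULL)
            wake_up_process(example_task);
    }
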
diff --git a/rtemsbsd/powerpc/include/linux/list.h b/rtemsbsd/powerpc/include/linux/list.h
new file mode 100644
index 0000000..aa5b7e2
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/list.h
@@ -0,0 +1,434 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _LINUX_LIST_H_
+#define _LINUX_LIST_H_
+
+/*
+ * Since LIST_HEAD conflicts with the Linux definition, we must include here
+ * any FreeBSD header which requires it, so it is resolved with the correct
+ * definition prior to the undef.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/cpuset.h>
+#include <sys/jail.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/vnode.h>
+#include <sys/conf.h>
+#include <sys/socket.h>
+#include <sys/mbuf.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/vnet.h>
+
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+
+#include <netinet6/in6_var.h>
+#include <netinet6/nd6.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+
+#define	prefetch(x)
+
+struct list_head {
+	struct list_head *next;
+	struct list_head *prev;
+};
+
+static inline void
+INIT_LIST_HEAD(struct list_head *list)
+{
+
+	list->next = list->prev = list;
+}
+ 
+static inline int
+list_empty(const struct list_head *head)
+{
+
+	return (head->next == head);
+}
+
+static inline void
+list_del(struct list_head *entry)
+{
+
+	entry->next->prev = entry->prev;
+	entry->prev->next = entry->next;
+}
+
+static inline void
+list_replace(struct list_head *old, struct list_head *new)
+{
+	new->next = old->next;
+	new->next->prev = new;
+	new->prev = old->prev;
+	new->prev->next = new;
+}
+
+static inline void
+_list_add(struct list_head *new, struct list_head *prev,
+    struct list_head *next)
+{
+
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+
+static inline void
+list_del_init(struct list_head *entry)
+{	
+
+	list_del(entry);
+	INIT_LIST_HEAD(entry);
+}
+
+#define	list_entry(ptr, type, field)	container_of(ptr, type, field)
+
+#define list_first_entry(ptr, type, member) \
+        list_entry((ptr)->next, type, member)
+
+#define	list_next_entry(ptr, member)					\
+	list_entry(((ptr)->member.next), typeof(*(ptr)), member)
+
+#define	list_for_each(p, head)						\
+	for (p = (head)->next; p != (head); p = p->next)
+
+#define	list_for_each_safe(p, n, head)					\
+	for (p = (head)->next, n = p->next; p != (head); p = n, n = p->next)
+
+#define list_for_each_entry(p, h, field)				\
+	for (p = list_entry((h)->next, typeof(*p), field); &p->field != (h); \
+	    p = list_entry(p->field.next, typeof(*p), field))
+
+#define list_for_each_entry_safe(p, n, h, field)			\
+	for (p = list_entry((h)->next, typeof(*p), field), 		\
+	    n = list_entry(p->field.next, typeof(*p), field); &p->field != (h);\
+	    p = n, n = list_entry(n->field.next, typeof(*n), field))
+
+#define	list_for_each_entry_continue(p, h, field)			\
+	for (p = list_next_entry((p), field); &p->field != (h);		\
+	    p = list_next_entry((p), field))
+
+#define	list_for_each_entry_safe_from(pos, n, head, member) 			\
+	for (n = list_entry(pos->member.next, typeof(*pos), member);		\
+	     &pos->member != (head);						\
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+#define	list_for_each_entry_reverse(p, h, field)			\
+	for (p = list_entry((h)->prev, typeof(*p), field); &p->field != (h); \
+	    p = list_entry(p->field.prev, typeof(*p), field))
+
+#define	list_for_each_prev(p, h) for (p = (h)->prev; p != (h); p = p->prev)
+
+static inline void
+list_add(struct list_head *new, struct list_head *head)
+{
+
+	_list_add(new, head, head->next);
+}
+
+static inline void
+list_add_tail(struct list_head *new, struct list_head *head)
+{
+
+	_list_add(new, head->prev, head);
+}
+
+static inline void
+list_move(struct list_head *list, struct list_head *head)
+{
+
+	list_del(list);
+	list_add(list, head);
+}
+
+static inline void
+list_move_tail(struct list_head *entry, struct list_head *head)
+{
+
+	list_del(entry);
+	list_add_tail(entry, head);
+}
+
+static inline void
+_list_splice(const struct list_head *list, struct list_head *prev,  
+    struct list_head *next)
+{
+	struct list_head *first;
+	struct list_head *last;
+
+	if (list_empty(list))
+		return;
+	first = list->next;
+	last = list->prev;
+	first->prev = prev;
+	prev->next = first;
+	last->next = next;
+	next->prev = last;
+}
+
+static inline void
+list_splice(const struct list_head *list, struct list_head *head)
+{
+
+	_list_splice(list, head, head->next);
+} 
+
+static inline void
+list_splice_tail(struct list_head *list, struct list_head *head)
+{
+
+	_list_splice(list, head->prev, head);
+}
+ 
+static inline void
+list_splice_init(struct list_head *list, struct list_head *head)
+{
+
+	_list_splice(list, head, head->next);
+	INIT_LIST_HEAD(list);   
+}
+ 
+static inline void
+list_splice_tail_init(struct list_head *list, struct list_head *head)
+{
+
+	_list_splice(list, head->prev, head);
+	INIT_LIST_HEAD(list);
+}
+
+#undef LIST_HEAD
+#define LIST_HEAD(name)	struct list_head name = { &(name), &(name) }
+
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+#define	HLIST_HEAD_INIT { }
+#define	HLIST_HEAD(name) struct hlist_head name = HLIST_HEAD_INIT
+#define	INIT_HLIST_HEAD(head) (head)->first = NULL
+#define	INIT_HLIST_NODE(node)						\
+do {									\
+	(node)->next = NULL;						\
+	(node)->pprev = NULL;						\
+} while (0)
+
+static inline int
+hlist_unhashed(const struct hlist_node *h)
+{
+
+	return !h->pprev;
+}
+
+static inline int
+hlist_empty(const struct hlist_head *h)
+{
+
+	return !h->first;
+}
+
+static inline void
+hlist_del(struct hlist_node *n)
+{
+
+        if (n->next)
+                n->next->pprev = n->pprev;
+        *n->pprev = n->next;
+}
+
+static inline void
+hlist_del_init(struct hlist_node *n)
+{
+
+	if (hlist_unhashed(n))
+		return;
+	hlist_del(n);
+	INIT_HLIST_NODE(n);
+}
+
+static inline void
+hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+
+	n->next = h->first;
+	if (h->first)
+		h->first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+static inline void
+hlist_add_before(struct hlist_node *n, struct hlist_node *next)
+{
+
+	n->pprev = next->pprev;
+	n->next = next;
+	next->pprev = &n->next;
+	*(n->pprev) = n;
+}
+ 
+static inline void
+hlist_add_after(struct hlist_node *n, struct hlist_node *next)
+{
+
+	next->next = n->next;
+	n->next = next;
+	next->pprev = &n->next;
+	if (next->next)
+		next->next->pprev = &next->next;
+}
+ 
+static inline void
+hlist_move_list(struct hlist_head *old, struct hlist_head *new)
+{
+
+	new->first = old->first;
+	if (new->first)
+		new->first->pprev = &new->first;
+	old->first = NULL;
+}
+
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+	return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_cut_position(struct list_head *list,
+		struct list_head *head, struct list_head *entry)
+{
+	struct list_head *new_first = entry->next;
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry;
+	entry->next = list;
+	head->next = new_first;
+	new_first->prev = head;
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *	and if so we won't cut the list
+ *
+ * This helper moves the initial part of @head, up to and
+ * including @entry, from @head to @list. You should
+ * pass on @entry an element you know is on @head. @list
+ * should be an empty list or a list you do not care about
+ * losing its data.
+ *
+ */
+static inline void list_cut_position(struct list_head *list,
+		struct list_head *head, struct list_head *entry)
+{
+	if (list_empty(head))
+		return;
+	if (list_is_singular(head) &&
+		(head->next != entry && head != entry))
+		return;
+	if (entry == head)
+		INIT_LIST_HEAD(list);
+	else
+		__list_cut_position(list, head, entry);
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+                                const struct list_head *head)
+{
+        return list->next == head;
+}
+ 
+#define	hlist_entry(ptr, type, field)	container_of(ptr, type, field)
+
+#define	hlist_for_each(p, head)						\
+	for (p = (head)->first; p; p = p->next)
+
+#define	hlist_for_each_safe(p, n, head)					\
+	for (p = (head)->first; p && ({ n = p->next; 1; }); p = n)
+
+#define	hlist_entry_safe(ptr, type, member) \
+	((ptr) ? hlist_entry(ptr, type, member) : NULL)
+
+#define	hlist_for_each_entry(pos, head, member)				\
+	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#define hlist_for_each_entry_continue(tp, p, field)			\
+	for (p = (p)->next;						\
+	    p ? (tp = hlist_entry(p, typeof(*tp), field)): NULL; p = p->next)
+
+#define	hlist_for_each_entry_from(tp, p, field)				\
+	for (; p ? (tp = hlist_entry(p, typeof(*tp), field)): NULL; p = p->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
+	for (pos = (head)->first;					 \
+	     (pos) != 0 && ({ n = (pos)->next; \
+		 tpos = hlist_entry((pos), typeof(*(tpos)), member); 1;}); \
+	     pos = (n))
+
+#define	hlist_add_head_rcu(n, h)	hlist_add_head(n, h)
+
+#define	hlist_del_init_rcu(n)		hlist_del_init(n)
+
+#endif /* _LINUX_LIST_H_ */
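
For illustration only (not from the imported sources): this is the classic
intrusive doubly linked list API, with LIST_HEAD redefined on top of the
FreeBSD namespace.  A sketch:

    #include <linux/list.h>

    struct example_item {
        int value;
        struct list_head node;
    };

    static LIST_HEAD(example_list);

    /* Items are queued with list_add_tail(&item->node, &example_list). */
    static int
    example_sum(void)
    {
        struct example_item *it;
        int sum = 0;

        list_for_each_entry(it, &example_list, node)
            sum += it->value;
        return (sum);
    }
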
diff --git a/rtemsbsd/powerpc/include/linux/log2.h b/rtemsbsd/powerpc/include/linux/log2.h
new file mode 100644
index 0000000..a44c560
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/log2.h
@@ -0,0 +1,131 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_LOG2_H_
+#define	_LINUX_LOG2_H_
+
+#include <linux/types.h>
+
+#include <sys/libkern.h>
+
+static inline unsigned long
+roundup_pow_of_two(unsigned long x)
+{
+	return (1UL << flsl(x - 1));
+}
+
+static inline int
+is_power_of_2(unsigned long n)
+{
+	return (n == roundup_pow_of_two(n));
+}
+
+static inline unsigned long
+rounddown_pow_of_two(unsigned long x)
+{
+        return (1UL << (flsl(x) - 1));
+}
+
+#define	ilog2(n)				\
+(						\
+	__builtin_constant_p(n) ? (		\
+		(n) < 1 ? -1 :			\
+		(n) & (1ULL << 63) ? 63 :	\
+		(n) & (1ULL << 62) ? 62 :	\
+		(n) & (1ULL << 61) ? 61 :	\
+		(n) & (1ULL << 60) ? 60 :	\
+		(n) & (1ULL << 59) ? 59 :	\
+		(n) & (1ULL << 58) ? 58 :	\
+		(n) & (1ULL << 57) ? 57 :	\
+		(n) & (1ULL << 56) ? 56 :	\
+		(n) & (1ULL << 55) ? 55 :	\
+		(n) & (1ULL << 54) ? 54 :	\
+		(n) & (1ULL << 53) ? 53 :	\
+		(n) & (1ULL << 52) ? 52 :	\
+		(n) & (1ULL << 51) ? 51 :	\
+		(n) & (1ULL << 50) ? 50 :	\
+		(n) & (1ULL << 49) ? 49 :	\
+		(n) & (1ULL << 48) ? 48 :	\
+		(n) & (1ULL << 47) ? 47 :	\
+		(n) & (1ULL << 46) ? 46 :	\
+		(n) & (1ULL << 45) ? 45 :	\
+		(n) & (1ULL << 44) ? 44 :	\
+		(n) & (1ULL << 43) ? 43 :	\
+		(n) & (1ULL << 42) ? 42 :	\
+		(n) & (1ULL << 41) ? 41 :	\
+		(n) & (1ULL << 40) ? 40 :	\
+		(n) & (1ULL << 39) ? 39 :	\
+		(n) & (1ULL << 38) ? 38 :	\
+		(n) & (1ULL << 37) ? 37 :	\
+		(n) & (1ULL << 36) ? 36 :	\
+		(n) & (1ULL << 35) ? 35 :	\
+		(n) & (1ULL << 34) ? 34 :	\
+		(n) & (1ULL << 33) ? 33 :	\
+		(n) & (1ULL << 32) ? 32 :	\
+		(n) & (1ULL << 31) ? 31 :	\
+		(n) & (1ULL << 30) ? 30 :	\
+		(n) & (1ULL << 29) ? 29 :	\
+		(n) & (1ULL << 28) ? 28 :	\
+		(n) & (1ULL << 27) ? 27 :	\
+		(n) & (1ULL << 26) ? 26 :	\
+		(n) & (1ULL << 25) ? 25 :	\
+		(n) & (1ULL << 24) ? 24 :	\
+		(n) & (1ULL << 23) ? 23 :	\
+		(n) & (1ULL << 22) ? 22 :	\
+		(n) & (1ULL << 21) ? 21 :	\
+		(n) & (1ULL << 20) ? 20 :	\
+		(n) & (1ULL << 19) ? 19 :	\
+		(n) & (1ULL << 18) ? 18 :	\
+		(n) & (1ULL << 17) ? 17 :	\
+		(n) & (1ULL << 16) ? 16 :	\
+		(n) & (1ULL << 15) ? 15 :	\
+		(n) & (1ULL << 14) ? 14 :	\
+		(n) & (1ULL << 13) ? 13 :	\
+		(n) & (1ULL << 12) ? 12 :	\
+		(n) & (1ULL << 11) ? 11 :	\
+		(n) & (1ULL << 10) ? 10 :	\
+		(n) & (1ULL <<  9) ?  9 :	\
+		(n) & (1ULL <<  8) ?  8 :	\
+		(n) & (1ULL <<  7) ?  7 :	\
+		(n) & (1ULL <<  6) ?  6 :	\
+		(n) & (1ULL <<  5) ?  5 :	\
+		(n) & (1ULL <<  4) ?  4 :	\
+		(n) & (1ULL <<  3) ?  3 :	\
+		(n) & (1ULL <<  2) ?  2 :	\
+		(n) & (1ULL <<  1) ?  1 :	\
+		(n) & (1ULL <<  0) ?  0 :	\
+		-1) :				\
+	(sizeof(n) <= 4) ?			\
+	fls((u32)(n)) - 1 : flsll((u64)(n)) - 1	\
+)
+
+#define	order_base_2(x) ilog2(roundup_pow_of_two(x))
+
+#endif	/* _LINUX_LOG2_H_ */
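
As an illustration (not part of the imported files): ilog2() folds to a
constant for compile-time arguments and falls back to fls()/flsll()
otherwise.  For example:

    #include <linux/log2.h>

    static void
    example_log2(void)
    {
        int shift = ilog2(4096);                        /* 12 */
        unsigned long size = roundup_pow_of_two(3000);  /* 4096 */

        (void)shift;
        (void)size;
    }
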
diff --git a/rtemsbsd/powerpc/include/linux/module.h b/rtemsbsd/powerpc/include/linux/module.h
new file mode 100644
index 0000000..3b21b4f
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/module.h
@@ -0,0 +1,102 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_MODULE_H_
+#define	_LINUX_MODULE_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/module.h>
+
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/kobject.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#define MODULE_AUTHOR(name)
+#define MODULE_DESCRIPTION(name)
+#define MODULE_LICENSE(name)
+
+#define	THIS_MODULE	((struct module *)0)
+
+#define	EXPORT_SYMBOL(name)
+#define	EXPORT_SYMBOL_GPL(name)
+
+/* OFED pre-module initialization */
+#define	SI_SUB_OFED_PREINIT	(SI_SUB_ROOT_CONF - 2)
+/* OFED default module initialization */
+#define	SI_SUB_OFED_MODINIT	(SI_SUB_ROOT_CONF - 1)
+
+#include <sys/linker.h>
+
+static inline void
+_module_run(void *arg)
+{
+	void (*fn)(void);
+#ifdef OFED_DEBUG_INIT
+	char name[1024];
+	caddr_t pc;
+	long offset;
+
+	pc = (caddr_t)arg;
+	if (linker_search_symbol_name(pc, name, sizeof(name), &offset) != 0)
+		printf("Running ??? (%p)\n", pc);
+	else
+		printf("Running %s (%p)\n", name, pc);
+#endif
+	fn = arg;
+	DROP_GIANT();
+	fn();
+	PICKUP_GIANT();
+}
+
+#define	module_init(fn)							\
+	SYSINIT(fn, SI_SUB_OFED_MODINIT, SI_ORDER_FIRST, _module_run, (fn))
+
+#define	module_exit(fn)						\
+	SYSUNINIT(fn, SI_SUB_OFED_MODINIT, SI_ORDER_SECOND, _module_run, (fn))
+
+/*
+ * The following two macros are a workaround for not having a module
+ * load and unload order resolver:
+ */
+#define	module_init_order(fn, order)					\
+	SYSINIT(fn, SI_SUB_OFED_MODINIT, (order), _module_run, (fn))
+
+#define	module_exit_order(fn, order)				\
+	SYSUNINIT(fn, SI_SUB_OFED_MODINIT, (order), _module_run, (fn))
+
+#define	module_get(module)
+#define	module_put(module)
+#define	try_module_get(module)	1
+
+#endif	/* _LINUX_MODULE_H_ */
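
A usage sketch (illustrative, not part of the imported diff):
module_init()/module_exit() are rewritten as SYSINIT()/SYSUNINIT() hooks at
SI_SUB_OFED_MODINIT, and _module_run() invokes the hook as a plain void
function:

    #include <linux/module.h>

    /* Runs once at SI_SUB_OFED_MODINIT via _module_run(). */
    static void
    example_init(void)
    {
        pr_info("example driver initialized\n");
    }

    module_init(example_init);
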
diff --git a/rtemsbsd/powerpc/include/linux/moduleparam.h b/rtemsbsd/powerpc/include/linux/moduleparam.h
new file mode 100644
index 0000000..9699b33
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/moduleparam.h
@@ -0,0 +1,234 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_MODULEPARAM_H_
+#define	_LINUX_MODULEPARAM_H_
+
+#include <linux/types.h>
+
+/*
+ * These are presently not hooked up to anything.  In Linux, module
+ * parameters can be set when modules are loaded.  On FreeBSD they could
+ * be mapped to kenv in the future.
+ */
+struct kernel_param;
+
+typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
+typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
+
+struct kernel_param {
+	const char	*name;
+	u16		perm;
+	u16		flags;
+	param_set_fn	set;
+	param_get_fn	get;
+	union {
+		void	*arg;
+		struct kparam_string	*str;
+		struct kparam_array	*arr;
+	} un;
+};
+
+#define	KPARAM_ISBOOL	2
+
+struct kparam_string {
+	unsigned int maxlen;
+	char *string;
+};
+
+struct kparam_array
+{
+	unsigned int	max;
+	unsigned int	*num;
+	param_set_fn	set;
+	param_get_fn	get;
+	unsigned int	elemsize;
+	void 		*elem;
+};
+
+static inline void
+param_sysinit(struct kernel_param *param)
+{
+}
+
+#define	module_param_call(name, set, get, arg, perm)			\
+	static struct kernel_param __param_##name =			\
+	    { #name, perm, 0, set, get, { arg } };			\
+	SYSINIT(name##_param_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST,	\
+	    param_sysinit, &__param_##name);
+
+#define	module_param_string(name, string, len, perm)
+
+#define	module_param_named(name, var, type, mode)			\
+	module_param_call(name, param_set_##type, param_get_##type, &var, mode)
+
+#define	module_param(var, type, mode)					\
+	module_param_named(var, var, type, mode)
+
+#define module_param_array(var, type, addr_argc, mode)                  \
+        module_param_named(var, var, type, mode)
+
+#define	MODULE_PARM_DESC(name, desc)
+
+static inline int
+param_set_byte(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_byte(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_short(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_short(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_ushort(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_ushort(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_int(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_int(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_uint(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_uint(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_long(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_long(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_ulong(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_ulong(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_charp(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_charp(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+
+static inline int
+param_set_bool(const char *val, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+static inline int
+param_get_bool(char *buffer, struct kernel_param *kp)
+{
+
+	return 0;
+}
+
+#endif	/* _LINUX_MODULEPARAM_H_ */
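
A minimal sketch of what these stubs amount to in practice (hypothetical example, not from the imported sources): the parameter machinery compiles, but nothing ever writes the variable, so the compile-time default stays in effect:

#include <sys/param.h>
#include <sys/kernel.h>

#include <linux/moduleparam.h>

/* Hypothetical tunable: module_param() emits a kernel_param record plus a
 * SYSINIT that calls the empty param_sysinit(), so example_debug keeps its
 * compile-time default on RTEMS. */
static int example_debug = 0;
module_param(example_debug, int, 0644);
MODULE_PARM_DESC(example_debug, "Debug level (not settable at run time here)");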
diff --git a/rtemsbsd/powerpc/include/linux/netdevice.h b/rtemsbsd/powerpc/include/linux/netdevice.h
new file mode 100644
index 0000000..b96e2d9
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/netdevice.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+struct ifnet;
+
+struct net_device {
+	void *priv;
+	struct ifnet *ifp;
+};
+
+static inline void *
+netdev_priv(struct net_device *net_dev)
+{
+
+	return (net_dev->priv);
+}
+
+#define	netif_err(...)	do { } while (0)
+#define	netif_dbg(...)	do { } while (0)
+#define	netdev_err(...)	do { } while (0)
+#define	netdev_dbg(...)	do { } while (0)
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_NETDEVICE_H */
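
A small sketch of how a driver is expected to use this handle (hypothetical names, not from the imported sources); netdev_priv() is just a field access here:

#include <linux/netdevice.h>

/* Hypothetical private state: the shim's net_device merely pairs the
 * driver's private data with the FreeBSD ifnet. */
struct example_priv {
	int link_up;
};

static void
example_link_up(struct net_device *net_dev)
{
	struct example_priv *priv = netdev_priv(net_dev);

	priv->link_up = 1;
}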
diff --git a/rtemsbsd/powerpc/include/linux/of.h b/rtemsbsd/powerpc/include/linux/of.h
new file mode 100644
index 0000000..85a8887
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/of.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_OF_H
+#define	_LINUX_OF_H
+
+#include <stdbool.h>
+
+#include <libfdt.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef uint32_t phandle;
+
+struct device_node {
+	int offset;
+	const char *full_name;
+};
+
+static inline struct device_node *
+of_node_get(struct device_node *dn)
+{
+
+	return (dn);
+}
+
+static inline void
+of_node_put(struct device_node *dn)
+{
+
+	(void)dn;
+}
+
+const void *of_get_property(const struct device_node *dn, const char *name,
+    int *len);
+
+bool of_device_is_available(const struct device_node *dn);
+
+int of_device_is_compatible(const struct device_node *dn, const char *name);
+
+struct device_node *of_find_compatible_node(struct device_node *dns,
+    const struct device_node *dn, const char *type, const char *compatible);
+
+#define for_each_compatible_node(dn, type, compatible) \
+    for (dn = of_find_compatible_node(&of_dns, NULL, type, compatible); \
+    dn != NULL; dn = of_find_compatible_node(&of_dns, dn, type, compatible))
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_OF_H */
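
A sketch of the intended iteration pattern (hypothetical; of_dns is assumed to be the root handle provided elsewhere by the platform glue, and "fsl,fman" is only an example compatible string):

#include <linux/of.h>

/* Assumption: of_dns is defined by the OF glue code that backs this header. */
extern struct device_node of_dns;

/* Count available nodes matching a hypothetical compatible string. */
static int
example_count_nodes(void)
{
	struct device_node *dn;
	int count = 0;

	for_each_compatible_node(dn, NULL, "fsl,fman") {
		if (of_device_is_available(dn))
			count++;
	}
	return count;
}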
diff --git a/rtemsbsd/powerpc/include/linux/of_address.h b/rtemsbsd/powerpc/include/linux/of_address.h
new file mode 100644
index 0000000..928b7e5
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/of_address.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_OF_ADDRESS_H
+#define	_LINUX_OF_ADDRESS_H
+
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+int of_address_to_resource(struct device_node *dn, int index,
+    struct resource *res);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_OF_ADDRESS_H */
diff --git a/rtemsbsd/powerpc/include/linux/of_irq.h b/rtemsbsd/powerpc/include/linux/of_irq.h
new file mode 100644
index 0000000..8802258
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/of_irq.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_OF_IRQ_H
+#define	_LINUX_OF_IRQ_H
+
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+int of_irq_to_resource(struct device_node *dn, int index,
+    struct resource *res);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_OF_IRQ_H */
diff --git a/rtemsbsd/powerpc/include/linux/of_mdio.h b/rtemsbsd/powerpc/include/linux/of_mdio.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/of_net.h b/rtemsbsd/powerpc/include/linux/of_net.h
new file mode 100644
index 0000000..92ba55e
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/of_net.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_OF_NET_H
+#define	_LINUX_OF_NET_H
+
+#include <linux/of.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+const void *of_get_mac_address(struct device_node *dn);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_OF_NET_H */
diff --git a/rtemsbsd/powerpc/include/linux/of_platform.h b/rtemsbsd/powerpc/include/linux/of_platform.h
new file mode 100644
index 0000000..25dcd6f
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/of_platform.h
@@ -0,0 +1 @@
+#include <linux/platform_device.h>
diff --git a/rtemsbsd/powerpc/include/linux/of_reserved_mem.h b/rtemsbsd/powerpc/include/linux/of_reserved_mem.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/page.h b/rtemsbsd/powerpc/include/linux/page.h
new file mode 100644
index 0000000..acc9f03
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/page.h
@@ -0,0 +1,53 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_PAGE_H_
+#define _LINUX_PAGE_H_
+
+#include <linux/types.h>
+
+#include <sys/param.h>
+
+#include <machine/atomic.h>
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#define page	vm_page
+
+#define	virt_to_page(x)	PHYS_TO_VM_PAGE(vtophys((x)))
+
+#define	clear_page(page)		memset((page), 0, PAGE_SIZE)
+#define	pgprot_noncached(prot)		VM_MEMATTR_UNCACHEABLE
+#define	pgprot_writecombine(prot)	VM_MEMATTR_WRITE_COMBINING
+
+#undef	PAGE_MASK
+#define	PAGE_MASK	(~(PAGE_SIZE-1))
+
+#endif	/* _LINUX_PAGE_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/percpu.h b/rtemsbsd/powerpc/include/linux/percpu.h
new file mode 100644
index 0000000..73b15a1
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/percpu.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_PERCPU_H
+#define	_LINUX_PERCPU_H
+
+#include <linux/threads.h>
+
+#include <rtems/score/threaddispatch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define	DEFINE_PER_CPU(_type, _designator) \
+    _type _designator[NR_CPUS]
+
+#define per_cpu(_designator, _cpu) \
+    (_designator[_cpu])
+
+#define this_cpu_ptr(_ptr_designator) \
+    (&(*_ptr_designator)[_CPU_SMP_Get_current_processor()])
+
+#define get_cpu_var(_designator) \
+    (*({ Per_CPU_Control *_cpu_self = _Thread_Dispatch_disable(); \
+    &_designator[_Per_CPU_Get_index(_cpu_self)]; }))
+
+#define put_cpu_var(_designator) \
+    _Thread_Dispatch_enable(_Per_CPU_Get())
+
+#define	per_cpu_ptr(_ptr, _index) \
+    ((_ptr) + (_index))
+
+#define	raw_cpu_ptr(_ptr) \
+    per_cpu_ptr(_ptr, _CPU_SMP_Get_current_processor())
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_PERCPU_H */
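
A brief sketch of the per-CPU access pattern these macros support (hypothetical counter, not from the imported sources):

#include <linux/percpu.h>

/* Hypothetical per-CPU counter: DEFINE_PER_CPU() is a plain NR_CPUS-sized
 * array, and get_cpu_var()/put_cpu_var() bracket the access with RTEMS
 * thread-dispatch disable/enable. */
static DEFINE_PER_CPU(unsigned long, example_count);

static void
example_bump(void)
{
	get_cpu_var(example_count)++;
	put_cpu_var(example_count);
}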
diff --git a/rtemsbsd/powerpc/include/linux/phy.h b/rtemsbsd/powerpc/include/linux/phy.h
new file mode 100644
index 0000000..1ce8965
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/phy.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_PHY_H
+#define	_LINUX_PHY_H
+
+#include <linux/list.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef enum {
+	PHY_INTERFACE_MODE_NA,
+	PHY_INTERFACE_MODE_MII,
+	PHY_INTERFACE_MODE_GMII,
+	PHY_INTERFACE_MODE_SGMII,
+	PHY_INTERFACE_MODE_TBI,
+	PHY_INTERFACE_MODE_REVMII,
+	PHY_INTERFACE_MODE_RMII,
+	PHY_INTERFACE_MODE_RGMII,
+	PHY_INTERFACE_MODE_RGMII_ID,
+	PHY_INTERFACE_MODE_RGMII_RXID,
+	PHY_INTERFACE_MODE_RGMII_TXID,
+	PHY_INTERFACE_MODE_RTBI,
+	PHY_INTERFACE_MODE_SMII,
+	PHY_INTERFACE_MODE_XGMII,
+	PHY_INTERFACE_MODE_MOCA,
+	PHY_INTERFACE_MODE_QSGMII,
+	PHY_INTERFACE_MODE_MAX
+} phy_interface_t;
+
+#define	SPEED_10 10
+#define	SPEED_100 100
+#define	SPEED_1000 1000
+#define	SPEED_2500 2500
+#define	SPEED_5000 5000
+#define	SPEED_10000 10000
+#define	SPEED_20000 20000
+#define	SPEED_25000 25000
+#define	SPEED_40000 40000
+#define	SPEED_50000 50000
+#define	SPEED_56000 56000
+#define	SPEED_100000 100000
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_PHY_H */
diff --git a/rtemsbsd/powerpc/include/linux/phy_fixed.h b/rtemsbsd/powerpc/include/linux/phy_fixed.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/platform_device.h b/rtemsbsd/powerpc/include/linux/platform_device.h
new file mode 100644
index 0000000..d6374d4
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/platform_device.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_PLATFORM_DEVICE_H
+#define	_LINUX_PLATFORM_DEVICE_H
+
+#include <linux/device.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+struct platform_device {
+	struct device dev;
+	void *platform_data;
+};
+
+struct resource *platform_get_resource_impl(struct platform_device *dev,
+    unsigned int type, unsigned int num, struct resource *res);
+
+#define	platform_get_resource(dev, type, num) \
+    platform_get_resource_impl(dev, type, num, &platform_resource)
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_PLATFORM_DEVICE_H */
diff --git a/rtemsbsd/powerpc/include/linux/rbtree.h b/rtemsbsd/powerpc/include/linux/rbtree.h
new file mode 100644
index 0000000..bfe28b6
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/rbtree.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2013 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _LINUX_RBTREE_H
+#define _LINUX_RBTREE_H
+
+#include <rtems/score/rbtree.h>
+
+struct rb_node {
+  struct rb_node *rb_left;
+  struct rb_node *rb_right;
+  struct rb_node *rb_parent;
+  int rb_color;
+};
+
+RTEMS_STATIC_ASSERT(
+  sizeof( struct rb_node ) == sizeof( RBTree_Node ),
+  rb_node_size
+);
+
+RTEMS_STATIC_ASSERT(
+  offsetof( struct rb_node, rb_left ) == offsetof( RBTree_Node, Node.rbe_left ),
+  rb_node_left
+);
+
+RTEMS_STATIC_ASSERT(
+  offsetof( struct rb_node, rb_right ) == offsetof( RBTree_Node, Node.rbe_right ),
+  rb_node_right
+);
+
+RTEMS_STATIC_ASSERT(
+  offsetof( struct rb_node, rb_parent ) == offsetof( RBTree_Node, Node.rbe_parent ),
+  rb_node_parent
+);
+
+RTEMS_STATIC_ASSERT(
+  offsetof( struct rb_node, rb_color ) == offsetof( RBTree_Node, Node.rbe_color ),
+  rb_node_color
+);
+
+struct rb_root {
+  struct rb_node *rb_node;
+};
+
+RTEMS_STATIC_ASSERT(
+  sizeof( struct rb_root ) == sizeof( RBTree_Control ),
+  rb_root_size
+);
+
+RTEMS_STATIC_ASSERT(
+  offsetof( struct rb_root, rb_node ) == offsetof( RBTree_Control, rbh_root ),
+  rb_root_node
+);
+
+#undef RB_ROOT
+#define RB_ROOT ( (struct rb_root) { NULL } )
+
+#define rb_entry( p, container, field ) RTEMS_CONTAINER_OF( p, container, field )
+
+static inline void rb_insert_color( struct rb_node *node, struct rb_root *root)
+{
+  _RBTree_Insert_color( (RBTree_Control *) root, (RBTree_Node *) node );
+}
+
+static inline void rb_erase( struct rb_node *node, struct rb_root *root )
+{
+  _RBTree_Extract( (RBTree_Control *) root, (RBTree_Node *) node );
+}
+
+static inline struct rb_node *rb_next( struct rb_node *node )
+{
+  return (struct rb_node *) _RBTree_Successor( (RBTree_Node *) node );
+}
+
+static inline struct rb_node *rb_prev( struct rb_node *node )
+{
+  return (struct rb_node *) _RBTree_Predecessor( (RBTree_Node *) node );
+}
+
+static inline struct rb_node *rb_first( struct rb_root *root )
+{
+  return (struct rb_node *) _RBTree_Minimum( (RBTree_Control *) root );
+}
+
+static inline struct rb_node *rb_last( struct rb_root *root )
+{
+  return (struct rb_node *) _RBTree_Maximum( (RBTree_Control *) root );
+}
+
+static inline void rb_replace_node(
+  struct rb_node *victim,
+  struct rb_node *replacement,
+  struct rb_root *root
+)
+{
+  _RBTree_Replace_node(
+    (RBTree_Control *) root,
+    (RBTree_Node *) victim,
+    (RBTree_Node *) replacement
+  );
+}
+
+static inline void rb_link_node(
+  struct rb_node *node,
+  struct rb_node *parent,
+  struct rb_node **link
+)
+{
+  _RBTree_Initialize_node( (RBTree_Node *) node );
+  _RBTree_Add_child(
+    (RBTree_Node *) node,
+    (RBTree_Node *) parent,
+    (RBTree_Node **) link
+  );
+}
+
+static inline struct rb_node *rb_parent( struct rb_node *node )
+{
+  return (struct rb_node *) _RBTree_Parent( (RBTree_Node *) node );
+}
+
+#endif /* _LINUX_RBTREE_H */
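
The usual Linux insertion idiom still applies on top of this mapping; a minimal sketch with a hypothetical keyed node follows:

#include <linux/rbtree.h>

/* Hypothetical keyed node: rb_link_node()/rb_insert_color() are forwarded
 * to the RTEMS red-black tree primitives, so the standard descend-and-link
 * loop works unchanged. */
struct example_item {
	struct rb_node node;
	int key;
};

static struct rb_root example_root = RB_ROOT;

static void
example_insert(struct example_item *new_item)
{
	struct rb_node **link = &example_root.rb_node;
	struct rb_node *parent = NULL;

	while (*link != NULL) {
		struct example_item *it =
		    rb_entry(*link, struct example_item, node);

		parent = *link;
		link = new_item->key < it->key ?
		    &parent->rb_left : &parent->rb_right;
	}
	rb_link_node(&new_item->node, parent, link);
	rb_insert_color(&new_item->node, &example_root);
}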
diff --git a/rtemsbsd/powerpc/include/linux/rwlock.h b/rtemsbsd/powerpc/include/linux/rwlock.h
new file mode 100644
index 0000000..e7c6301
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/rwlock.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_RWLOCK_H_
+#define	_LINUX_RWLOCK_H_
+
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+
+typedef struct {
+	struct rwlock rw;
+} rwlock_t;
+
+#define	read_lock(_l)		rw_rlock(&(_l)->rw)
+#define	write_lock(_l)		rw_wlock(&(_l)->rw)
+#define	read_unlock(_l)		rw_runlock(&(_l)->rw)
+#define	write_unlock(_l)	rw_wunlock(&(_l)->rw)
+#define	read_lock_irq(lock)	read_lock((lock))
+#define	read_unlock_irq(lock)	read_unlock((lock))
+#define	write_lock_irq(lock)	write_lock((lock))
+#define	write_unlock_irq(lock)	write_unlock((lock))
+#define	read_lock_irqsave(lock, flags)   				\
+    do {(flags) = 0; read_lock(lock); } while (0)
+#define	write_lock_irqsave(lock, flags)   				\
+    do {(flags) = 0; write_lock(lock); } while (0)
+#define	read_unlock_irqrestore(lock, flags)				\
+    do { read_unlock(lock); } while (0)
+#define	write_unlock_irqrestore(lock, flags)				\
+    do { write_unlock(lock); } while (0)
+
+static inline void
+rwlock_init(rwlock_t *lock)
+{
+
+	memset(&lock->rw, 0, sizeof(lock->rw));
+	rw_init_flags(&lock->rw, "lnxrw", RW_NOWITNESS);
+}
+
+#endif	/* _LINUX_RWLOCK_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/sched.h b/rtemsbsd/powerpc/include/linux/sched.h
new file mode 100644
index 0000000..788ad61
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/sched.h
@@ -0,0 +1,125 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_SCHED_H_
+#define	_LINUX_SCHED_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/sleepqueue.h>
+
+#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
+
+#define	TASK_DORMANT		0
+#define	TASK_RUNNING		1
+#define	TASK_INTERRUPTIBLE	2
+#define	TASK_UNINTERRUPTIBLE	4
+#define	TASK_DEAD		64
+#define	TASK_WAKEKILL		128
+#define	TASK_WAKING		256
+
+#define	TASK_SHOULD_STOP	1
+#define	TASK_STOPPED		2
+
+/*
+ * A task_struct is only provided for tasks created with kthread.
+ * Using these routines with threads not started via kthread will cause
+ * panics because no task_struct is allocated and td_retval[1] is
+ * overwritten by syscalls, which kernel threads do not make.
+ */
+struct task_struct {
+	struct	thread *task_thread;
+	int	(*task_fn)(void *data);
+	void	*task_data;
+	int	task_ret;
+	int	state;
+	int	should_stop;
+	struct mtx lock;
+};
+
+#define	current			((struct task_struct *)curthread->td_retval[1])
+#define	task_struct_get(x)	(struct task_struct *)(x)->td_retval[1]
+#define	task_struct_set(x, y)	(x)->td_retval[1] = (register_t)(y)
+
+#define	set_current_state(x)						\
+	atomic_store_rel_int((volatile int *)&current->state, (x))
+#define	__set_current_state(x)	current->state = (x)
+
+
+#define	schedule()							\
+do {									\
+	void *c;							\
+									\
+	if (cold)							\
+		break;							\
+	c = curthread;							\
+	sleepq_lock(c);							\
+	if (current->state == TASK_INTERRUPTIBLE ||			\
+	    current->state == TASK_UNINTERRUPTIBLE) {			\
+		sleepq_add(c, NULL, "task", SLEEPQ_SLEEP, 0);		\
+		sleepq_wait(c, 0);					\
+	} else {							\
+		sleepq_release(c);					\
+		sched_relinquish(curthread);				\
+	}								\
+} while (0)
+
+#define	wake_up_process(x)						\
+do {									\
+	int wakeup_swapper;						\
+	void *c;							\
+									\
+	c = (x)->task_thread;						\
+	sleepq_lock(c);							\
+	(x)->state = TASK_RUNNING;					\
+	wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);		\
+	sleepq_release(c);						\
+	if (wakeup_swapper)						\
+		kick_proc0();						\
+} while (0)
+
+#define	cond_resched()	if (!cold)	sched_relinquish(curthread)
+
+static inline long
+schedule_timeout(signed long timeout)
+{
+	if (timeout < 0)
+		return 0;
+
+	pause("lstim", timeout);
+
+	return 0;
+}
+
+#endif	/* _LINUX_SCHED_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/slab.h b/rtemsbsd/powerpc/include/linux/slab.h
new file mode 100644
index 0000000..8455dc3
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/slab.h
@@ -0,0 +1,113 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_SLAB_H_
+#define	_LINUX_SLAB_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <vm/uma.h>
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+
+MALLOC_DECLARE(M_KMALLOC);
+
+#define	kmalloc(size, flags)		malloc((size), M_KMALLOC, (flags))
+#define	kmalloc_array(size, n, flags)	malloc((size) * (n), M_KMALLOC, (flags))
+#define	kvmalloc(size)			kmalloc((size), 0)
+#define	kzalloc(size, flags)		kmalloc((size), (flags) | M_ZERO)
+#define	kzalloc_node(size, flags, node)	kzalloc(size, flags)
+#define	kfree(ptr)			free(__DECONST(void *, (ptr)), M_KMALLOC)
+#define	krealloc(ptr, size, flags)	realloc((ptr), (size), M_KMALLOC, (flags))
+#define	kcalloc(n, size, flags)	        kmalloc((n) * (size), flags | M_ZERO)
+#define	vzalloc(size)			kzalloc(size, GFP_KERNEL | __GFP_NOWARN)
+#define	vfree(arg)			kfree(arg)
+#define	kvfree(arg)			kfree(arg)
+#define	vmalloc(size)                   kmalloc(size, GFP_KERNEL)
+#define	vmalloc_node(size, node)        kmalloc(size, GFP_KERNEL)
+
+struct kmem_cache {
+	uma_zone_t	cache_zone;
+	void		(*cache_ctor)(void *);
+};
+
+#define	SLAB_HWCACHE_ALIGN	0x0001
+
+static inline int
+kmem_ctor(void *mem, int size, void *arg, int flags)
+{
+	void (*ctor)(void *);
+
+	ctor = arg;
+	ctor(mem);
+
+	return (0);
+}
+
+static inline struct kmem_cache *
+kmem_cache_create(char *name, size_t size, size_t align, u_long flags,
+    void (*ctor)(void *))
+{
+	struct kmem_cache *c;
+
+	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);
+	if (align)
+		align--;
+	if (flags & SLAB_HWCACHE_ALIGN)
+		align = UMA_ALIGN_CACHE;
+	c->cache_zone = uma_zcreate(name, size, ctor ? kmem_ctor : NULL,
+	    NULL, NULL, NULL, align, 0);
+	c->cache_ctor = ctor;
+
+	return c;
+}
+
+static inline void *
+kmem_cache_alloc(struct kmem_cache *c, int flags)
+{
+	return uma_zalloc_arg(c->cache_zone, c->cache_ctor, flags);
+}
+
+static inline void
+kmem_cache_free(struct kmem_cache *c, void *m)
+{
+	uma_zfree(c->cache_zone, m);
+}
+
+static inline void
+kmem_cache_destroy(struct kmem_cache *c)
+{
+	uma_zdestroy(c->cache_zone);
+	free(c, M_KMALLOC);
+}
+
+#endif	/* _LINUX_SLAB_H_ */
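
A short sketch of the cache API as mapped onto UMA (hypothetical object type; not from the imported sources):

#include <linux/slab.h>

/* Hypothetical object cache: kmem_cache_create() is backed by a UMA zone,
 * so allocation and release behave like uma_zalloc()/uma_zfree(). */
struct example_obj {
	int state;
};

static void
example_ctor(void *mem)
{
	struct example_obj *o = mem;

	o->state = 0;
}

static void
example_cache_use(void)
{
	struct kmem_cache *cache;
	struct example_obj *o;

	cache = kmem_cache_create("example", sizeof(*o), 0,
	    SLAB_HWCACHE_ALIGN, example_ctor);
	o = kmem_cache_alloc(cache, GFP_KERNEL);
	if (o != NULL)
		kmem_cache_free(cache, o);
	kmem_cache_destroy(cache);
}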
diff --git a/rtemsbsd/powerpc/include/linux/smp.h b/rtemsbsd/powerpc/include/linux/smp.h
new file mode 100644
index 0000000..a9fac70
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/smp.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_SMP_H
+#define	_LINUX_SMP_H
+
+#include <rtems.h>
+#include <rtems/score/smpimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define smp_processor_id() ((int)rtems_get_current_processor())
+
+typedef void (*smp_call_func_t)(void *arg);
+
+static inline int
+smp_call_function_single(int cpu, smp_call_func_t func, void *arg, int wait)
+{
+	cpu_set_t set;
+
+	(void)wait;
+	CPU_ZERO(&set);
+	CPU_SET(cpu, &set);
+	_SMP_Multicast_action(sizeof(set), &set, func, arg);
+	return (0);
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_SMP_H */
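
A minimal sketch of the single-CPU call (hypothetical callback; note the wait argument is accepted but not interpreted by this shim):

#include <linux/smp.h>

/* Hypothetical cross-CPU call: the shim forwards this to an RTEMS SMP
 * multicast action targeting exactly one processor. */
static void
example_remote(void *arg)
{
	int *counter = arg;

	(*counter)++;
}

static void
example_run_on(int cpu, int *counter)
{
	smp_call_function_single(cpu, example_remote, counter, 1);
}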
diff --git a/rtemsbsd/powerpc/include/linux/spinlock.h b/rtemsbsd/powerpc/include/linux/spinlock.h
new file mode 100644
index 0000000..e7c9a48
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/spinlock.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_SPINLOCK_H_
+#define	_LINUX_SPINLOCK_H_
+
+#include <sys/lock.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/rwlock.h>
+
+typedef struct _Mutex_Control spinlock_t;
+
+#define	spin_lock_init(_l) _Mutex_Initialize(_l)
+#define	spin_lock(_l) _Mutex_Acquire(_l)
+#define	spin_unlock(_l) _Mutex_Release(_l)
+#define	spin_lock_irq(_l) spin_lock(_l)
+#define	spin_unlock_irq(_l) spin_unlock(_l)
+#define	spin_lock_irqsave(_l, _f) \
+    do { (void)_f; spin_lock(_l); } while (0)
+#define	spin_unlock_irqrestore(_l, _f) \
+    do { (void)_f; spin_unlock(_l); } while (0)
+
+#define	__SPIN_LOCK_UNLOCKED(_l) _MUTEX_INITIALIZER
+
+#define	DEFINE_SPINLOCK(_l) spinlock_t _l = __SPIN_LOCK_UNLOCKED(_l)
+
+typedef spinlock_t raw_spinlock_t;
+
+#define	raw_spin_lock_init(_l) spin_lock_init(_l)
+#define	raw_spin_lock_irqsave(_l, _f) spin_lock_irqsave(_l, _f)
+#define	raw_spin_unlock_irqrestore(_l, _f) spin_unlock_irqrestore(_l, _f)
+
+#define	local_irq_save(_f) rtems_interrupt_local_disable(_f)
+#define	local_irq_restore(_f) rtems_interrupt_local_enable(_f)
+
+#endif	/* _LINUX_SPINLOCK_H_ */
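
A small usage sketch (hypothetical lock name): the _irqsave/_irqrestore variants here only discard the flags argument, so the pattern reduces to a plain acquire/release:

#include <linux/spinlock.h>

/* Hypothetical lock: spinlock_t is a self-contained mutex in this shim. */
static DEFINE_SPINLOCK(example_lock);

static void
example_critical(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data shared with other threads ... */
	spin_unlock_irqrestore(&example_lock, flags);
}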
diff --git a/rtemsbsd/powerpc/include/linux/sysfs.h b/rtemsbsd/powerpc/include/linux/sysfs.h
new file mode 100644
index 0000000..003e48d
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/sysfs.h
@@ -0,0 +1,194 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_SYSFS_H_
+#define	_LINUX_SYSFS_H_
+
+#if 0
+#include <sys/sysctl.h>
+
+struct attribute {
+	const char 	*name;
+	struct module	*owner;
+	mode_t		mode;
+};
+
+struct sysfs_ops {
+	ssize_t (*show)(struct kobject *, struct attribute *, char *);
+	ssize_t (*store)(struct kobject *, struct attribute *, const char *,
+	    size_t);
+};
+
+struct attribute_group {
+	const char		*name;
+	mode_t                  (*is_visible)(struct kobject *,
+				    struct attribute *, int);
+	struct attribute	**attrs;
+};
+
+#define	__ATTR(_name, _mode, _show, _store) {				\
+	.attr = { .name = __stringify(_name), .mode = _mode },		\
+        .show = _show, .store  = _store,				\
+}
+
+#define	__ATTR_RO(_name) {						\
+	.attr = { .name = __stringify(_name), .mode = 0444 },		\
+	.show   = _name##_show,						\
+}
+
+#define	__ATTR_NULL	{ .attr = { .name = NULL } }
+
+/*
+ * Handle our generic '\0' terminated 'C' string.
+ * Two cases:
+ *      a variable string:  point arg1 at it, arg2 is max length.
+ *      a constant string:  point arg1 at it, arg2 is zero.
+ */
+
+static inline int
+sysctl_handle_attr(SYSCTL_HANDLER_ARGS)
+{
+	struct kobject *kobj;
+	struct attribute *attr;
+	const struct sysfs_ops *ops;
+	char *buf;
+	int error;
+	ssize_t len;
+
+	kobj = arg1;
+	attr = (struct attribute *)arg2;
+	if (kobj->ktype == NULL || kobj->ktype->sysfs_ops == NULL)
+		return (ENODEV);
+	buf = (char *)get_zeroed_page(GFP_KERNEL);
+	if (buf == NULL)
+		return (ENOMEM);
+	ops = kobj->ktype->sysfs_ops;
+	if (ops->show) {
+		len = ops->show(kobj, attr, buf);
+		/*
+		 * It's valid to not have a 'show' so just return an
+		 * empty string.
+	 	 */
+		if (len < 0) {
+			error = -len;
+			if (error != EIO)
+				goto out;
+			buf[0] = '\0';
+		} else if (len) {
+			len--;
+			if (len >= PAGE_SIZE)
+				len = PAGE_SIZE - 1;
+			/* Trim trailing newline. */
+			buf[len] = '\0';
+		}
+	}
+
+	/* Leave one trailing byte to append a newline. */
+	error = sysctl_handle_string(oidp, buf, PAGE_SIZE - 1, req);
+	if (error != 0 || req->newptr == NULL || ops->store == NULL)
+		goto out;
+	len = strlcat(buf, "\n", PAGE_SIZE);
+	KASSERT(len < PAGE_SIZE, ("new attribute truncated"));
+	len = ops->store(kobj, attr, buf, len);
+	if (len < 0)
+		error = -len;
+out:
+	free_page((unsigned long)buf);
+
+	return (error);
+}
+
+static inline int
+sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
+{
+
+	sysctl_add_oid(NULL, SYSCTL_CHILDREN(kobj->oidp), OID_AUTO,
+	    attr->name, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE, kobj,
+	    (uintptr_t)attr, sysctl_handle_attr, "A", "");
+
+	return (0);
+}
+
+static inline void
+sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
+{
+
+	if (kobj->oidp)
+		sysctl_remove_name(kobj->oidp, attr->name, 1, 1);
+}
+
+static inline void
+sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp)
+{
+
+	if (kobj->oidp)
+		sysctl_remove_name(kobj->oidp, grp->name, 1, 1);
+}
+
+static inline int
+sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp)
+{
+	struct attribute **attr;
+	struct sysctl_oid *oidp;
+
+	oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(kobj->oidp),
+	    OID_AUTO, grp->name, CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, grp->name);
+	for (attr = grp->attrs; *attr != NULL; attr++) {
+		sysctl_add_oid(NULL, SYSCTL_CHILDREN(oidp), OID_AUTO,
+		    (*attr)->name, CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+		    kobj, (uintptr_t)*attr, sysctl_handle_attr, "A", "");
+	}
+
+	return (0);
+}
+
+static inline int
+sysfs_create_dir(struct kobject *kobj)
+{
+
+	kobj->oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(kobj->parent->oidp),
+	    OID_AUTO, kobj->name, CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, kobj->name);
+
+        return (0);
+}
+
+static inline void
+sysfs_remove_dir(struct kobject *kobj)
+{
+
+	if (kobj->oidp == NULL)
+		return;
+	sysctl_remove_oid(kobj->oidp, 1, 1);
+}
+
+#define sysfs_attr_init(attr) do {} while(0)
+#endif
+
+#endif	/* _LINUX_SYSFS_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/threads.h b/rtemsbsd/powerpc/include/linux/threads.h
new file mode 100644
index 0000000..753b93e
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/threads.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_THREADS_H
+#define	_LINUX_THREADS_H
+
+#include <rtems/score/threaddispatch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define	NR_CPUS 32
+
+#define	preempt_disable() _Thread_Dispatch_disable()
+
+#define	preempt_enable() _Thread_Dispatch_enable(_Per_CPU_Get())
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _LINUX_THREADS_H */
diff --git a/rtemsbsd/powerpc/include/linux/time.h b/rtemsbsd/powerpc/include/linux/time.h
new file mode 100644
index 0000000..27516a4
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/time.h
@@ -0,0 +1,131 @@
+/*-
+ * Copyright (c) 2014-2015 François Tigeot
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _LINUX_TIME_H_
+#define	_LINUX_TIME_H_
+
+#define	NSEC_PER_USEC	1000L
+#define	NSEC_PER_SEC	1000000000L
+
+#include <sys/time.h>
+#include <sys/stdint.h>
+
+static inline struct timeval
+ns_to_timeval(const int64_t nsec)
+{
+	struct timeval tv;
+	long rem;
+
+	if (nsec == 0) {
+		tv.tv_sec = 0;
+		tv.tv_usec = 0;
+		return (tv);
+	}
+
+	tv.tv_sec = nsec / NSEC_PER_SEC;
+	rem = nsec % NSEC_PER_SEC;
+	if (rem < 0) {
+		tv.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	tv.tv_usec = rem / 1000;
+	return (tv);
+}
+
+static inline int64_t
+timeval_to_ns(const struct timeval *tv)
+{
+	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
+		tv->tv_usec * NSEC_PER_USEC;
+}
+
+#define getrawmonotonic(ts)	nanouptime(ts)
+
+static inline struct timespec
+timespec_sub(struct timespec lhs, struct timespec rhs)
+{
+	struct timespec ts;
+
+	ts.tv_sec = lhs.tv_sec;
+	ts.tv_nsec = lhs.tv_nsec;
+	timespecsub(&ts, &rhs);
+
+	return ts;
+}
+
+static inline void
+set_normalized_timespec(struct timespec *ts, time_t sec, int64_t nsec)
+{
+	/* XXX: this doesn't actually normalize anything */
+	ts->tv_sec = sec;
+	ts->tv_nsec = nsec;
+}
+
+static inline int64_t
+timespec_to_ns(const struct timespec *ts)
+{
+	return ((ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec);
+}
+
+static inline struct timespec
+ns_to_timespec(const int64_t nsec)
+{
+	struct timespec ts;
+	int32_t rem;
+
+	if (nsec == 0) {
+		ts.tv_sec = 0;
+		ts.tv_nsec = 0;
+		return (ts);
+	}
+
+	ts.tv_sec = nsec / NSEC_PER_SEC;
+	rem = nsec % NSEC_PER_SEC;
+	if (rem < 0) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;
+	return (ts);
+}
+
+static inline int
+timespec_valid(const struct timespec *ts)
+{
+	if (ts->tv_sec < 0 || ts->tv_sec > 100000000 ||
+	    ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
+		return (0);
+	return (1);
+}
+
+static inline unsigned long
+get_seconds(void)
+{
+	return time_uptime;
+}
+
+#endif /* _LINUX_TIME_H_ */
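
The negative-remainder handling in ns_to_timeval() and ns_to_timespec() is the part that is easy to get wrong, so here is a quick illustrative check (not part of the patch, using assert() purely for demonstration): -1.5 s worth of nanoseconds normalizes to tv_sec = -2 and tv_nsec = 500000000, and timespec_to_ns() reproduces the original value.

  #include <assert.h>
  #include <linux/time.h>

  static void
  sample_check_ns_round_trip(void)
  {
  	struct timespec ts = ns_to_timespec(-1500000000LL);

  	/* -1.5 s normalizes to -2 s plus 0.5 s. */
  	assert(ts.tv_sec == -2);
  	assert(ts.tv_nsec == 500000000);
  	assert(timespec_to_ns(&ts) == -1500000000LL);
  }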
diff --git a/rtemsbsd/powerpc/include/linux/timer.h b/rtemsbsd/powerpc/include/linux/timer.h
new file mode 100644
index 0000000..a794c13
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/timer.h
@@ -0,0 +1,74 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _LINUX_TIMER_H_
+#define	_LINUX_TIMER_H_
+
+#include <linux/types.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/callout.h>
+
+struct timer_list {
+	struct callout timer_callout;
+	void    (*function) (unsigned long);
+	unsigned long data;
+	unsigned long expires;
+};
+
+extern unsigned long linux_timer_hz_mask;
+
+#define	setup_timer(timer, func, dat)					\
+do {									\
+	(timer)->function = (func);					\
+	(timer)->data = (dat);						\
+	callout_init(&(timer)->timer_callout, 1);			\
+} while (0)
+
+#define	init_timer(timer)						\
+do {									\
+	(timer)->function = NULL;					\
+	(timer)->data = 0;						\
+	callout_init(&(timer)->timer_callout, 1);			\
+} while (0)
+
+extern void mod_timer(struct timer_list *, unsigned long);
+extern void add_timer(struct timer_list *);
+
+#define	del_timer(timer)	callout_stop(&(timer)->timer_callout)
+#define	del_timer_sync(timer)	callout_drain(&(timer)->timer_callout)
+#define	timer_pending(timer)	callout_pending(&(timer)->timer_callout)
+#define	round_jiffies(j) \
+	((unsigned long)(((j) + linux_timer_hz_mask) & ~linux_timer_hz_mask))
+#define	round_jiffies_relative(j) \
+	round_jiffies(j)
+
+#endif					/* _LINUX_TIMER_H_ */
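
A sketch of how a Linux driver typically drives this callout-backed shim (the jiffies counter and the one-second period are assumptions for illustration; mod_timer() and add_timer() are implemented elsewhere in the compatibility layer):

  #include <linux/timer.h>

  static struct timer_list sample_timer;

  /* Timer callbacks receive the "data" word passed to setup_timer(). */
  static void
  sample_timeout(unsigned long data)
  {
  	struct timer_list *t = (struct timer_list *)data;

  	/* Re-arm roughly one second from now, rounded to a tick boundary. */
  	mod_timer(t, round_jiffies(jiffies + hz));
  }

  static void
  sample_start(void)
  {
  	setup_timer(&sample_timer, sample_timeout, (unsigned long)&sample_timer);
  	mod_timer(&sample_timer, jiffies + hz);
  }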
diff --git a/rtemsbsd/powerpc/include/linux/types.h b/rtemsbsd/powerpc/include/linux/types.h
new file mode 100644
index 0000000..e52dede
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/types.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_TYPES_H_
+#define	_LINUX_TYPES_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifndef __bitwise__
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+#endif
+
+typedef uint16_t __le16;
+typedef uint16_t __be16;
+typedef uint32_t __le32;
+typedef uint32_t __be32;
+typedef uint64_t __le64;
+typedef uint64_t __be64;
+
+typedef unsigned int    uint;
+typedef unsigned gfp_t;
+typedef uint64_t loff_t;
+typedef uint64_t resource_size_t;
+
+typedef u64 phys_addr_t;
+
+#define	DECLARE_BITMAP(n, bits)						\
+	unsigned long n[howmany(bits, sizeof(long) * 8)]
+
+#endif	/* _LINUX_TYPES_H_ */
diff --git a/rtemsbsd/powerpc/include/linux/uaccess.h b/rtemsbsd/powerpc/include/linux/uaccess.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/vmalloc.h b/rtemsbsd/powerpc/include/linux/vmalloc.h
new file mode 100644
index 0000000..e69de29
diff --git a/rtemsbsd/powerpc/include/linux/wait.h b/rtemsbsd/powerpc/include/linux/wait.h
new file mode 100644
index 0000000..a97f980
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/wait.h
@@ -0,0 +1,137 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_WAIT_H_
+#define	_LINUX_WAIT_H_
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sleepqueue.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+
+typedef struct {
+} wait_queue_t;
+
+typedef struct {
+	unsigned int	wchan;
+} wait_queue_head_t;
+
+#define	init_waitqueue_head(x) \
+    do { } while (0)
+
+#define DECLARE_WAIT_QUEUE_HEAD(x) wait_queue_head_t x = { 0 }
+
+static inline void
+__wake_up(wait_queue_head_t *q, int all)
+{
+	int wakeup_swapper;
+	void *c;
+
+	c = &q->wchan;
+	sleepq_lock(c);
+	if (all)
+		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+	else
+		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+	sleepq_release(c);
+	if (wakeup_swapper)
+		kick_proc0();
+}
+
+#define	wake_up(q)				__wake_up(q, 0)
+#define	wake_up_nr(q, nr)			__wake_up(q, 1)
+#define	wake_up_all(q)				__wake_up(q, 1)
+#define	wake_up_interruptible(q)		__wake_up(q, 0)
+#define	wake_up_interruptible_nr(q, nr)		__wake_up(q, 1)
+#define	wake_up_interruptible_all(q, nr)	__wake_up(q, 1)
+
+#define	wait_event(q, cond)						\
+do {									\
+	void *c = &(q).wchan;						\
+	if (!(cond)) {							\
+		for (;;) {						\
+			sleepq_lock(c);					\
+			if (cond) {					\
+				sleepq_release(c);			\
+				break;					\
+			}						\
+			sleepq_add(c, NULL, "completion", SLEEPQ_SLEEP, 0); \
+			sleepq_wait(c, 0);				\
+		}							\
+	}								\
+} while (0)
+
+#define	wait_event_interruptible(q, cond)				\
+({									\
+	void *c = &(q).wchan;						\
+	int _error;							\
+									\
+	_error = 0;							\
+	if (!(cond)) {							\
+		for (; _error == 0;) {					\
+			sleepq_lock(c);					\
+			if (cond) {					\
+				sleepq_release(c);			\
+				break;					\
+			}						\
+			sleepq_add(c, NULL, "completion",		\
+			    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);	\
+			if (sleepq_wait_sig(c, 0))			\
+				_error = -ERESTARTSYS;			\
+		}							\
+	}								\
+	-_error;							\
+})
+
+static inline int
+waitqueue_active(wait_queue_head_t *q)
+{
+	return 0;	/* XXX: not really implemented */
+}
+
+#define DEFINE_WAIT(name)	\
+	wait_queue_t name = {}
+
+static inline void
+prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+}
+
+static inline void
+finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+{
+}
+
+#endif	/* _LINUX_WAIT_H_ */
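
A minimal producer/consumer sketch over this sleepqueue-backed wait queue (names are illustrative): the waiter blocks in wait_event() until the condition is observed true, and the producer sets the condition before calling wake_up().

  #include <linux/wait.h>

  static DECLARE_WAIT_QUEUE_HEAD(sample_queue);
  static volatile int sample_done;

  /* Consumer: sleeps on the wait channel until sample_done becomes nonzero. */
  static void
  sample_wait(void)
  {
  	wait_event(sample_queue, sample_done != 0);
  }

  /* Producer: publish the condition first, then wake any sleepers. */
  static void
  sample_complete(void)
  {
  	sample_done = 1;
  	wake_up(&sample_queue);
  }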
diff --git a/rtemsbsd/powerpc/include/linux/workqueue.h b/rtemsbsd/powerpc/include/linux/workqueue.h
new file mode 100644
index 0000000..e9a6f5a
--- /dev/null
+++ b/rtemsbsd/powerpc/include/linux/workqueue.h
@@ -0,0 +1,231 @@
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef	_LINUX_WORKQUEUE_H_
+#define	_LINUX_WORKQUEUE_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include <sys/taskqueue.h>
+
+struct workqueue_struct {
+	struct taskqueue	*taskqueue;
+};
+
+struct work_struct {
+	struct	task 		work_task;
+	struct	taskqueue	*taskqueue;
+	void			(*fn)(struct work_struct *);
+};
+
+struct delayed_work {
+	struct work_struct	work;
+	struct callout		timer;
+};
+
+static inline struct delayed_work *
+to_delayed_work(struct work_struct *work)
+{
+
+ 	return container_of(work, struct delayed_work, work);
+}
+
+
+static inline void
+_work_fn(void *context, int pending)
+{
+	struct work_struct *work;
+
+	work = context;
+	work->fn(work);
+}
+
+#define	INIT_WORK(work, func) 	 					\
+do {									\
+	(work)->fn = (func);						\
+	(work)->taskqueue = NULL;					\
+	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
+} while (0)
+
+#define	INIT_DELAYED_WORK(_work, func)					\
+do {									\
+	INIT_WORK(&(_work)->work, func);				\
+	callout_init(&(_work)->timer, 1);				\
+} while (0)
+
+#define	INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK
+
+#define	schedule_work(work)						\
+do {									\
+	(work)->taskqueue = taskqueue_thread;				\
+	taskqueue_enqueue(taskqueue_thread, &(work)->work_task);	\
+} while (0)
+
+#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread)
+
+static inline int
+queue_work(struct workqueue_struct *q, struct work_struct *work)
+{
+	work->taskqueue = q->taskqueue;
+	/* Return the inverted value to match the Linux return convention */
+	return (!taskqueue_enqueue(q->taskqueue, &work->work_task));
+}
+
+static inline void
+_delayed_work_fn(void *arg)
+{
+	struct delayed_work *work;
+
+	work = arg;
+	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
+}
+
+static inline int
+queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
+    unsigned long delay)
+{
+	int pending;
+
+	pending = work->work.work_task.ta_pending;
+	work->work.taskqueue = wq->taskqueue;
+	if (delay != 0)
+		callout_reset(&work->timer, delay, _delayed_work_fn, work);
+	else
+		_delayed_work_fn((void *)work);
+
+	return (!pending);
+}
+
+static inline bool
+schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
+{
+	struct workqueue_struct wq;
+
+	wq.taskqueue = taskqueue_thread;
+	return (queue_delayed_work(&wq, dwork, delay));
+}
+
+static inline struct workqueue_struct *
+_create_workqueue_common(char *name, int cpus)
+{
+	struct workqueue_struct *wq;
+
+	wq = kmalloc(sizeof(*wq), M_WAITOK);
+	wq->taskqueue = taskqueue_create((name), M_WAITOK,
+	    taskqueue_thread_enqueue,  &wq->taskqueue);
+	taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
+
+	return (wq);
+}
+
+
+#define	create_singlethread_workqueue(name)				\
+	_create_workqueue_common(name, 1)
+
+#define	create_workqueue(name)						\
+	_create_workqueue_common(name, MAXCPU)
+
+#define	alloc_ordered_workqueue(name, flags)				\
+	_create_workqueue_common(name, 1)
+
+#define	alloc_workqueue(name, flags, max_active)			\
+	_create_workqueue_common(name, max_active)
+
+static inline void
+destroy_workqueue(struct workqueue_struct *wq)
+{
+	taskqueue_free(wq->taskqueue);
+	kfree(wq);
+}
+
+#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)
+
+static inline void
+_flush_fn(void *context, int pending)
+{
+}
+
+static inline void
+flush_taskqueue(struct taskqueue *tq)
+{
+	struct task flushtask;
+
+	PHOLD(curproc);
+	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
+	taskqueue_enqueue(tq, &flushtask);
+	taskqueue_drain(tq, &flushtask);
+	PRELE(curproc);
+}
+
+static inline int
+cancel_work_sync(struct work_struct *work)
+{
+	if (work->taskqueue &&
+	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
+		taskqueue_drain(work->taskqueue, &work->work_task);
+	return 0;
+}
+
+/*
+ * This may leave the work item running on another CPU, as it does on Linux.
+ */
+static inline int
+cancel_delayed_work(struct delayed_work *work)
+{
+
+	callout_stop(&work->timer);
+	if (work->work.taskqueue)
+		return (taskqueue_cancel(work->work.taskqueue,
+		    &work->work.work_task, NULL) == 0);
+	return 0;
+}
+
+static inline int
+cancel_delayed_work_sync(struct delayed_work *work)
+{
+
+	callout_drain(&work->timer);
+	if (work->work.taskqueue &&
+	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
+		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
+	return 0;
+}
+
+static inline bool
+mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
+		                      unsigned long delay)
+{
+	cancel_delayed_work(dwork);
+	queue_delayed_work(wq, dwork, delay);
+	return false;
+}
+
+#endif	/* _LINUX_WORKQUEUE_H_ */
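
A short sketch of deferring work onto the shared system task queue through this shim (hypothetical handler; schedule_work() enqueues on taskqueue_thread and flush_scheduled_work() waits for the queue to drain):

  #include <linux/workqueue.h>

  static void
  sample_work_handler(struct work_struct *work)
  {
  	/* Runs later in the context of the taskqueue_thread threads. */
  }

  static struct work_struct sample_work;

  static void
  sample_defer(void)
  {
  	INIT_WORK(&sample_work, sample_work_handler);
  	schedule_work(&sample_work);
  	flush_scheduled_work();	/* wait until the handler has run */
  }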
diff --git a/rtemsbsd/sys/powerpc/compat.c b/rtemsbsd/sys/powerpc/compat.c
new file mode 100644
index 0000000..2ee73a9
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/compat.c
@@ -0,0 +1,297 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/slab.h>
+
+MALLOC_DEFINE(M_KMALLOC, "kmalloc", "Linux kmalloc compatibility");
+
+#include <bsp/fdt.h>
+
+#include <linux/of.h>
+
+const void *
+of_get_property(const struct device_node *dn, const char *name, int *len)
+{
+	const void *fdt = bsp_fdt_get();
+
+	return (fdt_getprop(fdt, dn->offset, name, len));
+}
+
+bool
+of_device_is_available(const struct device_node *dn)
+{
+	const char *status;
+	int len;
+
+	status = of_get_property(dn, "status", &len);
+	return (status == NULL ||
+	    (len > 0 && (strcmp(status, "okay") == 0 ||
+	    strcmp(status, "ok") == 0)));
+}
+
+int
+of_device_is_compatible(const struct device_node *dn, const char *name)
+{
+	const void *fdt = bsp_fdt_get();
+
+	return (fdt_node_check_compatible(fdt, dn->offset, name) == 0);
+}
+
+struct device_node *
+of_find_compatible_node(struct device_node *dns, const struct device_node *dn,
+    const char *type, const char *compatible)
+{
+	const void *fdt = bsp_fdt_get();
+	int node;
+
+	(void)type;
+
+	if (dn != NULL) {
+		node = dn->offset;
+	} else {
+		node = 0;
+	}
+
+	memset(dns, 0, sizeof(*dns));
+
+	while (1) {
+		int err;
+
+		node = fdt_next_node(fdt, node, NULL);
+		if (node < 0)
+			return (NULL);
+
+		err = fdt_node_check_compatible(fdt, node, compatible);
+		if (err == 0) {
+			dns->offset = node;
+			return (dns);
+		}
+	}
+}
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+static int
+get_cells(const void *fdt, int node, const char *name)
+{
+	const fdt32_t *c;
+	int len;
+	int val;
+
+	do {
+		c = fdt_getprop(fdt, node, name, &len);
+		if (c != NULL) {
+			if (len != sizeof(*c))
+				return (-EINVAL);
+			val = fdt32_to_cpu(*c);
+			if (val <= 0 ||
+			    val > sizeof(resource_size_t) / sizeof(*c))
+				return (-EINVAL);
+			return (val);
+		}
+		node = fdt_parent_offset(fdt, node);
+	} while (node >= 0);
+
+	return (-EINVAL);
+}
+
+static int
+get_address_cells(const void *fdt, int node)
+{
+
+	return (get_cells(fdt, node, "#address-cells"));
+}
+
+static int
+get_size_cells(const void *fdt, int node)
+{
+
+	return (get_cells(fdt, node, "#size-cells"));
+}
+
+int
+of_address_to_resource(struct device_node *dn, int index,
+    struct resource *res)
+{
+	const void *fdt = bsp_fdt_get();
+	int ac;
+	int sc;
+	int len;
+	const fdt32_t *p;
+	int i;
+
+	memset(res, 0, sizeof(*res));
+
+	ac = get_address_cells(fdt, dn->offset);
+	if (ac < 0)
+		return (-EINVAL);
+
+	sc = get_size_cells(fdt, dn->offset);
+	if (sc < 0)
+		return (-EINVAL);
+
+	p = fdt_getprop(fdt, dn->offset, "reg", &len);
+	if (p == NULL)
+		return (-EINVAL);
+
+	len /= sizeof(*p);
+	i = index * (ac + sc);
+	if (i + ac + sc > len)
+		return (-EINVAL);
+
+	while (ac > 0) {
+		res->start = (res->start << 32) | fdt32_to_cpu(p[i]);
+		++i;
+		--ac;
+	}
+
+	while (sc > 0) {
+		res->end = (res->end << 32) | fdt32_to_cpu(p[i]);
+		++i;
+		--sc;
+	}
+	res->end += res->start;
+
+	return (0);
+}
+
+int
+of_irq_to_resource(struct device_node *dn, int index,
+    struct resource *res)
+{
+	const void *fdt = bsp_fdt_get();
+	int len;
+	const fdt32_t *p;
+	int i;
+	int irq;
+
+	if (res != NULL)
+		memset(res, 0, sizeof(*res));
+
+	p = fdt_getprop(fdt, dn->offset, "interrupts", &len);
+	if (p == NULL)
+		return (-EINVAL);
+
+	i = index * 16;
+	if (i + 16 > len)
+		return (-EINVAL);
+
+	irq = (int)fdt32_to_cpu(p[i / sizeof(*p)]);
+#ifdef __PPC__
+	/* FIXME */
+	irq -= 16;
+#endif
+	return (irq);
+}
+
+#include <linux/of_net.h>
+#include <linux/if_ether.h>
+
+static const void *
+get_mac_address(struct device_node *dn, const char *name)
+{
+	const void *fdt = bsp_fdt_get();
+	int len;
+	const fdt32_t *p;
+
+	p = fdt_getprop(fdt, dn->offset, name, &len);
+	if (p == NULL || len != ETH_ALEN) {
+		return (NULL);
+	}
+
+	return (p);
+}
+
+const void *
+of_get_mac_address(struct device_node *dn)
+{
+	const void *addr;
+
+	addr = get_mac_address(dn, "mac-address");
+	if (addr != NULL) {
+		return addr;
+	}
+
+	return get_mac_address(dn, "local-mac-address");
+}
+
+#include <linux/interrupt.h>
+
+struct arg_wrapper {
+	irq_handler_t handler;
+	unsigned int irq;
+	void *arg;
+};
+
+static void
+handler_wrapper(void *arg)
+{
+	struct arg_wrapper *aw = arg;
+
+	(*aw->handler)(aw->irq, aw->arg);
+}
+
+int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+    const char *name, void *arg)
+{
+	struct arg_wrapper *aw;
+	rtems_status_code sc;
+
+	aw = kmalloc(sizeof(*aw), GFP_KERNEL);
+	if (aw == NULL)
+		return (-ENOMEM);
+
+	aw->handler = handler;
+	aw->irq = irq;
+	aw->arg = arg;
+	sc = rtems_interrupt_server_handler_install(RTEMS_ID_NONE, irq, name,
+	    RTEMS_INTERRUPT_SHARED, handler_wrapper, aw);
+	if (sc != RTEMS_SUCCESSFUL) {
+		kfree(aw);
+		return (-EINVAL);
+	}
+
+	return (0);
+}
+
+#include <linux/bitrev.h>
+
+const uint8_t bitrev_nibbles[16] = {
+	0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
+};
+
+#include <linux/platform_device.h>
+
+struct resource *
+platform_get_resource_impl(struct platform_device *dev,
+    unsigned int type, unsigned int num, struct resource *res)
+{
+
+	return (res);
+}
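
To show how the pieces above fit together, here is a hedged sketch of a probe path that walks the FDT, translates a register window and interrupt, and installs a handler. The "fsl,fman" compatible string and the helper name are examples only, not taken from the patch.

  #include <sys/param.h>
  #include <sys/errno.h>
  #include <linux/of.h>
  #include <linux/of_address.h>
  #include <linux/of_irq.h>
  #include <linux/interrupt.h>

  static int
  sample_probe_first_fman(irq_handler_t handler, void *arg)
  {
  	struct device_node dn;
  	struct resource res;
  	int irq;

  	/* Find the first node claiming compatibility with "fsl,fman". */
  	if (of_find_compatible_node(&dn, NULL, NULL, "fsl,fman") == NULL)
  		return (-ENODEV);

  	/* Translate the "reg" property into a start/end resource. */
  	if (of_address_to_resource(&dn, 0, &res) != 0)
  		return (-EINVAL);

  	/* First interrupt specifier; negative values indicate an error. */
  	irq = of_irq_to_resource(&dn, 0, NULL);
  	if (irq < 0)
  		return (irq);

  	return (request_irq(irq, handler, 0, "sample-fman", arg));
  }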
diff --git a/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c
new file mode 100644
index 0000000..35e83d1
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.c
@@ -0,0 +1,801 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 embedded brains GmbH
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in the
+ *      documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *      names of its contributors may be used to endorse or promote products
+ *      derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include "if_fmanmac.h"
+
+#include <sys/sockio.h>
+
+#include <net/if_vlan_var.h>
+#include <netinet/ip.h>
+
+#include <linux/phy.h>
+
+#include "../../../../../../../../linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h"
+#include "../../../../../../../../linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_common.h"
+
+#define	FMAN_MAC_LOCK(sc)		mtx_lock(&(sc)->mtx)
+#define	FMAN_MAC_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
+#define	FMAN_MAC_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
+
+#define	FMAN_MAC_CSUM (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | \
+    CSUM_UDP_IPV6)
+
+struct fman_mac_sgt {
+	char priv[DPA_TX_PRIV_DATA_SIZE];
+	struct fman_prs_result prs;
+	struct qm_sg_entry sg[DPA_SGT_MAX_ENTRIES];
+	struct mbuf *m;
+};
+
+static void
+fman_mac_enable_tx_csum(struct mbuf *m, struct qm_fd *fd,
+    struct fman_prs_result *prs)
+{
+	int csum_flags = m->m_pkthdr.csum_flags;
+
+	if ((csum_flags & FMAN_MAC_CSUM) == 0) {
+		return;
+	}
+
+	memset(prs, 0, sizeof(*prs));
+
+	if ((csum_flags & FMAN_MAC_CSUM) == CSUM_IP) {
+		prs->l3r = FM_L3_PARSE_RESULT_IPV4;
+	} else if ((csum_flags & CSUM_TCP) != 0) {
+		prs->l3r = FM_L3_PARSE_RESULT_IPV4;
+		prs->l4r = FM_L4_PARSE_RESULT_TCP;
+	} else if ((csum_flags & CSUM_UDP) != 0) {
+		prs->l3r = FM_L3_PARSE_RESULT_IPV4;
+		prs->l4r = FM_L4_PARSE_RESULT_UDP;
+	} else if ((csum_flags & CSUM_TCP_IPV6) != 0) {
+		prs->l3r = FM_L3_PARSE_RESULT_IPV6;
+		prs->l4r = FM_L4_PARSE_RESULT_TCP;
+	} else if ((csum_flags & CSUM_UDP_IPV6) != 0) {
+		prs->l3r = FM_L3_PARSE_RESULT_IPV6;
+		prs->l4r = FM_L4_PARSE_RESULT_UDP;
+	} else {
+		BSD_ASSERT(0);
+	}
+
+	/* FIXME: VLAN */
+	prs->ip_off[0] = (u8)sizeof(struct ether_header);
+	prs->l4_off = (u8)(sizeof(struct ether_header) + sizeof(struct ip));
+
+	fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC;
+}
+
+static void
+fman_mac_txstart_locked(struct ifnet *ifp, struct fman_mac_softc *sc)
+{
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+
+	for (;;) {
+		struct fman_mac_sgt *sgt;
+		struct mbuf *m;
+		struct mbuf *n;
+		struct qm_fd fd;
+		struct dpa_priv_s *priv;
+		struct qman_fq *egress_fq;
+		int queue = 0;
+		size_t i;
+		uintptr_t addr;
+
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL) {
+			break;
+		}
+
+		sgt = uma_zalloc(sc->sgt_zone, M_NOWAIT);
+		if (sgt == NULL) {
+			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+			m_freem(m);
+			continue;
+		}
+
+		clear_fd(&fd);
+		fd.bpid = 0xff;
+		fd.offset = offsetof(struct fman_mac_sgt, sg);
+		fd.format = qm_fd_sg;
+		fd.length20 = m->m_pkthdr.len;
+		fd.cmd |= FM_FD_CMD_FCO;
+		addr = (uintptr_t)sgt;
+		fd.addr_hi = (u8)upper_32_bits(addr);
+		fd.addr_lo = lower_32_bits(addr);
+		fman_mac_enable_tx_csum(m, &fd, &sgt->prs);
+
+repeat_with_collapsed_mbuf_chain:
+
+		i = 0;
+		n = m;
+
+		while (n != NULL && i < DPA_SGT_MAX_ENTRIES) {
+			int len = n->m_len;
+
+			if (len > 0) {
+				sgt->sg[i].bpid = 0xff;
+				sgt->sg[i].offset = 0;
+				sgt->sg[i].length = len;
+				sgt->sg[i].extension = 0;
+				sgt->sg[i].final = 0;
+				addr = mtod(n, uintptr_t);
+				sgt->sg[i].addr_hi = (u8)upper_32_bits(addr);
+				sgt->sg[i].addr_lo =
+				    cpu_to_be32(lower_32_bits(addr));
+				++i;
+			}
+
+			n = n->m_next;
+		}
+
+		if (n != NULL && i == DPA_SGT_MAX_ENTRIES) {
+			struct mbuf *c;
+
+			c = m_collapse(m, M_NOWAIT, DPA_SGT_MAX_ENTRIES);
+			if (c == NULL) {
+				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+				m_freem(m);
+				uma_zfree(sc->sgt_zone, sgt);
+				continue;
+			}
+
+			m = c;
+			goto repeat_with_collapsed_mbuf_chain;
+		}
+
+		sgt->sg[i - 1].final = 1;
+		sgt->m = m;
+		priv = netdev_priv(&sc->mac_dev.net_dev);
+		egress_fq = priv->egress_fqs[queue];
+		fd.cmd |= qman_fq_fqid(priv->conf_fqs[queue]);
+		qman_enqueue(egress_fq, &fd, QMAN_ENQUEUE_FLAG_WAIT);
+	}
+}
+
+static void
+fman_mac_txstart(struct ifnet *ifp)
+{
+	struct fman_mac_softc *sc;
+
+	sc = ifp->if_softc;
+
+	FMAN_MAC_LOCK(sc);
+	fman_mac_txstart_locked(ifp, sc);
+	FMAN_MAC_UNLOCK(sc);
+}
+
+static void
+fman_mac_tick(void *arg)
+{
+	struct fman_mac_softc *sc;
+	struct ifnet *ifp;
+
+	sc = arg;
+	ifp = sc->ifp;
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		return;
+	}
+
+	mii_tick(sc->mii_softc);
+	callout_reset(&sc->fman_mac_callout, hz, fman_mac_tick, sc);
+}
+
+static void
+fman_mac_set_multi(struct fman_mac_softc *sc)
+{
+	struct mac_device *mac_dev;
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+	mac_dev = &sc->mac_dev;
+	(*mac_dev->set_multi)(&mac_dev->net_dev, mac_dev);
+}
+
+static void
+fman_mac_set_promisc(struct fman_mac_softc *sc, int if_flags)
+{
+	struct mac_device *mac_dev;
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+	mac_dev = &sc->mac_dev;
+	(*mac_dev->set_promisc)(mac_dev->fman_mac,
+	    (if_flags & IFF_PROMISC) != 0);
+}
+
+static int
+fman_mac_set_mtu(struct fman_mac_softc *sc, int mtu)
+{
+	struct ifnet *ifp;
+	int real_mtu;
+
+	ifp = sc->ifp;
+	real_mtu = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+	if (real_mtu > fman_get_max_frm() ||
+	    real_mtu < ETHER_MIN_LEN) {
+		return (EINVAL);
+	}
+
+	ifp->if_mtu = mtu;
+	return (0);
+}
+
+static void
+fman_mac_init_locked(struct fman_mac_softc *sc)
+{
+	struct ifnet *ifp;
+	int error;
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+		return;
+	}
+
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+	error = dpa_eth_priv_start(&sc->mac_dev.net_dev);
+	BSD_ASSERT(error == 0);
+
+	mii_mediachg(sc->mii_softc);
+	callout_reset(&sc->fman_mac_callout, hz, fman_mac_tick, sc);
+
+	fman_mac_set_multi(sc);
+}
+
+static void
+fman_mac_stop_locked(struct fman_mac_softc *sc)
+{
+	struct ifnet *ifp;
+	int error;
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+
+	ifp = sc->ifp;
+	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+	error = dpa_eth_priv_stop(&sc->mac_dev.net_dev);
+	BSD_ASSERT(error == 0);
+}
+
+static void
+fman_mac_init(void *if_softc)
+{
+	struct fman_mac_softc *sc;
+
+	sc = if_softc;
+	FMAN_MAC_LOCK(sc);
+	fman_mac_init_locked(sc);
+	FMAN_MAC_UNLOCK(sc);
+}
+
+static int
+fman_mac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct fman_mac_softc *sc;
+	struct mii_data *mii;
+	struct ifreq *ifr;
+	int error;
+
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *)data;
+
+	error = 0;
+	switch (cmd) {
+	case SIOCSIFFLAGS:
+		FMAN_MAC_LOCK(sc);
+		if (ifp->if_flags & IFF_UP) {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+				if ((ifp->if_flags ^ sc->if_flags) &
+				    IFF_PROMISC)
+					fman_mac_set_promisc(sc,
+					    ifp->if_flags);
+			} else {
+				fman_mac_init_locked(sc);
+			}
+		} else {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+				fman_mac_stop_locked(sc);
+			}
+		}
+		sc->if_flags = ifp->if_flags;
+		FMAN_MAC_UNLOCK(sc);
+		break;
+	case SIOCSIFMTU:
+		error = fman_mac_set_mtu(sc, ifr->ifr_mtu);
+		break;
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			FMAN_MAC_LOCK(sc);
+			fman_mac_set_multi(sc);
+			FMAN_MAC_UNLOCK(sc);
+		}
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		mii = sc->mii_softc;
+		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+		break;
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return (error);
+}
+
+static int
+fman_mac_media_change(struct ifnet *ifp)
+{
+	struct fman_mac_softc *sc;
+	int error;
+
+	sc = ifp->if_softc;
+	FMAN_MAC_LOCK(sc);
+	error = mii_mediachg(sc->mii_softc);
+	FMAN_MAC_UNLOCK(sc);
+	return (error);
+}
+
+static void
+fman_mac_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct fman_mac_softc *sc;
+	struct mii_data *mii;
+
+	sc = ifp->if_softc;
+	mii = sc->mii_softc;
+	FMAN_MAC_LOCK(sc);
+	mii_pollstat(mii);
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+	FMAN_MAC_UNLOCK(sc);
+}
+
+int
+fman_mac_dev_attach(device_t dev)
+{
+	struct fman_mac_softc *sc;
+	struct fman_ivars *ivars;
+	struct ifnet *ifp;
+	int error;
+
+	sc = device_get_softc(dev);
+
+	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
+	    MTX_DEF);
+
+	callout_init_mtx(&sc->fman_mac_callout, &sc->mtx, 0);
+
+	sc->sgt_zone = uma_zcreate("FMan MAC SGT", sizeof(struct fman_mac_sgt),
+	    NULL, NULL, NULL, NULL, 16, 0);
+	if (sc->sgt_zone == NULL) {
+		goto error_0;
+	}
+
+	/* Set up the Ethernet interface */
+	sc->ifp = ifp = if_alloc(IFT_ETHER);
+	if (sc->ifp == NULL) {
+		goto error_1;
+	}
+
+	snprintf(&sc->name[0], sizeof(sc->name), "fm%im",
+	    device_get_unit(device_get_parent(dev)));
+
+	ifp->if_softc = sc;
+	if_initname(ifp, &sc->name[0], sc->mac_dev.data.mac_hw_id);
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
+	    IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
+	ifp->if_capenable = ifp->if_capabilities;
+	ifp->if_hwassist = FMAN_MAC_CSUM;
+	ifp->if_start = fman_mac_txstart;
+	ifp->if_ioctl = fman_mac_ioctl;
+	ifp->if_init = fman_mac_init;
+	IFQ_SET_MAXLEN(&ifp->if_snd, 128);
+	ifp->if_snd.ifq_drv_maxlen = 128;
+	IFQ_SET_READY(&ifp->if_snd);
+	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+	/* Attach the MII driver if necessary */
+	ivars = device_get_ivars(dev);
+	sc->phy_dev = fdt_phy_obtain(ivars->of_dev.dev.of_node->offset);
+	if (sc->phy_dev != NULL) {
+		error = mii_attach(dev, &sc->miibus, ifp,
+		    fman_mac_media_change, fman_mac_media_status,
+		    BMSR_DEFCAPMASK, sc->phy_dev->phy, MII_OFFSET_ANY, 0);
+		if (error != 0) {
+			goto error_2;
+		}
+		sc->mii_softc = device_get_softc(sc->miibus);
+	}
+
+	sc->mac_dev.net_dev.ifp = ifp;
+
+	ether_ifattach(ifp, &sc->mac_dev.addr[0]);
+#if 0
+	fman_mac_set_mtu(sc, ETHERMTU_JUMBO);
+#endif
+
+	return (0);
+
+error_2:
+	if_free(ifp);
+error_1:
+	uma_zdestroy(sc->sgt_zone);
+error_0:
+	mtx_destroy(&sc->mtx);
+	return (ENXIO);
+}
+
+int
+fman_mac_dev_detach(device_t _dev)
+{
+	struct fman_mac_softc *sc = device_get_softc(_dev);
+
+	ether_ifdetach(sc->ifp);
+
+	FMAN_MAC_LOCK(sc);
+	fman_mac_stop_locked(sc);
+	FMAN_MAC_UNLOCK(sc);
+
+	if_free(sc->ifp);
+	uma_zdestroy(sc->sgt_zone);
+	mtx_destroy(&sc->mtx);
+
+	return (bus_generic_detach(_dev));
+}
+
+int
+fman_mac_miibus_read_reg(device_t dev, int phy, int reg)
+{
+	struct fman_mac_softc *sc;
+	struct fdt_phy_device *phy_dev;
+	struct fdt_mdio_device *mdio_dev;
+
+	sc = device_get_softc(dev);
+	phy_dev = sc->phy_dev;
+	BSD_ASSERT(phy == phy_dev->phy);
+	mdio_dev = phy_dev->mdio_dev;
+	return ((*mdio_dev->read)(mdio_dev, phy, reg));
+}
+
+int
+fman_mac_miibus_write_reg(device_t dev, int phy, int reg, int val)
+{
+	struct fman_mac_softc *sc;
+	struct fdt_phy_device *phy_dev;
+	struct fdt_mdio_device *mdio_dev;
+
+	sc = device_get_softc(dev);
+	phy_dev = sc->phy_dev;
+	BSD_ASSERT(phy == phy_dev->phy);
+	mdio_dev = phy_dev->mdio_dev;
+	return ((*mdio_dev->write)(mdio_dev, phy, reg, val));
+}
+
+void
+fman_mac_miibus_statchg(device_t dev)
+{
+	struct fman_mac_softc *sc;
+	struct mac_device *mac_dev;
+	struct mii_data *mii;
+	u16 speed;
+
+	sc = device_get_softc(dev);
+	mac_dev = &sc->mac_dev;
+	mii = sc->mii_softc;
+
+	FMAN_MAC_ASSERT_LOCKED(sc);
+
+	switch (IFM_SUBTYPE(mii->mii_media_active)) {
+	case IFM_10_T:
+	case IFM_10_2:
+	case IFM_10_5:
+	case IFM_10_STP:
+	case IFM_10_FL:
+		speed = SPEED_10;
+		break;
+	case IFM_100_TX:
+	case IFM_100_FX:
+	case IFM_100_T4:
+	case IFM_100_VG:
+	case IFM_100_T2:
+		speed = SPEED_100;
+		break;
+	case IFM_1000_SX:
+	case IFM_1000_LX:
+	case IFM_1000_CX:
+	case IFM_1000_T:
+		speed = SPEED_1000;
+		break;
+	case IFM_10G_LR:
+	case IFM_10G_SR:
+	case IFM_10G_CX4:
+	case IFM_10G_TWINAX:
+	case IFM_10G_TWINAX_LONG:
+	case IFM_10G_LRM:
+		speed = SPEED_10000;
+		break;
+	default:
+		speed = 0;
+		break;
+	}
+
+	(*mac_dev->adjust_link)(mac_dev, speed);
+}
+
+static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
+{
+	struct bm_buffer bmb[8];
+	u8 i;
+
+	memset(bmb, 0, sizeof(bmb));
+
+	for (i = 0; i < 8; ++i) {
+		struct mbuf *m;
+
+		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+		if (unlikely(m == NULL)) {
+			goto cl_alloc_failed;
+		}
+
+		RTEMS_STATIC_ASSERT(DPA_BP_RAW_SIZE == MCLBYTES, DPA_BP_RAW_SIZE);
+		*(struct mbuf **)(mtod(m, char *) + DPA_MBUF_POINTER_OFFSET) = m;
+
+		bm_buffer_set64(&bmb[i], mtod(m, uintptr_t));
+	}
+
+release_bufs:
+	/* Release the buffers. In case bman is busy, keep trying
+	 * until successful. bman_release() is guaranteed to succeed
+	 * in a reasonable amount of time
+	 */
+	while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
+		cpu_relax();
+	return i;
+
+cl_alloc_failed:
+	bm_buffer_set64(&bmb[i], 0);
+	/* Avoid releasing a completely null buffer; bman_release() requires
+	 * at least one buffer.
+	 */
+	if (likely(i))
+		goto release_bufs;
+
+	return 0;
+}
+
+/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
+static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
+{
+	int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
+	*count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
+}
+
+int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
+{
+	int i;
+
+	/* Give each CPU an allotment of "config_count" buffers */
+#ifndef __rtems__
+	for_each_possible_cpu(i) {
+#else /* __rtems__ */
+	for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
+#endif /* __rtems__ */
+		int j;
+
+		/* Although we access another CPU's counters here,
+		 * we do it at boot time, so it is safe.
+		 */
+		for (j = 0; j < dpa_bp->config_count; j += 8)
+			dpa_bp_add_8_bufs(dpa_bp, i);
+	}
+	return 0;
+}
+
+/* Add buffers/(pages) for Rx processing whenever bpool count falls below
+ * REFILL_THRESHOLD.
+ */
+int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
+{
+	int count = *countptr;
+	int new_bufs;
+
+	if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
+		do {
+			new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
+			if (unlikely(!new_bufs)) {
+				/* Avoid looping forever if we've temporarily
+				 * run out of memory. We'll try again at the
+				 * next NAPI cycle.
+				 */
+				break;
+			}
+			count += new_bufs;
+		} while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
+
+		*countptr = count;
+		if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static struct mbuf *
+addr_to_mbuf(dma_addr_t addr)
+{
+	void *vaddr = phys_to_virt(addr);
+
+	return (*(struct mbuf **)(vaddr + DPA_MBUF_POINTER_OFFSET));
+}
+
+static struct mbuf *
+contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
+{
+	struct mbuf *m;
+	ssize_t fd_off = dpa_fd_offset(fd);
+	dma_addr_t addr = qm_fd_addr(fd);
+
+	m = addr_to_mbuf(addr);
+	m->m_pkthdr.rcvif = ifp;
+	m->m_pkthdr.len = m->m_len = dpa_fd_length(fd);
+	m->m_data = mtod(m, char *) + fd_off;
+
+	return (m);
+}
+
+static void
+dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, dma_addr_t addr, int *count_ptr)
+{
+	struct bm_buffer bmb;
+
+	bm_buffer_set64(&bmb, addr);
+
+	while (bman_release(dpa_bp->pool, &bmb, 1, 0))
+		cpu_relax();
+
+	++(*count_ptr);
+}
+
+static struct mbuf *
+sg_fd_to_mbuf(struct dpa_bp *dpa_bp, const struct qm_fd *fd,
+    struct ifnet *ifp, int *count_ptr)
+{
+	ssize_t fd_off = dpa_fd_offset(fd);
+	dma_addr_t addr = qm_fd_addr(fd);
+	const struct qm_sg_entry *sgt;
+	int i;
+	int len;
+	struct mbuf *m;
+	struct mbuf *last;
+
+	sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
+	len = 0;
+
+	for (i = 0; i < DPA_SGT_MAX_ENTRIES; ++i) {
+		dma_addr_t sg_addr;
+		int sg_len;
+		struct mbuf *n;
+
+		BSD_ASSERT(sgt[i].extension == 0);
+		BSD_ASSERT(dpa_bp == dpa_bpid2pool(sgt[i].bpid));
+
+		sg_addr = qm_sg_addr(&sgt[i]);
+		n = addr_to_mbuf(sg_addr);
+
+		sg_len = sgt[i].length;
+		len += sg_len;
+
+		if (i == 0) {
+			m = n;
+		} else {
+			last->m_next = n;
+		}
+
+		n->m_len = sg_len;
+		m->m_data = mtod(m, char *) + sgt[i].offset;
+		last = n;
+
+		--(*count_ptr);
+
+		if (sgt[i].final) {
+			break;
+		}
+	}
+
+	m->m_pkthdr.rcvif = ifp;
+	m->m_pkthdr.len = len;
+
+	dpa_bp_recycle_frag(dpa_bp, addr, count_ptr);
+
+	return (m);
+}
+
+void
+_dpa_rx(struct net_device *net_dev, struct qman_portal *portal,
+    const struct dpa_priv_s *priv, struct dpa_percpu_priv_s *percpu_priv,
+    const struct qm_fd *fd, u32 fqid, int *count_ptr)
+{
+	struct dpa_bp *dpa_bp;
+	struct mbuf *m;
+	struct ifnet *ifp;
+
+	ifp = net_dev->ifp;
+
+	if (unlikely((fd->status & FM_FD_STAT_RX_ERRORS) != 0)) {
+		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+		dpa_fd_release(net_dev, fd);
+		return;
+	}
+
+	dpa_bp = priv->dpa_bp;
+	BSD_ASSERT(dpa_bp == dpa_bpid2pool(fd->bpid));
+
+	if (likely(fd->format == qm_fd_contig)) {
+		m = contig_fd_to_mbuf(fd, ifp);
+	} else {
+		BSD_ASSERT(fd->format == qm_fd_sg);
+		m = sg_fd_to_mbuf(dpa_bp, fd, ifp, count_ptr);
+	}
+
+	/* Account for either the contig buffer or the SGT buffer (depending on
+	 * which case we were in) having been removed from the pool.
+	 */
+	(*count_ptr)--;
+
+	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+	(*ifp->if_input)(ifp, m);
+}
+
+void _dpa_cleanup_tx_fd(struct ifnet *ifp, const struct qm_fd *fd)
+{
+	struct fman_mac_softc *sc;
+	struct fman_mac_sgt *sgt;
+
+	BSD_ASSERT(fd->format == qm_fd_sg);
+
+	sc = ifp->if_softc;
+	sgt = (struct fman_mac_sgt *)qm_fd_addr(fd);
+
+	m_freem(sgt->m);
+	uma_zfree(sc->sgt_zone, sgt);
+}
diff --git a/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h
new file mode 100644
index 0000000..ba07e36
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/drivers/net/ethernet/freescale/dpaa/if_fmanmac.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_FMANMAC_H
+#define	_IF_FMANMAC_H
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_var.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <linux/netdevice.h>
+
+#include <fdt_phy.h>
+
+#include "mac.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+struct fman_mac_softc {
+	struct mac_device	mac_dev;
+	struct fdt_phy_device	*phy_dev;
+	device_t		miibus;
+	struct mii_data		*mii_softc;
+	struct ifnet		*ifp;
+	int			if_flags;
+	struct mtx		mtx;
+	uma_zone_t		sgt_zone;
+	struct callout		fman_mac_callout;
+	char			name[8];
+};
+
+int fman_mac_dev_attach(device_t dev);
+
+int fman_mac_dev_detach(device_t dev);
+
+int fman_mac_miibus_read_reg(device_t dev, int phy, int reg);
+
+int fman_mac_miibus_write_reg(device_t dev, int phy, int reg, int val);
+
+void fman_mac_miibus_statchg(device_t dev);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _IF_FMANMAC_H */
diff --git a/rtemsbsd/sys/powerpc/fdt_phy.c b/rtemsbsd/sys/powerpc/fdt_phy.c
new file mode 100644
index 0000000..b6f87f9
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/fdt_phy.c
@@ -0,0 +1,360 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/rtems-bsd-kernel-space.h>
+
+#include <fdt_phy.h>
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/time.h>
+#include <sys/queue.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+
+#include <libfdt.h>
+
+#include <rtems/bsd.h>
+
+#include <bsp/fdt.h>
+
+#define	MDIO_LOCK()	mtx_lock(&mdio.mutex)
+#define	MDIO_UNLOCK()	mtx_unlock(&mdio.mutex)
+
+struct mdio_device {
+	struct fdt_mdio_device base;
+	SLIST_ENTRY(mdio_device) next;
+	int node;
+};
+
+static struct {
+	SLIST_HEAD(, mdio_device) instances;
+	struct mtx mutex;
+} mdio = {
+	.instances = SLIST_HEAD_INITIALIZER(mdio.instances)
+};
+
+MTX_SYSINIT(mdio_mutex, &mdio.mutex, "FDT MDIO", MTX_DEF);
+
+static uint64_t
+fdt_get_address(const void *fdt, int node)
+{
+	uint64_t addr;
+	int nodes[16];
+	size_t i;
+	int ac;
+
+	i = 0;
+	do {
+		nodes[i] = node;
+		++i;
+		node = fdt_parent_offset(fdt, node);
+	} while (node >= 0 && i < nitems(nodes));
+
+	if (node >= 0) {
+		return (0);
+	}
+
+	ac = 1;
+	addr = 0;
+	while (i > 0) {
+		const fdt32_t *p;
+		int len;
+
+		p = fdt_getprop(fdt, nodes[i - 1], "reg", &len);
+		if (p != NULL) {
+			if (ac == 1 && len >= 4) {
+				addr += fdt32_to_cpu(p[0]);
+			} else if (ac == 2 && len >= 8) {
+				addr += fdt32_to_cpu(p[1]);
+				addr += (uint64_t)fdt32_to_cpu(p[0]) << 32;
+			} else {
+				return (0);
+			}
+		}
+
+		p = fdt_getprop(fdt, nodes[i - 1], "#address-cells", &len);
+		if (p != NULL) {
+			if (len != 4) {
+				return (0);
+			}
+			ac = (int)fdt32_to_cpu(p[0]);
+			if (ac != 1 && ac != 2) {
+				return (0);
+			}
+		}
+
+		--i;
+	}
+
+	return (addr);
+}
+
+struct fman_mdio_regs {
+	uint32_t reserved[12];
+	uint32_t mdio_cfg;
+	uint32_t mdio_ctrl;
+	uint32_t mdio_data;
+	uint32_t mdio_addr;
+};
+
+#define	MDIO_CFG_BSY		(1U << 31)
+#define	MDIO_CFG_ENC45		(1U << 6)
+#define	MDIO_CFG_RD_ERR		(1U << 1)
+
+#define	MDIO_CTRL_READ		(1U << 15)
+#define	MDIO_CTRL_REG_ADDR(x)	((x) & 0x1fU)
+#define	MDIO_CTRL_PHY_ADDR(x)	(((x) & 0x1fU) << 5)
+
+struct fman_mdio_device {
+	struct mdio_device base;
+	volatile struct fman_mdio_regs *regs;
+};
+
+static int
+fman_mdio_wait(volatile struct fman_mdio_regs *regs)
+{
+	struct bintime start;
+
+	rtems_bsd_binuptime(&start);
+
+	while ((regs->mdio_cfg & MDIO_CFG_BSY) != 0) {
+		struct bintime now;
+
+		rtems_bsd_binuptime(&now);
+		if (bttosbt(now) - bttosbt(start) > 100 * SBT_1US) {
+			break;
+		}
+	}
+
+	/* Check again, to take thread pre-emption into account */
+	if ((regs->mdio_cfg & MDIO_CFG_BSY) != 0) {
+		return (EIO);
+	}
+
+	return (0);
+}
+
+static int
+fman_mdio_read(struct fdt_mdio_device *base, int phy, int reg)
+{
+	struct fman_mdio_device *fm;
+	volatile struct fman_mdio_regs *regs;
+	int val;
+	int err;
+
+	fm = (struct fman_mdio_device *)base;
+	regs = fm->regs;
+
+	MDIO_LOCK();
+
+	err = fman_mdio_wait(regs);
+	if (err == 0) {
+		uint32_t mdio_cfg;
+		uint32_t mdio_ctrl;
+
+		mdio_cfg = regs->mdio_cfg;
+		mdio_cfg &= ~MDIO_CFG_ENC45;
+		regs->mdio_cfg = mdio_cfg;
+
+		mdio_ctrl = MDIO_CTRL_PHY_ADDR(phy) | MDIO_CTRL_REG_ADDR(reg);
+		regs->mdio_ctrl = mdio_ctrl;
+		mdio_ctrl |= MDIO_CTRL_READ;
+		regs->mdio_ctrl = mdio_ctrl;
+
+		err = fman_mdio_wait(regs);
+		if (err == 0 && (regs->mdio_cfg & MDIO_CFG_RD_ERR) == 0) {
+			val = (int)(regs->mdio_data & 0xffff);
+		} else {
+			val = 0xffff;
+		}
+	} else {
+		val = 0xffff;
+	}
+
+	MDIO_UNLOCK();
+
+	return (val);
+}
+
+static int
+fman_mdio_write(struct fdt_mdio_device *base, int phy, int reg, int val)
+{
+	struct fman_mdio_device *fm;
+	volatile struct fman_mdio_regs *regs;
+	int err;
+
+	fm = (struct fman_mdio_device *)base;
+	regs = fm->regs;
+
+	MDIO_LOCK();
+
+	err = fman_mdio_wait(regs);
+	if (err == 0) {
+		uint32_t mdio_cfg;
+		uint32_t mdio_ctrl;
+
+		mdio_cfg = regs->mdio_cfg;
+		mdio_cfg &= ~MDIO_CFG_ENC45;
+		regs->mdio_cfg = mdio_cfg;
+
+		mdio_ctrl = MDIO_CTRL_PHY_ADDR(phy) | MDIO_CTRL_REG_ADDR(reg);
+		regs->mdio_ctrl = mdio_ctrl;
+
+		regs->mdio_data = (uint32_t)(val & 0xffff);
+
+		fman_mdio_wait(regs);
+	}
+
+	MDIO_UNLOCK();
+
+	return (0);
+}
+
+static struct mdio_device *
+create_fman_mdio(const void *fdt, int mdio_node)
+{
+	struct fman_mdio_device *fm = NULL;
+
+	fm = malloc(sizeof(*fm), M_TEMP, M_WAITOK | M_ZERO);
+	if (fm == NULL) {
+		return (NULL);
+	}
+
+	fm->regs = (volatile struct fman_mdio_regs *)(uintptr_t)
+	    fdt_get_address(fdt, mdio_node);
+	fm->base.base.read = fman_mdio_read;
+	fm->base.base.write = fman_mdio_write;
+
+	return (&fm->base);
+}
+
+static struct mdio_device *
+create_mdio_device(const void *fdt, int mdio_node)
+{
+
+	if (fdt_node_check_compatible(fdt, mdio_node,
+	    "fsl,fman-memac-mdio") == 0 ||
+	    fdt_node_check_compatible(fdt, mdio_node,
+	    "fsl,fman-xmdio") == 0) {
+		return (create_fman_mdio(fdt, mdio_node));
+	} else {
+		return (NULL);
+	}
+}
+
+static int
+find_mdio_device(const void *fdt, int mdio_node,
+    struct fdt_phy_device *phy_dev)
+{
+	struct mdio_device *mdio_dev = NULL;
+
+	SLIST_FOREACH(mdio_dev, &mdio.instances, next) {
+		if (mdio_dev->node == mdio_node) {
+			break;
+		}
+	}
+
+	if (mdio_dev == NULL) {
+		mdio_dev = create_mdio_device(fdt, mdio_node);
+	}
+
+	if (mdio_dev == NULL) {
+		return (ENXIO);
+	}
+
+	phy_dev->mdio_dev = &mdio_dev->base;
+	return (0);
+}
+
+static struct fdt_phy_device *
+phy_obtain(const void *fdt, int mdio_node, int phy)
+{
+	struct fdt_phy_device *phy_dev;
+	int err;
+
+	phy_dev = malloc(sizeof(*phy_dev), M_TEMP, M_WAITOK | M_ZERO);
+	if (phy_dev == NULL) {
+		return (NULL);
+	}
+
+	phy_dev->phy = phy;
+	MDIO_LOCK();
+	err = find_mdio_device(fdt, mdio_node, phy_dev);
+	MDIO_UNLOCK();
+
+	if (err != 0) {
+		free(phy_dev, M_TEMP);
+		return (NULL);
+	}
+
+	return (phy_dev);
+}
+
+struct fdt_phy_device *
+fdt_phy_obtain(int device_node)
+{
+	const void *fdt;
+	const fdt32_t *phandle;
+	const fdt32_t *phy;
+	int len;
+	int node;
+
+	fdt = bsp_fdt_get();
+
+	phandle = fdt_getprop(fdt, device_node, "phy-handle", &len);
+	if (phandle == NULL || len != sizeof(*phandle)) {
+		return (NULL);
+	}
+
+	node = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*phandle));
+	if (node < 0) {
+		return (NULL);
+	}
+
+	phy = fdt_getprop(fdt, node, "reg", &len);
+	if (phy == NULL || len != sizeof(*phy)) {
+		return (NULL);
+	}
+
+	node = fdt_parent_offset(fdt, node);
+	if (node < 0) {
+		return (NULL);
+	}
+
+	return (phy_obtain(fdt, node, (int)fdt32_to_cpu(*phy)));
+}
+
+void
+fdt_phy_release(struct fdt_phy_device *phy_dev)
+{
+
+	free(phy_dev, M_TEMP);
+}
diff --git a/rtemsbsd/sys/powerpc/fman_muram.c b/rtemsbsd/sys/powerpc/fman_muram.c
new file mode 100644
index 0000000..c4a8e8d
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/fman_muram.c
@@ -0,0 +1,116 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*
+ * Copyright 2008-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../../../linux/drivers/net/ethernet/freescale/fman/fman_muram.h"
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <stdlib.h>
+#include <rtems/score/heapimpl.h>
+
+#define	MURAM_LOCK(x) (mtx_lock(&(x)->mtx))
+
+#define	MURAM_UNLOCK(x) (mtx_unlock(&(x)->mtx))
+
+struct muram_info {
+	struct mtx mtx;
+	Heap_Control heap;
+	unsigned long base;
+};
+
+static unsigned long
+fman_muram_vbase_to_offset(struct muram_info *muram, unsigned long addr)
+{
+
+	return (addr - muram->base);
+}
+
+struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
+{
+	struct muram_info *muram;
+	uintptr_t s;
+
+	muram = malloc(sizeof(*muram));
+	if (muram == NULL)
+		return (NULL);
+
+	muram->base = (unsigned long)base;
+	memset((void *)muram->base, 0xab, size);
+
+	s = _Heap_Initialize(&muram->heap, (void *)(uintptr_t)base, size, 64);
+	if (s == 0) {
+		free(muram);
+		return (NULL);
+	}
+
+	mtx_init(&muram->mtx, "FMan MURAM", NULL, MTX_DEF);
+
+	return (muram);
+}
+
+unsigned long
+fman_muram_offset_to_vbase(struct muram_info *muram, unsigned long offset)
+{
+
+	return (offset + muram->base);
+}
+
+int
+fman_muram_alloc(struct muram_info *muram, size_t size)
+{
+	void *p;
+
+	MURAM_LOCK(muram);
+	p = _Heap_Allocate(&muram->heap, size);
+	MURAM_UNLOCK(muram);
+
+	if (p == NULL)
+		return -ENOMEM;
+
+	memset(p, 0, size);
+
+	return (fman_muram_vbase_to_offset(muram, (unsigned long)p));
+}
+
+void
+fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
+{
+	void *p = (void *)fman_muram_offset_to_vbase(muram, offset);
+
+	MURAM_LOCK(muram);
+	_Heap_Free(&muram->heap, p);
+	MURAM_UNLOCK(muram);
+}
diff --git a/rtemsbsd/sys/powerpc/linux_compat.c b/rtemsbsd/sys/powerpc/linux_compat.c
new file mode 100644
index 0000000..f54a671
--- /dev/null
+++ b/rtemsbsd/sys/powerpc/linux_compat.c
@@ -0,0 +1,965 @@
+#include <machine/rtems-bsd-kernel-space.h>
+#include <rtems/bsd/local/opt_dpaa.h>
+
+/*-
+ * Copyright (c) 2010 Isilon Systems, Inc.
+ * Copyright (c) 2010 iX Systems, Inc.
+ * Copyright (c) 2010 Panasas, Inc.
+ * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/proc.h>
+#ifndef __rtems__
+#include <sys/sglist.h>
+#endif /* __rtems__ */
+#include <sys/sleepqueue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filio.h>
+#include <sys/rwlock.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/stdarg.h>
+#ifndef __rtems__
+#include <machine/pmap.h>
+#endif /* __rtems__ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#ifdef __rtems__
+#include <linux/completion.h>
+#else /* __rtems__ */
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+
+#include <vm/vm_pager.h>
+
+MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
+
+#include <linux/rbtree.h>
+/* Undo Linux compat changes. */
+#undef RB_ROOT
+#undef file
+#undef cdev
+#define	RB_ROOT(head)	(head)->rbh_root
+
+struct kobject class_root;
+struct device linux_rootdev;
+struct class miscclass;
+struct list_head pci_drivers;
+struct list_head pci_devices;
+struct net init_net;
+spinlock_t pci_lock;
+
+unsigned long linux_timer_hz_mask;
+
+int
+panic_cmp(struct rb_node *one, struct rb_node *two)
+{
+	panic("no cmp");
+}
+
+RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
+
+int
+kobject_set_name(struct kobject *kobj, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+
+	va_start(args, fmt);
+	error = kobject_set_name_vargs(kobj, fmt, args);
+	va_end(args);
+
+	return (error);
+}
+
+static inline int
+kobject_add_complete(struct kobject *kobj, struct kobject *parent)
+{
+	struct kobj_type *t;
+	int error;
+
+	kobj->parent = kobject_get(parent);
+	error = sysfs_create_dir(kobj);
+	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
+		struct attribute **attr;
+		t = kobj->ktype;
+
+		for (attr = t->default_attrs; *attr != NULL; attr++) {
+			error = sysfs_create_file(kobj, *attr);
+			if (error)
+				break;
+		}
+		if (error)
+			sysfs_remove_dir(kobj);
+
+	}
+	return (error);
+}
+
+int
+kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+
+	va_start(args, fmt);
+	error = kobject_set_name_vargs(kobj, fmt, args);
+	va_end(args);
+	if (error)
+		return (error);
+
+	return kobject_add_complete(kobj, parent);
+}
+
+void
+kobject_release(struct kref *kref)
+{
+	struct kobject *kobj;
+	char *name;
+
+	kobj = container_of(kref, struct kobject, kref);
+	sysfs_remove_dir(kobj);
+	if (kobj->parent)
+		kobject_put(kobj->parent);
+	kobj->parent = NULL;
+	name = kobj->name;
+	if (kobj->ktype && kobj->ktype->release)
+		kobj->ktype->release(kobj);
+	kfree(name);
+}
+
+static void
+kobject_kfree(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static void
+kobject_kfree_name(struct kobject *kobj)
+{
+	if (kobj) {
+		kfree(kobj->name);
+	}
+}
+
+struct kobj_type kfree_type = { .release = kobject_kfree };
+
+static void
+dev_release(struct device *dev)
+{
+	pr_debug("dev_release: %s\n", dev_name(dev));
+	kfree(dev);
+}
+
+struct device *
+device_create(struct class *class, struct device *parent, dev_t devt,
+    void *drvdata, const char *fmt, ...)
+{
+	struct device *dev;
+	va_list args;
+
+	dev = kzalloc(sizeof(*dev), M_WAITOK);
+	dev->parent = parent;
+	dev->class = class;
+	dev->devt = devt;
+	dev->driver_data = drvdata;
+	dev->release = dev_release;
+	va_start(args, fmt);
+	kobject_set_name_vargs(&dev->kobj, fmt, args);
+	va_end(args);
+	device_register(dev);
+
+	return (dev);
+}
+
+int
+kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+    struct kobject *parent, const char *fmt, ...)
+{
+	va_list args;
+	int error;
+
+	kobject_init(kobj, ktype);
+	kobj->ktype = ktype;
+	kobj->parent = parent;
+	kobj->name = NULL;
+
+	va_start(args, fmt);
+	error = kobject_set_name_vargs(kobj, fmt, args);
+	va_end(args);
+	if (error)
+		return (error);
+	return kobject_add_complete(kobj, parent);
+}
+
+static void
+linux_file_dtor(void *cdp)
+{
+	struct linux_file *filp;
+
+	filp = cdp;
+	filp->f_op->release(filp->f_vnode, filp);
+	vdrop(filp->f_vnode);
+	kfree(filp);
+}
+
+static int
+linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (ENODEV);
+	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+	filp->f_dentry = &filp->f_dentry_store;
+	filp->f_op = ldev->ops;
+	filp->f_flags = file->f_flag;
+	vhold(file->f_vnode);
+	filp->f_vnode = file->f_vnode;
+	if (filp->f_op->open) {
+		error = -filp->f_op->open(file->f_vnode, filp);
+		if (error) {
+			kfree(filp);
+			return (error);
+		}
+	}
+	error = devfs_set_cdevpriv(filp, linux_file_dtor);
+	if (error) {
+		filp->f_op->release(file->f_vnode, filp);
+		kfree(filp);
+		return (error);
+	}
+
+	return 0;
+}
+
+static int
+linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (0);
+	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+		return (error);
+	filp->f_flags = file->f_flag;
+	devfs_clear_cdevpriv();
+
+	return (0);
+}
+
+static int
+linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+    struct thread *td)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (0);
+	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+		return (error);
+	filp->f_flags = file->f_flag;
+	/*
+	 * Linux does not have a generic ioctl copyin/copyout layer.  All
+	 * linux ioctls must be converted to void ioctls which pass a
+	 * pointer to the address of the data.  We want the actual user
+	 * address so we dereference here.
+	 */
+	data = *(void **)data;
+	if (filp->f_op->unlocked_ioctl)
+		error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
+	else
+		error = ENOTTY;
+
+	return (error);
+}
+
+static int
+linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	ssize_t bytes;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (0);
+	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+		return (error);
+	filp->f_flags = file->f_flag;
+	if (uio->uio_iovcnt != 1)
+		panic("linux_dev_read: uio %p iovcnt %d",
+		    uio, uio->uio_iovcnt);
+	if (filp->f_op->read) {
+		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
+		    uio->uio_iov->iov_len, &uio->uio_offset);
+		if (bytes >= 0) {
+			uio->uio_iov->iov_base =
+			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+			uio->uio_iov->iov_len -= bytes;
+			uio->uio_resid -= bytes;
+		} else
+			error = -bytes;
+	} else
+		error = ENXIO;
+
+	return (error);
+}
+
+static int
+linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	ssize_t bytes;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (0);
+	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+		return (error);
+	filp->f_flags = file->f_flag;
+	if (uio->uio_iovcnt != 1)
+		panic("linux_dev_write: uio %p iovcnt %d",
+		    uio, uio->uio_iovcnt);
+	if (filp->f_op->write) {
+		bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
+		    uio->uio_iov->iov_len, &uio->uio_offset);
+		if (bytes >= 0) {
+			uio->uio_iov->iov_base =
+			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+			uio->uio_iov->iov_len -= bytes;
+			uio->uio_resid -= bytes;
+		} else
+			error = -bytes;
+	} else
+		error = ENXIO;
+
+	return (error);
+}
+
+static int
+linux_dev_poll(struct cdev *dev, int events, struct thread *td)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	int revents;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (0);
+	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+		return (error);
+	filp->f_flags = file->f_flag;
+	if (filp->f_op->poll)
+		revents = filp->f_op->poll(filp, NULL) & events;
+	else
+		revents = 0;
+
+	return (revents);
+}
+
+static int
+linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **object, int nprot)
+{
+	struct linux_cdev *ldev;
+	struct linux_file *filp;
+	struct file *file;
+	struct vm_area_struct vma;
+	int error;
+
+	file = curthread->td_fpop;
+	ldev = dev->si_drv1;
+	if (ldev == NULL)
+		return (ENODEV);
+	if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
+		return (error);
+	filp->f_flags = file->f_flag;
+	vma.vm_start = 0;
+	vma.vm_end = size;
+	vma.vm_pgoff = *offset / PAGE_SIZE;
+	vma.vm_pfn = 0;
+	vma.vm_page_prot = 0;
+	if (filp->f_op->mmap) {
+		error = -filp->f_op->mmap(filp, &vma);
+		if (error == 0) {
+			struct sglist *sg;
+
+			sg = sglist_alloc(1, M_WAITOK);
+			sglist_append_phys(sg,
+			    (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
+			*object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
+			    nprot, 0, curthread->td_ucred);
+			if (*object == NULL) {
+				sglist_free(sg);
+				return (EINVAL);
+			}
+			*offset = 0;
+			if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
+				VM_OBJECT_WLOCK(*object);
+				vm_object_set_memattr(*object,
+				    vma.vm_page_prot);
+				VM_OBJECT_WUNLOCK(*object);
+			}
+		}
+	} else
+		error = ENODEV;
+
+	return (error);
+}
+
+struct cdevsw linuxcdevsw = {
+	.d_version = D_VERSION,
+	.d_flags = D_TRACKCLOSE,
+	.d_open = linux_dev_open,
+	.d_close = linux_dev_close,
+	.d_read = linux_dev_read,
+	.d_write = linux_dev_write,
+	.d_ioctl = linux_dev_ioctl,
+	.d_mmap_single = linux_dev_mmap_single,
+	.d_poll = linux_dev_poll,
+};
+
+static int
+linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
+    int flags, struct thread *td)
+{
+	struct linux_file *filp;
+	ssize_t bytes;
+	int error;
+
+	error = 0;
+	filp = (struct linux_file *)file->f_data;
+	filp->f_flags = file->f_flag;
+	if (uio->uio_iovcnt != 1)
+		panic("linux_file_read: uio %p iovcnt %d",
+		    uio, uio->uio_iovcnt);
+	if (filp->f_op->read) {
+		bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
+		    uio->uio_iov->iov_len, &uio->uio_offset);
+		if (bytes >= 0) {
+			uio->uio_iov->iov_base =
+			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
+			uio->uio_iov->iov_len -= bytes;
+			uio->uio_resid -= bytes;
+		} else
+			error = -bytes;
+	} else
+		error = ENXIO;
+
+	return (error);
+}
+
+static int
+linux_file_poll(struct file *file, int events, struct ucred *active_cred,
+    struct thread *td)
+{
+	struct linux_file *filp;
+	int revents;
+
+	filp = (struct linux_file *)file->f_data;
+	filp->f_flags = file->f_flag;
+	if (filp->f_op->poll)
+		revents = filp->f_op->poll(filp, NULL) & events;
+	else
+		revents = 0;
+
+	return (revents);
+}
+
+static int
+linux_file_close(struct file *file, struct thread *td)
+{
+	struct linux_file *filp;
+	int error;
+
+	filp = (struct linux_file *)file->f_data;
+	filp->f_flags = file->f_flag;
+	error = -filp->f_op->release(NULL, filp);
+	funsetown(&filp->f_sigio);
+	kfree(filp);
+
+	return (error);
+}
+
+static int
+linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
+    struct thread *td)
+{
+	struct linux_file *filp;
+	int error;
+
+	filp = (struct linux_file *)fp->f_data;
+	filp->f_flags = fp->f_flag;
+	error = 0;
+
+	switch (cmd) {
+	case FIONBIO:
+		break;
+	case FIOASYNC:
+		if (filp->f_op->fasync == NULL)
+			break;
+		error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
+		break;
+	case FIOSETOWN:
+		error = fsetown(*(int *)data, &filp->f_sigio);
+		if (error == 0)
+			error = filp->f_op->fasync(0, filp,
+			    fp->f_flag & FASYNC);
+		break;
+	case FIOGETOWN:
+		*(int *)data = fgetown(&filp->f_sigio);
+		break;
+	default:
+		error = ENOTTY;
+		break;
+	}
+	return (error);
+}
+
+static int
+linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
+    struct thread *td)
+{
+
+	return (EOPNOTSUPP);
+}
+
+static int
+linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
+    struct filedesc *fdp)
+{
+
+	return (0);
+}
+
+struct fileops linuxfileops = {
+	.fo_read = linux_file_read,
+	.fo_write = invfo_rdwr,
+	.fo_truncate = invfo_truncate,
+	.fo_kqfilter = invfo_kqfilter,
+	.fo_stat = linux_file_stat,
+	.fo_fill_kinfo = linux_file_fill_kinfo,
+	.fo_poll = linux_file_poll,
+	.fo_close = linux_file_close,
+	.fo_ioctl = linux_file_ioctl,
+	.fo_chmod = invfo_chmod,
+	.fo_chown = invfo_chown,
+	.fo_sendfile = invfo_sendfile,
+};
+
+/*
+ * Hash of vmmap addresses.  This is infrequently accessed and does not
+ * need to be particularly large.  This is done because we must store the
+ * caller's idea of the map size to properly unmap.
+ */
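+/*
+ * _ioremap_attr() and vmap() record each mapping here via vmmap_add() so
+ * that iounmap() and vunmap() can later look up the mapped size by the
+ * start address alone.
+ */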
+struct vmmap {
+	LIST_ENTRY(vmmap)	vm_next;
+	void 			*vm_addr;
+	unsigned long		vm_size;
+};
+
+struct vmmaphd {
+	struct vmmap *lh_first;
+};
+#define	VMMAP_HASH_SIZE	64
+#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
+#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
+static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
+static struct mtx vmmaplock;
+
+static void
+vmmap_add(void *addr, unsigned long size)
+{
+	struct vmmap *vmmap;
+
+	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
+	mtx_lock(&vmmaplock);
+	vmmap->vm_size = size;
+	vmmap->vm_addr = addr;
+	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
+	mtx_unlock(&vmmaplock);
+}
+
+static struct vmmap *
+vmmap_remove(void *addr)
+{
+	struct vmmap *vmmap;
+
+	mtx_lock(&vmmaplock);
+	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
+		if (vmmap->vm_addr == addr)
+			break;
+	if (vmmap)
+		LIST_REMOVE(vmmap, vm_next);
+	mtx_unlock(&vmmaplock);
+
+	return (vmmap);
+}
+
+void *
+_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
+{
+	void *addr;
+
+	addr = pmap_mapdev_attr(phys_addr, size, attr);
+	if (addr == NULL)
+		return (NULL);
+	vmmap_add(addr, size);
+
+	return (addr);
+}
+
+void
+iounmap(void *addr)
+{
+	struct vmmap *vmmap;
+
+	vmmap = vmmap_remove(addr);
+	if (vmmap == NULL)
+		return;
+	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
+	kfree(vmmap);
+}
+
+
+void *
+vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
+{
+	vm_offset_t off;
+	size_t size;
+
+	size = count * PAGE_SIZE;
+	off = kva_alloc(size);
+	if (off == 0)
+		return (NULL);
+	vmmap_add((void *)off, size);
+	pmap_qenter(off, pages, count);
+
+	return ((void *)off);
+}
+
+void
+vunmap(void *addr)
+{
+	struct vmmap *vmmap;
+
+	vmmap = vmmap_remove(addr);
+	if (vmmap == NULL)
+		return;
+	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
+	kva_free((vm_offset_t)addr, vmmap->vm_size);
+	kfree(vmmap);
+}
+
+char *
+kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
+{
+	unsigned int len;
+	char *p;
+	va_list aq;
+
+	va_copy(aq, ap);
+	len = vsnprintf(NULL, 0, fmt, aq);
+	va_end(aq);
+
+	p = kmalloc(len + 1, gfp);
+	if (p != NULL)
+		vsnprintf(p, len + 1, fmt, ap);
+
+	return (p);
+}
+
+char *
+kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+	va_list ap;
+	char *p;
+
+	va_start(ap, fmt);
+	p = kvasprintf(gfp, fmt, ap);
+	va_end(ap);
+
+	return (p);
+}
+
+static int
+linux_timer_jiffies_until(unsigned long expires)
+{
+	int delta = expires - jiffies;
+	/* guard against already expired values */
+	if (delta < 1)
+		delta = 1;
+	return (delta);
+}
+
+static void
+linux_timer_callback_wrapper(void *context)
+{
+	struct timer_list *timer;
+
+	timer = context;
+	timer->function(timer->data);
+}
+
+void
+mod_timer(struct timer_list *timer, unsigned long expires)
+{
+
+	timer->expires = expires;
+	callout_reset(&timer->timer_callout,
+	    linux_timer_jiffies_until(expires),
+	    &linux_timer_callback_wrapper, timer);
+}
+
+void
+add_timer(struct timer_list *timer)
+{
+
+	callout_reset(&timer->timer_callout,
+	    linux_timer_jiffies_until(timer->expires),
+	    &linux_timer_callback_wrapper, timer);
+}
+
+static void
+linux_timer_init(void *arg)
+{
+
+	/*
+	 * Compute an internal HZ value which can divide 2**32 to
+	 * avoid timer rounding problems when the tick value wraps
+	 * around 2**32:
+	 */
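+	/*
+	 * For hz = 1000, for instance, the loop below yields
+	 * linux_timer_hz_mask = 1023, the next power of two minus one.
+	 */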
+	linux_timer_hz_mask = 1;
+	while (linux_timer_hz_mask < (unsigned long)hz)
+		linux_timer_hz_mask *= 2;
+	linux_timer_hz_mask--;
+}
+SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
+#endif /* __rtems__ */
+
+void
+linux_complete_common(struct completion *c, int all)
+{
+	int wakeup_swapper;
+
+	sleepq_lock(c);
+	c->done++;
+	if (all)
+		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+	else
+		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+	sleepq_release(c);
+	if (wakeup_swapper)
+		kick_proc0();
+}
+
+/*
+ * Indefinite wait for done != 0 with or without signals.
+ */
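+/*
+ * On RTEMS the SLEEPQ_INTERRUPTIBLE branch below is compiled out, so the
+ * wait always falls back to a plain sleepq_wait().
+ */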
+long
+linux_wait_for_common(struct completion *c, int flags)
+{
+
+	if (flags != 0)
+		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
+	else
+		flags = SLEEPQ_SLEEP;
+	for (;;) {
+		sleepq_lock(c);
+		if (c->done)
+			break;
+		sleepq_add(c, NULL, "completion", flags, 0);
+#ifndef __rtems__
+		if (flags & SLEEPQ_INTERRUPTIBLE) {
+			if (sleepq_wait_sig(c, 0) != 0)
+				return (-ERESTARTSYS);
+		} else
+#endif /* __rtems__ */
+			sleepq_wait(c, 0);
+	}
+	c->done--;
+	sleepq_release(c);
+
+	return (0);
+}
+
+#ifndef __rtems__
+/*
+ * Time limited wait for done != 0 with or without signals.
+ */
+long
+linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
+{
+	long end = jiffies + timeout;
+
+	if (flags != 0)
+		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
+	else
+		flags = SLEEPQ_SLEEP;
+	for (;;) {
+		int ret;
+
+		sleepq_lock(c);
+		if (c->done)
+			break;
+		sleepq_add(c, NULL, "completion", flags, 0);
+		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
+		if (flags & SLEEPQ_INTERRUPTIBLE)
+			ret = sleepq_timedwait_sig(c, 0);
+		else
+			ret = sleepq_timedwait(c, 0);
+		if (ret != 0) {
+			/* check for timeout or signal */
+			if (ret == EWOULDBLOCK)
+				return (0);
+			else
+				return (-ERESTARTSYS);
+		}
+	}
+	c->done--;
+	sleepq_release(c);
+
+	/* return how many jiffies are left */
+	return (linux_timer_jiffies_until(end));
+}
+#endif /* __rtems__ */
+
+int
+linux_try_wait_for_completion(struct completion *c)
+{
+	int isdone;
+
+	isdone = 1;
+	sleepq_lock(c);
+	if (c->done)
+		c->done--;
+	else
+		isdone = 0;
+	sleepq_release(c);
+	return (isdone);
+}
+
+int
+linux_completion_done(struct completion *c)
+{
+	int isdone;
+
+	isdone = 1;
+	sleepq_lock(c);
+	if (c->done == 0)
+		isdone = 0;
+	sleepq_release(c);
+	return (isdone);
+}
+
+#ifndef __rtems__
+static void
+linux_compat_init(void *arg)
+{
+	struct sysctl_oid *rootoid;
+	int i;
+
+	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
+	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
+	kobject_init(&class_root, &class_ktype);
+	kobject_set_name(&class_root, "class");
+	class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
+	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
+	kobject_init(&linux_rootdev.kobj, &dev_ktype);
+	kobject_set_name(&linux_rootdev.kobj, "device");
+	linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
+	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
+	    "device");
+	linux_rootdev.bsddev = root_bus;
+	miscclass.name = "misc";
+	class_register(&miscclass);
+	INIT_LIST_HEAD(&pci_drivers);
+	INIT_LIST_HEAD(&pci_devices);
+	spin_lock_init(&pci_lock);
+	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
+	for (i = 0; i < VMMAP_HASH_SIZE; i++)
+		LIST_INIT(&vmmaphead[i]);
+}
+SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
+
+static void
+linux_compat_uninit(void *arg)
+{
+	kobject_kfree_name(&class_root);
+	kobject_kfree_name(&linux_rootdev.kobj);
+	kobject_kfree_name(&miscclass.kobj);
+}
+SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
+#endif /* __rtems__ */
diff --git a/testsuite/include/rtems/bsd/test/network-config.h.in b/testsuite/include/rtems/bsd/test/network-config.h.in
index c20a735..2cef97d 100755
--- a/testsuite/include/rtems/bsd/test/network-config.h.in
+++ b/testsuite/include/rtems/bsd/test/network-config.h.in
@@ -44,6 +44,12 @@
   #define NET_CFG_INTERFACE_0 "fec0"
 #elif defined(LIBBSP_ARM_LPC32XX_BSP_H)
   #define NET_CFG_INTERFACE_0 "lpe0"
+#elif defined(LIBBSP_POWERPC_QORIQ_BSP_H)
+  #if QORIQ_CHIP_IS_T_VARIANT(QORIQ_CHIP_VARIANT)
+    #define NET_CFG_INTERFACE_0 "fm1m3"
+  #else
+    #define NET_CFG_INTERFACE_0 "tsec0"
+  #endif
 #else
   #define NET_CFG_INTERFACE_0 "lo0"
 #endif



