[rtems-libbsd commit] if_cgem: Import from FreeBSD

Sebastian Huber sebh at rtems.org
Wed Jan 21 14:02:59 UTC 2015


Module:    rtems-libbsd
Branch:    master
Commit:    b8e0c66e5f2f9631c365f509bbcda422533e4886
Changeset: http://git.rtems.org/rtems-libbsd/commit/?id=b8e0c66e5f2f9631c365f509bbcda422533e4886

Author:    Sebastian Huber <sebastian.huber at embedded-brains.de>
Date:      Thu Nov 20 07:57:05 2014 +0100

if_cgem: Import from FreeBSD

---

 Makefile                             |    1 +
 freebsd-to-rtems.py                  |    2 +
 freebsd/sys/dev/cadence/if_cgem.c    | 1828 ++++++++++++++++++++++++++++++++++
 freebsd/sys/dev/cadence/if_cgem_hw.h |  382 +++++++
 4 files changed, 2213 insertions(+)

diff --git a/Makefile b/Makefile
index 076a9e2..e692fad 100644
--- a/Makefile
+++ b/Makefile
@@ -401,6 +401,7 @@ LIB_C_FILES += freebsd/sys/dev/mii/mii_physubr.c
 LIB_C_FILES += freebsd/sys/dev/mii/icsphy.c
 LIB_C_FILES += freebsd/sys/dev/mii/brgphy.c
 LIB_C_FILES += freebsd/sys/dev/tsec/if_tsec.c
+LIB_C_FILES += freebsd/sys/dev/cadence/if_cgem.c
 LIB_C_FILES += freebsd/sys/dev/usb/usb_busdma.c
 LIB_C_FILES += freebsd/sys/dev/usb/usb_core.c
 LIB_C_FILES += freebsd/sys/dev/usb/usb_debug.c
diff --git a/freebsd-to-rtems.py b/freebsd-to-rtems.py
index aa1cf7d..bf3e61a 100755
--- a/freebsd-to-rtems.py
+++ b/freebsd-to-rtems.py
@@ -1258,6 +1258,7 @@ devNet.addKernelSpaceHeaderFiles(
 		'sys/dev/ofw/openfirm.h',
 		'sys/dev/tsec/if_tsec.h',
 		'sys/dev/tsec/if_tsecreg.h',
+		'sys/dev/cadence/if_cgem_hw.h',
 	]
 )
 devNet.addKernelSpaceSourceFiles(
@@ -1268,6 +1269,7 @@ devNet.addKernelSpaceSourceFiles(
 		'sys/dev/mii/icsphy.c',
 		'sys/dev/mii/brgphy.c',
 		'sys/dev/tsec/if_tsec.c',
+		'sys/dev/cadence/if_cgem.c',
 	]
 )
 
diff --git a/freebsd/sys/dev/cadence/if_cgem.c b/freebsd/sys/dev/cadence/if_cgem.c
new file mode 100644
index 0000000..bf14a9a
--- /dev/null
+++ b/freebsd/sys/dev/cadence/if_cgem.c
@@ -0,0 +1,1828 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo at yahoo.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * A network interface driver for the Cadence GEM Gigabit Ethernet
+ * interface, such as the one used in the Xilinx Zynq-7000 SoC.
+ *
+ * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
+ * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
+ * and register definitions are in appendix B.18.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_mib.h>
+#include <net/if_types.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#endif
+
+#include <net/bpf.h>
+#include <net/bpfdesc.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/cadence/if_cgem_hw.h>
+
+#include <rtems/bsd/local/miibus_if.h>
+
+#define IF_CGEM_NAME "cgem"
+
+#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
+#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */
+
+#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
+				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))
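+/* (Sized to the larger of the two rings so the single descriptor DMA tag
+ * created in cgem_setup_descs() can serve both allocations.) */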
+
+
+/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
+#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */
+
+#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */
+
+#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
+				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
+
+struct cgem_softc {
+	struct ifnet		*ifp;
+	struct mtx		sc_mtx;
+	device_t		dev;
+	device_t		miibus;
+	u_int			mii_media_active;	/* last active media */
+	int			if_old_flags;
+	struct resource 	*mem_res;
+	struct resource 	*irq_res;
+	void			*intrhand;
+	struct callout		tick_ch;
+	uint32_t		net_ctl_shadow;
+	int			ref_clk_num;
+	u_char			eaddr[6];
+
+	bus_dma_tag_t		desc_dma_tag;
+	bus_dma_tag_t		mbuf_dma_tag;
+
+	/* receive descriptor ring */
+	struct cgem_rx_desc	*rxring;
+	bus_addr_t		rxring_physaddr;
+	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
+	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
+	int			rxring_hd_ptr;	/* where to put rcv bufs */
+	int			rxring_tl_ptr;	/* where to get receives */
+	int			rxring_queued;	/* how many rcv bufs queued */
+	bus_dmamap_t		rxring_dma_map;
+	int			rxbufs;		/* tunable number rcv bufs */
+	int			rxhangwar;	/* rx hang work-around */
+	u_int			rxoverruns;	/* rx overruns */
+	u_int			rxnobufs;	/* rx buf ring empty events */
+	u_int			rxdmamapfails;	/* rx dmamap failures */
+	uint32_t		rx_frames_prev;
+
+	/* transmit descriptor ring */
+	struct cgem_tx_desc	*txring;
+	bus_addr_t		txring_physaddr;
+	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
+	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
+	int			txring_hd_ptr;	/* where to put next xmits */
+	int			txring_tl_ptr;	/* next xmit mbuf to free */
+	int			txring_queued;	/* num xmits segs queued */
+	bus_dmamap_t		txring_dma_map;
+	u_int			txfull;		/* tx ring full events */
+	u_int			txdefrags;	/* tx calls to m_defrag() */
+	u_int			txdefragfails;	/* tx m_defrag() failures */
+	u_int			txdmamapfails;	/* tx dmamap failures */
+
+	/* hardware provided statistics */
+	struct cgem_hw_stats {
+		uint64_t		tx_bytes;
+		uint32_t		tx_frames;
+		uint32_t		tx_frames_bcast;
+		uint32_t		tx_frames_multi;
+		uint32_t		tx_frames_pause;
+		uint32_t		tx_frames_64b;
+		uint32_t		tx_frames_65to127b;
+		uint32_t		tx_frames_128to255b;
+		uint32_t		tx_frames_256to511b;
+		uint32_t		tx_frames_512to1023b;
+		uint32_t		tx_frames_1024to1536b;
+		uint32_t		tx_under_runs;
+		uint32_t		tx_single_collisn;
+		uint32_t		tx_multi_collisn;
+		uint32_t		tx_excsv_collisn;
+		uint32_t		tx_late_collisn;
+		uint32_t		tx_deferred_frames;
+		uint32_t		tx_carrier_sense_errs;
+
+		uint64_t		rx_bytes;
+		uint32_t		rx_frames;
+		uint32_t		rx_frames_bcast;
+		uint32_t		rx_frames_multi;
+		uint32_t		rx_frames_pause;
+		uint32_t		rx_frames_64b;
+		uint32_t		rx_frames_65to127b;
+		uint32_t		rx_frames_128to255b;
+		uint32_t		rx_frames_256to511b;
+		uint32_t		rx_frames_512to1023b;
+		uint32_t		rx_frames_1024to1536b;
+		uint32_t		rx_frames_undersize;
+		uint32_t		rx_frames_oversize;
+		uint32_t		rx_frames_jabber;
+		uint32_t		rx_frames_fcs_errs;
+		uint32_t		rx_frames_length_errs;
+		uint32_t		rx_symbol_errs;
+		uint32_t		rx_align_errs;
+		uint32_t		rx_resource_errs;
+		uint32_t		rx_overrun_errs;
+		uint32_t		rx_ip_hdr_csum_errs;
+		uint32_t		rx_tcp_csum_errs;
+		uint32_t		rx_udp_csum_errs;
+	} stats;
+};
+
+#define RD4(sc, off) 		(bus_read_4((sc)->mem_res, (off)))
+#define WR4(sc, off, val) 	(bus_write_4((sc)->mem_res, (off), (val)))
+#define BARRIER(sc, off, len, flags) \
+	(bus_barrier((sc)->mem_res, (off), (len), (flags)))
+
+#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
+#define CGEM_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
+#define CGEM_LOCK_INIT(sc)	\
+	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
+		 MTX_NETWORK_LOCK, MTX_DEF)
+#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
+#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
+
+/* Allow platforms to optionally provide a way to set the reference clock. */
+int cgem_set_ref_clk(int unit, int frequency);
+
+static devclass_t cgem_devclass;
+
+static int cgem_probe(device_t dev);
+static int cgem_attach(device_t dev);
+static int cgem_detach(device_t dev);
+static void cgem_tick(void *);
+static void cgem_intr(void *);
+
+static void cgem_mediachange(struct cgem_softc *, struct mii_data *);
+
+static void
+cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
+{
+	int i;
+	uint32_t rnd;
+
+	/* See if boot loader gave us a MAC address already. */
+	for (i = 0; i < 4; i++) {
+		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
+		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
+		if (low != 0 || high != 0) {
+			eaddr[0] = low & 0xff;
+			eaddr[1] = (low >> 8) & 0xff;
+			eaddr[2] = (low >> 16) & 0xff;
+			eaddr[3] = (low >> 24) & 0xff;
+			eaddr[4] = high & 0xff;
+			eaddr[5] = (high >> 8) & 0xff;
+			break;
+		}
+	}
+
+	/* No MAC from boot loader?  Assign a random one. */
+	if (i == 4) {
+		rnd = arc4random();
+
+		eaddr[0] = 'b';
+		eaddr[1] = 's';
+		eaddr[2] = 'd';
+		eaddr[3] = (rnd >> 16) & 0xff;
+		eaddr[4] = (rnd >> 8) & 0xff;
+		eaddr[5] = rnd & 0xff;
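+		/* ('b' is 0x62: locally-administered bit set, multicast
+		 * bit clear, so this forms a valid unicast address.) */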
+
+		device_printf(sc->dev, "no mac address found, assigning "
+			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
+			      eaddr[0], eaddr[1], eaddr[2],
+			      eaddr[3], eaddr[4], eaddr[5]);
+	}
+
+	/* Move address to first slot and zero out the rest. */
+	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
+	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
+	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
+
+	for (i = 1; i < 4; i++) {
+		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
+		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
+	}
+}
+
+/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
+ * The 6-bit hash corresponds to a bit in a 64-bit hash
+ * register.  Setting that bit in the hash register enables
+ * reception of all frames with a destination address that hashes
+ * to that 6-bit value.
+ *
+ * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
+ * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
+ * every sixth bit in the destination address.
+ */
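+/*
+ * Quick sanity check of the fold: an all-ones address (ff:ff:ff:ff:ff:ff)
+ * XORs eight set bits into each of the six hash bits, giving hash 0.
+ */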
+static int
+cgem_mac_hash(u_char eaddr[])
+{
+	int hash;
+	int i, j;
+
+	hash = 0;
+	for (i = 0; i < 6; i++)
+		for (j = i; j < 48; j += 6)
+			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
+				hash ^= (1 << i);
+
+	return hash;
+}
+
+/* After any change in rx flags or multi-cast addresses, set up
+ * hash registers and net config register bits.
+ */
+static void
+cgem_rx_filter(struct cgem_softc *sc)
+{
+	struct ifnet *ifp = sc->ifp;
+	struct ifmultiaddr *ifma;
+	int index;
+	uint32_t hash_hi, hash_lo;
+	uint32_t net_cfg;
+
+	hash_hi = 0;
+	hash_lo = 0;
+
+	net_cfg = RD4(sc, CGEM_NET_CFG);
+
+	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
+		     CGEM_NET_CFG_NO_BCAST | 
+		     CGEM_NET_CFG_COPY_ALL);
+
+	if ((ifp->if_flags & IFF_PROMISC) != 0)
+		net_cfg |= CGEM_NET_CFG_COPY_ALL;
+	else {
+		if ((ifp->if_flags & IFF_BROADCAST) == 0)
+			net_cfg |= CGEM_NET_CFG_NO_BCAST;
+		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
+			hash_hi = 0xffffffff;
+			hash_lo = 0xffffffff;
+		} else {
+			if_maddr_rlock(ifp);
+			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+				if (ifma->ifma_addr->sa_family != AF_LINK)
+					continue;
+				index = cgem_mac_hash(
+					LLADDR((struct sockaddr_dl *)
+					       ifma->ifma_addr));
+				if (index > 31)
+					hash_hi |= (1<<(index-32));
+				else
+					hash_lo |= (1<<index);
+			}
+			if_maddr_runlock(ifp);
+		}
+
+		if (hash_hi != 0 || hash_lo != 0)
+			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
+	}
+
+	WR4(sc, CGEM_HASH_TOP, hash_hi);
+	WR4(sc, CGEM_HASH_BOT, hash_lo);
+	WR4(sc, CGEM_NET_CFG, net_cfg);
+}
+
+/* For bus_dmamap_load() callback. */
+static void
+cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+	if (nsegs != 1 || error != 0)
+		return;
+	*(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+/* Create DMA'able descriptor rings. */
+static int
+cgem_setup_descs(struct cgem_softc *sc)
+{
+	int i, err;
+
+	sc->txring = NULL;
+	sc->rxring = NULL;
+
+	/* Allocate non-cached DMA space for RX and TX descriptors. */
+	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
+				 BUS_SPACE_MAXADDR_32BIT,
+				 BUS_SPACE_MAXADDR,
+				 NULL, NULL,
+				 MAX_DESC_RING_SIZE,
+				 1,
+				 MAX_DESC_RING_SIZE,
+				 0,
+				 busdma_lock_mutex,
+				 &sc->sc_mtx,
+				 &sc->desc_dma_tag);
+	if (err)
+		return (err);
+
+	/* Set up a bus_dma_tag for mbufs. */
+	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
+				 BUS_SPACE_MAXADDR_32BIT,
+				 BUS_SPACE_MAXADDR,
+				 NULL, NULL,
+				 MCLBYTES,
+				 TX_MAX_DMA_SEGS,
+				 MCLBYTES,
+				 0,
+				 busdma_lock_mutex,
+				 &sc->sc_mtx,
+				 &sc->mbuf_dma_tag);
+	if (err)
+		return (err);
+
+	/* Allocate DMA memory in non-cacheable space. */
+	err = bus_dmamem_alloc(sc->desc_dma_tag,
+			       (void **)&sc->rxring,
+			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
+			       &sc->rxring_dma_map);
+	if (err)
+		return (err);
+
+	/* Load descriptor DMA memory. */
+	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
+			      (void *)sc->rxring,
+			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
+			      cgem_getaddr, &sc->rxring_physaddr,
+			      BUS_DMA_NOWAIT);
+	if (err)
+		return (err);
+
+	/* Initialize RX descriptors. */
+	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
+		sc->rxring[i].addr = CGEM_RXDESC_OWN;
+		sc->rxring[i].ctl = 0;
+		sc->rxring_m[i] = NULL;
+		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
+					&sc->rxring_m_dmamap[i]);
+		if (err)
+			return (err);
+	}
+	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
+
+	sc->rxring_hd_ptr = 0;
+	sc->rxring_tl_ptr = 0;
+	sc->rxring_queued = 0;
+
+	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
+	err = bus_dmamem_alloc(sc->desc_dma_tag,
+			       (void **)&sc->txring,
+			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
+			       &sc->txring_dma_map);
+	if (err)
+		return (err);
+
+	/* Load TX descriptor DMA memory. */
+	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
+			      (void *)sc->txring,
+			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
+			      cgem_getaddr, &sc->txring_physaddr, 
+			      BUS_DMA_NOWAIT);
+	if (err)
+		return (err);
+
+	/* Initialize TX descriptor ring. */
+	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
+		sc->txring[i].addr = 0;
+		sc->txring[i].ctl = CGEM_TXDESC_USED;
+		sc->txring_m[i] = NULL;
+		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
+					&sc->txring_m_dmamap[i]);
+		if (err)
+			return (err);
+	}
+	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
+
+	sc->txring_hd_ptr = 0;
+	sc->txring_tl_ptr = 0;
+	sc->txring_queued = 0;
+
+	return (0);
+}
+
+/* Fill receive descriptor ring with mbufs. */
+static void
+cgem_fill_rqueue(struct cgem_softc *sc)
+{
+	struct mbuf *m = NULL;
+	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
+	int nsegs;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	while (sc->rxring_queued < sc->rxbufs) {
+		/* Get a cluster mbuf. */
+		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+		if (m == NULL)
+			break;
+
+		m->m_len = MCLBYTES;
+		m->m_pkthdr.len = MCLBYTES;
+		m->m_pkthdr.rcvif = sc->ifp;
+
+		/* Load map and plug in physical address. */
+		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, 
+			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
+			      segs, &nsegs, BUS_DMA_NOWAIT)) {
+			sc->rxdmamapfails++;
+			m_free(m);
+			break;
+		}
+		sc->rxring_m[sc->rxring_hd_ptr] = m;
+
+		/* Sync cache with receive buffer. */
+		bus_dmamap_sync(sc->mbuf_dma_tag,
+				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
+				BUS_DMASYNC_PREREAD);
+
+		/* Write rx descriptor and increment head pointer. */
+		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
+		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
+			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
+				CGEM_RXDESC_WRAP;
+			sc->rxring_hd_ptr = 0;
+		} else
+			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;
+			
+		sc->rxring_queued++;
+	}
+}
+
+/* Pull received packets off of receive descriptor ring. */
+static void
+cgem_recv(struct cgem_softc *sc)
+{
+	struct ifnet *ifp = sc->ifp;
+	struct mbuf *m, *m_hd, **m_tl;
+	uint32_t ctl;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	/* Pick up all packets in which the OWN bit is set. */
+	m_hd = NULL;
+	m_tl = &m_hd;
+	while (sc->rxring_queued > 0 &&
+	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {
+
+		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;
+
+		/* Grab filled mbuf. */
+		m = sc->rxring_m[sc->rxring_tl_ptr];
+		sc->rxring_m[sc->rxring_tl_ptr] = NULL;
+
+		/* Sync cache with receive buffer. */
+		bus_dmamap_sync(sc->mbuf_dma_tag,
+				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
+				BUS_DMASYNC_POSTREAD);
+
+		/* Unload dmamap. */
+		bus_dmamap_unload(sc->mbuf_dma_tag,
+		  	sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
+
+		/* Increment tail pointer. */
+		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
+			sc->rxring_tl_ptr = 0;
+		sc->rxring_queued--;
+
+		/* Check FCS and make sure entire packet landed in one mbuf
+		 * cluster (which is much bigger than the largest ethernet
+		 * packet).
+		 */
+		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
+		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
+		           (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
+			/* discard. */
+			m_free(m);
+			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+			continue;
+		}
+
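+		/* The controller stored the frame ETHER_ALIGN (2) bytes into
+		 * the buffer (see CGEM_NET_CFG_RX_BUF_OFFSET in cgem_config()),
+		 * which leaves the IP header 32-bit aligned. */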
+		/* Ready it to hand off to upper layers. */
+		m->m_data += ETHER_ALIGN;
+		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
+		m->m_pkthdr.rcvif = ifp;
+		m->m_pkthdr.len = m->m_len;
+
+		/* Are we using hardware checksumming?  Check the
+		 * status in the receive descriptor.
+		 */
+		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+			/* TCP or UDP checks out, IP checks out too. */
+			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
+			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
+			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
+			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
+				m->m_pkthdr.csum_flags |=
+					CSUM_IP_CHECKED | CSUM_IP_VALID |
+					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+				m->m_pkthdr.csum_data = 0xffff;
+			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
+				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
+				/* Only IP checks out. */
+				m->m_pkthdr.csum_flags |=
+					CSUM_IP_CHECKED | CSUM_IP_VALID;
+				m->m_pkthdr.csum_data = 0xffff;
+			}
+		}
+
+		/* Queue it up for delivery below. */
+		*m_tl = m;
+		m_tl = &m->m_next;
+	}
+
+	/* Replenish receive buffers. */
+	cgem_fill_rqueue(sc);
+
+	/* Unlock and send up packets. */
+	CGEM_UNLOCK(sc);
+	while (m_hd != NULL) {
+		m = m_hd;
+		m_hd = m_hd->m_next;
+		m->m_next = NULL;
+		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+		(*ifp->if_input)(ifp, m);
+	}
+	CGEM_LOCK(sc);
+}
+
+/* Find completed transmits and free their mbufs. */
+static void
+cgem_clean_tx(struct cgem_softc *sc)
+{
+	struct mbuf *m;
+	uint32_t ctl;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	/* free up finished transmits. */
+	while (sc->txring_queued > 0 &&
+	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
+		CGEM_TXDESC_USED) != 0) {
+
+		/* Sync cache.  nop? */
+		bus_dmamap_sync(sc->mbuf_dma_tag,
+				sc->txring_m_dmamap[sc->txring_tl_ptr],
+				BUS_DMASYNC_POSTWRITE);
+
+		/* Unload DMA map. */
+		bus_dmamap_unload(sc->mbuf_dma_tag,
+				  sc->txring_m_dmamap[sc->txring_tl_ptr]);
+
+		/* Free up the mbuf. */
+		m = sc->txring_m[sc->txring_tl_ptr];
+		sc->txring_m[sc->txring_tl_ptr] = NULL;
+		m_freem(m);
+
+		/* Check the status. */
+		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
+			/* Serious bus error. log to console. */
+			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
+				   "AHB error, addr=0x%x\n",
+				   sc->txring[sc->txring_tl_ptr].addr);
+		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
+				   CGEM_TXDESC_LATE_COLL)) != 0) {
+			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
+		} else
+			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
+
+		/* If the packet spanned more than one tx descriptor,
+		 * skip descriptors until we find the end so that only
+		 * start-of-frame descriptors are processed.
+		 */
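+		/* (The controller sets USED only in a frame's first
+		 * descriptor, so mark the remaining ones used by hand.) */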
+		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
+			if ((ctl & CGEM_TXDESC_WRAP) != 0)
+				sc->txring_tl_ptr = 0;
+			else
+				sc->txring_tl_ptr++;
+			sc->txring_queued--;
+
+			ctl = sc->txring[sc->txring_tl_ptr].ctl;
+
+			sc->txring[sc->txring_tl_ptr].ctl =
+				ctl | CGEM_TXDESC_USED;
+		}
+
+		/* Next descriptor. */
+		if ((ctl & CGEM_TXDESC_WRAP) != 0)
+			sc->txring_tl_ptr = 0;
+		else
+			sc->txring_tl_ptr++;
+		sc->txring_queued--;
+
+		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	}
+}
+
+/* Start transmits. */
+static void
+cgem_start_locked(struct ifnet *ifp)
+{
+	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
+	struct mbuf *m;
+	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
+	uint32_t ctl;
+	int i, nsegs, wrap, err;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
+		return;
+
+	for (;;) {
+		/* Check that there is room in the descriptor ring. */
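+		/* (Headroom is 2 * TX_MAX_DMA_SEGS: room for this packet's
+		 * segments plus the slots a wrap may leave unused at the
+		 * end of the ring.) */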
+		if (sc->txring_queued >=
+		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
+
+			/* Try to make room. */
+			cgem_clean_tx(sc);
+
+			/* Still no room? */
+			if (sc->txring_queued >=
+			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
+				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+				sc->txfull++;
+				break;
+			}
+		}
+
+		/* Grab next transmit packet. */
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+
+		/* Load DMA map. */
+		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
+				      sc->txring_m_dmamap[sc->txring_hd_ptr],
+				      m, segs, &nsegs, BUS_DMA_NOWAIT);
+		if (err == EFBIG) {
+			/* Too many segments!  defrag and try again. */
+			struct mbuf *m2 = m_defrag(m, M_NOWAIT);
+
+			if (m2 == NULL) {
+				sc->txdefragfails++;
+				m_freem(m);
+				continue;
+			}
+			m = m2;
+			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
+				      sc->txring_m_dmamap[sc->txring_hd_ptr],
+				      m, segs, &nsegs, BUS_DMA_NOWAIT);
+			sc->txdefrags++;
+		}
+		if (err) {
+			/* Give up. */
+			m_freem(m);
+			sc->txdmamapfails++;
+			continue;
+		}
+		sc->txring_m[sc->txring_hd_ptr] = m;
+
+		/* Sync tx buffer with cache. */
+		bus_dmamap_sync(sc->mbuf_dma_tag,
+				sc->txring_m_dmamap[sc->txring_hd_ptr],
+				BUS_DMASYNC_PREWRITE);
+
+		/* Set wrap flag if next packet might run off end of ring. */
+		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
+			CGEM_NUM_TX_DESCS;
+
+		/* Fill in the TX descriptors back to front so that USED
+		 * bit in first descriptor is cleared last.
+		 */
+		for (i = nsegs - 1; i >= 0; i--) {
+			/* Descriptor address. */
+			sc->txring[sc->txring_hd_ptr + i].addr =
+				segs[i].ds_addr;
+
+			/* Descriptor control word. */
+			ctl = segs[i].ds_len;
+			if (i == nsegs - 1) {
+				ctl |= CGEM_TXDESC_LAST_BUF;
+				if (wrap)
+					ctl |= CGEM_TXDESC_WRAP;
+			}
+			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;
+
+			if (i != 0)
+				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
+		}
+
+		if (wrap)
+			sc->txring_hd_ptr = 0;
+		else
+			sc->txring_hd_ptr += nsegs;
+		sc->txring_queued += nsegs;
+
+		/* Kick the transmitter. */
+		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
+		    CGEM_NET_CTRL_START_TX);
+
+		/* If there is a BPF listener, bounce a copy to him. */
+		ETHER_BPF_MTAP(ifp, m);
+	}
+}
+
+static void
+cgem_start(struct ifnet *ifp)
+{
+	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
+
+	CGEM_LOCK(sc);
+	cgem_start_locked(ifp);
+	CGEM_UNLOCK(sc);
+}
+
+static void
+cgem_poll_hw_stats(struct cgem_softc *sc)
+{
+	uint32_t n;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
+	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;
+
+	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
+	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
+	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
+	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
+	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
+	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
+	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
+	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
+	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
+	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
+	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);
+
+	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
+	sc->stats.tx_single_collisn += n;
+	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
+	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
+	sc->stats.tx_multi_collisn += n;
+	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
+	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
+	sc->stats.tx_excsv_collisn += n;
+	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
+	n = RD4(sc, CGEM_LATE_COLL);
+	sc->stats.tx_late_collisn += n;
+	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
+
+	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
+	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);
+
+	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
+	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;
+
+	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
+	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
+	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
+	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
+	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
+	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
+	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
+	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
+	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
+	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
+	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
+	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
+	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
+	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
+	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
+	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
+	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
+	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
+	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
+	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
+	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
+	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
+}
+
+static void
+cgem_tick(void *arg)
+{
+	struct cgem_softc *sc = (struct cgem_softc *)arg;
+	struct mii_data *mii;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	/* Poll the phy. */
+	if (sc->miibus != NULL) {
+		mii = device_get_softc(sc->miibus);
+		mii_tick(mii);
+	}
+
+	/* Poll statistics registers. */
+	cgem_poll_hw_stats(sc);
+
+	/* Check for receiver hang. */
+	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
+		/*
+		 * Reset receiver logic by toggling RX_EN bit.  1usec
+		 * delay is necessary especially when operating at 100mbps
+		 * and 10mbps speeds.
+		 */
+		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
+		    ~CGEM_NET_CTRL_RX_EN);
+		DELAY(1);
+		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
+	}
+	sc->rx_frames_prev = sc->stats.rx_frames;
+
+	/* Next callout in one second. */
+	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
+}
+
+/* Interrupt handler. */
+static void
+cgem_intr(void *arg)
+{
+	struct cgem_softc *sc = (struct cgem_softc *)arg;
+	uint32_t istatus;
+
+	CGEM_LOCK(sc);
+
+	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		CGEM_UNLOCK(sc);
+		return;
+	}
+
+	/* Read interrupt status and immediately clear the bits. */
+	istatus = RD4(sc, CGEM_INTR_STAT);
+	WR4(sc, CGEM_INTR_STAT, istatus);
+
+	/* Packets received. */
+	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
+		cgem_recv(sc);
+
+	/* Free up any completed transmit buffers. */
+	cgem_clean_tx(sc);
+
+	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
+	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
+		device_printf(sc->dev, "cgem_intr: hresp not okay! "
+			      "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
+		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
+	}
+
+	/* Receiver overrun. */
+	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
+		/* Clear status bit. */
+		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
+		sc->rxoverruns++;
+	}
+
+	/* Receiver ran out of bufs. */
+	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
+		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
+		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
+		cgem_fill_rqueue(sc);
+		sc->rxnobufs++;
+	}
+
+	/* Restart transmitter if needed. */
+	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
+		cgem_start_locked(sc->ifp);
+
+	CGEM_UNLOCK(sc);
+}
+
+/* Reset hardware. */
+static void
+cgem_reset(struct cgem_softc *sc)
+{
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	WR4(sc, CGEM_NET_CTRL, 0);
+	WR4(sc, CGEM_NET_CFG, 0);
+	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
+	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
+	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
+	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
+	WR4(sc, CGEM_HASH_BOT, 0);
+	WR4(sc, CGEM_HASH_TOP, 0);
+	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
+	WR4(sc, CGEM_RX_QBAR, 0);
+
+	/* Get management port running even if interface is down. */
+	WR4(sc, CGEM_NET_CFG,
+	    CGEM_NET_CFG_DBUS_WIDTH_32 |
+	    CGEM_NET_CFG_MDC_CLK_DIV_64);
+
+	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
+	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
+}
+
+/* Bring up the hardware. */
+static void
+cgem_config(struct cgem_softc *sc)
+{
+	uint32_t net_cfg;
+	uint32_t dma_cfg;
+	u_char *eaddr = IF_LLADDR(sc->ifp);
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	/* Program Net Config Register. */
+	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
+		CGEM_NET_CFG_MDC_CLK_DIV_64 |
+		CGEM_NET_CFG_FCS_REMOVE |
+		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
+		CGEM_NET_CFG_GIGE_EN |
+		CGEM_NET_CFG_1536RXEN |
+		CGEM_NET_CFG_FULL_DUPLEX |
+		CGEM_NET_CFG_SPEED100;
+
+	/* Enable receive checksum offloading? */
+	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
+		net_cfg |=  CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;
+
+	WR4(sc, CGEM_NET_CFG, net_cfg);
+
+	/* Program DMA Config Register. */
+	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
+		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
+		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
+		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
+		CGEM_DMA_CFG_DISC_WHEN_NO_AHB;
+
+	/* Enable transmit checksum offloading? */
+	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
+		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;
+
+	WR4(sc, CGEM_DMA_CFG, dma_cfg);
+
+	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
+	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
+	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);
+	
+	/* Enable rx and tx. */
+	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
+	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
+
+	/* Set receive address in case it changed. */
+	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
+	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
+	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
+
+	/* Set up interrupts. */
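+	/* (No transmit-complete interrupt is enabled; finished transmits
+	 * are reaped from cgem_intr() and cgem_start_locked() instead.) */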
+	WR4(sc, CGEM_INTR_EN,
+	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
+	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
+	    CGEM_INTR_HRESP_NOT_OK);
+}
+
+/* Turn on interface and load up receive ring with buffers. */
+static void
+cgem_init_locked(struct cgem_softc *sc)
+{
+	struct mii_data *mii;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+		return;
+
+	cgem_config(sc);
+	cgem_fill_rqueue(sc);
+
+	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+	mii = device_get_softc(sc->miibus);
+	mii_mediachg(mii);
+
+	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
+}
+
+static void
+cgem_init(void *arg)
+{
+	struct cgem_softc *sc = (struct cgem_softc *)arg;
+
+	CGEM_LOCK(sc);
+	cgem_init_locked(sc);
+	CGEM_UNLOCK(sc);
+}
+
+/* Turn off interface.  Free up any buffers in transmit or receive queues. */
+static void
+cgem_stop(struct cgem_softc *sc)
+{
+	int i;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	callout_stop(&sc->tick_ch);
+
+	/* Shut down hardware. */
+	cgem_reset(sc);
+
+	/* Clear out transmit queue. */
+	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
+		sc->txring[i].ctl = CGEM_TXDESC_USED;
+		sc->txring[i].addr = 0;
+		if (sc->txring_m[i]) {
+			bus_dmamap_unload(sc->mbuf_dma_tag,
+					  sc->txring_m_dmamap[i]);
+			m_freem(sc->txring_m[i]);
+			sc->txring_m[i] = NULL;
+		}
+	}
+	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;
+
+	sc->txring_hd_ptr = 0;
+	sc->txring_tl_ptr = 0;
+	sc->txring_queued = 0;
+
+	/* Clear out receive queue. */
+	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
+		sc->rxring[i].addr = CGEM_RXDESC_OWN;
+		sc->rxring[i].ctl = 0;
+		if (sc->rxring_m[i]) {
+			/* Unload dmamap. */
+			bus_dmamap_unload(sc->mbuf_dma_tag,
+				  sc->rxring_m_dmamap[i]);
+
+			m_freem(sc->rxring_m[i]);
+			sc->rxring_m[i] = NULL;
+		}
+	}
+	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;
+
+	sc->rxring_hd_ptr = 0;
+	sc->rxring_tl_ptr = 0;
+	sc->rxring_queued = 0;
+
+	/* Force next statchg or linkchg to program net config register. */
+	sc->mii_media_active = 0;
+}
+
+
+static int
+cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct cgem_softc *sc = ifp->if_softc;
+	struct ifreq *ifr = (struct ifreq *)data;
+	struct mii_data *mii;
+	int error = 0, mask;
+
+	switch (cmd) {
+	case SIOCSIFFLAGS:
+		CGEM_LOCK(sc);
+		if ((ifp->if_flags & IFF_UP) != 0) {
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+				if (((ifp->if_flags ^ sc->if_old_flags) &
+				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
+					cgem_rx_filter(sc);
+				}
+			} else {
+				cgem_init_locked(sc);
+			}
+		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			cgem_stop(sc);
+		}
+		sc->if_old_flags = ifp->if_flags;
+		CGEM_UNLOCK(sc);
+		break;
+
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		/* Set up multi-cast filters. */
+		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+			CGEM_LOCK(sc);
+			cgem_rx_filter(sc);
+			CGEM_UNLOCK(sc);
+		}
+		break;
+
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		mii = device_get_softc(sc->miibus);
+		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
+		break;
+
+	case SIOCSIFCAP:
+		CGEM_LOCK(sc);
+		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+
+		if ((mask & IFCAP_TXCSUM) != 0) {
+			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
+				/* Turn on TX checksumming. */
+				ifp->if_capenable |= (IFCAP_TXCSUM |
+						      IFCAP_TXCSUM_IPV6);
+				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;
+
+				WR4(sc, CGEM_DMA_CFG,
+				    RD4(sc, CGEM_DMA_CFG) |
+				     CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
+			} else {
+				/* Turn off TX checksumming. */
+				ifp->if_capenable &= ~(IFCAP_TXCSUM |
+						       IFCAP_TXCSUM_IPV6);
+				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;
+
+				WR4(sc, CGEM_DMA_CFG,
+				    RD4(sc, CGEM_DMA_CFG) &
+				     ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
+			}
+		}
+		if ((mask & IFCAP_RXCSUM) != 0) {
+			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
+				/* Turn on RX checksumming. */
+				ifp->if_capenable |= (IFCAP_RXCSUM |
+						      IFCAP_RXCSUM_IPV6);
+				WR4(sc, CGEM_NET_CFG,
+				    RD4(sc, CGEM_NET_CFG) |
+				     CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
+			} else {
+				/* Turn off RX checksumming. */
+				ifp->if_capenable &= ~(IFCAP_RXCSUM |
+						       IFCAP_RXCSUM_IPV6);
+				WR4(sc, CGEM_NET_CFG,
+				    RD4(sc, CGEM_NET_CFG) &
+				     ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
+			}
+		}
+		if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) == 
+		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
+			ifp->if_capenable |= IFCAP_VLAN_HWCSUM;
+		else
+			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
+
+		CGEM_UNLOCK(sc);
+		break;
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	return (error);
+}
+
+/* MII bus support routines.
+ */
+static void
+cgem_child_detached(device_t dev, device_t child)
+{
+	struct cgem_softc *sc = device_get_softc(dev);
+
+	if (child == sc->miibus)
+		sc->miibus = NULL;
+}
+
+static int
+cgem_ifmedia_upd(struct ifnet *ifp)
+{
+	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
+	struct mii_data *mii;
+	struct mii_softc *miisc;
+	int error = 0;
+
+	mii = device_get_softc(sc->miibus);
+	CGEM_LOCK(sc);
+	if ((ifp->if_flags & IFF_UP) != 0) {
+		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
+			PHY_RESET(miisc);
+		error = mii_mediachg(mii);
+	}
+	CGEM_UNLOCK(sc);
+
+	return (error);
+}
+
+static void
+cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
+	struct mii_data *mii;
+
+	mii = device_get_softc(sc->miibus);
+	CGEM_LOCK(sc);
+	mii_pollstat(mii);
+	ifmr->ifm_active = mii->mii_media_active;
+	ifmr->ifm_status = mii->mii_media_status;
+	CGEM_UNLOCK(sc);
+}
+
+static int
+cgem_miibus_readreg(device_t dev, int phy, int reg)
+{
+	struct cgem_softc *sc = device_get_softc(dev);
+	int tries, val;
+
+	WR4(sc, CGEM_PHY_MAINT,
+	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
+	    CGEM_PHY_MAINT_OP_READ |
+	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
+	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));
+
+	/* Wait for completion. */
+	tries = 0;
+	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
+		DELAY(5);
+		if (++tries > 200) {
+			device_printf(dev, "phy read timeout: %d\n", reg);
+			return (-1);
+		}
+	}
+
+	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;
+
+	if (reg == MII_EXTSR)
+		/*
+		 * MAC does not support half-duplex at gig speeds.
+		 * Let mii(4) exclude the capability.
+		 */
+		val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX);
+
+	return (val);
+}
+
+static int
+cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+	struct cgem_softc *sc = device_get_softc(dev);
+	int tries;
+	
+	WR4(sc, CGEM_PHY_MAINT,
+	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
+	    CGEM_PHY_MAINT_OP_WRITE |
+	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
+	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
+	    (data & CGEM_PHY_MAINT_DATA_MASK));
+
+	/* Wait for completion. */
+	tries = 0;
+	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
+		DELAY(5);
+		if (++tries > 200) {
+			device_printf(dev, "phy write timeout: %d\n", reg);
+			return (-1);
+		}
+	}
+
+	return (0);
+}
+
+static void
+cgem_miibus_statchg(device_t dev)
+{
+	struct cgem_softc *sc  = device_get_softc(dev);
+	struct mii_data *mii = device_get_softc(sc->miibus);
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+	    (IFM_ACTIVE | IFM_AVALID) &&
+	    sc->mii_media_active != mii->mii_media_active)
+		cgem_mediachange(sc, mii);
+}
+
+static void
+cgem_miibus_linkchg(device_t dev)
+{
+	struct cgem_softc *sc  = device_get_softc(dev);
+	struct mii_data *mii = device_get_softc(sc->miibus);
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
+	    (IFM_ACTIVE | IFM_AVALID) &&
+	    sc->mii_media_active != mii->mii_media_active)
+		cgem_mediachange(sc, mii);
+}
+
+/*
+ * Overridable weak symbol cgem_set_ref_clk().  This allows platforms to
+ * provide a function to set the cgem's reference clock.
+ */
+static int __used
+cgem_default_set_ref_clk(int unit, int frequency)
+{
+
+	return 0;
+}
+__weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk);
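+/*
+ * (On FreeBSD/Zynq the SLCR driver supplies the strong definition and
+ * programs the GEM reference-clock divisors; a port that keeps this weak
+ * default simply skips reference-clock switching.)
+ */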
+
+/* Call to set reference clock and network config bits according to media. */
+static void
+cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii)
+{
+	uint32_t net_cfg;
+	int ref_clk_freq;
+
+	CGEM_ASSERT_LOCKED(sc);
+
+	/* Update hardware to reflect media. */
+	net_cfg = RD4(sc, CGEM_NET_CFG);
+	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
+		     CGEM_NET_CFG_FULL_DUPLEX);
+
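+	/* Standard (R)GMII reference clocks: 125 MHz at 1000Mb/s, 25 MHz
+	 * at 100Mb/s, 2.5 MHz at 10Mb/s. */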
+	switch (IFM_SUBTYPE(mii->mii_media_active)) {
+	case IFM_1000_T:
+		net_cfg |= (CGEM_NET_CFG_SPEED100 |
+			    CGEM_NET_CFG_GIGE_EN);
+		ref_clk_freq = 125000000;
+		break;
+	case IFM_100_TX:
+		net_cfg |= CGEM_NET_CFG_SPEED100;
+		ref_clk_freq = 25000000;
+		break;
+	default:
+		ref_clk_freq = 2500000;
+	}
+
+	if ((mii->mii_media_active & IFM_FDX) != 0)
+		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
+
+	WR4(sc, CGEM_NET_CFG, net_cfg);
+
+	/* Set the reference clock if necessary. */
+	if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq))
+		device_printf(sc->dev, "cgem_mediachange: "
+			      "could not set ref clk%d to %d.\n",
+			      sc->ref_clk_num, ref_clk_freq);
+
+	sc->mii_media_active = mii->mii_media_active;
+}
+
+static void
+cgem_add_sysctls(device_t dev)
+{
+	struct cgem_softc *sc = device_get_softc(dev);
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid_list *child;
+	struct sysctl_oid *tree;
+
+	ctx = device_get_sysctl_ctx(dev);
+	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW,
+		       &sc->rxbufs, 0,
+		       "Number receive buffers to provide");
+
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW,
+		       &sc->rxhangwar, 0,
+		       "Enable receive hang work-around");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD,
+			&sc->rxoverruns, 0,
+			"Receive overrun events");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD,
+			&sc->rxnobufs, 0,
+			"Receive buf queue empty events");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD,
+			&sc->rxdmamapfails, 0,
+			"Receive DMA map failures");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD,
+			&sc->txfull, 0,
+			"Transmit ring full events");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD,
+			&sc->txdmamapfails, 0,
+			"Transmit DMA map failures");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD,
+			&sc->txdefrags, 0,
+			"Transmit m_defrag() calls");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD,
+			&sc->txdefragfails, 0,
+			"Transmit m_defrag() failures");
+
+	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
+			       NULL, "GEM statistics");
+	child = SYSCTL_CHILDREN(tree);
+
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD,
+			 &sc->stats.tx_bytes, "Total bytes transmitted");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD,
+			&sc->stats.tx_frames, 0, "Total frames transmitted");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD,
+			&sc->stats.tx_frames_bcast, 0,
+			"Number broadcast frames transmitted");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD,
+			&sc->stats.tx_frames_multi, 0,
+			"Number multicast frames transmitted");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause",
+			CTLFLAG_RD, &sc->stats.tx_frames_pause, 0,
+			"Number pause frames transmitted");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD,
+			&sc->stats.tx_frames_64b, 0,
+			"Number frames transmitted of size 64 bytes or less");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD,
+			&sc->stats.tx_frames_65to127b, 0,
+			"Number frames transmitted of size 65-127 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b",
+			CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0,
+			"Number frames transmitted of size 128-255 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b",
+			CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0,
+			"Number frames transmitted of size 256-511 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b",
+			CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0,
+			"Number frames transmitted of size 512-1023 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b",
+			CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0,
+			"Number frames transmitted of size 1024-1536 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs",
+			CTLFLAG_RD, &sc->stats.tx_under_runs, 0,
+			"Number transmit under-run events");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn",
+			CTLFLAG_RD, &sc->stats.tx_single_collisn, 0,
+			"Number single-collision transmit frames");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn",
+			CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0,
+			"Number multi-collision transmit frames");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn",
+			CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0,
+			"Number excessive collision transmit frames");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn",
+			CTLFLAG_RD, &sc->stats.tx_late_collisn, 0,
+			"Number late-collision transmit frames");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames",
+			CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0,
+			"Number deferred transmit frames");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs",
+			CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0,
+			"Number carrier sense errors on transmit");
+
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD,
+			 &sc->stats.rx_bytes, "Total bytes received");
+
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD,
+			&sc->stats.rx_frames, 0, "Total frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast",
+			CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0,
+			"Number broadcast frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi",
+			CTLFLAG_RD, &sc->stats.rx_frames_multi, 0,
+			"Number multicast frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause",
+			CTLFLAG_RD, &sc->stats.rx_frames_pause, 0,
+			"Number pause frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b",
+			CTLFLAG_RD, &sc->stats.rx_frames_64b, 0,
+			"Number frames received of size 64 bytes or less");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b",
+			CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0,
+			"Number frames received of size 65-127 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b",
+			CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0,
+			"Number frames received of size 128-255 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b",
+			CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0,
+			"Number frames received of size 256-511 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b",
+			CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0,
+			"Number frames received of size 512-1023 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b",
+			CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0,
+			"Number frames received of size 1024-1536 bytes");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize",
+			CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0,
+			"Number undersize frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize",
+			CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0,
+			"Number oversize frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber",
+			CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0,
+			"Number jabber frames received");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs",
+			CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0,
+			"Number frames received with FCS errors");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs",
+			CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0,
+			"Number frames received with length errors");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs",
+			CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0,
+			"Number receive symbol errors");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs",
+			CTLFLAG_RD, &sc->stats.rx_align_errs, 0,
+			"Number receive alignment errors");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs",
+			CTLFLAG_RD, &sc->stats.rx_resource_errs, 0,
+			"Number frames received when no rx buffer available");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs",
+			CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0,
+			"Number frames received but not copied due to "
+			"receive overrun");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs",
+			CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0,
+			"Number frames received with IP header checksum "
+			"errors");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs",
+			CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0,
+			"Number frames received with TCP checksum errors");
+	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs",
+			CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0,
+			"Number frames received with UDP checksum errors");
+}
+
+
+static int
+cgem_probe(device_t dev)
+{
+
+	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
+		return (ENXIO);
+
+	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
+	return (0);
+}
+
+static int
+cgem_attach(device_t dev)
+{
+	struct cgem_softc *sc = device_get_softc(dev);
+	struct ifnet *ifp = NULL;
+	phandle_t node;
+	pcell_t cell;
+	int rid, err;
+	u_char eaddr[ETHER_ADDR_LEN];
+
+	sc->dev = dev;
+	CGEM_LOCK_INIT(sc);
+
+	/* Get reference clock number and base divider from fdt. */
+	node = ofw_bus_get_node(dev);
+	sc->ref_clk_num = 0;
+	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
+		sc->ref_clk_num = fdt32_to_cpu(cell);
+
+	/* Get memory resource. */
+	rid = 0;
+	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+					     RF_ACTIVE);
+	if (sc->mem_res == NULL) {
+		device_printf(dev, "could not allocate memory resources.\n");
+		return (ENOMEM);
+	}
+
+	/* Get IRQ resource. */
+	rid = 0;
+	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+					     RF_ACTIVE);
+	if (sc->irq_res == NULL) {
+		device_printf(dev, "could not allocate interrupt resource.\n");
+		cgem_detach(dev);
+		return (ENOMEM);
+	}
+
+	/* Set up ifnet structure. */
+	ifp = sc->ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "could not allocate ifnet structure\n");
+		cgem_detach(dev);
+		return (ENOMEM);
+	}
+	ifp->if_softc = sc;
+	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_start = cgem_start;
+	ifp->if_ioctl = cgem_ioctl;
+	ifp->if_init = cgem_init;
+	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
+		IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
+	/* Disable hardware checksumming by default. */
+	ifp->if_hwassist = 0;
+	ifp->if_capenable = ifp->if_capabilities &
+		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM);
+	ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS;
+	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+	IFQ_SET_READY(&ifp->if_snd);
+
+	sc->if_old_flags = ifp->if_flags;
+	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
+	sc->rxhangwar = 1;
+
+	/* Reset hardware. */
+	CGEM_LOCK(sc);
+	cgem_reset(sc);
+	CGEM_UNLOCK(sc);
+
+	/* Attach phy to mii bus. */
+	err = mii_attach(dev, &sc->miibus, ifp,
+			 cgem_ifmedia_upd, cgem_ifmedia_sts,
+			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
+	if (err) {
+		device_printf(dev, "attaching PHYs failed\n");
+		cgem_detach(dev);
+		return (err);
+	}
+
+	/* Set up TX and RX descriptor area. */
+	err = cgem_setup_descs(sc);
+	if (err) {
+		device_printf(dev, "could not set up dma mem for descs.\n");
+		cgem_detach(dev);
+		return (ENOMEM);
+	}
+
+	/* Get a MAC address. */
+	cgem_get_mac(sc, eaddr);
+
+	/* Start ticks. */
+	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
+
+	ether_ifattach(ifp, eaddr);
+
+	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
+			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
+	if (err) {
+		device_printf(dev, "could not set interrupt handler.\n");
+		ether_ifdetach(ifp);
+		cgem_detach(dev);
+		return (err);
+	}
+
+	cgem_add_sysctls(dev);
+
+	return (0);
+}
+
+static int
+cgem_detach(device_t dev)
+{
+	struct cgem_softc *sc = device_get_softc(dev);
+	int i;
+
+	if (sc == NULL)
+		return (ENODEV);
+
+	if (device_is_attached(dev)) {
+		CGEM_LOCK(sc);
+		cgem_stop(sc);
+		CGEM_UNLOCK(sc);
+		callout_drain(&sc->tick_ch);
+		sc->ifp->if_flags &= ~IFF_UP;
+		ether_ifdetach(sc->ifp);
+	}
+
+	if (sc->miibus != NULL) {
+		device_delete_child(dev, sc->miibus);
+		sc->miibus = NULL;
+	}
+
+	/* Release resources. */
+	if (sc->mem_res != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY,
+				     rman_get_rid(sc->mem_res), sc->mem_res);
+		sc->mem_res = NULL;
+	}
+	if (sc->irq_res != NULL) {
+		if (sc->intrhand)
+			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
+		bus_release_resource(dev, SYS_RES_IRQ,
+				     rman_get_rid(sc->irq_res), sc->irq_res);
+		sc->irq_res = NULL;
+	}
+
+	/* Release DMA resources. */
+	if (sc->rxring != NULL) {
+		if (sc->rxring_physaddr != 0) {
+			bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map);
+			sc->rxring_physaddr = 0;
+		}
+		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
+				sc->rxring_dma_map);
+		sc->rxring = NULL;
+		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
+			if (sc->rxring_m_dmamap[i] != NULL) {
+				bus_dmamap_destroy(sc->mbuf_dma_tag,
+						   sc->rxring_m_dmamap[i]);
+				sc->rxring_m_dmamap[i] = NULL;
+			}
+	}
+	if (sc->txring != NULL) {
+		if (sc->txring_physaddr != 0) {
+			bus_dmamap_unload(sc->desc_dma_tag, sc->txring_dma_map);
+			sc->txring_physaddr = 0;
+		}
+		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
+				sc->txring_dma_map);
+		sc->txring = NULL;
+		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
+			if (sc->txring_m_dmamap[i] != NULL) {
+				bus_dmamap_destroy(sc->mbuf_dma_tag,
+						   sc->txring_m_dmamap[i]);
+				sc->txring_m_dmamap[i] = NULL;
+			}
+	}
+	if (sc->desc_dma_tag != NULL) {
+		bus_dma_tag_destroy(sc->desc_dma_tag);
+		sc->desc_dma_tag = NULL;
+	}
+	if (sc->mbuf_dma_tag != NULL) {
+		bus_dma_tag_destroy(sc->mbuf_dma_tag);
+		sc->mbuf_dma_tag = NULL;
+	}
+
+	bus_generic_detach(dev);
+
+	CGEM_LOCK_DESTROY(sc);
+
+	return (0);
+}
+
+static device_method_t cgem_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		cgem_probe),
+	DEVMETHOD(device_attach,	cgem_attach),
+	DEVMETHOD(device_detach,	cgem_detach),
+
+	/* Bus interface */
+	DEVMETHOD(bus_child_detached,	cgem_child_detached),
+
+	/* MII interface */
+	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
+	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),
+	DEVMETHOD(miibus_statchg,	cgem_miibus_statchg),
+	DEVMETHOD(miibus_linkchg,	cgem_miibus_linkchg),
+
+	DEVMETHOD_END
+};
+
+static driver_t cgem_driver = {
+	"cgem",
+	cgem_methods,
+	sizeof(struct cgem_softc),
+};
+
+DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
+DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
+MODULE_DEPEND(cgem, miibus, 1, 1, 1);
+MODULE_DEPEND(cgem, ether, 1, 1, 1);
diff --git a/freebsd/sys/dev/cadence/if_cgem_hw.h b/freebsd/sys/dev/cadence/if_cgem_hw.h
new file mode 100644
index 0000000..30fb6dd
--- /dev/null
+++ b/freebsd/sys/dev/cadence/if_cgem_hw.h
@@ -0,0 +1,382 @@
+/*-
+ * Copyright (c) 2012-2013 Thomas Skibo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Hardware and register defines for Cadence GEM Gigabit Ethernet
+ * controller such as the one used in Zynq-7000 SoC.
+ *
+ * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
+ * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
+ * and register definitions are in appendix B.18.
+ */
+
+#ifndef _IF_CGEM_HW_H_
+#define _IF_CGEM_HW_H_
+
+/* Cadence GEM hardware register definitions. */
+#define CGEM_NET_CTRL			0x000	/* Network Control */
+#define   CGEM_NET_CTRL_FLUSH_DPRAM_PKT		(1<<18)
+#define   CGEM_NET_CTRL_TX_PFC_PRI_PAUSE_FRAME	(1<<17)
+#define   CGEM_NET_CTRL_EN_PFC_PRI_PAUSE_RX	(1<<16)
+#define   CGEM_NET_CTRL_STORE_RX_TSTAMP		(1<<15)
+#define   CGEM_NET_CTRL_TX_ZEROQ_PAUSE_FRAME	(1<<12)
+#define   CGEM_NET_CTRL_TX_PAUSE_FRAME		(1<<11)
+#define   CGEM_NET_CTRL_TX_HALT			(1<<10)
+#define   CGEM_NET_CTRL_START_TX		(1<<9)
+#define   CGEM_NET_CTRL_BACK_PRESSURE		(1<<8)
+#define   CGEM_NET_CTRL_WREN_STAT_REGS		(1<<7)
+#define   CGEM_NET_CTRL_INCR_STAT_REGS		(1<<6)
+#define   CGEM_NET_CTRL_CLR_STAT_REGS		(1<<5)
+#define   CGEM_NET_CTRL_MGMT_PORT_EN		(1<<4)
+#define   CGEM_NET_CTRL_TX_EN			(1<<3)
+#define   CGEM_NET_CTRL_RX_EN			(1<<2)
+#define   CGEM_NET_CTRL_LOOP_LOCAL		(1<<1)
+
+#define CGEM_NET_CFG			0x004	/* Network Configuration */
+#define   CGEM_NET_CFG_UNIDIR_EN		(1<<31)
+#define   CGEM_NET_CFG_IGNORE_IPG_RX_ER		(1<<30)
+#define   CGEM_NET_CFG_RX_BAD_PREAMBLE		(1<<29)
+#define   CGEM_NET_CFG_IPG_STRETCH_EN		(1<<28)
+#define   CGEM_NET_CFG_SGMII_EN			(1<<27)
+#define   CGEM_NET_CFG_IGNORE_RX_FCS		(1<<26)
+#define   CGEM_NET_CFG_RX_HD_WHILE_TX		(1<<25)
+#define   CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN	(1<<24)
+#define   CGEM_NET_CFG_DIS_CP_PAUSE_FRAME	(1<<23)
+#define   CGEM_NET_CFG_DBUS_WIDTH_32		(0<<21)
+#define   CGEM_NET_CFG_DBUS_WIDTH_64		(1<<21)
+#define   CGEM_NET_CFG_DBUS_WIDTH_128		(2<<21)
+#define   CGEM_NET_CFG_DBUS_WIDTH_MASK		(3<<21)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_8		(0<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_16		(1<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_32		(2<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_48		(3<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_64		(4<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_96		(5<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_128		(6<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_224		(7<<18)
+#define   CGEM_NET_CFG_MDC_CLK_DIV_MASK		(7<<18)
+#define   CGEM_NET_CFG_FCS_REMOVE		(1<<17)
+#define   CGEM_NET_CFG_LEN_ERR_FRAME_DISC	(1<<16)
+#define   CGEM_NET_CFG_RX_BUF_OFFSET_SHFT	14
+#define   CGEM_NET_CFG_RX_BUF_OFFSET_MASK	(3<<14)
+#define   CGEM_NET_CFG_RX_BUF_OFFSET(n)		((n)<<14)
+#define   CGEM_NET_CFG_PAUSE_EN			(1<<13)
+#define   CGEM_NET_CFG_RETRY_TEST		(1<<12)
+#define   CGEM_NET_CFG_PCS_SEL			(1<<11)
+#define   CGEM_NET_CFG_GIGE_EN			(1<<10)
+#define   CGEM_NET_CFG_EXT_ADDR_MATCH_EN	(1<<9)
+#define   CGEM_NET_CFG_1536RXEN			(1<<8)
+#define   CGEM_NET_CFG_UNI_HASH_EN		(1<<7)
+#define   CGEM_NET_CFG_MULTI_HASH_EN		(1<<6)
+#define   CGEM_NET_CFG_NO_BCAST			(1<<5)
+#define   CGEM_NET_CFG_COPY_ALL			(1<<4)
+#define   CGEM_NET_CFG_DISC_NON_VLAN		(1<<2)
+#define   CGEM_NET_CFG_FULL_DUPLEX		(1<<1)
+#define   CGEM_NET_CFG_SPEED100			(1<<0)
+
+#define CGEM_NET_STAT			0x008	/* Network Status */
+#define   CGEM_NET_STAT_PFC_PRI_PAUSE_NEG	(1<<6)
+#define   CGEM_NET_STAT_PCS_AUTONEG_PAUSE_TX_RES (1<<5)
+#define   CGEM_NET_STAT_PCS_AUTONEG_PAUSE_RX_RES (1<<4)
+#define   CGEM_NET_STAT_PCS_AUTONEG_DUP_RES	(1<<3)
+#define   CGEM_NET_STAT_PHY_MGMT_IDLE		(1<<2)
+#define   CGEM_NET_STAT_MDIO_IN_PIN_STATUS	(1<<1)
+#define   CGEM_NET_STAT_PCS_LINK_STATE		(1<<0)
+
+#define CGEM_USER_IO			0x00C	/* User I/O */
+
+#define CGEM_DMA_CFG			0x010	/* DMA Config */
+#define   CGEM_DMA_CFG_DISC_WHEN_NO_AHB		(1<<24)
+#define   CGEM_DMA_CFG_RX_BUF_SIZE_SHIFT	16
+#define   CGEM_DMA_CFG_RX_BUF_SIZE_MASK		(0xff<<16)
+#define   CGEM_DMA_CFG_RX_BUF_SIZE(sz)		((((sz) + 63) / 64) << 16)
+#define   CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN	(1<<11)
+#define   CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL	(1<<10)
+#define   CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_1K	(0<<8)
+#define   CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_2K	(1<<8)
+#define   CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_4K	(2<<8)
+#define   CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K	(3<<8)
+#define   CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_MASK	(3<<8)
+#define   CGEM_DMA_CFG_AHB_ENDIAN_SWAP_PKT_EN	(1<<7)
+#define   CGEM_DMA_CFG_AHB_ENDIAN_SWAP_MGMT_EN	(1<<6)
+#define   CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_1	(1<<0)
+#define   CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_4	(4<<0)
+#define   CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_8	(8<<0)
+#define   CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16	(16<<0)
+#define   CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_MASK	(0x1f<<0)
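+
+/*
+ * Editor's note (illustrative): CGEM_DMA_CFG_RX_BUF_SIZE() rounds the
+ * receive buffer size up to the hardware's 64-byte units.  For example,
+ * CGEM_DMA_CFG_RX_BUF_SIZE(1536) encodes (1536 + 63) / 64 = 24, i.e.
+ * 24 * 64 = 1536-byte buffers.
+ */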
+
+#define CGEM_TX_STAT			0x014	/* Transmit Status */
+#define   CGEM_TX_STAT_HRESP_NOT_OK		(1<<8)
+#define   CGEM_TX_STAT_LATE_COLL		(1<<7)
+#define   CGEM_TX_STAT_UNDERRUN			(1<<6)
+#define   CGEM_TX_STAT_COMPLETE			(1<<5)
+#define   CGEM_TX_STAT_CORRUPT_AHB_ERR		(1<<4)
+#define   CGEM_TX_STAT_GO			(1<<3)
+#define   CGEM_TX_STAT_RETRY_LIMIT_EXC		(1<<2)
+#define   CGEM_TX_STAT_COLLISION		(1<<1)
+#define   CGEM_TX_STAT_USED_BIT_READ		(1<<0)
+#define   CGEM_TX_STAT_ALL			0x1ff
+
+#define CGEM_RX_QBAR			0x018	/* Receive Buf Q Base Addr */
+#define CGEM_TX_QBAR			0x01C	/* Transmit Buf Q Base Addr */
+
+#define CGEM_RX_STAT			0x020	/* Receive Status */
+#define   CGEM_RX_STAT_HRESP_NOT_OK		(1<<3)
+#define   CGEM_RX_STAT_OVERRUN			(1<<2)
+#define   CGEM_RX_STAT_FRAME_RECD		(1<<1)
+#define   CGEM_RX_STAT_BUF_NOT_AVAIL		(1<<0)
+#define   CGEM_RX_STAT_ALL			0xf
+
+#define CGEM_INTR_STAT			0x024	/* Interrupt Status */
+#define CGEM_INTR_EN			0x028	/* Interrupt Enable */
+#define CGEM_INTR_DIS			0x02C	/* Interrupt Disable */
+#define CGEM_INTR_MASK			0x030	/* Interrupt Mask */
+#define   CGEM_INTR_TSU_SEC_INCR		(1<<26)
+#define   CGEM_INTR_PDELAY_RESP_TX		(1<<25)
+#define   CGEM_INTR_PDELAY_REQ_TX		(1<<24)
+#define   CGEM_INTR_PDELAY_RESP_RX		(1<<23)
+#define   CGEM_INTR_PDELAY_REQ_RX		(1<<22)
+#define   CGEM_INTR_SYNX_TX			(1<<21)
+#define   CGEM_INTR_DELAY_REQ_TX		(1<<20)
+#define   CGEM_INTR_SYNC_RX			(1<<19)
+#define   CGEM_INTR_DELAY_REQ_RX		(1<<18)
+#define   CGEM_INTR_PARTNER_PG_RX		(1<<17)
+#define   CGEM_INTR_AUTONEG_COMPL		(1<<16)
+#define   CGEM_INTR_EXT_INTR			(1<<15)
+#define   CGEM_INTR_PAUSE_TX			(1<<14)
+#define   CGEM_INTR_PAUSE_ZERO			(1<<13)
+#define   CGEM_INTR_PAUSE_NONZEROQ_RX		(1<<12)
+#define   CGEM_INTR_HRESP_NOT_OK		(1<<11)
+#define   CGEM_INTR_RX_OVERRUN			(1<<10)
+#define   CGEM_INTR_LINK_CHNG			(1<<9)
+#define   CGEM_INTR_TX_COMPLETE			(1<<7)
+#define   CGEM_INTR_TX_CORRUPT_AHB_ERR		(1<<6)
+#define   CGEM_INTR_RETRY_EX_LATE_COLLISION	(1<<5)
+#define   CGEM_INTR_TX_USED_READ		(1<<3)
+#define   CGEM_INTR_RX_USED_READ		(1<<2)
+#define   CGEM_INTR_RX_COMPLETE			(1<<1)
+#define   CGEM_INTR_MGMT_SENT			(1<<0)
+#define   CGEM_INTR_ALL				0x7FFFEFF
+
+#define CGEM_PHY_MAINT			0x034	/* PHY Maintenance */
+#define   CGEM_PHY_MAINT_CLAUSE_22		(1<<30)
+#define   CGEM_PHY_MAINT_OP_SHIFT		28
+#define   CGEM_PHY_MAINT_OP_MASK		(3<<28)
+#define   CGEM_PHY_MAINT_OP_READ		(2<<28)
+#define   CGEM_PHY_MAINT_OP_WRITE		(1<<28)
+#define   CGEM_PHY_MAINT_PHY_ADDR_SHIFT		23
+#define   CGEM_PHY_MAINT_PHY_ADDR_MASK		(0x1f<<23)
+#define   CGEM_PHY_MAINT_REG_ADDR_SHIFT		18
+#define   CGEM_PHY_MAINT_REG_ADDR_MASK		(0x1f<<18)
+#define   CGEM_PHY_MAINT_MUST_10		(2<<16)
+#define   CGEM_PHY_MAINT_DATA_MASK		0xffff
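+
+/*
+ * Editor's sketch (phy and reg are placeholder variables; this is
+ * roughly what the driver's cgem_miibus_readreg() does, not a verbatim
+ * excerpt): a Clause-22 MDIO read is started by composing
+ *
+ *	uint32_t val = CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_OP_READ |
+ *	    CGEM_PHY_MAINT_MUST_10 |
+ *	    ((phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) &
+ *	    CGEM_PHY_MAINT_PHY_ADDR_MASK) |
+ *	    ((reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) &
+ *	    CGEM_PHY_MAINT_REG_ADDR_MASK);
+ *
+ * writing it to CGEM_PHY_MAINT, and polling CGEM_NET_STAT until
+ * CGEM_NET_STAT_PHY_MGMT_IDLE is set; the 16-bit result is then in the
+ * CGEM_PHY_MAINT_DATA_MASK field of the same register.
+ */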
+
+#define CGEM_RX_PAUSEQ			0x038	/* Received Pause Quantum */
+#define CGEM_TX_PAUSEQ			0x03C	/* Transmit Pause Quantum */
+
+#define CGEM_HASH_BOT			0x080	/* Hash Reg Bottom [31:0] */
+#define CGEM_HASH_TOP			0x084	/* Hash Reg Top [63:32] */
+#define CGEM_SPEC_ADDR_LOW(n)		(0x088+(n)*8)	/* Specific Addr low */
+#define CGEM_SPEC_ADDR_HI(n)		(0x08C+(n)*8)	/* Specific Addr hi */
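+
+/*
+ * Editor's sketch (assumes a uint8_t ea[6] MAC address and the driver's
+ * WR4() register-write accessor): specific address filter 0 is loaded
+ * low word first,
+ *
+ *	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (ea[3] << 24) | (ea[2] << 16) |
+ *	    (ea[1] << 8) | ea[0]);
+ *	WR4(sc, CGEM_SPEC_ADDR_HI(0), (ea[5] << 8) | ea[4]);
+ *
+ * because, per the GEM documentation, a write to the low register
+ * disables the match until the high register is written.
+ */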
+
+#define CGEM_TYPE_ID_MATCH1		0x0A8	/* Type ID Match 1 */
+#define   CGEM_TYPE_ID_MATCH_COPY_EN		(1<<31)
+#define CGEM_TYPE_ID_MATCH2		0x0AC	/* Type ID Match 2 */
+#define CGEM_TYPE_ID_MATCH3		0x0B0	/* Type ID Match 3 */
+#define CGEM_TYPE_ID_MATCH4		0x0B4	/* Type ID Match 4 */
+
+#define CGEM_WAKE_ON_LAN		0x0B8	/* Wake on LAN Register */
+#define   CGEM_WOL_MULTI_HASH_EN		(1<<19)
+#define   CGEM_WOL_SPEC_ADDR1_EN		(1<<18)
+#define   CGEM_WOL_ARP_REQ_EN			(1<<17)
+#define   CGEM_WOL_MAGIC_PKT_EN			(1<<16)
+#define   CGEM_WOL_ARP_REQ_IP_ADDR_MASK		0xffff
+
+#define CGEM_IPG_STRETCH		0x0BC	/* IPG Stretch Register; offset per Zynq TRM */
+
+#define CGEM_STACKED_VLAN		0x0C0	/* Stacked VLAN Register */
+#define   CGEM_STACKED_VLAN_EN			(1<<31)
+
+#define CGEM_TX_PFC_PAUSE		0x0C4	/* Transmit PFC Pause Reg */
+#define   CGEM_TX_PFC_PAUSEQ_SEL_SHIFT		8
+#define   CGEM_TX_PFC_PAUSEQ_SEL_MASK		(0xff<<8)
+#define   CGEM_TX_PFC_PAUSE_PRI_EN_VEC_VAL_MASK 0xff
+
+#define CGEM_SPEC_ADDR1_MASK_BOT	0x0C8	/* Specific Addr Mask1 [31:0]*/
+#define CGEM_SPEC_ADDR1_MASK_TOP	0x0CC	/* Specific Addr Mask1[47:32]*/
+#define CGEM_MODULE_ID			0x0FC	/* Module ID */
+#define CGEM_OCTETS_TX_BOT		0x100	/* Octets xmitted [31:0] */
+#define CGEM_OCTETS_TX_TOP		0x104	/* Octets xmitted [47:32] */
+#define CGEM_FRAMES_TX			0x108	/* Frames xmitted */
+#define CGEM_BCAST_FRAMES_TX		0x10C	/* Broadcast Frames xmitted */
+#define CGEM_MULTI_FRAMES_TX		0x110	/* Multicast Frames xmitted */
+#define CGEM_PAUSE_FRAMES_TX		0x114	/* Pause Frames xmitted */
+#define CGEM_FRAMES_64B_TX		0x118	/* 64-Byte Frames xmitted */
+#define CGEM_FRAMES_65_127B_TX		0x11C	/* 65-127 Byte Frames xmitted*/
+#define CGEM_FRAMES_128_255B_TX		0x120	/* 128-255 Byte Frames xmit */
+#define CGEM_FRAMES_256_511B_TX		0x124	/* 256-511 Byte Frames xmit */
+#define CGEM_FRAMES_512_1023B_TX	0x128	/* 512-1023 Byte frames xmit */
+#define CGEM_FRAMES_1024_1518B_TX	0x12C	/* 1024-1518 Byte frames xmit*/
+#define CGEM_TX_UNDERRUNS		0x134	/* Transmit Under-runs */
+#define CGEM_SINGLE_COLL_FRAMES		0x138	/* Single-Collision Frames */
+#define CGEM_MULTI_COLL_FRAMES		0x13C	/* Multi-Collision Frames */
+#define CGEM_EXCESSIVE_COLL_FRAMES	0x140	/* Excessive Collision Frames*/
+#define CGEM_LATE_COLL			0x144	/* Late Collisions */
+#define CGEM_DEFERRED_TX_FRAMES		0x148	/* Deferred Transmit Frames */
+#define CGEM_CARRIER_SENSE_ERRS		0x14C	/* Carrier Sense Errors */
+#define CGEM_OCTETS_RX_BOT		0x150	/* Octets Received [31:0] */
+#define CGEM_OCTETS_RX_TOP		0x154	/* Octets Received [47:32] */
+#define CGEM_FRAMES_RX			0x158	/* Frames Received */
+#define CGEM_BCAST_FRAMES_RX		0x15C	/* Broadcast Frames Received */
+#define CGEM_MULTI_FRAMES_RX		0x160	/* Multicast Frames Received */
+#define CGEM_PAUSE_FRAMES_RX		0x164	/* Pause Frames Received */
+#define CGEM_FRAMES_64B_RX		0x168	/* 64-Byte Frames Received */
+#define CGEM_FRAMES_65_127B_RX		0x16C	/* 65-127 Byte Frames Rx'd */
+#define CGEM_FRAMES_128_255B_RX		0x170	/* 128-255 Byte Frames Rx'd */
+#define CGEM_FRAMES_256_511B_RX		0x174	/* 256-511 Byte Frames Rx'd */
+#define CGEM_FRAMES_512_1023B_RX	0x178	/* 512-1023 Byte Frames Rx'd */
+#define CGEM_FRAMES_1024_1518B_RX	0x17C	/* 1024-1518 Byte Frames Rx'd*/
+#define CGEM_UNDERSZ_RX			0x184	/* Undersize Frames Rx'd */
+#define CGEM_OVERSZ_RX			0x188	/* Oversize Frames Rx'd */
+#define CGEM_JABBERS_RX			0x18C	/* Jabbers received */
+#define CGEM_FCS_ERRS			0x190	/* Frame Check Sequence Errs */
+#define CGEM_LENGTH_FIELD_ERRS		0x194	/* Length Field Frame Errs */
+#define CGEM_RX_SYMBOL_ERRS		0x198	/* Receive Symbol Errs */
+#define CGEM_ALIGN_ERRS 		0x19C	/* Alignment Errors */
+#define CGEM_RX_RESOURCE_ERRS		0x1A0	/* Receive Resource Errors */
+#define CGEM_RX_OVERRUN_ERRS		0x1A4	/* Receive Overrun Errors */
+#define CGEM_IP_HDR_CKSUM_ERRS		0x1A8	/* IP Hdr Checksum Errors */
+#define CGEM_TCP_CKSUM_ERRS		0x1AC	/* TCP Checksum Errors */
+#define CGEM_UDP_CKSUM_ERRS		0x1B0	/* UDP Checksum Errors */
+#define CGEM_TIMER_STROBE_S		0x1C8	/* 1588 timer sync strobe s */
+#define CGEM_TIMER_STROBE_NS		0x1CC	/* timer sync strobe ns */
+#define CGEM_TIMER_S			0x1D0	/* 1588 timer seconds */
+#define CGEM_TIMER_NS			0x1D4	/* 1588 timer ns */
+#define CGEM_ADJUST			0x1D8	/* 1588 timer adjust */
+#define CGEM_INCR			0x1DC	/* 1588 timer increment */
+#define CGEM_PTP_TX_S			0x1E0	/* PTP Event Frame xmit secs */
+#define CGEM_PTP_TX_NS			0x1E4	/* PTP Event Frame xmit ns */
+#define CGEM_PTP_RX_S			0x1E8	/* PTP Event Frame rcv'd s */
+#define CGEM_PTP_RX_NS			0x1EC	/* PTP Event Frame rcv'd ns */
+#define CGEM_PTP_PEER_TX_S		0x1F0	/* PTP Peer Event xmit s */
+#define CGEM_PTP_PEER_TX_NS		0x1F4	/* PTP Peer Event xmit ns */
+#define CGEM_PTP_PEER_RX_S		0x1F8	/* PTP Peer Event rcv'd s */
+#define CGEM_PTP_PEER_RX_NS		0x1FC	/* PTP Peer Event rcv'd ns */
+
+#define CGEM_DESIGN_CFG2		0x284	/* Design Configuration 2 */
+#define   CGEM_DESIGN_CFG2_TX_PBUF_ADDR_SHIFT	26
+#define   CGEM_DESIGN_CFG2_TX_PBUF_ADDR_MASK	(0xf<<26)
+#define   CGEM_DESIGN_CFG2_RX_PBUF_ADDR_SHIFT	22
+#define   CGEM_DESIGN_CFG2_RX_PBUF_ADDR_MASK	(0xf<<22)
+#define   CGEM_DESIGN_CFG2_TX_PKT_BUF		(1<<21)
+#define   CGEM_DESIGN_CFG2_RX_PKT_BUF		(1<<20)
+#define   CGEM_DESIGN_CFG2_HPROT_VAL_SHIFT	16
+#define   CGEM_DESIGN_CFG2_HPROT_VAL_MASK	(0xf<<16)
+#define   CGEM_DESIGN_CFG2_JUMBO_MAX_LEN_MASK	0xffff
+
+#define CGEM_DESIGN_CFG3		0x288	/* Design Configuration 3 */
+#define   CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_MASK (0xffff<<16)
+#define   CGEM_DESIGN_CFG3_RX_BASE2_FIFO_SZ_SHIFT 16
+#define   CGEM_DESIGN_CFG3_RX_FIFO_SIZE_MASK	0xffff
+
+#define CGEM_DESIGN_CFG4		0x28C	/* Design Configuration 4 */
+#define   CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_SHIFT 16
+#define   CGEM_DESIGN_CFG4_TX_BASE2_FIFO_SZ_MASK	(0xffff<<16)
+#define   CGEM_DESIGN_CFG4_TX_FIFO_SIZE_MASK	0xffff
+
+#define CGEM_DESIGN_CFG5		0x290	/* Design Configuration 5 */
+#define   CGEM_DESIGN_CFG5_TSU_CLK		(1<<28)
+#define   CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_SHIFT 20
+#define   CGEM_DESIGN_CFG5_RX_BUF_LEN_DEF_MASK	(0xff<<20)
+#define   CGEM_DESIGN_CFG5_TX_PBUF_SIZE_DEF	(1<<19)
+#define   CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_SHIFT 17
+#define   CGEM_DESIGN_CFG5_RX_PBUF_SIZE_DEF_MASK (3<<17)
+#define   CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_SHIFT 15
+#define   CGEM_DESIGN_CFG5_ENDIAN_SWAP_DEF_MASK (3<<15)
+#define   CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_SHIFT	12
+#define   CGEM_DESIGN_CFG5_MDC_CLOCK_DIV_MASK	(7<<12)
+#define   CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_SHIFT	10
+#define   CGEM_DESIGN_CFG5_DMA_BUS_WIDTH_MASK	(3<<10)
+#define   CGEM_DESIGN_CFG5_PHY_IDENT		(1<<9)
+#define   CGEM_DESIGN_CFG5_TSU			(1<<8)
+#define   CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_SHIFT 4
+#define   CGEM_DESIGN_CFG5_TX_FIFO_CNT_WIDTH_MASK (0xf<<4)
+#define   CGEM_DESIGN_CFG5_RX_FIFO_CNT_WIDTH_MASK 0xf
+
+/* Transmit Descriptors */
+struct cgem_tx_desc {
+	uint32_t	addr;
+	uint32_t	ctl;
+#define CGEM_TXDESC_USED			(1<<31) /* done transmitting */
+#define CGEM_TXDESC_WRAP			(1<<30)	/* end of descr ring */
+#define CGEM_TXDESC_RETRY_ERR			(1<<29)
+#define CGEM_TXDESC_AHB_ERR			(1<<27)
+#define CGEM_TXDESC_LATE_COLL			(1<<26)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_MASK		(7<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_VLAN_HDR_ERR (1<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_SNAP_HDR_ERR (2<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_IP_HDR_ERR	(3<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_UNKNOWN_TYPE (4<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_UNSUPP_FRAG	(5<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_NOT_TCPUDP	(6<<20)
+#define CGEM_TXDESC_CKSUM_GEN_STAT_SHORT_PKT	(7<<20)
+#define CGEM_TXDESC_NO_CRC_APPENDED		(1<<16)
+#define CGEM_TXDESC_LAST_BUF			(1<<15)	/* last buf in frame */
+#define CGEM_TXDESC_LENGTH_MASK		0x3fff
+};
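+
+/*
+ * Editor's sketch (paddr, len and last_in_ring are hypothetical
+ * placeholders): queuing a single-buffer frame fills a descriptor
+ * roughly as
+ *
+ *	desc->addr = paddr;
+ *	desc->ctl = (len & CGEM_TXDESC_LENGTH_MASK) |
+ *	    CGEM_TXDESC_LAST_BUF | (last_in_ring ? CGEM_TXDESC_WRAP : 0);
+ *
+ * with CGEM_TXDESC_USED left clear; the controller sets it once it has
+ * transmitted the buffer.
+ */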
+
+struct cgem_rx_desc {
+	uint32_t	addr;
+#define CGEM_RXDESC_WRAP			(1<<1)	/* goes in addr! */
+#define CGEM_RXDESC_OWN				(1<<0)	/* buf filled */
+	uint32_t	ctl;
+#define CGEM_RXDESC_BCAST			(1<<31)	/* all 1's broadcast */
+#define CGEM_RXDESC_MULTI_MATCH			(1<<30)	/* multicast match */
+#define CGEM_RXDESC_UNICAST_MATCH		(1<<29)
+#define CGEM_RXDESC_EXTERNAL_MATCH		(1<<28) /* ext addr match */
+#define CGEM_RXDESC_SPEC_MATCH_SHIFT		25
+#define CGEM_RXDESC_SPEC_MATCH_MASK		(3<<25)
+#define CGEM_RXDESC_TYPE_ID_MATCH_SHIFT		22
+#define CGEM_RXDESC_TYPE_ID_MATCH_MASK		(3<<22)
+#define CGEM_RXDESC_CKSUM_STAT_MASK		(3<<22)	/* same field above */
+#define CGEM_RXDESC_CKSUM_STAT_NONE		(0<<22)
+#define CGEM_RXDESC_CKSUM_STAT_IP_GOOD		(1<<22)
+#define CGEM_RXDESC_CKSUM_STAT_TCP_GOOD		(2<<22) /* and ip good */
+#define CGEM_RXDESC_CKSUM_STAT_UDP_GOOD		(3<<22) /* and ip good */
+#define CGEM_RXDESC_VLAN_DETECTED		(1<<21)
+#define CGEM_RXDESC_PRIO_DETECTED		(1<<20)
+#define CGEM_RXDESC_VLAN_PRIO_SHIFT		17
+#define CGEM_RXDESC_VLAN_PRIO_MASK		(7<<17)
+#define CGEM_RXDESC_CFI				(1<<16)
+#define CGEM_RXDESC_EOF				(1<<15)	/* end of frame */
+#define CGEM_RXDESC_SOF				(1<<14) /* start of frame */
+#define CGEM_RXDESC_BAD_FCS			(1<<13)
+#define CGEM_RXDESC_LENGTH_MASK			0x1fff
+};
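+
+/*
+ * Editor's sketch: the receive path tests the OWN bit in the address
+ * word and takes the frame length from ctl, roughly
+ *
+ *	if ((desc->addr & CGEM_RXDESC_OWN) != 0) {
+ *		int len = desc->ctl & CGEM_RXDESC_LENGTH_MASK;
+ *		... hand the frame up, then recycle the descriptor by
+ *		clearing CGEM_RXDESC_OWN in desc->addr ...
+ *	}
+ *
+ * Because CGEM_RXDESC_WRAP and CGEM_RXDESC_OWN occupy the low bits of
+ * addr, receive buffer addresses must be at least 4-byte aligned.
+ */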
+
+#endif /* _IF_CGEM_HW_H_ */


