[PATCH] pc386: Add virtio network driver

Jinhyun jinhyun at konkuk.ac.kr
Mon May 30 11:08:29 UTC 2016


Hi,

This is a resubmission of the virtio network driver.
We were delayed again while preparing an ifdef'ed version of the driver.
The RTEMS-specific parts of the sources are now guarded by RTEMS_VIRTIO_NET,
and we added a HOWTOUSE file that explains how to use the virtio network
driver and records the version of FreeBSD we ported from.
Let us know if there is any problem.

I'm sorry to send this text as a reply; I missed it when I sent the
original mail.

Thanks,
Jin-Hyun

-----Original Message-----
From: Jinhyun [mailto:jinhyun at konkuk.ac.kr] 
Sent: Monday, May 30, 2016 4:35 PM
To: devel at rtems.org
Cc: jinh at konkuk.ac.kr
Subject: [PATCH] pc386: Add virtio network driver



From 1eebb8a11846e1b760f0362c77aac19d5a294bb8 Mon Sep 17 00:00:00 2001
From: Jin-Hyun <jinhyun at konkuk.ac.kr>
Date: Mon, 30 May 2016 15:40:43 +0900
Subject: [PATCH] virtio

---
 c/src/lib/libbsp/i386/pc386/Makefile.am          |   18 +
 c/src/lib/libbsp/i386/pc386/virtio/HOWTOUSE      |   62 +
 c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c    | 4151 ++++++++++++++++++++++
 c/src/lib/libbsp/i386/pc386/virtio/if_vtnetvar.h |  391 ++
 c/src/lib/libbsp/i386/pc386/virtio/virtio.c      |  291 ++
 c/src/lib/libbsp/i386/pc386/virtio/virtio.h      |  255 ++
 c/src/lib/libbsp/i386/pc386/virtio/virtio_net.h  |  218 ++
 c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c  | 1526 ++++++++
 c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h  |  166 +
 c/src/lib/libbsp/i386/pc386/virtio/virtio_ring.h |  180 +
 c/src/lib/libbsp/i386/pc386/virtio/virtqueue.c   |  963 +++++
 c/src/lib/libbsp/i386/pc386/virtio/virtqueue.h   |  127 +
 12 files changed, 8348 insertions(+)
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/HOWTOUSE
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/if_vtnetvar.h
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtio.c
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtio.h
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtio_net.h
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtio_ring.h
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtqueue.c
 create mode 100644 c/src/lib/libbsp/i386/pc386/virtio/virtqueue.h

diff --git a/c/src/lib/libbsp/i386/pc386/Makefile.am b/c/src/lib/libbsp/i386/pc386/Makefile.am
index a8c9ec1..d4ef7a2 100644
--- a/c/src/lib/libbsp/i386/pc386/Makefile.am
+++ b/c/src/lib/libbsp/i386/pc386/Makefile.am
@@ -226,6 +226,23 @@ noinst_PROGRAMS += 3c509.rel
 3c509_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
 endif
 
+if HAS_NETWORKING
+vtnet_CPPFLAGS = -D__INSIDE_RTEMS_BSD_TCPIP_STACK__
+noinst_PROGRAMS += vtnet.rel
+vtnet_rel_SOURCES = virtio/if_vtnet.c
+vtnet_rel_SOURCES += virtio/if_vtnetvar.h
+vtnet_rel_SOURCES += virtio/virtio_net.h
+vtnet_rel_SOURCES += virtio/virtio_pci.c
+vtnet_rel_SOURCES += virtio/virtio_pci.h
+vtnet_rel_SOURCES += virtio/virtqueue.c
+vtnet_rel_SOURCES += virtio/virtqueue.h
+vtnet_rel_SOURCES += virtio/virtio.c
+vtnet_rel_SOURCES += virtio/virtio.h
+vtnet_rel_SOURCES += virtio/virtio_ring.h
+vtnet_rel_CPPFLAGS = $(AM_CPPFLAGS) $(vtnet_CPPFLAGS)
+vtnet_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
+endif
+
 libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/cache.rel
 libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/page.rel
 libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/score.rel
@@ -240,6 +257,7 @@ if HAS_NETWORKING
 libbsp_a_LIBADD += ne2000.rel
 libbsp_a_LIBADD += wd8003.rel
 libbsp_a_LIBADD += 3c509.rel
+libbsp_a_LIBADD += vtnet.rel
 endif
 
 EXTRA_DIST += HOWTO
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/HOWTOUSE b/c/src/lib/libbsp/i386/pc386/virtio/HOWTOUSE
new file mode 100644
index 0000000..eb91f80
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/HOWTOUSE
@@ -0,0 +1,62 @@
+1. Introduction
+---------------
+
+	This document explains how to set up RTEMS so that RTEMS applications
+can be built for and run with the virtio network driver. The virtio network
+driver is ported from FreeBSD release 10.0.0, and the RTEMS-specific changes
+to the driver are guarded by RTEMS_VIRTIO_NET.
+
+
+2. Building RTEMS kernel with the virtio network driver
+---------------------------------------------------
+
+	When running configure with an i386-rtems toolset, use the following
+values for the listed options. The original FreeBSD virtio network driver
+works on various CPUs, but the RTEMS port has so far only been tested on
+the i386 pc386 BSP.
+	
+		--target=i386-rtems
+		--enable-rtemsbsp=pc386
+		
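+	For example, assuming the RTEMS sources are in ../rtems and adjusting
+the --prefix path to your installation (both are illustrative assumptions),
+a configure run with networking enabled could look like:
+
+	$ ../rtems/configure --target=i386-rtems \
+		--enable-rtemsbsp=pc386 --enable-networking \
+		--prefix=/opt/rtems-4.11
+	$ make all install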
+
+3. Building RTEMS applications with virtio network driver
+--------------------------------------------------------
+
+	You can use the virtio network driver in the same way as the other
+RTEMS network drivers, but the mbuf cluster space has to be set to 512KB or
+more because the virtio network driver uses 256 mbuf clusters for Rx. An
+example network configuration is as follows.
+
+	extern int rtems_vtnet_driver_attach(struct rtems_bsdnet_ifconfig *, int);
+	static struct rtems_bsdnet_ifconfig virtio_config[]={
+	  {
+	  "vio1", rtems_vtnet_driver_attach, NULL,
+	  },
+	};
+	struct rtems_bsdnet_config rtems_bsdnet_config = {
+		virtio_config,		/* link to next interface */
+		0,			/* Use BOOTP to get network configuration */
+		150,			/* Network task priority */
+		128*1024,		/* MBUF space */
+		512*1024,		/* MBUF cluster space */
+		...
+	};
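+
+	With this configuration in place, the application brings the stack up
+in the usual way (a minimal sketch; error checking is omitted):
+
+	#include <rtems/rtems_bsdnet.h>
+
+	void network_init(void)
+	{
+		rtems_bsdnet_initialize_network();
+	}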
+
+4. Running virtual machine with virtio network device
+-----------------------------------------------------
+
+	To use the virtio network driver, the hypervisor has to provide a
+virtio network device. The current implementation runs successfully under
+KVM and VirtualBox. Example commands for these hypervisors are as follows.
+	
+	For KVM:
+ 
+	$ qemu-system-i386 -enable-kvm -no-shutdown -m 128 \
+		-boot a -fda rtems-boot.img \
+		-hda hdd.img -net nic,model=virtio -net tap,ifname=tap0,script=no \
+		-monitor stdio
+		
+	For VirtualBox:
+ 
+ 	$ VBoxManage modifyvm RTEMS-4.11 --nictype1 virtio
+ 	$ VBoxManage startvm RTEMS-4.11
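+
+	Note that with script=no QEMU does not configure the tap interface
+itself, so tap0 has to exist on the host beforehand. On a Linux host this
+can be done, for example, with:
+
+	$ sudo ip tuntap add dev tap0 mode tap
+	$ sudo ip link set tap0 up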
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c
new file mode 100644
index 0000000..1ee9199
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c
@@ -0,0 +1,4151 @@
+/**
+ * @file if_vtnet.c
+ * @brief Driver for virtio network devices
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+ 
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/network/if_vtnet.c 256066 2013-10-05 18:07:24Z bryanv $
+ */
+
+#define VTNET_LEGACY_TX
+#define RTEMS_VIRTIO_NET
+
+#include <rtems.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include <bsp.h>
+
+#include <sys/mbuf.h>
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+
+#include <net/if.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include "virtio.h"
+#include "virtqueue.h"
+#include "virtio_pci.h"
+#include "virtio_net.h"
+#include "if_vtnetvar.h"
+
+#ifdef RTEMS_VIRTIO_NET
+static struct vtnet_softc vtnet_softc;
+static rtems_interval     vtnet_ticksPerSecond;
+static const char vtnet_nameunit[] = "vtnet";
+
+#define IFF_DRV_RUNNING IFF_RUNNING
+#define if_drv_flags if_flags
+
+#define device_get_softc(dev) &vtnet_softc
+#define device_get_nameunit(dev) vtnet_nameunit
+#define IF_LLADDR(ifp) (sc->arpcom.ac_enaddr)
+
+#define IFQ_SET_MAXLEN(ifq, len) ((ifq)->ifq_maxlen=(len))
+#define IFQ_DRV_IS_EMPTY(ifq) (0==(ifq)->ifq_head)
+#define IFQ_DRV_DEQUEUE(ifq,m) IF_DEQUEUE((ifq),(m))
+#define IFQ_DRV_PREPEND(ifq,m) IF_PREPEND((ifq),(m))
+
+int rtems_vtnet_driver_attach(struct rtems_bsdnet_ifconfig *config, int attaching);
+static void vtnet_daemon( void *xsc );
+#endif
+
+#ifdef NOTUSED
+static int	vtnet_modevent(module_t, int, void *);
+
+static int	vtnet_probe(device_t);
+static int	vtnet_detach(device_t);
+static int	vtnet_suspend(device_t);
+static int	vtnet_resume(device_t);
+static int	vtnet_shutdown(device_t);
+static int	vtnet_attach_completed(device_t);
+static int	vtnet_config_change(device_t);
+
+static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
+static void	vtnet_free_rx_filters(struct vtnet_softc *);
+static int	vtnet_change_mtu(struct vtnet_softc *, int);
+
+static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
+		     struct virtio_net_hdr *);
+static void	vtnet_rxq_tq_intr(void *, int);
+
+static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
+		    int *, int *, int *);
+static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
+		    int, struct virtio_net_hdr *);
+static struct mbuf *
+		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
+		    struct virtio_net_hdr *);
+#ifdef VTNET_LEGACY_TX
+static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
+#else
+static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
+static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
+static void	vtnet_txq_tq_deferred(void *, int);
+#endif
+static void	vtnet_txq_tq_intr(void *, int);
+
+#ifndef VTNET_LEGACY_TX
+static void	vtnet_qflush(struct ifnet *);
+#endif
+
+static void	vtnet_rxq_accum_stats(struct vtnet_rxq *,
+		    struct vtnet_rxq_stats *);
+static void	vtnet_txq_accum_stats(struct vtnet_txq *,
+		    struct vtnet_txq_stats *);
+static void	vtnet_accumulate_stats(struct vtnet_softc *);
+
+static void	vtnet_start_taskqueues(struct vtnet_softc *);
+static void	vtnet_free_taskqueues(struct vtnet_softc *);
+static void	vtnet_drain_taskqueues(struct vtnet_softc *);
+
+static void	vtnet_init_rx_filters(struct vtnet_softc *);
+
+static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
+static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
+		    struct sglist *, int, int);
+static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
+static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
+static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
+static int	vtnet_set_promisc(struct vtnet_softc *, int);
+static int	vtnet_set_allmulti(struct vtnet_softc *, int);
+static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
+static void	vtnet_rx_filter(struct vtnet_softc *);
+static void	vtnet_rx_filter_mac(struct vtnet_softc *);
+static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
+static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
+static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
+static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
+static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
+
+static int	vtnet_ifmedia_upd(struct ifnet *);
+static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static void	vtnet_vlan_tag_remove(struct mbuf *);
+
+static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
+		    struct sysctl_oid_list *, struct vtnet_rxq *);
+static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
+		    struct sysctl_oid_list *, struct vtnet_txq *);
+static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
+static void	vtnet_setup_sysctl(struct vtnet_softc *);
+
+static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);
+#endif
+
+static int	vtnet_attach(device_t);
+
+static void	vtnet_negotiate_features(struct vtnet_softc *);
+static void	vtnet_setup_features(struct vtnet_softc *);
+static int	vtnet_init_rxq(struct vtnet_softc *, int);
+static int	vtnet_init_txq(struct vtnet_softc *, int);
+static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
+static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
+static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
+static int	vtnet_setup_interface(struct vtnet_softc *);
+static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
+
+static int	vtnet_rxq_populate(struct vtnet_rxq *);
+static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
+static struct mbuf *
+		vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **);
+static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
+		    struct mbuf *, int);
+static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
+static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
+static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
+static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
+static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
+static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
+static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
+		    struct virtio_net_hdr *);
+static int	vtnet_rxq_eof(struct vtnet_rxq *);
+static void	vtnet_rx_vq_intr(void *);
+static void	vtnet_tx_start_all(struct vtnet_softc *);
+
+static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
+static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
+		    struct vtnet_tx_header *);
+static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
+static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
+static void	vtnet_start(struct ifnet *);
+static void	vtnet_txq_eof(struct vtnet_txq *);
+static void	vtnet_tx_vq_intr(void *);
+
+static int	vtnet_watchdog(struct vtnet_txq *);
+static void	vtnet_tick(void *);
+
+static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
+static void	vtnet_stop_rendezvous(struct vtnet_softc *);
+static void	vtnet_stop(struct vtnet_softc *);
+static int	vtnet_virtio_reinit(struct vtnet_softc *);
+static int	vtnet_init_rx_queues(struct vtnet_softc *);
+static int	vtnet_init_tx_queues(struct vtnet_softc *);
+static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
+static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
+static int	vtnet_reinit(struct vtnet_softc *);
+static void	vtnet_init_locked(struct vtnet_softc *);
+static void	vtnet_init(void *);
+
+static int	vtnet_is_link_up(struct vtnet_softc *);
+static void	vtnet_update_link_status(struct vtnet_softc *);
+static void	vtnet_get_hwaddr(struct vtnet_softc *);
+static void	vtnet_set_hwaddr(struct vtnet_softc *);
+
+static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
+static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
+static int	vtnet_txq_enable_intr(struct vtnet_txq *);
+static void	vtnet_txq_disable_intr(struct vtnet_txq *);
+static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
+static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
+static void	vtnet_enable_interrupts(struct vtnet_softc *);
+static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
+static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
+static void	vtnet_disable_interrupts(struct vtnet_softc *);
+
+#ifdef NOTUSED
+/* Tunables. */
+static int vtnet_csum_disable = 0;
+TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
+static int vtnet_tso_disable = 0;
+TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
+static int vtnet_lro_disable = 0;
+TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
+static int vtnet_mq_disable = 0;
+TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
+static int vtnet_mq_max_pairs = 0;
+TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
+static int vtnet_rx_process_limit = 512;
+TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
+
+/*
+ * Reducing the number of transmit completed interrupts can improve
+ * performance. To do so, the define below keeps the Tx vq interrupt
+ * disabled and adds calls to vtnet_txeof() in the start and watchdog
+ * paths. The price to pay for this is the m_free'ing of transmitted
+ * mbufs may be delayed until the watchdog fires.
+ *
+ * BMV: Reintroduce this later as a run-time option, if it makes
+ * sense after the EVENT_IDX feature is supported.
+ *
+ * #define VTNET_TX_INTR_MODERATION
+ */
+
+static uma_zone_t vtnet_tx_header_zone;
+
+static struct virtio_feature_desc vtnet_feature_desc[] = {
+	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
+	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
+	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
+	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
+	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
+	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
+	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
+	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
+	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
+	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
+	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
+	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
+	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
+	{ VIRTIO_NET_F_STATUS,		"Status"	},
+	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
+	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
+	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
+	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
+	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
+	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
+	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
+
+	{ 0, NULL }
+};
+
+static device_method_t vtnet_methods[] = {
+	/* Device methods. */
+	DEVMETHOD(device_probe,			vtnet_probe),
+	DEVMETHOD(device_attach,		vtnet_attach),
+	DEVMETHOD(device_detach,		vtnet_detach),
+	DEVMETHOD(device_suspend,		vtnet_suspend),
+	DEVMETHOD(device_resume,		vtnet_resume),
+	DEVMETHOD(device_shutdown,		vtnet_shutdown),
+
+	/* VirtIO methods. */
+	DEVMETHOD(virtio_attach_completed,	vtnet_attach_completed),
+	DEVMETHOD(virtio_config_change,		vtnet_config_change),
+
+	DEVMETHOD_END
+};
+
+static driver_t vtnet_driver = {
+	"vtnet",
+	vtnet_methods,
+	sizeof(struct vtnet_softc)
+};
+static devclass_t vtnet_devclass;
+
+DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
+    vtnet_modevent, 0);
+MODULE_VERSION(vtnet, 1);
+MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
+
+static int
+vtnet_modevent(module_t mod, int type, void *unused)
+{
+	int error;
+
+	error = 0;
+
+	switch (type) {
+	case MOD_LOAD:
+		vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
+		    sizeof(struct vtnet_tx_header),
+		    NULL, NULL, NULL, NULL, 0, 0);
+		break;
+	case MOD_QUIESCE:
+	case MOD_UNLOAD:
+		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
+			error = EBUSY;
+		else if (type == MOD_UNLOAD) {
+			uma_zdestroy(vtnet_tx_header_zone);
+			vtnet_tx_header_zone = NULL;
+		}
+		break;
+	case MOD_SHUTDOWN:
+		break;
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+
+	return (error);
+}
+
+static int
+vtnet_probe(device_t dev)
+{
+
+	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
+		return (ENXIO);
+
+	device_set_desc(dev, "VirtIO Networking Adapter");
+
+	return (BUS_PROBE_DEFAULT);
+}
+#endif
+
+#ifdef RTEMS_VIRTIO_NET
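+/*
+ * RTEMS attach entry point: this is the function named in the
+ * application's rtems_bsdnet_ifconfig entry (see HOWTOUSE).
+ */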
+int
+rtems_vtnet_driver_attach(
+  struct rtems_bsdnet_ifconfig *config,
+  int                           attaching
+){
+	struct vtnet_softc *sc;
+	int error;
+	
+	sc = &vtnet_softc;
+	memset(sc, 0, sizeof(struct vtnet_softc));
+	sc->config = config;	/* set after clearing the softc so it is not wiped */
+	vtnet_ticksPerSecond = rtems_clock_get_ticks_per_second();
+	
+	error = rtems_vtpci_attach( config, &sc->vtpci_softc );
+	if ( error ) {
+		device_printf(dev, "cannot attach pci device\n");
+		return error;
+	}
+	
+	sc->vtnet_flags = 0;
+	sc->vtnet_hdr_size = 0;
+	sc->vtnet_max_vq_pairs = 1;
+	sc->vtnet_rx_process_limit = 512;
+	
+	error = vtnet_attach(attaching);
+	
+	return (error);
+}
+
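+/*
+ * Daemon task used in place of the FreeBSD interrupt taskqueues: it waits
+ * for RTEMS_EVENT_1 (presumably sent by the virtio PCI interrupt handler)
+ * and then runs the interrupt callback of every virtqueue that has
+ * interrupts enabled.
+ */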
+static void
+vtnet_daemon( void *xsc )
+{
+	struct vtpci_softc *sc;
+	struct vtpci_virtqueue *vqx;
+	rtems_event_set     events;
+	int                 i;
+
+	sc = ((struct vtnet_softc *)xsc)->vtpci_softc;
+
+	while ( 1 ) {
+		rtems_bsdnet_event_receive( RTEMS_EVENT_1,
+			RTEMS_WAIT | RTEMS_EVENT_ANY, RTEMS_NO_TIMEOUT, &events );
+		
+		vqx = &sc->vtpci_vqs[0];
+		for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
+			if (vqx->vtv_no_intr == 0)
+				virtqueue_intr(vqx->vtv_vq);
+		}
+	}
+}
+#endif
+
+static int
+vtnet_attach(device_t dev)
+{
+	struct vtnet_softc *sc;
+	int error;
+	
+	sc = device_get_softc(dev);
+	sc->vtnet_dev = dev;
+
+#ifdef NOTUSED
+	/* Register our feature descriptions. */
+	virtio_set_feature_desc(dev, vtnet_feature_desc);
+
+	VTNET_CORE_LOCK_INIT(sc);
+	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
+
+	vtnet_setup_sysctl(sc);
+#endif
+	vtnet_setup_features(sc);
+#ifdef NOTUSED
+
+	error = vtnet_alloc_rx_filters(sc);
+	if (error) {
+		device_printf(dev, "cannot allocate Rx filters\n");
+		goto fail;
+	}
+#endif
+
+	error = vtnet_alloc_rxtx_queues(sc);
+	if (error) {
+		device_printf(dev, "cannot allocate queues\n");
+		goto fail;
+	}
+
+	error = vtnet_alloc_virtqueues(sc);
+	if (error) {
+		device_printf(dev, "cannot allocate virtqueues\n");
+		goto fail;
+	}
+
+	error = vtnet_setup_interface(sc);
+	if (error) {
+		device_printf(dev, "cannot setup interface\n");
+		goto fail;
+	}
+
+	error = virtio_setup_intr(dev, INTR_TYPE_NET);
+	if (error) {
+		device_printf(dev, "cannot setup virtqueue interrupts\n");
+		/* BMV: This will crash if during boot! */
+#ifdef NOTUSED
+		ether_ifdetach(sc->vtnet_ifp);
+#endif
+		goto fail;
+	}
+
+#ifdef NOTUSED
+	vtnet_start_taskqueues(sc);
+#endif
+
+fail:
+#ifdef NOTUSED
+	if (error)
+		vtnet_detach(dev);
+#endif
+
+	return (error);
+}
+
+#ifdef NOTUSED
+static int
+vtnet_detach(device_t dev)
+{
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+	ifp = sc->vtnet_ifp;
+
+	if (device_is_attached(dev)) {
+		VTNET_CORE_LOCK(sc);
+		vtnet_stop(sc);
+		VTNET_CORE_UNLOCK(sc);
+
+		callout_drain(&sc->vtnet_tick_ch);
+		vtnet_drain_taskqueues(sc);
+
+		ether_ifdetach(ifp);
+	}
+
+	vtnet_free_taskqueues(sc);
+
+	if (sc->vtnet_vlan_attach != NULL) {
+		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
+		sc->vtnet_vlan_attach = NULL;
+	}
+	if (sc->vtnet_vlan_detach != NULL) {
+		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
+		sc->vtnet_vlan_detach = NULL;
+	}
+
+	ifmedia_removeall(&sc->vtnet_media);
+
+	if (ifp != NULL) {
+		if_free(ifp);
+		sc->vtnet_ifp = NULL;
+	}
+
+	vtnet_free_rxtx_queues(sc);
+	vtnet_free_rx_filters(sc);
+
+	if (sc->vtnet_ctrl_vq != NULL)
+		vtnet_free_ctrl_vq(sc);
+
+	VTNET_CORE_LOCK_DESTROY(sc);
+
+	return (0);
+}
+
+static int
+vtnet_suspend(device_t dev)
+{
+	struct vtnet_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	VTNET_CORE_LOCK(sc);
+	vtnet_stop(sc);
+	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
+	VTNET_CORE_UNLOCK(sc);
+
+	return (0);
+}
+
+static int
+vtnet_resume(device_t dev)
+{
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+
+	sc = device_get_softc(dev);
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK(sc);
+	if (ifp->if_flags & IFF_UP)
+		vtnet_init_locked(sc);
+	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
+	VTNET_CORE_UNLOCK(sc);
+
+	return (0);
+}
+
+static int
+vtnet_shutdown(device_t dev)
+{
+
+	/*
+	 * Suspend already does all of what we need to
+	 * do here; we just never expect to be resumed.
+	 */
+	return (vtnet_suspend(dev));
+}
+
+static int
+vtnet_attach_completed(device_t dev)
+{
+
+	vtnet_attach_disable_promisc(device_get_softc(dev));
+
+	return (0);
+}
+#endif
+
+static int
+vtnet_config_change(device_t dev)
+{
+	struct vtnet_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	VTNET_CORE_LOCK(sc);
+	vtnet_update_link_status(sc);
+	if (sc->vtnet_link_active != 0)
+		vtnet_tx_start_all(sc);
+	VTNET_CORE_UNLOCK(sc);
+
+	return (0);
+}
+
+static void
+vtnet_negotiate_features(struct vtnet_softc *sc)
+{
+	device_t dev;
+	uint64_t mask, features;
+
+	dev = sc->vtnet_dev;
+	mask = 0;
+
+#ifdef NOTUSED
+	/*
+	 * TSO and LRO are only available when their corresponding checksum
+	 * offload feature is also negotiated.
+	 */
+	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
+		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
+		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
+	}
+	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
+		mask |= VTNET_TSO_FEATURES;
+	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
+		mask |= VTNET_LRO_FEATURES;
+	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
+		mask |= VIRTIO_NET_F_MQ;
+#endif
+#ifdef VTNET_LEGACY_TX
+	mask |= VIRTIO_NET_F_MQ;
+#endif
+
+	features = VTNET_FEATURES & ~mask;
+	sc->vtnet_features = virtio_negotiate_features(dev, features);
+
+	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) == 0)
+		return;
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
+		return;
+
+#ifdef NOTUSED
+	/*
+	 * LRO without mergeable buffers requires special care. This is not
+	 * ideal because every receive buffer must be large enough to hold
+	 * the maximum TCP packet, the Ethernet header, and the header. This
+	 * requires up to 34 descriptors with MCLBYTES clusters. If we do
+	 * not have indirect descriptors, LRO is disabled since the
virtqueue
+	 * will not contain very many receive buffers.
+	 */
+	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
+		device_printf(dev,
+		    "LRO disabled due to both mergeable buffers and indirect "
+		    "descriptors not negotiated\n");
+
+		features &= ~VTNET_LRO_FEATURES;
+		sc->vtnet_features = virtio_negotiate_features(dev, features);
+	} else
+		sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
+#endif
+}
+
+static void
+vtnet_setup_features(struct vtnet_softc *sc)
+{
+	device_t dev;
+	int max_pairs, max;
+
+	dev = sc->vtnet_dev;
+
+	vtnet_negotiate_features(sc);
+
+	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
+		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
+		/* This feature should always be negotiated. */
+		sc->vtnet_flags |= VTNET_FLAG_MAC;
+	}
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
+		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
+		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	} else
+		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
+		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
+
+		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
+			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
+		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
+			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
+		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
+			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
+	}
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
+	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
+		max_pairs = virtio_read_dev_config_2(dev,
+		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
+		if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+		    max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
+			max_pairs = 1;
+	} else
+		max_pairs = 1;
+
+#ifdef NOTUSED
+	if (max_pairs > 1) {
+		/*
+		 * Limit the maximum number of queue pairs to the number of
+		 * CPUs or the configured maximum. The actual number of
+		 * queues that get used may be less.
+		 */
+		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
+		if (max > 0 && max_pairs > max)
+			max_pairs = max;
+		if (max_pairs > mp_ncpus)
+			max_pairs = mp_ncpus;
+		if (max_pairs > VTNET_MAX_QUEUE_PAIRS)
+			max_pairs = VTNET_MAX_QUEUE_PAIRS;
+		if (max_pairs > 1)
+			sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
+	}
+#endif
+
+	sc->vtnet_max_vq_pairs = max_pairs;
+}
+
+static int
+vtnet_init_rxq(struct vtnet_softc *sc, int id)
+{
+	struct vtnet_rxq *rxq;
+
+	rxq = &sc->vtnet_rxqs[id];
+
+	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
+	    device_get_nameunit(sc->vtnet_dev), id);
+	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
+
+	rxq->vtnrx_sc = sc;
+	rxq->vtnrx_id = id;
+
+#ifdef NOTUSED
+	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
+	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
+	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
+
+	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
+#endif
+
+	return (0);
+}
+
+static int
+vtnet_init_txq(struct vtnet_softc *sc, int id)
+{
+	struct vtnet_txq *txq;
+
+	txq = &sc->vtnet_txqs[id];
+
+	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
+	    device_get_nameunit(sc->vtnet_dev), id);
+	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
+
+	txq->vtntx_sc = sc;
+	txq->vtntx_id = id;
+
+#ifndef VTNET_LEGACY_TX
+	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
+	    M_NOWAIT, &txq->vtntx_mtx);
+	if (txq->vtntx_br == NULL)
+		return (ENOMEM);
+
+	TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
+#endif
+#ifdef NOTUSED
+	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
+	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
+	    taskqueue_thread_enqueue, &txq->vtntx_tq);
+	if (txq->vtntx_tq == NULL)
+		return (ENOMEM);
+#endif
+
+	return (0);
+}
+
+static int
+vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
+{
+	int i, npairs, error;
+
+	npairs = sc->vtnet_max_vq_pairs;
+
+	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
+	    M_NOWAIT | M_ZERO);
+	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
+	    M_NOWAIT | M_ZERO);
+	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
+		return (ENOMEM);
+
+	for (i = 0; i < npairs; i++) {
+		error = vtnet_init_rxq(sc, i);
+		if (error)
+			return (error);
+		error = vtnet_init_txq(sc, i);
+		if (error)
+			return (error);
+	}
+
+#ifdef NOTUSED
+	vtnet_setup_queue_sysctl(sc);
+#endif
+
+	return (0);
+}
+
+static void
+vtnet_destroy_rxq(struct vtnet_rxq *rxq)
+{
+
+	rxq->vtnrx_sc = NULL;
+	rxq->vtnrx_id = -1;
+
+	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
+		mtx_destroy(&rxq->vtnrx_mtx);
+}
+
+static void
+vtnet_destroy_txq(struct vtnet_txq *txq)
+{
+
+	txq->vtntx_sc = NULL;
+	txq->vtntx_id = -1;
+
+#ifndef VTNET_LEGACY_TX
+	if (txq->vtntx_br != NULL) {
+		buf_ring_free(txq->vtntx_br, M_DEVBUF);
+		txq->vtntx_br = NULL;
+	}
+#endif
+
+	if (mtx_initialized(&txq->vtntx_mtx) != 0)
+		mtx_destroy(&txq->vtntx_mtx);
+}
+
+static void
+vtnet_free_rxtx_queues(struct vtnet_softc *sc)
+{
+	int i;
+
+	if (sc->vtnet_rxqs != NULL) {
+		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
+			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
+		free(sc->vtnet_rxqs, M_DEVBUF);
+		sc->vtnet_rxqs = NULL;
+	}
+
+	if (sc->vtnet_txqs != NULL) {
+		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
+			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
+		free(sc->vtnet_txqs, M_DEVBUF);
+		sc->vtnet_txqs = NULL;
+	}
+}
+
+#ifdef NOTUSED
+static int
+vtnet_alloc_rx_filters(struct vtnet_softc *sc)
+{
+
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
+		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
+		    M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (sc->vtnet_mac_filter == NULL)
+			return (ENOMEM);
+	}
+
+	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
+		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
+		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (sc->vtnet_vlan_filter == NULL)
+			return (ENOMEM);
+	}
+
+	return (0);
+}
+
+static void
+vtnet_free_rx_filters(struct vtnet_softc *sc)
+{
+
+	if (sc->vtnet_mac_filter != NULL) {
+		free(sc->vtnet_mac_filter, M_DEVBUF);
+		sc->vtnet_mac_filter = NULL;
+	}
+
+	if (sc->vtnet_vlan_filter != NULL) {
+		free(sc->vtnet_vlan_filter, M_DEVBUF);
+		sc->vtnet_vlan_filter = NULL;
+	}
+}
+#endif
+
+static int
+vtnet_alloc_virtqueues(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct vq_alloc_info *info;
+	struct vtnet_rxq *rxq;
+	struct vtnet_txq *txq;
+	int i, idx, flags, nvqs, rxsegs, error;
+
+	dev = sc->vtnet_dev;
+	flags = 0;
+
+	/*
+	 * Indirect descriptors are not needed for the Rx virtqueue when
+	 * mergeable buffers are negotiated. The header is placed inline
+	 * with the data, not in a separate descriptor, and mbuf clusters
+	 * are always physically contiguous.
+	 */
+	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
+		rxsegs = 0;
+	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
+		rxsegs = VTNET_MAX_RX_SEGS;
+	else
+		rxsegs = VTNET_MIN_RX_SEGS;
+
+	nvqs = sc->vtnet_max_vq_pairs * 2;
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
+		nvqs++;
+
+	info = malloc(sizeof(struct vq_alloc_info) * nvqs , M_TEMP, M_NOWAIT);
+	if (info == NULL)
+		return (ENOMEM);
+
+	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
+		rxq = &sc->vtnet_rxqs[i];
+		VQ_ALLOC_INFO_INIT(&info[idx], rxsegs,
+		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
+		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
+
+		txq = &sc->vtnet_txqs[i];
+		VQ_ALLOC_INFO_INIT(&info[idx+1], VTNET_MAX_TX_SEGS,
+		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
+		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
+	}
+
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
+		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
+		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
+	}
+
+	/*
+	 * Enable interrupt binding if this is multiqueue. This only matters
+	 * when per-vq MSIX is available.
+	 */
+	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
+		flags |= 0;
+
+	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
+	free(info, M_TEMP);
+
+	return (error);
+}
+
+static int
+vtnet_setup_interface(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+	int limit;
+
+#ifdef NOTUSED
+	dev = sc->vtnet_dev;
+
+	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "cannot allocate ifnet structure\n");
+		return (ENOSPC);
+	}
+
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	if_initbaudrate(ifp, IF_Gbps(10));	/* Approx. */
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	ifp = &sc->arpcom.ac_if;
+	sc->vtnet_ifp = ifp;
+#endif
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_init = vtnet_init;
+	ifp->if_ioctl = vtnet_ioctl;
+
+#ifdef NOTUSED
+#ifndef VTNET_LEGACY_TX
+	ifp->if_transmit = vtnet_txq_mq_start;
+	ifp->if_qflush = vtnet_qflush;
+#else
+	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
+	ifp->if_start = vtnet_start;
+	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
+	ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
+	IFQ_SET_READY(&ifp->if_snd);
+#endif
+
+	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
+	    vtnet_ifmedia_sts);
+	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
+	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
+#endif
+
+	/* Read (or generate) the MAC address for the adapter. */
+	vtnet_get_hwaddr(sc);
+
+#ifdef RTEMS_VIRTIO_NET
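+	/*
+	 * RTEMS port: fill in the legacy ifnet fields (start handler, send
+	 * queue length, unit, name, MTU) directly instead of going through
+	 * if_alloc()/if_initname().
+	 */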
+	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
+	
+	ifp->if_start = vtnet_start;
+	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
+	ifp->if_unit = sc->vtpci_softc->unit_number;
+	ifp->if_name = sc->vtpci_softc->unit_name;
+	ifp->if_mtu = sc->config->mtu ? sc->config->mtu : ETHERMTU;
+	ifp->if_output = ether_output;
+	if_attach(ifp);
+#endif
+	ether_ifattach(ifp);
+	
+#ifdef NOTUSED
+	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
+		ifp->if_capabilities |= IFCAP_LINKSTATE;
+
+	/* Tell the upper layer(s) we support long frames. */
+	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
+		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
+
+		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
+			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
+			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
+		} else {
+			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
+				ifp->if_capabilities |= IFCAP_TSO4;
+			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
+				ifp->if_capabilities |= IFCAP_TSO6;
+			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
+				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
+		}
+
+		if (ifp->if_capabilities & IFCAP_TSO)
+			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+	}
+
+	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM))
+		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
+
+	if (ifp->if_capabilities & IFCAP_HWCSUM) {
+		/*
+		 * VirtIO does not support VLAN tagging, but we can fake
+		 * it by inserting and removing the 802.1Q header during
+		 * transmit and receive. We are then able to do checksum
+		 * offloading of VLAN frames.
+		 */
+		ifp->if_capabilities |=
+		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
+	}
+
+	ifp->if_capenable = ifp->if_capabilities;
+
+	/*
+	 * Capabilities after here are not enabled by default.
+	 */
+
+	if (ifp->if_capabilities & IFCAP_RXCSUM) {
+		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
+		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
+			ifp->if_capabilities |= IFCAP_LRO;
+	}
+
+	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
+		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
+		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
+	}
+
+	limit = vtnet_tunable_int(sc, "rx_process_limit",
+	    vtnet_rx_process_limit);
+	if (limit < 0)
+		limit = INT_MAX;
+
+	sc->vtnet_rx_process_limit = limit;
+#endif
+
+	return (0);
+}
+
+#ifdef NOTUSED
+static int
+vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
+{
+	struct ifnet *ifp;
+	int frame_size, clsize;
+
+	ifp = sc->vtnet_ifp;
+
+	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
+		return (EINVAL);
+
+	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
+	    new_mtu;
+
+	/*
+	 * Based on the new MTU (and hence frame size) determine which
+	 * cluster size is most appropriate for the receive queues.
+	 */
+	if (frame_size <= MCLBYTES) {
+		clsize = MCLBYTES;
+	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+		/* Avoid going past 9K jumbos. */
+		if (frame_size > MJUM9BYTES)
+			return (EINVAL);
+		clsize = MJUM9BYTES;
+	} else
+		clsize = MJUMPAGESIZE;
+
+	ifp->if_mtu = new_mtu;
+	sc->vtnet_rx_new_clsize = clsize;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		vtnet_init_locked(sc);
+	}
+
+	return (0);
+}
+#endif
+
+static int
+vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+	struct vtnet_softc *sc;
+	struct ifreq *ifr;
+	int reinit, mask, error;
+
+	sc = ifp->if_softc;
+	ifr = (struct ifreq *) data;
+	error = 0;
+	
+	switch (cmd) {
+#ifdef NOTUSED
+	case SIOCSIFMTU:
+		if (ifp->if_mtu != ifr->ifr_mtu) {
+			VTNET_CORE_LOCK(sc);
+			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
+			VTNET_CORE_UNLOCK(sc);
+		}
+		break;
+#endif
+	case SIOCSIFFLAGS:
+		VTNET_CORE_LOCK(sc);
+		if ((ifp->if_flags & IFF_UP) == 0) {
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING){
+				vtnet_stop(sc);
+			}
+#ifdef NOTUSED
+		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
+			    (IFF_PROMISC | IFF_ALLMULTI)) {
+				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
+					vtnet_rx_filter(sc);
+				else
+					error = ENOTSUP;
+			}
+#endif
+		} else{
+			vtnet_init_locked(sc);
+		}
+
+		if (error == 0)
+			sc->vtnet_if_flags = ifp->if_flags;
+		VTNET_CORE_UNLOCK(sc);
+		break;
+#ifdef NOTUSED
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
+			break;
+		VTNET_CORE_LOCK(sc);
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+			vtnet_rx_filter_mac(sc);
+		VTNET_CORE_UNLOCK(sc);
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
+		break;
+	case SIOCSIFCAP:
+		VTNET_CORE_LOCK(sc);
+		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+		if (mask & IFCAP_TXCSUM)
+			ifp->if_capenable ^= IFCAP_TXCSUM;
+		if (mask & IFCAP_TXCSUM_IPV6)
+			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
+		if (mask & IFCAP_TSO4)
+			ifp->if_capenable ^= IFCAP_TSO4;
+		if (mask & IFCAP_TSO6)
+			ifp->if_capenable ^= IFCAP_TSO6;
+
+		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
+		    IFCAP_VLAN_HWFILTER)) {
+			/* These Rx features require us to renegotiate. */
+			reinit = 1;
+
+			if (mask & IFCAP_RXCSUM)
+				ifp->if_capenable ^= IFCAP_RXCSUM;
+			if (mask & IFCAP_RXCSUM_IPV6)
+				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+			if (mask & IFCAP_LRO)
+				ifp->if_capenable ^= IFCAP_LRO;
+			if (mask & IFCAP_VLAN_HWFILTER)
+				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+		} else
+			reinit = 0;
+
+		if (mask & IFCAP_VLAN_HWTSO)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+		if (mask & IFCAP_VLAN_HWTAGGING)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			vtnet_init_locked(sc);
+		}
+
+		VTNET_CORE_UNLOCK(sc);
+		VLAN_CAPABILITIES(ifp);
+
+		break;
+#endif
+	default:
+		error = ether_ioctl(ifp, cmd, data);
+		break;
+	}
+
+	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
+
+	return (error);
+}
+
+static int
+vtnet_rxq_populate(struct vtnet_rxq *rxq)
+{
+	struct virtqueue *vq;
+	int nbufs, error;
+
+	vq = rxq->vtnrx_vq;
+	error = ENOSPC;
+
+	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
+		error = vtnet_rxq_new_buf(rxq);
+		if (error)
+			break;
+	}
+
+	if (nbufs > 0) {
+		virtqueue_notify(vq);
+		/*
+		 * EMSGSIZE signifies the virtqueue did not have enough
+		 * entries available to hold the last mbuf. This is not
+		 * an error.
+		 */
+		if (error == EMSGSIZE)
+			error = 0;
+	}
+
+	return (error);
+}
+
+static void
+vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
+{
+	struct virtqueue *vq;
+	struct mbuf *m;
+	int last;
+
+	vq = rxq->vtnrx_vq;
+	last = 0;
+
+	while ((m = virtqueue_drain(vq, &last)) != NULL)
+		m_freem(m);
+
+	KASSERT(virtqueue_empty(vq),
+	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
+}
+
+static struct mbuf *
+vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
+{
+	struct mbuf *m_head, *m_tail, *m;
+	int i, clsize;
+
+	clsize = sc->vtnet_rx_clsize;
+
+	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
+	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
+
+#ifdef NOTUSED
+	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
+#endif
+#ifdef RTEMS_VIRTIO_NET
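+	/*
+	 * RTEMS port: allocate a header mbuf and attach a cluster with
+	 * MCLGET() instead of using FreeBSD's m_getjcl().
+	 */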
+	m_head = m_gethdr(M_NOWAIT, MT_DATA);
+	if (m_head == NULL)
+		goto fail;
+	MCLGET(m_head, M_NOWAIT);
+#endif
+
+	m_head->m_len = clsize;
+	m_tail = m_head;
+
+	/* Allocate the rest of the chain. */
+	for (i = 1; i < nbufs; i++) {
+#ifdef NOTUSED	
+		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+		m = m_gethdr(M_NOWAIT, MT_DATA);
+		if (m == NULL)
+			goto fail;
+		MCLGET(m, M_NOWAIT);	/* attach the cluster to the newly allocated mbuf, not m_head */
+#endif
+
+		m->m_len = clsize;
+		m_tail->m_next = m;
+		m_tail = m;
+	}
+
+	if (m_tailp != NULL)
+		*m_tailp = m_tail;
+
+	return (m_head);
+
+fail:
+	sc->vtnet_stats.mbuf_alloc_failed++;
+	m_freem(m_head);
+
+	return (NULL);
+}
+
+/*
+ * Slow path for when LRO without mergeable buffers is negotiated.
+ */
+static int
+vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
+    int len0)
+{
+	struct vtnet_softc *sc;
+	struct mbuf *m, *m_prev;
+	struct mbuf *m_new, *m_tail;
+	int len, clsize, nreplace, error;
+
+	sc = rxq->vtnrx_sc;
+	clsize = sc->vtnet_rx_clsize;
+
+	m_prev = NULL;
+	m_tail = NULL;
+	nreplace = 0;
+
+	m = m0;
+	len = len0;
+
+	/*
+	 * Since these mbuf chains are so large, we avoid allocating an
+	 * entire replacement chain if possible. When the received frame
+	 * did not consume the entire chain, the unused mbufs are moved
+	 * to the replacement chain.
+	 */
+	while (len > 0) {
+		/*
+		 * Something is seriously wrong if we received a frame
+		 * larger than the chain. Drop it.
+		 */
+		if (m == NULL) {
+			sc->vtnet_stats.rx_frame_too_large++;
+			return (EMSGSIZE);
+		}
+
+		/* We always allocate the same cluster size. */
+		KASSERT(m->m_len == clsize,
+		    ("%s: mbuf size %d is not the cluster size %d",
+		    __func__, m->m_len, clsize));
+
+		m->m_len = MIN(m->m_len, len);
+		len -= m->m_len;
+
+		m_prev = m;
+		m = m->m_next;
+		nreplace++;
+	}
+
+	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
+	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
+	    sc->vtnet_rx_nmbufs));
+
+	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
+	if (m_new == NULL) {
+		m_prev->m_len = clsize;
+		return (ENOBUFS);
+	}
+
+	/*
+	 * Move any unused mbufs from the received chain onto the end
+	 * of the new chain.
+	 */
+	if (m_prev->m_next != NULL) {
+		m_tail->m_next = m_prev->m_next;
+		m_prev->m_next = NULL;
+	}
+
+	error = vtnet_rxq_enqueue_buf(rxq, m_new);
+	if (error) {
+		/*
+		 * BAD! We could not enqueue the replacement mbuf chain. We
+		 * must restore the m0 chain to the original state if it was
+		 * modified so we can subsequently discard it.
+		 *
+		 * NOTE: The replacement is supposed to be an identical copy
+		 * to the one just dequeued so this is an unexpected error.
+		 */
+		sc->vtnet_stats.rx_enq_replacement_failed++;
+
+		if (m_tail->m_next != NULL) {
+			m_prev->m_next = m_tail->m_next;
+			m_tail->m_next = NULL;
+		}
+
+		m_prev->m_len = clsize;
+		m_freem(m_new);
+	}
+
+	return (error);
+}
+
+static int
+vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
+{
+	struct vtnet_softc *sc;
+	struct mbuf *m_new;
+	int error;
+
+	sc = rxq->vtnrx_sc;
+
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
+	    ("%s: chained mbuf without LRO_NOMRG", __func__));
+
+	if (m->m_next == NULL) {
+		/* Fast-path for the common case of just one mbuf. */
+		if (m->m_len < len)
+			return (EINVAL);
+
+		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
+		if (m_new == NULL)
+			return (ENOBUFS);
+
+		error = vtnet_rxq_enqueue_buf(rxq, m_new);
+		if (error) {
+			/*
+			 * The new mbuf is supposed to be an identical
+			 * copy of the one just dequeued so this is an
+			 * unexpected error.
+			 */
+			m_freem(m_new);
+			sc->vtnet_stats.rx_enq_replacement_failed++;
+		} else
+			m->m_len = len;
+	} else
+		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
+
+	return (error);
+}
+
+static int
+vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
+{
+#ifdef NOTUSED
+	struct sglist sg;
+	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
+	struct vtnet_softc *sc;
+	struct vtnet_rx_header *rxhdr;
+	uint8_t *mdata;
+	int offset, error, nsegs;
+
+	sc = rxq->vtnrx_sc;
+	mdata = mtod(m, uint8_t *);
+
+	VTNET_RXQ_LOCK_ASSERT(rxq);
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
+	    ("%s: chained mbuf without LRO_NOMRG", __func__));
+	KASSERT(m->m_len == sc->vtnet_rx_clsize,
+	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
+	     sc->vtnet_rx_clsize));
+
+	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
+	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
+		rxhdr = (struct vtnet_rx_header *) mdata;
+		sglist_append(&sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
+		offset = sizeof(struct vtnet_rx_header);
+	} else
+		offset = 0;
+
+	sglist_append(&sg, mdata + offset, m->m_len - offset);
+	if (m->m_next != NULL) {
+		error = sglist_append_mbuf(&sg, m->m_next);
+		MPASS(error == 0);
+	}
+	
+	error = virtqueue_enqueue(rxq->vtnrx_vq, m, &sg, 0, sg.sg_nsegs);
+#endif
+#ifdef RTEMS_VIRTIO_NET
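+	/*
+	 * RTEMS port: no sglist here. When mergeable rx buffers are not
+	 * negotiated, prepend a separate mbuf that carries the virtio-net
+	 * header and enqueue the resulting chain, one segment per mbuf.
+	 */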
+	struct mbuf *m_rxhdr;
+	struct vtnet_softc *sc;
+	int error, nsegs;
+	
+	sc = rxq->vtnrx_sc;
+	nsegs = 0;
+	
+	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+		MGET(m_rxhdr, M_DONTWAIT, MT_DATA);
+		if( m_rxhdr == NULL ){
+			m_freem(m);
+			m_freem(m_rxhdr);
+			return ENOBUFS;
+		}
+		memcpy(m_rxhdr->m_data, mtod(m, uint8_t *), sc->vtnet_hdr_size);
+		m_adj(m, sc->vtnet_hdr_size);
+		m_rxhdr->m_len = sc->vtnet_hdr_size;
+		m_rxhdr->m_next = m;
+	}
+	else{
+		m_rxhdr = m;
+	}
+	
+	for( m=m_rxhdr ; m!=NULL ; m=m->m_next )
+		nsegs++;
+
+	error = rtems_virtqueue_enqueue(rxq->vtnrx_vq, m_rxhdr, 0, nsegs);
+#endif
+
+	return (error);
+}
+
+static int
+vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
+{
+	struct vtnet_softc *sc;
+	struct mbuf *m;
+	int error;
+
+	sc = rxq->vtnrx_sc;
+
+	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
+	if (m == NULL)
+		return (ENOBUFS);
+
+	error = vtnet_rxq_enqueue_buf(rxq, m);
+	if (error)
+		m_freem(m);
+
+	return (error);
+}
+
+#ifdef NOTUSED
+/*
+ * Use the checksum offset in the VirtIO header to set the
+ * correct CSUM_* flags.
+ */
+static int
+vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
+    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
+{
+	struct vtnet_softc *sc;
+#if defined(INET) || defined(INET6)
+	int offset = hdr->csum_start + hdr->csum_offset;
+#endif
+
+	sc = rxq->vtnrx_sc;
+
+	/* Only do a basic sanity check on the offset. */
+	switch (eth_type) {
+#if defined(INET)
+	case ETHERTYPE_IP:
+		if (__predict_false(offset < ip_start + sizeof(struct ip)))
+			return (1);
+		break;
+#endif
+#if defined(INET6)
+	case ETHERTYPE_IPV6:
+		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
+			return (1);
+		break;
+#endif
+	default:
+		sc->vtnet_stats.rx_csum_bad_ethtype++;
+		return (1);
+	}
+
+	/*
+	 * Use the offset to determine the appropriate CSUM_* flags. This is
+	 * a bit dirty, but we can get by with it since the checksum offsets
+	 * happen to be different. We assume the host does not do IPv4
+	 * header checksum offloading.
+	 */
+	switch (hdr->csum_offset) {
+	case offsetof(struct udphdr, uh_sum):
+	case offsetof(struct tcphdr, th_sum):
+		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+		m->m_pkthdr.csum_data = 0xFFFF;
+		break;
+	case offsetof(struct sctphdr, checksum):
+		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+		break;
+	default:
+		sc->vtnet_stats.rx_csum_bad_offset++;
+		return (1);
+	}
+
+	return (0);
+}
+
+static int
+vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
+    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
+{
+	struct vtnet_softc *sc;
+	int offset, proto;
+
+	sc = rxq->vtnrx_sc;
+
+	switch (eth_type) {
+#if defined(INET)
+	case ETHERTYPE_IP: {
+		struct ip *ip;
+		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
+			return (1);
+		ip = (struct ip *)(m->m_data + ip_start);
+		proto = ip->ip_p;
+		offset = ip_start + (ip->ip_hl << 2);
+		break;
+	}
+#endif
+#if defined(INET6)
+	case ETHERTYPE_IPV6:
+		if (__predict_false(m->m_len < ip_start +
+		    sizeof(struct ip6_hdr)))
+			return (1);
+		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
+		if (__predict_false(offset < 0))
+			return (1);
+		break;
+#endif
+	default:
+		sc->vtnet_stats.rx_csum_bad_ethtype++;
+		return (1);
+	}
+
+	switch (proto) {
+	case IPPROTO_TCP:
+		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
+			return (1);
+		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+		m->m_pkthdr.csum_data = 0xFFFF;
+		break;
+	case IPPROTO_UDP:
+		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
+			return (1);
+		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+		m->m_pkthdr.csum_data = 0xFFFF;
+		break;
+	case IPPROTO_SCTP:
+		if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
+			return (1);
+		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+		break;
+	default:
+		/*
+		 * For the remaining protocols, FreeBSD does not support
+		 * checksum offloading, so the checksum will be recomputed.
+		 */
+#if 0
+		if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
+		    "protocol eth_type=%#x proto=%d csum_start=%d "
+		    "csum_offset=%d\n", __func__, eth_type, proto,
+		    hdr->csum_start, hdr->csum_offset);
+#endif
+		break;
+	}
+
+	return (0);
+}
+
+/*
+ * Set the appropriate CSUM_* flags. Unfortunately, the information
+ * provided is not directly useful to us. The VirtIO header gives the
+ * offset of the checksum, which is all Linux needs, but this is not
+ * how FreeBSD does things. We are forced to peek inside the packet
+ * a bit.
+ *
+ * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
+ * could accept the offsets and let the stack figure it out.
+ */
+static int
+vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
+    struct virtio_net_hdr *hdr)
+{
+	struct ether_header *eh;
+	struct ether_vlan_header *evh;
+	uint16_t eth_type;
+	int offset, error;
+
+	eh = mtod(m, struct ether_header *);
+	eth_type = ntohs(eh->ether_type);
+	if (eth_type == ETHERTYPE_VLAN) {
+		/* BMV: We should handle nested VLAN tags too. */
+		evh = mtod(m, struct ether_vlan_header *);
+		eth_type = ntohs(evh->evl_proto);
+		offset = sizeof(struct ether_vlan_header);
+	} else
+		offset = sizeof(struct ether_header);
+
+	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
+	else
+		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
+
+	return (error);
+}
+#endif
+
+static void
+vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
+{
+	struct mbuf *m;
+
+	while (--nbufs > 0) {
+		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
+		if (m == NULL)
+			break;
+		vtnet_rxq_discard_buf(rxq, m);
+	}
+}
+
+static void
+vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
+{
+	int error;
+
+	/*
+	 * Requeue the discarded mbuf. This should always be successful
+	 * since it was just dequeued.
+	 */
+	error = vtnet_rxq_enqueue_buf(rxq, m);
+	KASSERT(error == 0,
+	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
+}
+
+static int
+vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
+{
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+	struct virtqueue *vq;
+	struct mbuf *m, *m_tail;
+#ifdef NOTUSED
+	int len;
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	uint32_t len;
+#endif
+
+	sc = rxq->vtnrx_sc;
+	vq = rxq->vtnrx_vq;
+	ifp = sc->vtnet_ifp;
+	m_tail = m_head;
+
+	while (--nbufs > 0) {
+		m = virtqueue_dequeue(vq, &len);
+		if (m == NULL) {
+			rxq->vtnrx_stats.vrxs_ierrors++;
+			goto fail;
+		}
+
+		if (vtnet_rxq_new_buf(rxq) != 0) {
+			rxq->vtnrx_stats.vrxs_iqdrops++;
+			vtnet_rxq_discard_buf(rxq, m);
+			if (nbufs > 1)
+				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
+			goto fail;
+		}
+
+		if (m->m_len < len)
+			len = m->m_len;
+
+		m->m_len = len;
+		m->m_flags &= ~M_PKTHDR;
+
+		m_head->m_pkthdr.len += len;
+		m_tail->m_next = m;
+		m_tail = m;
+	}
+
+	return (0);
+
+fail:
+	sc->vtnet_stats.rx_mergeable_failed++;
+	m_freem(m_head);
+
+	return (1);
+}
+
+static void
+vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
+    struct virtio_net_hdr *hdr)
+{
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+	struct ether_header *eh;
+
+	sc = rxq->vtnrx_sc;
+	ifp = sc->vtnet_ifp;
+
+#ifdef NOTUSED
+	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
+		eh = mtod(m, struct ether_header *);
+		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
+			vtnet_vlan_tag_remove(m);
+			/*
+			 * With the 802.1Q header removed, update the
+			 * checksum starting location accordingly.
+			 */
+			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
+		}
+	}
+
+	m->m_pkthdr.flowid = rxq->vtnrx_id;
+	m->m_flags |= M_FLOWID;
+
+	/*
+	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
+	 * distinction that Linux does. Need to reevaluate if performing
+	 * offloading for the NEEDS_CSUM case is really appropriate.
+	 */
+	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
+	    VIRTIO_NET_HDR_F_DATA_VALID)) {
+		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
+			rxq->vtnrx_stats.vrxs_csum++;
+		else
+			rxq->vtnrx_stats.vrxs_csum_failed++;
+	}
+#endif
+
+	rxq->vtnrx_stats.vrxs_ipackets++;
+	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
+
+#ifdef NOTUSED
+	VTNET_RXQ_UNLOCK(rxq);
+	(*ifp->if_input)(ifp, m);
+	VTNET_RXQ_LOCK(rxq);
+#endif
+#ifdef RTEMS_VIRTIO_NET
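+	/*
+	 * RTEMS: the legacy BSD stack's ether_input() expects the Ethernet
+	 * header to be passed separately, so save a pointer to it and trim
+	 * it from the mbuf before handing the packet up.
+	 */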
+	eh = mtod(m, struct ether_header *);
+	m_adj( m, sizeof( struct ether_header ) );
+	ether_input(ifp, eh, m);
+#endif
+}
+
+static int
+vtnet_rxq_eof(struct vtnet_rxq *rxq)
+{
+	struct virtio_net_hdr lhdr, *hdr;
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+	struct virtqueue *vq;
+	struct mbuf *m;
+	struct virtio_net_hdr_mrg_rxbuf *mhdr;
+#ifdef NOTUSED
+	int len, deq, nbufs, adjsz, count;
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	uint32_t len;
+	int deq, nbufs, adjsz, count;
+#endif
+
+	sc = rxq->vtnrx_sc;
+	vq = rxq->vtnrx_vq;
+	ifp = sc->vtnet_ifp;
+	hdr = &lhdr;
+	deq = 0;
+	count = sc->vtnet_rx_process_limit;
+
+	VTNET_RXQ_LOCK_ASSERT(rxq);
+
+	while (count-- > 0) {
+		m = virtqueue_dequeue(vq, &len);
+		if (m == NULL)
+			break;
+		deq++;
+		
+		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
+			rxq->vtnrx_stats.vrxs_ierrors++;
+			vtnet_rxq_discard_buf(rxq, m);
+			continue;
+		}
+		
+		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
+			nbufs = 1;
+			adjsz = sizeof(struct vtnet_rx_header);
+			/*
+			 * Account for our pad inserted between the header
+			 * and the actual start of the frame.
+			 */
+			len += VTNET_RX_HEADER_PAD;
+		} else {
+			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
+			nbufs = mhdr->num_buffers;
+			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+		}
+
+		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
+			rxq->vtnrx_stats.vrxs_iqdrops++;
+			vtnet_rxq_discard_buf(rxq, m);
+			if (nbufs > 1)
+				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
+			continue;
+		}
+
+		m->m_pkthdr.len = len;
+		m->m_pkthdr.rcvif = ifp;
+#ifdef NOTUSED
+		m->m_pkthdr.csum_flags = 0;
+#endif
+
+		if (nbufs > 1) {
+			/* Dequeue the rest of chain. */
+			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
+				continue;
+		}
+
+		/*
+		 * Save copy of header before we strip it. For both mergeable
+		 * and non-mergeable, the header is at the beginning of the
+		 * mbuf data. We no longer need num_buffers, so always use a
+		 * regular header.
+		 *
+		 * BMV: Is this memcpy() expensive? We know the mbuf data is
+		 * still valid even after the m_adj().
+		 */
+		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
+		m_adj(m, adjsz);
+
+		vtnet_rxq_input(rxq, m, hdr);
+
+		/* Must recheck after dropping the Rx lock. */
+		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+			break;
+	}
+
+	if (deq > 0)
+		virtqueue_notify(vq);
+
+	return (count > 0 ? 0 : EAGAIN);
+}
+
+static void
+vtnet_rx_vq_intr(void *xrxq)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_rxq *rxq;
+	struct ifnet *ifp;
+	int tries, more;
+
+	rxq = xrxq;
+	sc = rxq->vtnrx_sc;
+	ifp = sc->vtnet_ifp;
+	tries = 0;
+	
+	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
+		/*
+		 * Ignore this interrupt. Either this is a spurious interrupt
+		 * or multiqueue without per-VQ MSIX so every queue needs to
+		 * be polled (a brain dead configuration we could try harder
+		 * to avoid).
+		 */
+		vtnet_rxq_disable_intr(rxq);
+		return;
+	}
+
+again:
+	VTNET_RXQ_LOCK(rxq);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		VTNET_RXQ_UNLOCK(rxq);
+		return;
+	}
+
+	more = vtnet_rxq_eof(rxq);
+	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
+		if (!more)
+			vtnet_rxq_disable_intr(rxq);
+		/*
+		 * This is an occasional condition or race (when !more),
+		 * so retry a few times before scheduling the taskqueue.
+		 */
+		rxq->vtnrx_stats.vrxs_rescheduled++;
+		VTNET_RXQ_UNLOCK(rxq);
+		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
+			goto again;
+#ifdef NOTUSED
+		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
+#endif
+	} else
+		VTNET_RXQ_UNLOCK(rxq);
+}
+
+#ifdef NOTUSED
+static void
+vtnet_rxq_tq_intr(void *xrxq, int pending)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_rxq *rxq;
+	struct ifnet *ifp;
+	int more;
+
+	rxq = xrxq;
+	sc = rxq->vtnrx_sc;
+	ifp = sc->vtnet_ifp;
+
+	VTNET_RXQ_LOCK(rxq);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		VTNET_RXQ_UNLOCK(rxq);
+		return;
+	}
+
+	more = vtnet_rxq_eof(rxq);
+	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
+		if (!more)
+			vtnet_rxq_disable_intr(rxq);
+		rxq->vtnrx_stats.vrxs_rescheduled++;
+		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
+	}
+
+	VTNET_RXQ_UNLOCK(rxq);
+}
+#endif
+
+static void
+vtnet_txq_free_mbufs(struct vtnet_txq *txq)
+{
+	struct virtqueue *vq;
+	struct mbuf *m_txhdr;
+	int last;
+
+	vq = txq->vtntx_vq;
+	last = 0;
+
+#ifdef NOTUSED
+	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
+		m_freem(txhdr->vth_mbuf);
+		uma_zfree(vtnet_tx_header_zone, txhdr);
+	}
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	while ((m_txhdr = virtqueue_drain(vq, &last)) != NULL) {
+		m_freem(m_txhdr);
+	}
+#endif
+
+	KASSERT(virtqueue_empty(vq),
+	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
+}
+
+#ifdef NOTUSED
+/*
+ * BMV: Much of this can go away once we finally have offsets in
+ * the mbuf packet header. Bug andre at .
+ */
+static int
+vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
+    int *etype, int *proto, int *start)
+{
+	struct vtnet_softc *sc;
+	struct ether_vlan_header *evh;
+	int offset;
+
+	sc = txq->vtntx_sc;
+
+	evh = mtod(m, struct ether_vlan_header *);
+	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		/* BMV: We should handle nested VLAN tags too. */
+		*etype = ntohs(evh->evl_proto);
+		offset = sizeof(struct ether_vlan_header);
+	} else {
+		*etype = ntohs(evh->evl_encap_proto);
+		offset = sizeof(struct ether_header);
+	}
+
+	switch (*etype) {
+#if defined(INET)
+	case ETHERTYPE_IP: {
+		struct ip *ip, iphdr;
+		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
+			m_copydata(m, offset, sizeof(struct ip),
+			    (caddr_t) &iphdr);
+			ip = &iphdr;
+		} else
+			ip = (struct ip *)(m->m_data + offset);
+		*proto = ip->ip_p;
+		*start = offset + (ip->ip_hl << 2);
+		break;
+	}
+#endif
+#if defined(INET6)
+	case ETHERTYPE_IPV6:
+		*proto = -1;
+		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
+		/* Assert the network stack sent us a valid packet. */
+		KASSERT(*start > offset,
+		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
+		    *start, offset, *proto));
+		break;
+#endif
+	default:
+		sc->vtnet_stats.tx_csum_bad_ethtype++;
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+static int
+vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
+    int offset, struct virtio_net_hdr *hdr)
+{
+	static struct timeval lastecn;
+	static int curecn;
+	struct vtnet_softc *sc;
+	struct tcphdr *tcp, tcphdr;
+
+	sc = txq->vtntx_sc;
+
+	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
+		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
+		tcp = &tcphdr;
+	} else
+		tcp = (struct tcphdr *)(m->m_data + offset);
+
+	hdr->hdr_len = offset + (tcp->th_off << 2);
+	hdr->gso_size = m->m_pkthdr.tso_segsz;
+	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
+	    VIRTIO_NET_HDR_GSO_TCPV6;
+
+	if (tcp->th_flags & TH_CWR) {
+		/*
+		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
+		 * ECN support is not on a per-interface basis, but globally via
+		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
+		 */
+		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
+			if (ppsratecheck(&lastecn, &curecn, 1))
+				if_printf(sc->vtnet_ifp,
+				    "TSO with ECN not negotiated with host\n");
+			return (ENOTSUP);
+		}
+		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+	}
+
+	txq->vtntx_stats.vtxs_tso++;
+
+	return (0);
+}
+
+static struct mbuf *
+vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
+    struct virtio_net_hdr *hdr)
+{
+	struct vtnet_softc *sc;
+	int flags, etype, csum_start, proto, error;
+
+	sc = txq->vtntx_sc;
+	flags = m->m_pkthdr.csum_flags;
+
+	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
+	if (error)
+		goto drop;
+
+	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
+	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
+		/*
+		 * We could compare the IP protocol vs the CSUM_ flag too,
+		 * but that really should not be necessary.
+		 */
+		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		hdr->csum_start = csum_start;
+		hdr->csum_offset = m->m_pkthdr.csum_data;
+		txq->vtntx_stats.vtxs_csum++;
+	}
+
+	if (flags & CSUM_TSO) {
+		if (__predict_false(proto != IPPROTO_TCP)) {
+			/* Likely failed to correctly parse the mbuf. */
+			sc->vtnet_stats.tx_tso_not_tcp++;
+			goto drop;
+		}
+
+		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
+		    ("%s: mbuf %p TSO without checksum offload", __func__, m));
+
+		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
+		if (error)
+			goto drop;
+	}
+
+	return (m);
+
+drop:
+	m_freem(m);
+	return (NULL);
+}
+#endif
+
+static int
+vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
+    struct vtnet_tx_header *txhdr)
+{
+#ifdef NOTUSED
+	struct sglist sg;
+	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
+	struct vtnet_softc *sc;
+	struct virtqueue *vq;
+	struct mbuf *m;
+	int collapsed, error;
+
+	vq = txq->vtntx_vq;
+	sc = txq->vtntx_sc;
+	m = *m_head;
+	collapsed = 0;
+
+	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
+	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
+	KASSERT(error == 0 && sg.sg_nseg == 1,
+	    ("%s: error %d adding header to sglist", __func__, error));
+
+again:
+	error = sglist_append_mbuf(&sg, m);
+	if (error) {
+		if (collapsed)
+			goto fail;
+
+		m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1);
+		if (m == NULL)
+			goto fail;
+
+		*m_head = m;
+		collapsed = 1;
+		txq->vtntx_stats.vtxs_collapsed++;
+		goto again;
+	}
+
+	txhdr->vth_mbuf = m;
+	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
+#endif
+#ifdef RTEMS_VIRTIO_NET
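+	/*
+	 * RTEMS: the virtio header was already prepended as its own mbuf by
+	 * vtnet_txq_encap(), so record the payload chain for the Tx stats,
+	 * count the segments, and enqueue the whole chain.
+	 */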
+	struct mbuf *m;
+	struct virtqueue *vq;
+	int nsegs, error;
+	
+	vq = txq->vtntx_vq;
+	nsegs = 0;
+	
+	txhdr->vth_mbuf = (*m_head)->m_next;
+	for( m=*m_head ; m!=NULL ; m=m->m_next )
+		nsegs++;
+
+	error = rtems_virtqueue_enqueue(vq, *m_head, nsegs, 0);
+#endif
+
+	return (error);
+#ifdef NOTUSED
+fail:
+	m_freem(*m_head);
+	*m_head = NULL;
+
+	return (ENOBUFS);
+#endif
+}
+
+static int
+vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_tx_header *txhdr;
+	struct virtio_net_hdr *hdr;
+	struct mbuf *m;
+	int error;
+
+	sc = txq->vtntx_sc;
+	m = *m_head;
+#ifdef NOTUSED
+	M_ASSERTPKTHDR(m);
+
+	txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
+	if( txhdr == NULL ){
+		m_freem(m);
+		*m_head = NULL;
+		return (ENOMEM);
+	}
+	
+	/*
+	 * Always use the non-mergeable header, regardless if the feature
+	 * was negotiated. For transmit, num_buffers is always zero. The
+	 * vtnet_hdr_size is used to enqueue the correct header size.
+	 */
+	hdr = &txhdr->vth_uhdr.hdr;
+
+	if (m->m_flags & M_VLANTAG) {
+		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
+		if ((*m_head = m) == NULL) {
+			error = ENOBUFS;
+			goto fail;
+		}
+		m->m_flags &= ~M_VLANTAG;
+	}
+
+	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
+		m = vtnet_txq_offload(txq, m, hdr);
+		if ((*m_head = m) == NULL) {
+			error = ENOBUFS;
+			goto fail;
+		}
+	}
+
+	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
+#endif
+#ifdef RTEMS_VIRTIO_NET
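+	/*
+	 * RTEMS: instead of a UMA zone allocation, carry the virtio net
+	 * header in a separate mbuf prepended to the packet chain.
+	 */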
+	struct mbuf *m_txhdr;
+	
+	MGET(m_txhdr, M_DONTWAIT, MT_DATA);
+	if( m_txhdr == NULL ){
+		m_freem(m);
+		*m_head = NULL;
+		return (ENOMEM);
+	}
+	memset(m_txhdr->m_data, 0, sc->vtnet_hdr_size);
+	m_txhdr->m_len = sc->vtnet_hdr_size;
+	m_txhdr->m_next = *m_head;
+	txhdr = mtod(m_txhdr, struct vtnet_tx_header *);
+	
+	error = vtnet_txq_enqueue_buf(txq, &m_txhdr, txhdr);
+#endif
+	if (error == 0)
+		return (0);
+
+#ifdef NOTUSED
+fail:
+	uma_zfree(vtnet_tx_header_zone, txhdr);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	m_free(m_txhdr);
+#endif
+
+	return (error);
+}
+
+#ifdef VTNET_LEGACY_TX
+static void
+vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
+{
+	struct vtnet_softc *sc;
+	struct virtqueue *vq;
+	struct mbuf *m0;
+	int enq;
+
+	sc = txq->vtntx_sc;
+	vq = txq->vtntx_vq;
+	enq = 0;
+
+	VTNET_TXQ_LOCK_ASSERT(txq);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+	    sc->vtnet_link_active == 0)
+		return;
+
+	vtnet_txq_eof(txq);
+
+	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+		if (virtqueue_full(vq))
+			break;
+
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
+		if (m0 == NULL)
+			break;
+
+		if (vtnet_txq_encap(txq, &m0) != 0) {
+			if (m0 != NULL)
+				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
+			break;
+		}
+
+		enq++;
+#ifdef NOTUSED
+		ETHER_BPF_MTAP(ifp, m0);
+#endif
+	}
+
+	if (enq > 0) {
+		virtqueue_notify(vq);
+		txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
+	}
+}
+
+static void
+vtnet_start(struct ifnet *ifp)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_txq *txq;
+
+	sc = ifp->if_softc;
+	txq = &sc->vtnet_txqs[0];
+
+	VTNET_TXQ_LOCK(txq);
+	vtnet_start_locked(txq, ifp);
+	VTNET_TXQ_UNLOCK(txq);
+}
+
+#else /* !VTNET_LEGACY_TX */
+
+static int
+vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
+{
+	struct vtnet_softc *sc;
+	struct virtqueue *vq;
+	struct buf_ring *br;
+	struct ifnet *ifp;
+	int enq, error;
+
+	sc = txq->vtntx_sc;
+	vq = txq->vtntx_vq;
+	br = txq->vtntx_br;
+	ifp = sc->vtnet_ifp;
+	enq = 0;
+	error = 0;
+
+	VTNET_TXQ_LOCK_ASSERT(txq);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+	    sc->vtnet_link_active == 0) {
+		if (m != NULL)
+			error = drbr_enqueue(ifp, br, m);
+		return (error);
+	}
+
+	if (m != NULL) {
+		error = drbr_enqueue(ifp, br, m);
+		if (error)
+			return (error);
+	}
+
+	vtnet_txq_eof(txq);
+
+	while ((m = drbr_peek(ifp, br)) != NULL) {
+		error = vtnet_txq_encap(txq, &m);
+		if (error) {
+			if (m != NULL)
+				drbr_putback(ifp, br, m);
+			else
+				drbr_advance(ifp, br);
+			break;
+		}
+		drbr_advance(ifp, br);
+
+		enq++;
+		ETHER_BPF_MTAP(ifp, m);
+	}
+
+	if (enq > 0) {
+		virtqueue_notify(vq);
+		txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
+	}
+
+	return (error);
+}
+
+static int
+vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_txq *txq;
+	int i, npairs, error;
+
+	sc = ifp->if_softc;
+	npairs = sc->vtnet_act_vq_pairs;
+
+	if (m->m_flags & M_FLOWID)
+		i = m->m_pkthdr.flowid % npairs;
+	else
+		i = curcpu % npairs;
+
+	txq = &sc->vtnet_txqs[i];
+
+	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
+		error = vtnet_txq_mq_start_locked(txq, m);
+		VTNET_TXQ_UNLOCK(txq);
+	} else {
+		error = drbr_enqueue(ifp, txq->vtntx_br, m);
+		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
+	}
+
+	return (error);
+}
+
+static void
+vtnet_txq_tq_deferred(void *xtxq, int pending)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_txq *txq;
+
+	txq = xtxq;
+	sc = txq->vtntx_sc;
+
+	VTNET_TXQ_LOCK(txq);
+	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
+		vtnet_txq_mq_start_locked(txq, NULL);
+	VTNET_TXQ_UNLOCK(txq);
+}
+
+#endif /* VTNET_LEGACY_TX */
+
+#ifdef NOTUSED
+static void
+vtnet_txq_tq_intr(void *xtxq, int pending)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_txq *txq;
+	struct ifnet *ifp;
+
+	txq = xtxq;
+	sc = txq->vtntx_sc;
+	ifp = sc->vtnet_ifp;
+
+	VTNET_TXQ_LOCK(txq);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		VTNET_TXQ_UNLOCK(txq);
+		return;
+	}
+
+	vtnet_txq_eof(txq);
+
+#ifdef VTNET_LEGACY_TX
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		vtnet_start_locked(txq, ifp);
+#else
+	if (!drbr_empty(ifp, txq->vtntx_br))
+		vtnet_txq_mq_start_locked(txq, NULL);
+#endif
+
+	if (vtnet_txq_enable_intr(txq) != 0) {
+		vtnet_txq_disable_intr(txq);
+		txq->vtntx_stats.vtxs_rescheduled++;
+		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
+	}
+
+	VTNET_TXQ_UNLOCK(txq);
+}
+#endif
+
+static void
+vtnet_txq_eof(struct vtnet_txq *txq)
+{
+	struct virtqueue *vq;
+	struct vtnet_tx_header *txhdr;
+	struct mbuf *m;
+
+	vq = txq->vtntx_vq;
+	VTNET_TXQ_LOCK_ASSERT(txq);
+
+#ifdef NOTUSED
+	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
+		m = txhdr->vth_mbuf;
+
+		txq->vtntx_stats.vtxs_opackets++;
+		txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
+		if (m->m_flags & M_MCAST)
+			txq->vtntx_stats.vtxs_omcasts++;
+
+		m_freem(m);
+		uma_zfree(vtnet_tx_header_zone, txhdr);
+	}
+#endif
+#ifdef RTEMS_VIRTIO_NET
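+	/*
+	 * RTEMS: completed transmissions come back as the prepended header
+	 * mbuf; recover the tx header from it for the statistics and then
+	 * free the entire chain.
+	 */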
+	struct mbuf *m_txhdr;
+	
+	while ((m_txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
+		txq->vtntx_stats.vtxs_opackets++;
+		txhdr = mtod(m_txhdr, struct vtnet_tx_header *);
+		txq->vtntx_stats.vtxs_obytes += txhdr->vth_mbuf->m_pkthdr.len;
+		if (txhdr->vth_mbuf->m_flags & M_MCAST)
+			txq->vtntx_stats.vtxs_omcasts++;
+
+		m_freem(m_txhdr);
+	}
+#endif
+	
+	if (virtqueue_empty(vq))
+		txq->vtntx_watchdog = 0;
+}
+
+static void
+vtnet_tx_vq_intr(void *xtxq)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_txq *txq;
+	struct ifnet *ifp;
+	int tries;
+
+	txq = xtxq;
+	sc = txq->vtntx_sc;
+	ifp = sc->vtnet_ifp;
+	tries = 0;
+	
+	if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
+		/*
+		 * Ignore this interrupt. Either this is a spurious interrupt
+		 * or multiqueue without per-VQ MSIX so every queue needs to
+		 * be polled (a brain dead configuration we could try harder
+		 * to avoid).
+		 */
+		vtnet_txq_disable_intr(txq);
+		return;
+	}
+
+again:
+	VTNET_TXQ_LOCK(txq);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+		VTNET_TXQ_UNLOCK(txq);
+		return;
+	}
+	
+	vtnet_txq_eof(txq);
+
+#ifdef VTNET_LEGACY_TX
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		vtnet_start_locked(txq, ifp);
+#else
+	if (!drbr_empty(ifp, txq->vtntx_br))
+		vtnet_txq_mq_start_locked(txq, NULL);
+#endif
+
+	if (vtnet_txq_enable_intr(txq) != 0) {
+		vtnet_txq_disable_intr(txq);
+		/*
+		 * This is an occasional race, so retry a few times
+		 * before scheduling the taskqueue.
+		 */
+		VTNET_TXQ_UNLOCK(txq);
+		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
+			goto again;
+		txq->vtntx_stats.vtxs_rescheduled++;
+#ifdef NOTUSED
+		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
+#endif
+	} else
+		VTNET_TXQ_UNLOCK(txq);
+}
+
+static void
+vtnet_tx_start_all(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+	struct vtnet_txq *txq;
+	int i;
+
+	ifp = sc->vtnet_ifp;
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
+		txq = &sc->vtnet_txqs[i];
+
+		VTNET_TXQ_LOCK(txq);
+#ifdef VTNET_LEGACY_TX
+		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+			vtnet_start_locked(txq, ifp);
+#else
+		if (!drbr_empty(ifp, txq->vtntx_br))
+			vtnet_txq_mq_start_locked(txq, NULL);
+#endif
+		VTNET_TXQ_UNLOCK(txq);
+	}
+}
+
+#ifndef VTNET_LEGACY_TX
+static void
+vtnet_qflush(struct ifnet *ifp)
+{
+	struct vtnet_softc *sc;
+	struct vtnet_txq *txq;
+	struct mbuf *m;
+	int i;
+
+	sc = ifp->if_softc;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
+		txq = &sc->vtnet_txqs[i];
+
+		VTNET_TXQ_LOCK(txq);
+		while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
+			m_freem(m);
+		VTNET_TXQ_UNLOCK(txq);
+	}
+
+	if_qflush(ifp);
+}
+#endif
+
+static int
+vtnet_watchdog(struct vtnet_txq *txq)
+{
+	struct vtnet_softc *sc;
+
+	sc = txq->vtntx_sc;
+
+	VTNET_TXQ_LOCK(txq);
+	if (sc->vtnet_flags & VTNET_FLAG_EVENT_IDX)
+		vtnet_txq_eof(txq);
+	if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
+		VTNET_TXQ_UNLOCK(txq);
+		return (0);
+	}
+	VTNET_TXQ_UNLOCK(txq);
+
+#ifdef NOTUSED
+	if_printf(sc->vtnet_ifp, "watchdog timeout on queue %d\n",
+	    txq->vtntx_id);
+#endif
+
+	return (1);
+}
+
+#ifdef NOTUSED
+static void
+vtnet_rxq_accum_stats(struct vtnet_rxq *rxq, struct vtnet_rxq_stats *accum)
+{
+	struct vtnet_rxq_stats *st;
+
+	st = &rxq->vtnrx_stats;
+
+	accum->vrxs_ipackets += st->vrxs_ipackets;
+	accum->vrxs_ibytes += st->vrxs_ibytes;
+	accum->vrxs_iqdrops += st->vrxs_iqdrops;
+	accum->vrxs_csum += st->vrxs_csum;
+	accum->vrxs_csum_failed += st->vrxs_csum_failed;
+	accum->vrxs_rescheduled += st->vrxs_rescheduled;
+}
+
+static void
+vtnet_txq_accum_stats(struct vtnet_txq *txq, struct vtnet_txq_stats *accum)
+{
+	struct vtnet_txq_stats *st;
+
+	st = &txq->vtntx_stats;
+
+	accum->vtxs_opackets += st->vtxs_opackets;
+	accum->vtxs_obytes += st->vtxs_obytes;
+	accum->vtxs_csum += st->vtxs_csum;
+	accum->vtxs_tso += st->vtxs_tso;
+	accum->vtxs_collapsed += st->vtxs_collapsed;
+	accum->vtxs_rescheduled += st->vtxs_rescheduled;
+}
+
+static void
+vtnet_accumulate_stats(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+	struct vtnet_statistics *st;
+	struct vtnet_rxq_stats rxaccum;
+	struct vtnet_txq_stats txaccum;
+	int i;
+
+	ifp = sc->vtnet_ifp;
+	st = &sc->vtnet_stats;
+	bzero(&rxaccum, sizeof(struct vtnet_rxq_stats));
+	bzero(&txaccum, sizeof(struct vtnet_txq_stats));
+
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		vtnet_rxq_accum_stats(&sc->vtnet_rxqs[i], &rxaccum);
+		vtnet_txq_accum_stats(&sc->vtnet_txqs[i], &txaccum);
+	}
+
+	st->rx_csum_offloaded = rxaccum.vrxs_csum;
+	st->rx_csum_failed = rxaccum.vrxs_csum_failed;
+	st->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
+	st->tx_csum_offloaded = txaccum.vtxs_csum;
+	st->tx_tso_offloaded = txaccum.vtxs_tso;
+	st->tx_task_rescheduled = txaccum.vtxs_rescheduled;
+
+	/*
+	 * With the exception of if_ierrors, these ifnet statistics are
+	 * only updated in the driver, so just set them to our accumulated
+	 * values. if_ierrors is updated in ether_input() for malformed
+	 * frames that we should have already discarded.
+	 */
+	ifp->if_ipackets = rxaccum.vrxs_ipackets;
+	ifp->if_iqdrops = rxaccum.vrxs_iqdrops;
+	ifp->if_ierrors = rxaccum.vrxs_ierrors;
+	ifp->if_opackets = txaccum.vtxs_opackets;
+#ifndef VTNET_LEGACY_TX
+	ifp->if_obytes = txaccum.vtxs_obytes;
+	ifp->if_omcasts = txaccum.vtxs_omcasts;
+#endif
+}
+#endif
+
+static void
+vtnet_tick(void *xsc)
+{
+	struct vtnet_softc *sc;
+	struct ifnet *ifp;
+	int i, timedout;
+
+	sc = xsc;
+	ifp = sc->vtnet_ifp;
+	timedout = 0;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+#ifdef NOTUSED	
+	vtnet_accumulate_stats(sc);
+#endif
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
+		timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
+
+	if (timedout != 0) {
+		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		vtnet_init_locked(sc);
+	} else
+#ifdef NOTUSED
+		callout_schedule(&sc->vtnet_tick_ch, hz);
+#endif
+#ifdef RTEMS_VIRTIO_NET
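+	/*
+	 * RTEMS: reschedule the tick via timeout() while running and
+	 * acknowledge a stop request from vtnet_stop().
+	 */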
+	{
+		if( sc->stat_ch == vtnet_timeout_running ) {
+			timeout(vtnet_tick, sc, hz);
+		} else if ( sc->stat_ch == vtnet_timeout_stop_rq ) {
+			sc->stat_ch = vtnet_timeout_stopped;
+		}
+	}
+#endif
+}
+
+#ifdef NOTUSED
+static void
+vtnet_start_taskqueues(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct vtnet_rxq *rxq;
+	struct vtnet_txq *txq;
+	int i, error;
+
+	dev = sc->vtnet_dev;
+
+	/*
+	 * Errors here are very difficult to recover from - we cannot
+	 * easily fail because, if this is during boot, we will hang
+	 * when freeing any successfully started taskqueues because
+	 * the scheduler isn't up yet.
+	 *
+	 * Most drivers just ignore the return value - it only fails
+	 * with ENOMEM so an error is not likely.
+	 */
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		rxq = &sc->vtnet_rxqs[i];
+		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
+		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
+		if (error) {
+			device_printf(dev, "failed to start rx taskq %d\n",
+			    rxq->vtnrx_id);
+		}
+
+		txq = &sc->vtnet_txqs[i];
+		error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
+		    "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
+		if (error) {
+			device_printf(dev, "failed to start tx taskq %d\n",
+			    txq->vtntx_id);
+		}
+	}
+}
+
+static void
+vtnet_free_taskqueues(struct vtnet_softc *sc)
+{
+	struct vtnet_rxq *rxq;
+	struct vtnet_txq *txq;
+	int i;
+
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		rxq = &sc->vtnet_rxqs[i];
+		if (rxq->vtnrx_tq != NULL) {
+			taskqueue_free(rxq->vtnrx_tq);
+			rxq->vtnrx_vq = NULL;
+		}
+
+		txq = &sc->vtnet_txqs[i];
+		if (txq->vtntx_tq != NULL) {
+			taskqueue_free(txq->vtntx_tq);
+			txq->vtntx_tq = NULL;
+		}
+	}
+}
+
+static void
+vtnet_drain_taskqueues(struct vtnet_softc *sc)
+{
+	struct vtnet_rxq *rxq;
+	struct vtnet_txq *txq;
+	int i;
+
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		rxq = &sc->vtnet_rxqs[i];
+		if (rxq->vtnrx_tq != NULL)
+			taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
+
+		txq = &sc->vtnet_txqs[i];
+		if (txq->vtntx_tq != NULL) {
+			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
+#ifndef VTNET_LEGACY_TX
+			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
+#endif
+		}
+	}
+}
+#endif
+
+static void
+vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
+{
+	struct vtnet_rxq *rxq;
+	struct vtnet_txq *txq;
+	int i;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
+		rxq = &sc->vtnet_rxqs[i];
+		vtnet_rxq_free_mbufs(rxq);
+
+		txq = &sc->vtnet_txqs[i];
+		vtnet_txq_free_mbufs(txq);
+	}
+}
+
+static void
+vtnet_stop_rendezvous(struct vtnet_softc *sc)
+{
+	struct vtnet_rxq *rxq;
+	struct vtnet_txq *txq;
+	int i;
+
+	/*
+	 * Lock and unlock the per-queue mutex so we know the stop
+	 * state is visible. Doing only the active queues should be
+	 * sufficient, but it does not cost much extra to do all the
+	 * queues. Note we hold the core mutex here too.
+	 */
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		rxq = &sc->vtnet_rxqs[i];
+		VTNET_RXQ_LOCK(rxq);
+		VTNET_RXQ_UNLOCK(rxq);
+
+		txq = &sc->vtnet_txqs[i];
+		VTNET_TXQ_LOCK(txq);
+		VTNET_TXQ_UNLOCK(txq);
+	}
+}
+
+static void
+vtnet_stop(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+	sc->vtnet_link_active = 0;
+#ifdef NOTUSED
+	callout_stop(&sc->vtnet_tick_ch);
+#endif
+#ifdef RTEMS_VIRTIO_NET
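+	/*
+	 * RTEMS: ask the tick handler to stop and wait for it to confirm,
+	 * releasing the network semaphore so the handler can run.
+	 */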
+	if( sc->stat_ch == vtnet_timeout_running ){
+		sc->stat_ch = vtnet_timeout_stop_rq;
+		while( sc->stat_ch != vtnet_timeout_stopped ){
+			rtems_bsdnet_semaphore_release();
+			rtems_task_wake_after( vtnet_ticksPerSecond );
+			rtems_bsdnet_semaphore_obtain();
+		}
+	}
+#endif
+
+	/* Only advisory. */
+	vtnet_disable_interrupts(sc);
+
+	/*
+	 * Stop the host adapter. This resets it to the pre-initialized
+	 * state. It will not generate any interrupts until after it is
+	 * reinitialized.
+	 */
+	virtio_stop(dev);
+	vtnet_stop_rendezvous(sc);
+
+	/* Free any mbufs left in the virtqueues. */
+	vtnet_drain_rxtx_queues(sc);
+}
+
+static int
+vtnet_virtio_reinit(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+	uint64_t features;
+	int mask, error;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
+	features = sc->vtnet_features;
+	
+	mask = 0;
+#ifdef NOTUSED
+#if defined(INET)
+	mask |= IFCAP_RXCSUM;
+#endif
+#if defined (INET6)
+	mask |= IFCAP_RXCSUM_IPV6;
+#endif
+
+	/*
+	 * Re-negotiate with the host, removing any disabled receive
+	 * features. Transmit features are disabled only on our side
+	 * via if_capenable and if_hwassist.
+	 */
+
+	if (ifp->if_capabilities & mask) {
+		/*
+		 * We require both IPv4 and IPv6 offloading to be enabled
+		 * in order to negotiate it: VirtIO does not distinguish
+		 * between the two.
+		 */
+		if ((ifp->if_capenable & mask) != mask)
+			features &= ~VIRTIO_NET_F_GUEST_CSUM;
+	}
+
+	if (ifp->if_capabilities & IFCAP_LRO) {
+		if ((ifp->if_capenable & IFCAP_LRO) == 0)
+			features &= ~VTNET_LRO_FEATURES;
+	}
+
+	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
+		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
+			features &= ~VIRTIO_NET_F_CTRL_VLAN;
+	}
+#endif
+
+	error = virtio_reinit(dev, features);
+	if (error)
+		device_printf(dev, "virtio reinit error %d\n", error);
+
+	return (error);
+}
+
+#ifdef NOTUSED
+static void
+vtnet_init_rx_filters(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+
+	ifp = sc->vtnet_ifp;
+
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
+		/* Restore promiscuous and all-multicast modes. */
+		vtnet_rx_filter(sc);
+		/* Restore filtered MAC addresses. */
+		vtnet_rx_filter_mac(sc);
+	}
+
+	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+		vtnet_rx_filter_vlan(sc);
+}
+#endif
+
+static int
+vtnet_init_rx_queues(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct vtnet_rxq *rxq;
+	int i, clsize, error;
+
+	dev = sc->vtnet_dev;
+
+	/*
+	 * Use the new cluster size if one has been set (via a MTU
+	 * change). Otherwise, use the standard 2K clusters.
+	 *
+	 * BMV: It might make sense to use page sized clusters as
+	 * the default (depending on the features negotiated).
+	 */
+	if (sc->vtnet_rx_new_clsize != 0) {
+		clsize = sc->vtnet_rx_new_clsize;
+		sc->vtnet_rx_new_clsize = 0;
+	} else
+		clsize = MCLBYTES;
+
+	sc->vtnet_rx_clsize = clsize;
+	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
+
+	/* The first segment is reserved for the header. */
+	KASSERT(sc->vtnet_rx_nmbufs < VTNET_MAX_RX_SEGS,
+	    ("%s: too many rx mbufs %d", __func__, sc->vtnet_rx_nmbufs));
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
+		rxq = &sc->vtnet_rxqs[i];
+
+		/* Hold the lock to satisfy asserts. */
+		VTNET_RXQ_LOCK(rxq);
+		error = vtnet_rxq_populate(rxq);
+		VTNET_RXQ_UNLOCK(rxq);
+
+		if (error) {
+			device_printf(dev,
+			    "cannot allocate mbufs for Rx queue %d\n", i);
+			return (error);
+		}
+	}
+
+	return (0);
+}
+
+static int
+vtnet_init_tx_queues(struct vtnet_softc *sc)
+{
+	struct vtnet_txq *txq;
+	int i;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
+		txq = &sc->vtnet_txqs[i];
+		txq->vtntx_watchdog = 0;
+	}
+
+	return (0);
+}
+
+static int
+vtnet_init_rxtx_queues(struct vtnet_softc *sc)
+{
+	int error;
+
+	error = vtnet_init_rx_queues(sc);
+	if (error)
+		return (error);
+
+	error = vtnet_init_tx_queues(sc);
+	if (error)
+		return (error);
+
+	return (0);
+}
+
+static void
+vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
+{
+	device_t dev;
+	int npairs;
+
+	dev = sc->vtnet_dev;
+
+	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
+		MPASS(sc->vtnet_max_vq_pairs == 1);
+		sc->vtnet_act_vq_pairs = 1;
+		return;
+	}
+
+#ifdef NOTUSED
+	/* BMV: Just use the maximum configured for now. */
+	npairs = sc->vtnet_max_vq_pairs;
+
+	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
+		device_printf(dev,
+		    "cannot set active queue pairs to %d\n", npairs);
+		npairs = 1;
+	}
+
+	sc->vtnet_act_vq_pairs = npairs;
+#endif
+
+	return;
+}
+
+static int
+vtnet_reinit(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+	int error;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
+
+	/* Use the current MAC address. */
+	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+	vtnet_set_hwaddr(sc);
+
+	vtnet_set_active_vq_pairs(sc);
+
+#ifdef NOTUSED
+	ifp->if_hwassist = 0;
+	if (ifp->if_capenable & IFCAP_TXCSUM)
+		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
+	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
+	if (ifp->if_capenable & IFCAP_TSO4)
+		ifp->if_hwassist |= CSUM_TSO;
+	if (ifp->if_capenable & IFCAP_TSO6)
+		ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */
+
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
+		vtnet_init_rx_filters(sc);
+#endif
+
+	error = vtnet_init_rxtx_queues(sc);
+	if (error)
+		return (error);
+
+	vtnet_enable_interrupts(sc);
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	return (0);
+}
+
+static void
+vtnet_init_locked(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		return;
+
+	vtnet_stop(sc);
+
+	/* Reinitialize with the host. */
+	if (vtnet_virtio_reinit(sc) != 0)
+		goto fail;
+
+	if (vtnet_reinit(sc) != 0)
+		goto fail;
+
+	virtio_reinit_complete(dev);
+
+	vtnet_update_link_status(sc);
+#ifdef NOTUSED
+	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
+#endif
+#ifdef RTEMS_VIRTIO_NET
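+	/*
+	 * RTEMS: the vtnet daemon task is created once on the first init,
+	 * and the periodic tick is armed with timeout() in place of the
+	 * FreeBSD callout.
+	 */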
+	/* start vtnet_daemon */
+	if ( sc->vtpci_softc->daemonTid == 0 ) {
+		sc->vtpci_softc->daemonTid = rtems_bsdnet_newproc( "VTNd",
+		    4096, vtnet_daemon, sc );
+	}
+  
+	sc->stat_ch = vtnet_timeout_running;
+	timeout(vtnet_tick, sc, hz);
+#endif
+	return;
+
+fail:
+	vtnet_stop(sc);
+}
+
+static void
+vtnet_init(void *xsc)
+{
+	struct vtnet_softc *sc;
+
+	sc = xsc;
+
+	VTNET_CORE_LOCK(sc);
+	vtnet_init_locked(sc);
+	VTNET_CORE_UNLOCK(sc);
+}
+
+#ifdef NOTUSED
+static void
+vtnet_free_ctrl_vq(struct vtnet_softc *sc)
+{
+	struct virtqueue *vq;
+
+	vq = sc->vtnet_ctrl_vq;
+
+	/*
+	 * The control virtqueue is only polled and therefore it should
+	 * already be empty.
+	 */
+	KASSERT(virtqueue_empty(vq),
+	    ("%s: ctrl vq %p not empty", __func__, vq));
+}
+
+static void
+vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
+    struct sglist *sg, int readable, int writable)
+{
+	struct virtqueue *vq;
+
+	vq = sc->vtnet_ctrl_vq;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
+	    ("%s: CTRL_VQ feature not negotiated", __func__));
+
+	if (!virtqueue_empty(vq))
+		return;
+	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
+		return;
+
+	/*
+	 * Poll for the response, but the command is likely already
+	 * done when we return from the notify.
+	 */
+	virtqueue_notify(vq);
+	virtqueue_poll(vq, NULL);
+}
+
+static int
+vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
+{
+	struct virtio_net_ctrl_hdr hdr;
+	struct sglist_seg segs[3];
+	struct sglist sg;
+	uint8_t ack;
+	int error;
+
+	hdr.class = VIRTIO_NET_CTRL_MAC;
+	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
+	ack = VIRTIO_NET_ERR;
+
+	sglist_init(&sg, 3, segs);
+	error = 0;
+	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
+	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
+	KASSERT(error == 0 && sg.sg_nseg == 3,
+	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
+
+	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
+
+	return (ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static int
+vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
+{
+	struct sglist_seg segs[3];
+	struct sglist sg;
+	struct {
+		struct virtio_net_ctrl_hdr hdr;
+		uint8_t pad1;
+		struct virtio_net_ctrl_mq mq;
+		uint8_t pad2;
+		uint8_t ack;
+	} s;
+	int error;
+
+	s.hdr.class = VIRTIO_NET_CTRL_MQ;
+	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
+	s.mq.virtqueue_pairs = npairs;
+	s.ack = VIRTIO_NET_ERR;
+
+	sglist_init(&sg, 3, segs);
+	error = 0;
+	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
+	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
+	KASSERT(error == 0 && sg.sg_nseg == 3,
+	    ("%s: error %d adding MQ message to sglist", __func__, error));
+
+	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+
+	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static int
+vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
+{
+	struct sglist_seg segs[3];
+	struct sglist sg;
+	struct {
+		struct virtio_net_ctrl_hdr hdr;
+		uint8_t pad1;
+		uint8_t onoff;
+		uint8_t pad2;
+		uint8_t ack;
+	} s;
+	int error;
+
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
+	    ("%s: CTRL_RX feature not negotiated", __func__));
+
+	s.hdr.class = VIRTIO_NET_CTRL_RX;
+	s.hdr.cmd = cmd;
+	s.onoff = !!on;
+	s.ack = VIRTIO_NET_ERR;
+
+	sglist_init(&sg, 3, segs);
+	error = 0;
+	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
+	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
+	KASSERT(error == 0 && sg.sg_nseg == 3,
+	    ("%s: error %d adding Rx message to sglist", __func__, error));
+
+	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+
+	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static int
+vtnet_set_promisc(struct vtnet_softc *sc, int on)
+{
+
+	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
+}
+
+static int
+vtnet_set_allmulti(struct vtnet_softc *sc, int on)
+{
+
+	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
+}
+
+/*
+ * The device defaults to promiscuous mode for backwards compatibility.
+ * Turn it off at attach time if possible.
+ */
+static void
+vtnet_attach_disable_promisc(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK(sc);
+	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
+		ifp->if_flags |= IFF_PROMISC;
+	} else if (vtnet_set_promisc(sc, 0) != 0) {
+		ifp->if_flags |= IFF_PROMISC;
+		device_printf(sc->vtnet_dev,
+		    "cannot disable default promiscuous mode\n");
+	}
+	VTNET_CORE_UNLOCK(sc);
+}
+
+static void
+vtnet_rx_filter(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+
+	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
+		device_printf(dev, "cannot %s promiscuous mode\n",
+		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
+
+	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
+		device_printf(dev, "cannot %s all-multicast mode\n",
+		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
+}
+
+static void
+vtnet_rx_filter_mac(struct vtnet_softc *sc)
+{
+	struct virtio_net_ctrl_hdr hdr;
+	struct vtnet_mac_filter *filter;
+	struct sglist_seg segs[4];
+	struct sglist sg;
+	struct ifnet *ifp;
+	struct ifaddr *ifa;
+	struct ifmultiaddr *ifma;
+	int ucnt, mcnt, promisc, allmulti, error;
+	uint8_t ack;
+
+	ifp = sc->vtnet_ifp;
+	filter = sc->vtnet_mac_filter;
+	ucnt = 0;
+	mcnt = 0;
+	promisc = 0;
+	allmulti = 0;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
+	    ("%s: CTRL_RX feature not negotiated", __func__));
+
+	/* Unicast MAC addresses: */
+	if_addr_rlock(ifp);
+	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+		if (ifa->ifa_addr->sa_family != AF_LINK)
+			continue;
+		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
+		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
+			continue;
+		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
+			promisc = 1;
+			break;
+		}
+
+		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
+		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
+		ucnt++;
+	}
+	if_addr_runlock(ifp);
+
+	if (promisc != 0) {
+		filter->vmf_unicast.nentries = 0;
+		if_printf(ifp, "more than %d MAC addresses assigned, "
+		    "falling back to promiscuous mode\n",
+		    VTNET_MAX_MAC_ENTRIES);
+	} else
+		filter->vmf_unicast.nentries = ucnt;
+
+	/* Multicast MAC addresses: */
+	if_maddr_rlock(ifp);
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
+			allmulti = 1;
+			break;
+		}
+
+		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
+		mcnt++;
+	}
+	if_maddr_runlock(ifp);
+
+	if (allmulti != 0) {
+		filter->vmf_multicast.nentries = 0;
+		if_printf(ifp, "more than %d multicast MAC addresses "
+		    "assigned, falling back to all-multicast mode\n",
+		    VTNET_MAX_MAC_ENTRIES);
+	} else
+		filter->vmf_multicast.nentries = mcnt;
+
+	if (promisc != 0 && allmulti != 0)
+		goto out;
+
+	hdr.class = VIRTIO_NET_CTRL_MAC;
+	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
+	ack = VIRTIO_NET_ERR;
+
+	sglist_init(&sg, 4, segs);
+	error = 0;
+	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, &filter->vmf_unicast,
+	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
+	error |= sglist_append(&sg, &filter->vmf_multicast,
+	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
+	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
+	KASSERT(error == 0 && sg.sg_nseg == 4,
+	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
+
+	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
+
+	if (ack != VIRTIO_NET_OK)
+		if_printf(ifp, "error setting host MAC filter table\n");
+
+out:
+	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
+		if_printf(ifp, "cannot enable promiscuous mode\n");
+	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
+		if_printf(ifp, "cannot enable all-multicast mode\n");
+}
+
+static int
+vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
+{
+	struct sglist_seg segs[3];
+	struct sglist sg;
+	struct {
+		struct virtio_net_ctrl_hdr hdr;
+		uint8_t pad1;
+		uint16_t tag;
+		uint8_t pad2;
+		uint8_t ack;
+	} s;
+	int error;
+
+	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
+	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
+	s.tag = tag;
+	s.ack = VIRTIO_NET_ERR;
+
+	sglist_init(&sg, 3, segs);
+	error = 0;
+	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
+	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
+	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
+	KASSERT(error == 0 && sg.sg_nseg == 3,
+	    ("%s: error %d adding VLAN message to sglist", __func__, error));
+
+	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
+
+	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
+}
+
+static void
+vtnet_rx_filter_vlan(struct vtnet_softc *sc)
+{
+	uint32_t w;
+	uint16_t tag;
+	int i, bit;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
+	    ("%s: VLAN_FILTER feature not negotiated", __func__));
+
+	/* Enable the filter for each configured VLAN. */
+	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
+		w = sc->vtnet_vlan_filter[i];
+
+		while ((bit = ffs(w) - 1) != -1) {
+			w &= ~(1 << bit);
+			tag = sizeof(w) * CHAR_BIT * i + bit;
+
+			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
+				device_printf(sc->vtnet_dev,
+				    "cannot enable VLAN %d filter\n", tag);
+			}
+		}
+	}
+}
+
+static void
+vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
+{
+	struct ifnet *ifp;
+	int idx, bit;
+
+	ifp = sc->vtnet_ifp;
+	idx = (tag >> 5) & 0x7F;
+	bit = tag & 0x1F;
+
+	if (tag == 0 || tag > 4095)
+		return;
+
+	VTNET_CORE_LOCK(sc);
+
+	if (add)
+		sc->vtnet_vlan_filter[idx] |= (1 << bit);
+	else
+		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
+
+	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
+	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
+		device_printf(sc->vtnet_dev,
+		    "cannot %s VLAN %d %s the host filter table\n",
+		    add ? "add" : "remove", tag, add ? "to" : "from");
+	}
+
+	VTNET_CORE_UNLOCK(sc);
+}
+
+static void
+vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
+{
+
+	if (ifp->if_softc != arg)
+		return;
+
+	vtnet_update_vlan_filter(arg, 1, tag);
+}
+
+static void
+vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
+{
+
+	if (ifp->if_softc != arg)
+		return;
+
+	vtnet_update_vlan_filter(arg, 0, tag);
+}
+#endif
+
+static int
+vtnet_is_link_up(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct ifnet *ifp;
+	uint16_t status;
+
+	dev = sc->vtnet_dev;
+	ifp = sc->vtnet_ifp;
+
+#ifdef NOTUSED
+	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
+		status = VIRTIO_NET_S_LINK_UP;
+	else
+		status = virtio_read_dev_config_2(dev,
+		    offsetof(struct virtio_net_config, status));
+
+	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
+#endif
+#ifdef RTEMS_VIRTIO_NET
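+	/* RTEMS: link state is not read from the device; always report link up. */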
+	return (1);
+#endif
+}
+
+static void
+vtnet_update_link_status(struct vtnet_softc *sc)
+{
+	struct ifnet *ifp;
+	int link;
+
+	ifp = sc->vtnet_ifp;
+
+	VTNET_CORE_LOCK_ASSERT(sc);
+	link = vtnet_is_link_up(sc);
+
+	/* Notify if the link status has changed. */
+	if (link != 0 && sc->vtnet_link_active == 0) {
+		sc->vtnet_link_active = 1;
+#ifdef NOTUSED
+		if_link_state_change(ifp, LINK_STATE_UP);
+#endif
+	} else if (link == 0 && sc->vtnet_link_active != 0) {
+		sc->vtnet_link_active = 0;
+#ifdef NOTUSED
+		if_link_state_change(ifp, LINK_STATE_DOWN);
+#endif
+	}
+}
+
+#ifdef NOTUSED
+static int
+vtnet_ifmedia_upd(struct ifnet *ifp)
+{
+	struct vtnet_softc *sc;
+	struct ifmedia *ifm;
+
+	sc = ifp->if_softc;
+	ifm = &sc->vtnet_media;
+
+	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+		return (EINVAL);
+
+	return (0);
+}
+
+static void
+vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct vtnet_softc *sc;
+
+	sc = ifp->if_softc;
+
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	VTNET_CORE_LOCK(sc);
+	if (vtnet_is_link_up(sc) != 0) {
+		ifmr->ifm_status |= IFM_ACTIVE;
+		ifmr->ifm_active |= VTNET_MEDIATYPE;
+	} else
+		ifmr->ifm_active |= IFM_NONE;
+	VTNET_CORE_UNLOCK(sc);
+}
+#endif
+
+static void
+vtnet_set_hwaddr(struct vtnet_softc *sc)
+{
+	device_t dev;
+
+	dev = sc->vtnet_dev;
+
+	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
+#ifdef NOTUSED
+		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
+			device_printf(dev, "unable to set MAC address\n");
+#endif
+	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
+		virtio_write_device_config(dev,
+		    offsetof(struct virtio_net_config, mac),
+		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+	}
+}
+
+static void
+vtnet_get_hwaddr(struct vtnet_softc *sc)
+{
+	device_t dev;
+
+	dev = sc->vtnet_dev;
+
+	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
+		/*
+		 * Generate a random locally administered unicast address.
+		 *
+		 * It would be nice to generate the same MAC address across
+		 * reboots, but it seems all the hosts currently available
+		 * support the MAC feature, so this isn't too important.
+		 */
+		 
+#ifdef NOTUSED
+		sc->vtnet_hwaddr[0] = 0xB2;
+		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
+#endif
+#ifdef RTEMS_VIRTIO_NET
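+		/*
+		 * RTEMS: use a fixed locally chosen MAC address instead of
+		 * a random one.
+		 */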
+		sc->arpcom.ac_enaddr[ 0 ] = 0x08;
+		sc->arpcom.ac_enaddr[ 1 ] = 0x00;
+		sc->arpcom.ac_enaddr[ 2 ] = 0x27;
+		sc->arpcom.ac_enaddr[ 3 ] = 0x98;
+		sc->arpcom.ac_enaddr[ 4 ] = 0xe7;
+		sc->arpcom.ac_enaddr[ 5 ] = 0x0f;
+		bcopy(sc->arpcom.ac_enaddr, sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+#endif
+		vtnet_set_hwaddr(sc);
+		return;
+	}
+
+	virtio_read_device_config(dev, offsetof(struct virtio_net_config, mac),
+	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
+}
+
+#ifdef NOTUSED
+static void
+vtnet_vlan_tag_remove(struct mbuf *m)
+{
+	struct ether_vlan_header *evh;
+
+	evh = mtod(m, struct ether_vlan_header *);
+	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
+	m->m_flags |= M_VLANTAG;
+
+	/* Strip the 802.1Q header. */
+	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
+	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
+	m_adj(m, ETHER_VLAN_ENCAP_LEN);
+}
+
+static void
+vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
+{
+	struct sysctl_oid *node;
+	struct sysctl_oid_list *list;
+	struct vtnet_rxq_stats *stats;
+	char namebuf[16];
+
+	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
+	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+	    CTLFLAG_RD, NULL, "Receive Queue");
+	list = SYSCTL_CHILDREN(node);
+
+	stats = &rxq->vtnrx_stats;
+
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
+	    &stats->vrxs_ipackets, "Receive packets");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
+	    &stats->vrxs_ibytes, "Receive bytes");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
+	    &stats->vrxs_iqdrops, "Receive drops");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
+	    &stats->vrxs_ierrors, "Receive errors");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+	    &stats->vrxs_csum, "Receive checksum offloaded");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
+	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+	    &stats->vrxs_rescheduled,
+	    "Receive interrupt handler rescheduled");
+}
+
+static void
+vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child, struct vtnet_txq *txq)
+{
+	struct sysctl_oid *node;
+	struct sysctl_oid_list *list;
+	struct vtnet_txq_stats *stats;
+	char namebuf[16];
+
+	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
+	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+	    CTLFLAG_RD, NULL, "Transmit Queue");
+	list = SYSCTL_CHILDREN(node);
+
+	stats = &txq->vtntx_stats;
+
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
+	    &stats->vtxs_opackets, "Transmit packets");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
+	    &stats->vtxs_obytes, "Transmit bytes");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
+	    &stats->vtxs_omcasts, "Transmit multicasts");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+	    &stats->vtxs_csum, "Transmit checksum offloaded");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
+	    &stats->vtxs_tso, "Transmit segmentation offloaded");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "collapsed", CTLFLAG_RD,
+	    &stats->vtxs_collapsed, "Transmit mbufs collapsed");
+	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+	    &stats->vtxs_rescheduled,
+	    "Transmit interrupt handler rescheduled");
+}
+
+static void
+vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree;
+	struct sysctl_oid_list *child;
+	int i;
+
+	dev = sc->vtnet_dev;
+	ctx = device_get_sysctl_ctx(dev);
+	tree = device_get_sysctl_tree(dev);
+	child = SYSCTL_CHILDREN(tree);
+
+	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
+		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
+	}
+}
+
+static void
+vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *child, struct vtnet_softc *sc)
+{
+	struct vtnet_statistics *stats;
+
+	stats = &sc->vtnet_stats;
+
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
+	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
+	    "Mbuf cluster allocation failures");
+
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
+	    CTLFLAG_RD, &stats->rx_frame_too_large,
+	    "Received frame larger than the mbuf chain");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
+	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+	    "Enqueuing the replacement receive mbuf failed");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
+	    CTLFLAG_RD, &stats->rx_mergeable_failed,
+	    "Mergeable buffers receive failures");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
+	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+	    "Received checksum offloaded buffer with unsupported "
+	    "Ethernet type");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
+	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+	    "Received checksum offloaded buffer with incorrect IP protocol");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
+	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
+	    "Received checksum offloaded buffer with incorrect offset");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
+	    CTLFLAG_RD, &stats->rx_csum_bad_proto,
+	    "Received checksum offloaded buffer with incorrect protocol");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
+	    CTLFLAG_RD, &stats->rx_csum_failed,
+	    "Received buffer checksum offload failed");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
+	    CTLFLAG_RD, &stats->rx_csum_offloaded,
+	    "Received buffer checksum offload succeeded");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
+	    CTLFLAG_RD, &stats->rx_task_rescheduled,
+	    "Times the receive interrupt task rescheduled itself");
+
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
+	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
+	    "Aborted transmit of checksum offloaded buffer with unknown "
+	    "Ethernet type");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
+	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
+	    "Aborted transmit of TSO buffer with unknown Ethernet type");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
+	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
+	    "Aborted transmit of TSO buffer with non TCP protocol");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
+	    CTLFLAG_RD, &stats->tx_csum_offloaded,
+	    "Offloaded checksum of transmitted buffer");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
+	    CTLFLAG_RD, &stats->tx_tso_offloaded,
+	    "Segmentation offload of transmitted buffer");
+	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
+	    CTLFLAG_RD, &stats->tx_task_rescheduled,
+	    "Times the transmit interrupt task rescheduled itself");
+}
+
+static void
+vtnet_setup_sysctl(struct vtnet_softc *sc)
+{
+	device_t dev;
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree;
+	struct sysctl_oid_list *child;
+
+	dev = sc->vtnet_dev;
+	ctx = device_get_sysctl_ctx(dev);
+	tree = device_get_sysctl_tree(dev);
+	child = SYSCTL_CHILDREN(tree);
+
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
+	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
+	    "Maximum number of supported virtqueue pairs");
+	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
+	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
+	    "Number of active virtqueue pairs");
+
+	vtnet_setup_stat_sysctl(ctx, child, sc);
+}
+#endif
+
+static int
+vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
+{
+
+	return (virtqueue_enable_intr(rxq->vtnrx_vq));
+}
+
+static void
+vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
+{
+
+	virtqueue_disable_intr(rxq->vtnrx_vq);
+}
+
+static int
+vtnet_txq_enable_intr(struct vtnet_txq *txq)
+{
+
+	return (virtqueue_postpone_intr(txq->vtntx_vq, VQ_POSTPONE_LONG));
+}
+
+static void
+vtnet_txq_disable_intr(struct vtnet_txq *txq)
+{
+
+	virtqueue_disable_intr(txq->vtntx_vq);
+}
+
+static void
+vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
+		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
+}
+
+static void
+vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
+		vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
+}
+
+static void
+vtnet_enable_interrupts(struct vtnet_softc *sc)
+{
+
+	vtnet_enable_rx_interrupts(sc);
+	vtnet_enable_tx_interrupts(sc);
+}
+
+static void
+vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
+		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
+}
+
+static void
+vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
+		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
+}
+
+static void
+vtnet_disable_interrupts(struct vtnet_softc *sc)
+{
+
+	vtnet_disable_rx_interrupts(sc);
+	vtnet_disable_tx_interrupts(sc);
+}
+
+#ifdef NOTUSED
+static int
+vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
+{
+	char path[64];
+
+	snprintf(path, sizeof(path),
+	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
+	TUNABLE_INT_FETCH(path, &def);
+
+	return (def);
+}
+#endif
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/if_vtnetvar.h
b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnetvar.h
new file mode 100644
index 0000000..5d2bd78
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnetvar.h
@@ -0,0 +1,391 @@
+/**
+ * @file if_vtnetvar.h
+ * @brief Header for if_vtnet.c
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+ 
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/network/if_vtnetvar.h 255167 2013-09-03 02:28:31Z bryanv $
+ */
+
+#ifndef _IF_VTNETVAR_H
+#define _IF_VTNETVAR_H
+
+struct vtnet_softc;
+struct vtpci_softc;
+
+struct vtnet_statistics {
+	uint64_t	mbuf_alloc_failed;
+
+	uint64_t	rx_frame_too_large;
+	uint64_t	rx_enq_replacement_failed;
+	uint64_t	rx_mergeable_failed;
+	uint64_t	rx_csum_bad_ethtype;
+	uint64_t	rx_csum_bad_ipproto;
+	uint64_t	rx_csum_bad_offset;
+	uint64_t	rx_csum_bad_proto;
+	uint64_t	tx_csum_bad_ethtype;
+	uint64_t	tx_tso_bad_ethtype;
+	uint64_t	tx_tso_not_tcp;
+
+	/*
+	 * These are accumulated from each Rx/Tx queue.
+	 */
+	uint64_t	rx_csum_failed;
+	uint64_t	rx_csum_offloaded;
+	uint64_t	rx_task_rescheduled;
+	uint64_t	tx_csum_offloaded;
+	uint64_t	tx_tso_offloaded;
+	uint64_t	tx_task_rescheduled;
+};
+
+struct vtnet_rxq_stats {
+	uint64_t	vrxs_ipackets;	/* if_ipackets */
+	uint64_t	vrxs_ibytes;	/* if_ibytes */
+	uint64_t	vrxs_iqdrops;	/* if_iqdrops */
+	uint64_t	vrxs_ierrors;	/* if_ierrors */
+	uint64_t	vrxs_csum;
+	uint64_t	vrxs_csum_failed;
+	uint64_t	vrxs_rescheduled;
+};
+
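+/*
+ * Per-queue receive state: the owning softc, the receive virtqueue, and
+ * per-queue statistics.
+ */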
+struct vtnet_rxq {
+	struct mtx		 vtnrx_mtx;
+	struct vtnet_softc	*vtnrx_sc;
+	struct virtqueue	*vtnrx_vq;
+	int			 vtnrx_id;
+	int			 vtnrx_process_limit;
+	struct vtnet_rxq_stats	 vtnrx_stats;
+#ifdef NOTUSED
+	struct taskqueue	*vtnrx_tq;
+	struct task		 vtnrx_intrtask;
+#endif
+	char			 vtnrx_name[16];
+} __aligned(CACHE_LINE_SIZE);
+
+#define VTNET_RXQ_LOCK(_rxq)	mtx_lock(&(_rxq)->vtnrx_mtx)
+#define VTNET_RXQ_UNLOCK(_rxq)	mtx_unlock(&(_rxq)->vtnrx_mtx)
+#define VTNET_RXQ_LOCK_ASSERT(_rxq)		\
+    mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
+#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)	\
+    mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)
+
+struct vtnet_txq_stats {
+	uint64_t vtxs_opackets;	/* if_opackets */
+	uint64_t vtxs_obytes;	/* if_obytes */
+	uint64_t vtxs_omcasts;	/* if_omcasts */
+	uint64_t vtxs_csum;
+	uint64_t vtxs_tso;
+	uint64_t vtxs_collapsed;
+	uint64_t vtxs_rescheduled;
+};
+
+struct vtnet_txq {
+	struct mtx		 vtntx_mtx;
+	struct vtnet_softc	*vtntx_sc;
+	struct virtqueue	*vtntx_vq;
+#ifndef VTNET_LEGACY_TX
+	struct buf_ring		*vtntx_br;
+#endif
+	int			 vtntx_id;
+	int			 vtntx_watchdog;
+	struct vtnet_txq_stats	 vtntx_stats;
+#ifdef NOTUSED
+	struct taskqueue	*vtntx_tq;
+	struct task		 vtntx_intrtask;
+#endif
+#ifndef VTNET_LEGACY_TX
+	struct task		 vtntx_defrtask;
+#endif
+	char			 vtntx_name[16];
+} __aligned(CACHE_LINE_SIZE);
+
+#define VTNET_TXQ_LOCK(_txq)	mtx_lock(&(_txq)->vtntx_mtx)
+#define VTNET_TXQ_TRYLOCK(_txq)	mtx_trylock(&(_txq)->vtntx_mtx)
+#define VTNET_TXQ_UNLOCK(_txq)	mtx_unlock(&(_txq)->vtntx_mtx)
+#define VTNET_TXQ_LOCK_ASSERT(_txq)		\
+    mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
+#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq)	\
+    mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)
+	
+#define VTNET_FLAG_SUSPENDED	 0x0001
+#define VTNET_FLAG_MAC		 0x0002
+#define VTNET_FLAG_CTRL_VQ	 0x0004
+#define VTNET_FLAG_CTRL_RX	 0x0008
+#define VTNET_FLAG_CTRL_MAC	 0x0010
+#define VTNET_FLAG_VLAN_FILTER	 0x0020
+#define VTNET_FLAG_TSO_ECN	 0x0040
+#define VTNET_FLAG_MRG_RXBUFS	 0x0080
+#define VTNET_FLAG_LRO_NOMRG	 0x0100
+#define VTNET_FLAG_MULTIQ	 0x0200
+#define VTNET_FLAG_EVENT_IDX	 0x0400
+
+struct vtnet_softc {
+#ifdef RTEMS_VIRTIO_NET
+	struct arpcom arpcom;
+	struct rtems_bsdnet_ifconfig *config;
+	struct vtpci_softc *vtpci_softc;
+	enum { vtnet_timeout_stopped, vtnet_timeout_running, vtnet_timeout_stop_rq }
+	stat_ch;
+#endif
+	device_t		 vtnet_dev;
+	struct ifnet		*vtnet_ifp;
+	struct vtnet_rxq	*vtnet_rxqs;
+	struct vtnet_txq	*vtnet_txqs;
+
+	uint32_t		 vtnet_flags;
+
+	int			 vtnet_link_active;
+	int			 vtnet_hdr_size;
+	int			 vtnet_rx_process_limit;
+	int			 vtnet_rx_nmbufs;
+	int			 vtnet_rx_clsize;
+	int			 vtnet_rx_new_clsize;
+	int			 vtnet_if_flags;
+	int			 vtnet_act_vq_pairs;
+	int			 vtnet_max_vq_pairs;
+
+	struct virtqueue	*vtnet_ctrl_vq;
+#ifdef NOTUSED
+	struct vtnet_mac_filter	*vtnet_mac_filter;
+	uint32_t		*vtnet_vlan_filter;
+#endif
+
+	uint64_t		 vtnet_features;
+	struct vtnet_statistics	 vtnet_stats;
+#ifdef NOTUSED
+	struct callout		 vtnet_tick_ch;
+	struct ifmedia		 vtnet_media;
+	eventhandler_tag	 vtnet_vlan_attach;
+	eventhandler_tag	 vtnet_vlan_detach;
+#endif
+
+	struct mtx		 vtnet_mtx;
+	char			 vtnet_mtx_name[16];
+	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
+};
+
+/*
+ * Maximum number of queue pairs we will autoconfigure to.
+ */
+#define VTNET_MAX_QUEUE_PAIRS	8
+
+/*
+ * Additional completed entries can appear in a virtqueue before we can
+ * reenable interrupts. Number of times to retry before scheduling the
+ * taskqueue to process the completed entries.
+ */
+#define VTNET_INTR_DISABLE_RETRIES	4
+
+#ifdef NOTUSED
+/*
+ * Fake the media type. The host does not provide us with any real media
+ * information.
+ */
+#define VTNET_MEDIATYPE		 (IFM_ETHER | IFM_10G_T | IFM_FDX)
+#endif
+
+/*
+ * Number of words to allocate for the VLAN shadow table. There is one
+ * bit for each VLAN.
+ */
+#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)
+
+/*
+ * When mergeable buffers are not negotiated, the vtnet_rx_header structure
+ * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
+ * both keep the VirtIO header and the data non-contiguous and to keep the
+ * frame's payload 4 byte aligned.
+ *
+ * When mergeable buffers are negotiated, the host puts the VirtIO header in
+ * the beginning of the first mbuf's data.
+ */
+#define VTNET_RX_HEADER_PAD	4
+struct vtnet_rx_header {
+	struct virtio_net_hdr	vrh_hdr;
+	char			vrh_pad[VTNET_RX_HEADER_PAD];
+} __packed;
+
+/*
+ * For each outgoing frame, the vtnet_tx_header below is allocated from
+ * the vtnet_tx_header_zone.
+ */
+struct vtnet_tx_header {
+	union {
+		struct virtio_net_hdr		hdr;
+		struct virtio_net_hdr_mrg_rxbuf	mhdr;
+	} vth_uhdr;
+
+	struct mbuf *vth_mbuf;
+};
+
+/*
+ * The VirtIO specification does not place a limit on the number of MAC
+ * addresses the guest driver may request to be filtered. In practice,
+ * the host is constrained by available resources. To simplify this driver,
+ * impose a reasonably high limit of MAC addresses we will filter before
+ * falling back to promiscuous or all-multicast modes.
+ */
+#define VTNET_MAX_MAC_ENTRIES	128
+
+struct vtnet_mac_table {
+	uint32_t	nentries;
+	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
+} __packed;
+
+struct vtnet_mac_filter {
+	struct vtnet_mac_table	vmf_unicast;
+	uint32_t		vmf_pad; /* Make tables non-contiguous. */
+	struct vtnet_mac_table	vmf_multicast;
+};
+
+#ifdef NOTUSED
+/*
+ * The MAC filter table is malloc(9)'d when needed. Ensure it will
+ * always fit in one segment.
+ */
+CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
+#endif
+
+#define VTNET_TX_TIMEOUT	5
+#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP | CSUM_SCTP)
+#define VTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6)
+
+#define VTNET_CSUM_ALL_OFFLOAD	\
+    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
+
+/* Features desired/implemented by this driver. */
+#ifdef NOTUSED
+#define VTNET_FEATURES \
+    (VIRTIO_NET_F_MAC			| \
+     VIRTIO_NET_F_STATUS		| \
+     VIRTIO_NET_F_CTRL_VQ		| \
+     VIRTIO_NET_F_CTRL_RX		| \
+     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
+     VIRTIO_NET_F_CTRL_VLAN		| \
+     VIRTIO_NET_F_CSUM			| \
+     VIRTIO_NET_F_GSO			| \
+     VIRTIO_NET_F_HOST_TSO4		| \
+     VIRTIO_NET_F_HOST_TSO6		| \
+     VIRTIO_NET_F_HOST_ECN		| \
+     VIRTIO_NET_F_GUEST_CSUM		| \
+     VIRTIO_NET_F_GUEST_TSO4		| \
+     VIRTIO_NET_F_GUEST_TSO6		| \
+     VIRTIO_NET_F_GUEST_ECN		| \
+     VIRTIO_NET_F_MRG_RXBUF		| \
+     VIRTIO_NET_F_MQ			| \
+     VIRTIO_RING_F_EVENT_IDX		| \
+     VIRTIO_RING_F_INDIRECT_DESC)
+#endif
+#ifdef RTEMS_VIRTIO_NET
+#define VTNET_FEATURES \
+  ( VIRTIO_NET_F_CSUM | \
+    VIRTIO_NET_F_GSO | \
+    VIRTIO_NET_F_HOST_TSO4 | \
+    VIRTIO_NET_F_HOST_TSO6 | \
+    VIRTIO_NET_F_HOST_ECN | \
+    VIRTIO_NET_F_HOST_UFO | \
+    VIRTIO_NET_F_MRG_RXBUF )
+#endif
+
+/*
+ * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
+ * frames larger than 1514 bytes.
+ */
+#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
+    VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)
+
+/*
+ * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
+ * frames larger than 1514 bytes. We do not yet support software LRO
+ * via tcp_lro_rx().
+ */
+#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
+    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
+
+#define VTNET_MAX_MTU		65536
+#define VTNET_MAX_RX_SIZE	65550
+
+/*
+ * Used to preallocate the Vq indirect descriptors. The first segment
+ * is reserved for the header.
+ */
+#define VTNET_MIN_RX_SEGS	2
+#define VTNET_MAX_RX_SEGS	34
+#define VTNET_MAX_TX_SEGS	34
+
+#ifdef NOTUSED
+/*
+ * Assert we can receive and transmit the maximum with regular
+ * size clusters.
+ */
+CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
+CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
+#endif
+
+/*
+ * Number of slots in the Tx bufrings. This value matches most other
+ * multiqueue drivers.
+ */
+#define VTNET_DEFAULT_BUFRING_SIZE	4096
+
+/*
+ * Determine how many mbufs are in each receive buffer. For LRO without
+ * mergeable descriptors, we must allocate an mbuf chain large enough to
+ * hold both the vtnet_rx_header and the maximum receivable data.
+ */
+#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
+	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
+	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
+	        (_clsize))
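+/*
+ * For example (illustrative): with VTNET_FLAG_LRO_NOMRG set and 2048-byte
+ * clusters this evaluates to howmany(14 + 65550, 2048) = 33 clusters per
+ * receive buffer; without it a single cluster is used.
+ */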
+
+#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
+#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
+#define VTNET_CORE_UNLOCK(_sc)		mtx_unlock(VTNET_CORE_MTX((_sc)))
+#define VTNET_CORE_LOCK_DESTROY(_sc)	mtx_destroy(VTNET_CORE_MTX((_sc)))
+#define VTNET_CORE_LOCK_ASSERT(_sc)		\
+    mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
+#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc)	\
+    mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)
+
+#define VTNET_CORE_LOCK_INIT(_sc) do {					\
+    snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name),	\
+        "%s", device_get_nameunit((_sc)->vtnet_dev));			\
+    mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name,		\
+        "VTNET Core Lock", MTX_DEF);					\
+} while (0)
+
+#endif /* _IF_VTNETVAR_H */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio.c b/c/src/lib/libbsp/i386/pc386/virtio/virtio.c
new file mode 100644
index 0000000..5a99122
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio.c
@@ -0,0 +1,291 @@
+/**
+ * @file virtio.c
+ * @brief VirtIO bus method wrappers
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/virtio.c 252707 2013-07-04 17:57:26Z bryanv $
+ */
+
+#define VTNET_LEGACY_TX
+#define RTEMS_VIRTIO_NET
+
+#include <rtems.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include "virtio.h"
+#include "virtqueue.h"
+#include "virtio_pci.h"
+
+#ifdef NOTUSED
+static int virtio_modevent(module_t, int, void *);
+static const char *virtio_feature_name(uint64_t, struct virtio_feature_desc *);
+
+static struct virtio_ident {
+	uint16_t	devid;
+	const char	*name;
+} virtio_ident_table[] = {
+	{ VIRTIO_ID_NETWORK,	"Network"	},
+	{ VIRTIO_ID_BLOCK,	"Block"		},
+	{ VIRTIO_ID_CONSOLE,	"Console"	},
+	{ VIRTIO_ID_ENTROPY,	"Entropy"	},
+	{ VIRTIO_ID_BALLOON,	"Balloon"	},
+	{ VIRTIO_ID_IOMEMORY,	"IOMemory"	},
+	{ VIRTIO_ID_SCSI,	"SCSI"		},
+	{ VIRTIO_ID_9P,		"9P Transport"	},
+
+	{ 0, NULL }
+};
+
+/* Device independent features. */
+static struct virtio_feature_desc virtio_common_feature_desc[] = {
+	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"	},
+	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirect"	},
+	{ VIRTIO_RING_F_EVENT_IDX,	"EventIdx"	},
+	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"	},
+
+	{ 0, NULL }
+};
+
+const char *
+virtio_device_name(uint16_t devid)
+{
+	struct virtio_ident *ident;
+
+	for (ident = virtio_ident_table; ident->name != NULL; ident++) {
+		if (ident->devid == devid)
+			return (ident->name);
+	}
+
+	return (NULL);
+}
+
+static const char *
+virtio_feature_name(uint64_t val, struct virtio_feature_desc *desc)
+{
+	int i, j;
+	struct virtio_feature_desc *descs[2] = { desc,
+	    virtio_common_feature_desc };
+
+	for (i = 0; i < 2; i++) {
+		if (descs[i] == NULL)
+			continue;
+
+		for (j = 0; descs[i][j].vfd_val != 0; j++) {
+			if (val == descs[i][j].vfd_val)
+				return (descs[i][j].vfd_str);
+		}
+	}
+
+	return (NULL);
+}
+#endif
+
+void
+virtio_describe(device_t dev, const char *msg,
+    uint64_t features, struct virtio_feature_desc *desc)
+{
+#ifdef NOTUSED
+	struct sbuf sb;
+	uint64_t val;
+	char *buf;
+	const char *name;
+	int n;
+
+	if ((buf = malloc(512, M_TEMP, M_NOWAIT)) == NULL) {
+		device_printf(dev, "%s features: %#jx\n", msg, (uintmax_t) features);
+		return;
+	}
+
+	sbuf_new(&sb, buf, 512, SBUF_FIXEDLEN);
+	sbuf_printf(&sb, "%s features: %#jx", msg, (uintmax_t) features);
+
+	for (n = 0, val = 1ULL << 63; val != 0; val >>= 1) {
+		/*
+		 * BAD_FEATURE is used to detect broken Linux clients
+		 * and therefore is not applicable to FreeBSD.
+		 */
+		if (((features & val) == 0) || val == VIRTIO_F_BAD_FEATURE)
+			continue;
+
+		if (n++ == 0)
+			sbuf_cat(&sb, " <");
+		else
+			sbuf_cat(&sb, ",");
+
+		name = virtio_feature_name(val, desc);
+		if (name == NULL)
+			sbuf_printf(&sb, "%#jx", (uintmax_t) val);
+		else
+			sbuf_cat(&sb, name);
+	}
+
+	if (n > 0)
+		sbuf_cat(&sb, ">");
+
+#if __FreeBSD_version < 900020
+	sbuf_finish(&sb);
+	if (sbuf_overflowed(&sb) == 0)
+#else
+	if (sbuf_finish(&sb) == 0)
+#endif
+		device_printf(dev, "%s\n", sbuf_data(&sb));
+
+	sbuf_delete(&sb);
+	free(buf, M_TEMP);
+#endif
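+	/*
+	 * The body above is under #ifdef NOTUSED, so in this RTEMS port
+	 * virtio_describe() is effectively a no-op.
+	 */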
+}
+
+/*
+ * VirtIO bus method wrappers.
+ */
+
+#ifdef NOTUSED
+void
+virtio_read_ivar(device_t dev, int ivar, uintptr_t *val)
+{
+
+	*val = -1;
+	BUS_READ_IVAR(device_get_parent(dev), dev, ivar, val);
+}
+
+void
+virtio_write_ivar(device_t dev, int ivar, uintptr_t val)
+{
+
+	BUS_WRITE_IVAR(device_get_parent(dev), dev, ivar, val);
+}
+#endif
+
+uint64_t
+virtio_negotiate_features(device_t dev, uint64_t child_features)
+{
+
+	return (VIRTIO_BUS_NEGOTIATE_FEATURES(device_get_parent(dev),
+	    child_features));
+}
+
+int
+virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
+    struct vq_alloc_info *info)
+{
+
+	return (VIRTIO_BUS_ALLOC_VIRTQUEUES(device_get_parent(dev), flags,
+	    nvqs, info));
+}
+
+int
+virtio_setup_intr(device_t dev, enum intr_type type)
+{
+
+	return (VIRTIO_BUS_SETUP_INTR(device_get_parent(dev), type));
+}
+
+int
+virtio_with_feature(device_t dev, uint64_t feature)
+{
+
+	return (VIRTIO_BUS_WITH_FEATURE(device_get_parent(dev), feature));
+}
+
+void
+virtio_stop(device_t dev)
+{
+
+	VIRTIO_BUS_STOP(device_get_parent(dev));
+}
+
+int
+virtio_reinit(device_t dev, uint64_t features)
+{
+
+	return (VIRTIO_BUS_REINIT(device_get_parent(dev), features));
+}
+
+void
+virtio_reinit_complete(device_t dev)
+{
+
+	VIRTIO_BUS_REINIT_COMPLETE(device_get_parent(dev));
+}
+
+void
+virtio_read_device_config(device_t dev, bus_size_t offset, void *dst, int len)
+{
+
+	VIRTIO_BUS_READ_DEVICE_CONFIG(device_get_parent(dev),
+	    offset, dst, len);
+}
+
+void
+virtio_write_device_config(device_t dev, bus_size_t offset, void *dst, int len)
+{
+
+	VIRTIO_BUS_WRITE_DEVICE_CONFIG(device_get_parent(dev),
+	    offset, dst, len);
+}
+
+#ifdef NOTUSED
+static int
+virtio_modevent(module_t mod, int type, void *unused)
+{
+	int error;
+
+	switch (type) {
+	case MOD_LOAD:
+	case MOD_QUIESCE:
+	case MOD_UNLOAD:
+	case MOD_SHUTDOWN:
+		error = 0;
+		break;
+	default:
+		error = EOPNOTSUPP;
+		break;
+	}
+
+	return (error);
+}
+
+static moduledata_t virtio_mod = {
+	"virtio",
+	virtio_modevent,
+	0
+};
+
+DECLARE_MODULE(virtio, virtio_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(virtio, 1);
+#endif
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio.h b/c/src/lib/libbsp/i386/pc386/virtio/virtio.h
new file mode 100644
index 0000000..294cac4
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio.h
@@ -0,0 +1,255 @@
+/**
+ * @file virtio.h
+ * @brief Header for virtio.c
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+ 
+/*-
+ * This header is BSD licensed so anyone can use the definitions to
implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/virtio.h 252708 2013-07-04 17:59:09Z bryanv $
+ */
+
+#ifndef _VIRTIO_H_
+#define _VIRTIO_H_
+
+#ifdef RTEMS_VIRTIO_NET
+/* Some adaptation replacements for RTEMS */
+
+#define mtx_init(a,b,c,d)
+#define mtx_initialized(a) (1)
+#define mtx_destroy(a) do {} while(0)
+#define mtx_lock(a) do {} while(0)
+#define mtx_unlock(a) do {} while(0)
+#define mtx_assert(a,b) do {} while(0)
+#define howmany(x,y) (((x)+((y)-1))/(y))
+
+typedef uint32_t u_long;
+typedef int device_t;
+typedef uint32_t bus_size_t;
+typedef uint32_t vm_paddr_t;
+
+struct mtx { int dummy; };
+
+enum intr_type {
+	INTR_TYPE_NET = 4,
+	INTR_MPSAFE = 512
+};
+
+#define CACHE_LINE_SHIFT 6
+#define CACHE_LINE_SIZE (1<<CACHE_LINE_SHIFT)
+#define M_ZERO 0
+#define MTX_DEF 0
+#define MIN(a,b) ((a)>(b)?(b):(a))
+
+#define device_get_parent(dev) (dev)
+#define device_printf(dev,format,args...) printk(format,## args)
+#define vtophys(m) (m)
+#define snprintf(buf, size, str,args...) sprintf(buf, str,## args)
+
+#define mb() asm volatile ( "lock; addl $0,0(%%esp) " ::: "memory" )
+#define rmb() mb()
+#define wmb() asm volatile ( "lock; addl $0, (%%esp)" ::: "memory", "cc" )
+
+#define VIRTIO_BUS_NEGOTIATE_FEATURES(dev, feature) vtpci_negotiate_features(dev, feature)
+#define VIRTIO_BUS_ALLOC_VIRTQUEUES(dev, flags, nvqs, info) vtpci_alloc_virtqueues(dev, flags, nvqs, info)
+#define VIRTIO_BUS_SETUP_INTR(dev, type) vtpci_setup_intr(dev, type)
+#define VIRTIO_BUS_WITH_FEATURE(dev, feature) vtpci_with_feature(dev, feature)
+#define VIRTIO_BUS_STOP(dev) vtpci_stop(dev)
+#define VIRTIO_BUS_REINIT(dev, features) vtpci_reinit(dev, features)
+#define VIRTIO_BUS_REINIT_COMPLETE(dev) vtpci_reinit_complete(dev)
+#define VIRTIO_BUS_READ_DEVICE_CONFIG(dev,offset,dst,len) vtpci_read_dev_config(dev,offset,dst,len)
+#define VIRTIO_BUS_WRITE_DEVICE_CONFIG(dev,offset,dst,len) vtpci_write_dev_config(dev,offset,dst,len)
+#define VIRTIO_BUS_NOTIFY_VQ(dev, index) vtpci_notify_virtqueue(dev, index)
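+/*
+ * In this port the VIRTIO_BUS_* methods resolve directly to the vtpci_*
+ * functions in virtio_pci.c; the FreeBSD newbus/kobj dispatch is not used.
+ */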
+
+#define KASSERT(exp,msg) do {\
+	if(!(exp)){\
+		printk msg;\
+		printk("\n");\
+	}\
+} while(0)
+#define MPASS(exp) \
+	KASSERT((exp), ("Assertion %s failed at %s:%d", #exp, __FILE__, __LINE__))
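+/*
+ * Note: a failed KASSERT()/MPASS() in this port only prints the message
+ * via printk(); it does not halt the system.
+ */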
+#endif /* RTEMS_VIRTIO_NET */
+
+struct vq_alloc_info;
+
+/* VirtIO device IDs. */
+#define VIRTIO_ID_NETWORK	0x01
+#define VIRTIO_ID_BLOCK		0x02
+#define VIRTIO_ID_CONSOLE	0x03
+#define VIRTIO_ID_ENTROPY	0x04
+#define VIRTIO_ID_BALLOON	0x05
+#define VIRTIO_ID_IOMEMORY	0x06
+#define VIRTIO_ID_SCSI		0x08
+#define VIRTIO_ID_9P		0x09
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET	0x00
+#define VIRTIO_CONFIG_STATUS_ACK	0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER	0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK	0x04
+#define VIRTIO_CONFIG_STATUS_FAILED	0x80
+
+/*
+ * Generate interrupt when the virtqueue ring is
+ * completely used, even if we've suppressed them.
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY (1 << 24)
+
+/*
+ * The guest should never negotiate this feature; it
+ * is used to detect faulty drivers.
+ */
+#define VIRTIO_F_BAD_FEATURE (1 << 30)
+
+/*
+ * Some VirtIO feature bits (currently bits 28 through 31) are
+ * reserved for the transport being used (eg. virtio_ring), the
+ * rest are per-device feature bits.
+ */
+#define VIRTIO_TRANSPORT_F_START	28
+#define VIRTIO_TRANSPORT_F_END		32
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/*
+ * VirtIO instance variables indices.
+ */
+#define VIRTIO_IVAR_DEVTYPE		1
+#define VIRTIO_IVAR_FEATURE_DESC	2
+#define VIRTIO_IVAR_VENDOR		3
+#define VIRTIO_IVAR_DEVICE		4
+#define VIRTIO_IVAR_SUBVENDOR		5
+#define VIRTIO_IVAR_SUBDEVICE		6
+
+struct virtio_feature_desc {
+	uint64_t	 vfd_val;
+	const char	*vfd_str;
+};
+
+const char *virtio_device_name(uint16_t devid);
+void	 virtio_describe(device_t dev, const char *msg,
+	     uint64_t features, struct virtio_feature_desc *feature_desc);
+
+/*
+ * VirtIO Bus Methods.
+ */
+void	 virtio_read_ivar(device_t dev, int ivar, uintptr_t *val);
+void	 virtio_write_ivar(device_t dev, int ivar, uintptr_t val);
+uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features);
+int	 virtio_alloc_virtqueues(device_t dev, int flags, int nvqs,
+	     struct vq_alloc_info *info);
+int	 virtio_setup_intr(device_t dev, enum intr_type type);
+int	 virtio_with_feature(device_t dev, uint64_t feature);
+void	 virtio_stop(device_t dev);
+int	 virtio_reinit(device_t dev, uint64_t features);
+void	 virtio_reinit_complete(device_t dev);
+
+/*
+ * Read/write a variable amount from the device specific (ie, network)
+ * configuration region. This region is encoded in the same endian as
+ * the guest.
+ */
+void	 virtio_read_device_config(device_t dev, bus_size_t offset,
+	     void *dst, int length);
+void	 virtio_write_device_config(device_t dev, bus_size_t offset,
+	     void *src, int length);
+
+
+/* Inlined device specific read/write functions for common lengths. */
+#define VIRTIO_RDWR_DEVICE_CONFIG(size, type)				\
+static inline type							\
+__CONCAT(virtio_read_dev_config_,size)(device_t dev,			\
+    bus_size_t offset)							\
+{									\
+	type val;							\
+	virtio_read_device_config(dev, offset, &val, sizeof(type));	\
+	return (val);							\
+}									\
+									\
+static inline void							\
+__CONCAT(virtio_write_dev_config_,size)(device_t dev,			\
+    bus_size_t offset, type val)					\
+{									\
+	virtio_write_device_config(dev, offset, &val, sizeof(type));	\
+}
+
+VIRTIO_RDWR_DEVICE_CONFIG(1, uint8_t);
+VIRTIO_RDWR_DEVICE_CONFIG(2, uint16_t);
+VIRTIO_RDWR_DEVICE_CONFIG(4, uint32_t);
+
+#undef VIRTIO_RDWR_DEVICE_CONFIG
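+/*
+ * Illustrative use (assuming a virtio-net device): read the 16-bit link
+ * status word from the device configuration area:
+ *
+ *   uint16_t status = virtio_read_dev_config_2(dev,
+ *       offsetof(struct virtio_net_config, status));
+ */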
+
+#define VIRTIO_READ_IVAR(name, ivar)					\
+static inline int							\
+__CONCAT(virtio_get_,name)(device_t dev)				\
+{									\
+	uintptr_t val;							\
+	virtio_read_ivar(dev, ivar, &val);				\
+	return ((int) val);						\
+}
+
+VIRTIO_READ_IVAR(device_type,	VIRTIO_IVAR_DEVTYPE);
+VIRTIO_READ_IVAR(vendor,	VIRTIO_IVAR_VENDOR);
+VIRTIO_READ_IVAR(device,	VIRTIO_IVAR_DEVICE);
+VIRTIO_READ_IVAR(subvendor,	VIRTIO_IVAR_SUBVENDOR);
+VIRTIO_READ_IVAR(subdevice,	VIRTIO_IVAR_SUBDEVICE);
+
+#undef VIRTIO_READ_IVAR
+
+#define VIRTIO_WRITE_IVAR(name, ivar)					\
+static inline void							\
+__CONCAT(virtio_set_,name)(device_t dev, void *val)			\
+{									\
+	virtio_write_ivar(dev, ivar, (uintptr_t) val);			\
+}
+
+VIRTIO_WRITE_IVAR(feature_desc,	VIRTIO_IVAR_FEATURE_DESC);
+
+#undef VIRTIO_WRITE_IVAR
+
+#endif /* _VIRTIO_H_ */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio_net.h b/c/src/lib/libbsp/i386/pc386/virtio/virtio_net.h
new file mode 100644
index 0000000..f6604cd
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio_net.h
@@ -0,0 +1,218 @@
+/**
+ * @file virtio_net.h
+ * @brief Header for if_vtnet.c
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+
+/*-
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/network/virtio_net.h 255111 2013-09-01 04:23:54Z bryanv $
+ */
+
+#ifndef _VIRTIO_NET_H
+#define _VIRTIO_NET_H
+
+/* The feature bitmap for virtio net */
+#define VIRTIO_NET_F_CSUM	0x00001 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum*/
+#define VIRTIO_NET_F_MAC	0x00020 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO	0x00040 /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4	0x00080 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6	0x00100 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN	0x00200 /* Guest can handle TSO[6] w/ ECN in.*/
+#define VIRTIO_NET_F_GUEST_UFO	0x00400 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4	0x00800 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6	0x01000 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN	0x02000 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO	0x04000 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF	0x08000 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS	0x10000 /* virtio_net_config.status available*/
+#define VIRTIO_NET_F_CTRL_VQ	0x20000 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX	0x40000 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN	0x80000 /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA 0x100000 /* Extra RX mode control support */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 0x200000 /* Announce device on network */
+#define VIRTIO_NET_F_MQ		0x400000 /* Device supports RFS */
+#define VIRTIO_NET_F_CTRL_MAC_ADDR 0x800000 /* Set MAC address */
+
+#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
+
+struct virtio_net_config {
+	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
+	uint8_t		mac[ETHER_ADDR_LEN];
+	/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
+	uint16_t	status;
+	/* Maximum number of each of transmit and receive queues;
+	 * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
+	 * Legal values are between 1 and 0x8000.
+	 */
+	uint16_t	max_virtqueue_pairs;
+} __packed;
+
+/*
+ * This is the first element of the scatter-gather list.  If you don't
+ * specify GSO or CSUM features, you can simply ignore the header.
+ */
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start,csum_offset*/
+#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
+#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
+#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* TCP has ECN set */
+
+struct virtio_net_hdr {
+	uint8_t	flags;
+	uint8_t gso_type;
+	uint16_t hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
+	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
+	uint16_t csum_start;	/* Position to start checksumming from */
+	uint16_t csum_offset;	/* Offset after that to place checksum */
+};
+
+/*
+ * This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated.
+ */
+struct virtio_net_hdr_mrg_rxbuf {
+	struct virtio_net_hdr hdr;
+	uint16_t num_buffers;	/* Number of merged rx buffers */
+};
+
+/*
+ * Control virtqueue data structures
+ *
+ * The control virtqueue expects a header in the first sg entry
+ * and an ack/status response in the last entry.  Data for the
+ * command goes in between.
+ */
+struct virtio_net_ctrl_hdr {
+	uint8_t class;
+	uint8_t cmd;
+} __packed;
+
+#define VIRTIO_NET_OK	0
+#define VIRTIO_NET_ERR	1
+
+/*
+ * Control the RX mode, ie. promiscuous, allmulti, etc...
+ * All commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable.  Commands
+ * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
+ */
+#define VIRTIO_NET_CTRL_RX	0
+#define VIRTIO_NET_CTRL_RX_PROMISC	0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI	1
+#define VIRTIO_NET_CTRL_RX_ALLUNI	2
+#define VIRTIO_NET_CTRL_RX_NOMULTI	3
+#define VIRTIO_NET_CTRL_RX_NOUNI	4
+#define VIRTIO_NET_CTRL_RX_NOBCAST	5
+
+/*
+ * Control the MAC filter table.
+ *
+ * The MAC filter table is managed by the hypervisor, the guest should
+ * assume the size is infinite.  Filtering should be considered
+ * non-perfect, ie. based on hypervisor resources, the guest may
+ * receive packets from sources not specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires
+ * two out scatterlists.  Each contains a 4 byte count of entries followed
+ * by a concatenated byte stream of the ETH_ALEN MAC addresses.  The
+ * first sg list contains unicast addresses, the second is for multicast.
+ * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
+ * is available.
+ *
+ * The ADDR_SET command requests one out scatterlist, it contains a
+ * 6 bytes MAC address. This functionality is present if the
+ * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
+ */
+struct virtio_net_ctrl_mac {
+	uint32_t	entries;
+	uint8_t		macs[][ETHER_ADDR_LEN];
+} __packed;
+
+#define VIRTIO_NET_CTRL_MAC	1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET	0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET	1
+
+/*
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface.
+ * VLAN IDs not added may be filtered by the hypervisor.  Del is the
+ * opposite of add.  Both commands expect an out entry containing a 2
+ * byte VLAN ID.  VLAN filtering is available with the
+ * VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN	2
+#define VIRTIO_NET_CTRL_VLAN_ADD	0
+#define VIRTIO_NET_CTRL_VLAN_DEL	1
+
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * driver has received the notification; device would clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE	3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK	0
+
+/*
+ * Control Receive Flow Steering
+ *
+ * The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET enables Receive Flow
+ * Steering, specifying the number of the transmit and receive queues
+ * that will be used. After the command is consumed and acked by the
+ * device, the device will not steer new packets on receive virtqueues
+ * other than specified nor read from transmit virtqueues other than
+ * specified. Accordingly, driver should not transmit new packets on
+ * virtqueues other than specified.
+ */
+struct virtio_net_ctrl_mq {
+	uint16_t	virtqueue_pairs;
+} __packed;
+
+#define VIRTIO_NET_CTRL_MQ	4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET		0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN		1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX		0x8000
+
+#endif /* _VIRTIO_NET_H */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c
new file mode 100644
index 0000000..c9b7709
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c
@@ -0,0 +1,1526 @@
+/**
+ * @file virtio_pci.c
+ * @brief Driver for the virtio PCI interface
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/pci/virtio_pci.c 255110 2013-09-01 04:20:23Z bryanv $
+ */
+
+#define VTNET_LEGACY_TX
+#define RTEMS_VIRTIO_NET
+
+/* Driver for the VirtIO PCI interface. */
+
+#ifdef __i386__
+
+#include <rtems.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include <bsp.h>
+#include <bsp/irq.h>
+
+#include <sys/mbuf.h>
+#include <sys/param.h>
+
+#include <pcibios.h>
+
+#include "virtio.h"
+#include "virtqueue.h"
+#include "virtio_pci.h"
+
+#ifdef RTEMS_VIRTIO_NET
+/* Some adaptation replacements for RTEMS */
+
+static struct vtpci_softc vtpci_softc;
+#define device_get_softc(dev) &vtpci_softc
+
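+/*
+ * The FreeBSD bus_space accessors are replaced by direct i386 port I/O
+ * relative to the I/O base read from PCI BAR0 (sc->pci_io_base).
+ */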
+static uint8_t vtpci_read_config_1(struct vtpci_softc *sc, int offset){
+	uint8_t val;
+	inport_byte(sc->pci_io_base+offset, val);
+	return val;
+}
+static uint16_t vtpci_read_config_2(struct vtpci_softc *sc, int offset){
+	uint16_t val;
+	inport_word(sc->pci_io_base+offset, val);
+	return val;
+}
+static uint32_t vtpci_read_config_4(struct vtpci_softc *sc, int offset){
+	uint32_t val;
+	inport_long(sc->pci_io_base+offset, val);
+	return val;
+}
+
+static void vtpci_write_config_1(struct vtpci_softc *sc, int offset, uint8_t val){
+	outport_byte(sc->pci_io_base+offset, val);
+}
+static void vtpci_write_config_2(struct vtpci_softc *sc, int offset, uint16_t val){
+	outport_word(sc->pci_io_base+offset, val);
+}
+static void vtpci_write_config_4(struct vtpci_softc *sc, int offset, uint32_t val){
+	outport_long(sc->pci_io_base+offset, val);
+}
+#endif
+
+#ifdef NOTUSED
+struct vtpci_interrupt {
+	struct resource		*vti_irq;
+	int			 vti_rid;
+	uint32_t		isr_number;
+	void			*vti_handler;
+};
+
+struct vtpci_virtqueue {
+	struct virtqueue	*vtv_vq;
+	int			 vtv_no_intr;
+};
+#endif
+
+#define VTPCI_FLAG_NO_MSI		0x0001
+#define VTPCI_FLAG_NO_MSIX		0x0002
+#define VTPCI_FLAG_LEGACY		0x1000
+#define VTPCI_FLAG_MSI			0x2000
+#define VTPCI_FLAG_MSIX			0x4000
+#define VTPCI_FLAG_SHARED_MSIX		0x8000
+#define VTPCI_FLAG_ITYPE_MASK		0xF000
+
+#ifdef NOTUSED
+struct vtpci_softc {
+	device_t			 vtpci_dev;
+	struct resource			*vtpci_res;
+	struct resource			*vtpci_msix_res;
+	uint64_t			 vtpci_features;
+	uint32_t			 vtpci_flags;
+
+	/* This "bus" will only ever have one child. */
+	device_t			 vtpci_child_dev;
+	struct virtio_feature_desc	*vtpci_child_feat_desc;
+
+	int				 vtpci_nvqs;
+	struct vtpci_virtqueue		*vtpci_vqs;
+
+	/*
+	 * Ideally, each virtqueue that the driver provides a callback for will
+	 * receive its own MSIX vector. If there are not sufficient vectors
+	 * available, then attempt to have all the VQs share one vector. For
+	 * MSIX, the configuration changed notifications must be on their own
+	 * vector.
+	 *
+	 * If MSIX is not available, we will attempt to have the whole device
+	 * share one MSI vector, and then, finally, one legacy interrupt.
+	 */
+	struct vtpci_interrupt		 vtpci_device_interrupt;
+	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
+	int				 vtpci_nmsix_resources;
+};
+#endif
+
+#ifdef NOTUSED
+static int	vtpci_probe(device_t);
+static int	vtpci_detach(device_t);
+static int	vtpci_suspend(device_t);
+static int	vtpci_resume(device_t);
+static int	vtpci_shutdown(device_t);
+static void	vtpci_driver_added(device_t, driver_t *);
+static void	vtpci_child_detached(device_t, device_t);
+static int	vtpci_read_config_ivar(device_t, device_t, int, uintptr_t *);
+static int	vtpci_write_ivar(device_t, device_t, int, uintptr_t);
+
+static uint64_t	vtpci_negotiate_features(device_t, uint64_t);
+static int	vtpci_with_feature(device_t, uint64_t);
+static int	vtpci_alloc_virtqueues(device_t, int, int,
+		    struct vq_alloc_info *);
+static int	vtpci_setup_intr(device_t, enum intr_type);
+static void	vtpci_stop(device_t);
+static int	vtpci_reinit(device_t, uint64_t);
+static void	vtpci_reinit_complete(device_t);
+static void	vtpci_notify_virtqueue(device_t, uint16_t);
+static void	vtpci_read_dev_config(device_t, bus_size_t, void *, int);
+static void	vtpci_write_dev_config(device_t, bus_size_t, void *, int);
+
+static int	vtpci_alloc_msix(struct vtpci_softc *, int);
+static int	vtpci_alloc_msi(struct vtpci_softc *);
+static int	vtpci_alloc_intr_msix_pervq(struct vtpci_softc *);
+static int	vtpci_alloc_intr_msix_shared(struct vtpci_softc *);
+static int	vtpci_alloc_intr_msi(struct vtpci_softc *);
+
+static int	vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *,
+		    enum intr_type);
+static int	vtpci_setup_msix_interrupts(struct vtpci_softc *,
+		    enum intr_type);
+
+static int	vtpci_register_msix_vector(struct vtpci_softc *, int,
+		    struct vtpci_interrupt *);
+static int	vtpci_set_host_msix_vectors(struct vtpci_softc *);
+
+static void	vtpci_release_child_resources(struct vtpci_softc *);
+
+static int	vtpci_vq_shared_intr_filter(void *);
+static void	vtpci_vq_shared_intr(void *);
+static int	vtpci_vq_intr_filter(void *);
+static void	vtpci_vq_intr(void *);
+
+#define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt
+
+/*
+ * I/O port read/write wrappers.
+ */
+#define vtpci_read_config_1(sc, o)	bus_read_config_1((sc)->vtpci_res, (o))
+#define vtpci_read_config_2(sc, o)	bus_read_config_2((sc)->vtpci_res, (o))
+#define vtpci_read_config_4(sc, o)	bus_read_config_4((sc)->vtpci_res, (o))
+#define vtpci_write_config_1(sc, o, v)	bus_write_1((sc)->vtpci_res, (o), (v))
+#define vtpci_write_config_2(sc, o, v)	bus_write_2((sc)->vtpci_res, (o), (v))
+#define vtpci_write_config_4(sc, o, v)	bus_write_4((sc)->vtpci_res, (o), (v))
+#endif
+
+static int	vtpci_attach(device_t);
+
+static uint8_t	vtpci_get_status(device_t);
+static void	vtpci_set_status(device_t, uint8_t);
+
+static void	vtpci_describe_features(struct vtpci_softc *, const char *,
+		    uint64_t);
+static void	vtpci_probe_and_attach_child(struct vtpci_softc *);
+
+static int	vtpci_alloc_intr_legacy(struct vtpci_softc *);
+static int	vtpci_alloc_interrupt(struct vtpci_softc *, int, int,
+		    struct vtpci_interrupt *);
+static int	vtpci_alloc_intr_resources(struct vtpci_softc *);
+
+static int	vtpci_setup_legacy_interrupt(struct vtpci_softc *,
+		    enum intr_type);
+static int	vtpci_setup_interrupts(struct vtpci_softc *, enum intr_type);
+		    
+static int	vtpci_reinit_virtqueue(struct vtpci_softc *, int);
+
+static void	vtpci_free_interrupt(struct vtpci_softc *,
+		    struct vtpci_interrupt *);
+static void	vtpci_free_interrupts(struct vtpci_softc *);
+static void	vtpci_free_virtqueues(struct vtpci_softc *);
+static void	vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *);
+static void	vtpci_reset(struct vtpci_softc *);
+
+static void	vtpci_select_virtqueue(struct vtpci_softc *, int);
+
+static void	vtpci_legacy_intr(void *);
+static void	vtpci_config_intr(void *);
+
+#ifdef NOTUSED
+/* Tunables. */
+static int vtpci_disable_msix = 0;
+TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix);
+
+static device_method_t vtpci_methods[] = {
+	/* Device interface. */
+	DEVMETHOD(device_probe,			  vtpci_probe),
+	DEVMETHOD(device_attach,		  vtpci_attach),
+	DEVMETHOD(device_detach,		  vtpci_detach),
+	DEVMETHOD(device_suspend,		  vtpci_suspend),
+	DEVMETHOD(device_resume,		  vtpci_resume),
+	DEVMETHOD(device_shutdown,		  vtpci_shutdown),
+
+	/* Bus interface. */
+	DEVMETHOD(bus_driver_added,		  vtpci_driver_added),
+	DEVMETHOD(bus_child_detached,		  vtpci_child_detached),
+	DEVMETHOD(bus_read_config_ivar,		  vtpci_read_config_ivar),
+	DEVMETHOD(bus_write_ivar,		  vtpci_write_ivar),
+
+	/* VirtIO bus interface. */
+	DEVMETHOD(virtio_bus_negotiate_features,  vtpci_negotiate_features),
+	DEVMETHOD(virtio_bus_with_feature,	  vtpci_with_feature),
+	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtpci_alloc_virtqueues),
+	DEVMETHOD(virtio_bus_setup_intr,	  vtpci_setup_intr),
+	DEVMETHOD(virtio_bus_stop,		  vtpci_stop),
+	DEVMETHOD(virtio_bus_reinit,		  vtpci_reinit),
+	DEVMETHOD(virtio_bus_reinit_complete,	  vtpci_reinit_complete),
+	DEVMETHOD(virtio_bus_notify_vq,		  vtpci_notify_virtqueue),
+	DEVMETHOD(virtio_bus_read_device_config,  vtpci_read_dev_config),
+	DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config),
+
+	DEVMETHOD_END
+};
+
+static driver_t vtpci_driver = {
+	"virtio_pci",
+	vtpci_methods,
+	sizeof(struct vtpci_softc)
+};
+
+devclass_t vtpci_devclass;
+
+DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0);
+MODULE_VERSION(virtio_pci, 1);
+MODULE_DEPEND(virtio_pci, pci, 1, 1, 1);
+MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1);
+
+static int
+vtpci_probe(device_t dev)
+{
+	char desc[36];
+	const char *name;
+
+	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
+		return (ENXIO);
+
+	if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN ||
+	    pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX)
+		return (ENXIO);
+
+	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
+		return (ENXIO);
+
+	name = virtio_device_name(pci_get_subdevice(dev));
+	if (name == NULL)
+		name = "Unknown";
+
+	snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name);
+	device_set_desc_copy(dev, desc);
+
+	return (BUS_PROBE_DEFAULT);
+}
+#endif
+
+#ifdef RTEMS_VIRTIO_NET
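+/*
+ * RTEMS entry point: locate the virtio PCI function for the configured
+ * interface unit, record its PCI signature and BAR0 I/O base, and then
+ * run the common vtpci_attach() path.
+ */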
+int rtems_vtpci_attach(
+  struct rtems_bsdnet_ifconfig *config,
+  struct vtpci_softc **xsc
+)
+{
+	struct vtpci_softc *sc;
+	int      error, i, ret;
+	uint32_t val32;
+
+	*xsc = &vtpci_softc;
+	sc = &vtpci_softc;
+
+	/* Parse NIC_NAME & Init structures */
+	if ( ( sc->unit_number =
+			rtems_bsdnet_parse_driver_name( config, &sc->unit_name ) ) < 0 ) {
+		return 0;
+	}
+
+	/* Find device on pci bus */
+	{
+	int pbus, pdev, pfun;
+
+	for ( i = VIRTIO_PCI_DEVICEID_MIN; i < VIRTIO_PCI_DEVICEID_MAX; i++ ) {
+		ret = pci_find_device( VIRTIO_PCI_VENDORID, i, sc->unit_number,
+		&pbus, &pdev, &pfun );
+
+		if ( ret == PCIB_ERR_SUCCESS ) {
+			sc->pci_signature = PCIB_DEVSIG_MAKE( pbus, pdev, pfun );
+			break;
+		}
+	}
+	}
+
+	/* Get IO Address */
+	pcib_conf_read32( sc->pci_signature, PCI_BASE_ADDRESS_0, &val32 );
+	val32 &= PCI_BASE_ADDRESS_IO_MASK;
+	sc->pci_io_base = val32;
+
+	error = vtpci_attach(1);
+	
+	return (error);
+}
+#endif
+
+static int
+vtpci_attach(device_t dev)
+{
+	struct vtpci_softc *sc;
+	device_t child;
+	int rid;
+	
+	sc = device_get_softc(dev);
+	sc->vtpci_dev = dev;
+#ifdef NOTUSED
+
+	pci_enable_busmaster(dev);
+	
+	rid = PCIR_BAR(0);
+	sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
+	    RF_ACTIVE);
+	if (sc->vtpci_res == NULL) {
+		device_printf(dev, "cannot map I/O space\n");
+		return (ENXIO);
+	}
+
+	if (pci_find_cap(dev, PCIY_MSI, NULL) != 0)
+		sc->vtpci_flags |= VTPCI_FLAG_NO_MSI;
+
+	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
+		rid = PCIR_BAR(1);
+		sc->vtpci_msix_res = bus_alloc_resource_any(dev,
+		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
+	}
+
+	if (sc->vtpci_msix_res == NULL)
+		sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX;
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	uint16_t val16;
+	
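+	/*
+	 * Enable PCI bus mastering; this replaces the pci_enable_busmaster()
+	 * call in the FreeBSD code above.
+	 */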
+	pcib_conf_read16( sc->pci_signature, PCI_COMMAND, &val16 );
+	val16 |= PCI_COMMAND_MASTER;
+	pcib_conf_write16( sc->pci_signature, PCI_COMMAND, val16 );
+#endif
+
+	vtpci_reset(sc);
+
+	/* Tell the host we've noticed this device. */
+	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
+
+#ifdef NOTUSED
+	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
+		device_printf(dev, "cannot create child device\n");
+		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
+		vtpci_detach(dev);
+		return (ENOMEM);
+	}
+
+	sc->vtpci_child_dev = child;
+#endif
+	vtpci_probe_and_attach_child(sc);
+
+	return (0);
+}
+
+#ifdef NOTUSED
+static int
+vtpci_detach(device_t dev)
+{
+	struct vtpci_softc *sc;
+	device_t child;
+	int error;
+
+	sc = device_get_softc(dev);
+
+	if ((child = sc->vtpci_child_dev) != NULL) {
+		error = device_delete_child(dev, child);
+		if (error)
+			return (error);
+		sc->vtpci_child_dev = NULL;
+	}
+
+	vtpci_reset(sc);
+
+	if (sc->vtpci_msix_res != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1),
+		    sc->vtpci_msix_res);
+		sc->vtpci_msix_res = NULL;
+	}
+
+	if (sc->vtpci_res != NULL) {
+		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
+		    sc->vtpci_res);
+		sc->vtpci_res = NULL;
+	}
+
+	return (0);
+}
+
+static int
+vtpci_suspend(device_t dev)
+{
+
+	return (bus_generic_suspend(dev));
+}
+
+static int
+vtpci_resume(device_t dev)
+{
+
+	return (bus_generic_resume(dev));
+}
+
+int
+vtpci_shutdown(device_t dev)
+{
+
+	(void) bus_generic_shutdown(dev);
+	/* Forcibly stop the host device. */
+	vtpci_stop(dev);
+
+	return (0);
+}
+
+static void
+vtpci_driver_added(device_t dev, driver_t *driver)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	vtpci_probe_and_attach_child(sc);
+}
+
+static void
+vtpci_child_detached(device_t dev, device_t child)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	vtpci_reset(sc);
+	vtpci_release_child_resources(sc);
+}
+
+static int
+vtpci_read_config_ivar(device_t dev, device_t child, int index, uintptr_t *result)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	if (sc->vtpci_child_dev != child)
+		return (ENOENT);
+
+	switch (index) {
+	case VIRTIO_IVAR_DEVTYPE:
+	case VIRTIO_IVAR_SUBDEVICE:
+		*result = pci_get_subdevice(dev);
+		break;
+	case VIRTIO_IVAR_VENDOR:
+		*result = pci_get_vendor(dev);
+		break;
+	case VIRTIO_IVAR_DEVICE:
+		*result = pci_get_device(dev);
+		break;
+	case VIRTIO_IVAR_SUBVENDOR:
+		*result = pci_get_subdevice(dev);
+		break;
+	default:
+		return (ENOENT);
+	}
+
+	return (0);
+}
+
+static int
+vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	if (sc->vtpci_child_dev != child)
+		return (ENOENT);
+
+	switch (index) {
+	case VIRTIO_IVAR_FEATURE_DESC:
+		sc->vtpci_child_feat_desc = (void *) value;
+		break;
+	default:
+		return (ENOENT);
+	}
+
+	return (0);
+}
+#endif
+
+uint64_t
+vtpci_negotiate_features(device_t dev, uint64_t child_features)
+{
+	struct vtpci_softc *sc;
+	uint64_t host_features, features;
+
+	sc = device_get_softc(dev);
+
+	host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES);
+	vtpci_describe_features(sc, "host", host_features);
+
+	/*
+	 * Limit negotiated features to what the driver, virtqueue, and
+	 * host all support.
+	 */
+	features = host_features & child_features;
+	features = virtqueue_filter_features(features);
+	sc->vtpci_features = features;
+
+	vtpci_describe_features(sc, "negotiated", features);
+	vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features);
+
+	return (features);
+}
+
+int
+vtpci_with_feature(device_t dev, uint64_t feature)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	return ((sc->vtpci_features & feature) != 0);
+}
+
+int
+vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs,
+    struct vq_alloc_info *vq_info)
+{
+	struct vtpci_softc *sc;
+	struct virtqueue *vq;
+	struct vtpci_virtqueue *vqx;
+	struct vq_alloc_info *info;
+	int idx, error;
+	uint16_t size;
+
+	sc = device_get_softc(dev);
+
+	if (sc->vtpci_nvqs != 0)
+#ifdef NOTUSED
+		return (EALREADY);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+		return (EINVAL);
+#endif
+	if (nvqs <= 0)
+		return (EINVAL);
+
+	sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (sc->vtpci_vqs == NULL)
+		return (ENOMEM);
+
+	for (idx = 0; idx < nvqs; idx++) {
+		vqx = &sc->vtpci_vqs[idx];
+		info = &vq_info[idx];
+
+		vtpci_select_virtqueue(sc, idx);
+		size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
+
+		error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN,
+		    0xFFFFFFFFUL, info, &vq);
+		if (error) {
+			device_printf(dev,
+			    "cannot allocate virtqueue %d: %d\n", idx, error);
+			break;
+		}
+
+		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
+		    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+
+		vqx->vtv_vq = *info->vqai_vq = vq;
+		vqx->vtv_no_intr = info->vqai_intr == NULL;
+
+		sc->vtpci_nvqs++;
+	}
+
+	if (error)
+		vtpci_free_virtqueues(sc);
+
+	return (error);
+}
+
+int
+vtpci_setup_intr(device_t dev, enum intr_type type)
+{
+	struct vtpci_softc *sc;
+	int attempt, error;
+
+	sc = device_get_softc(dev);
+
+	for (attempt = 0; attempt < 5; attempt++) {
+		/*
+		 * Start with the most desirable interrupt configuration and
+		 * fallback towards less desirable ones.
+		 */
+		switch (attempt) {
+#ifdef NOTUSED
+		case 0:
+			error = vtpci_alloc_intr_msix_pervq(sc);
+			break;
+		case 1:
+			error = vtpci_alloc_intr_msix_shared(sc);
+			break;
+		case 2:
+			error = vtpci_alloc_intr_msi(sc);
+			break;
+		case 3:
+			error = vtpci_alloc_intr_legacy(sc);
+			break;
+#endif
+#ifdef RTEMS_VIRTIO_NET
+		case 0:
+			error = vtpci_alloc_intr_legacy(sc);
+			break;
+#endif
+		default:
+			device_printf(dev,
+			    "exhausted all interrupt allocation attempts\n");
+			return (ENXIO);
+		}
+
+		if (error == 0 && vtpci_setup_interrupts(sc, type) == 0)
+			break;
+
+		vtpci_cleanup_setup_intr_attempt(sc);
+	}
+
+#ifdef NOTUSED
+	if (bootverbose) {  
+		if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
+			device_printf(dev, "using legacy interrupt\n");
+		else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
+			device_printf(dev, "using MSI interrupt\n");
+		else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX)
+			device_printf(dev, "using shared MSIX interrupts\n");
+		else
+			device_printf(dev, "using per VQ MSIX interrupts\n");
+	}
+#endif
+
+	return (0);
+}
+
+void
+vtpci_stop(device_t dev)
+{
+
+	vtpci_reset(device_get_softc(dev));
+}
+
+int
+vtpci_reinit(device_t dev, uint64_t features)
+{
+	struct vtpci_softc *sc;
+	int idx, error;
+
+	sc = device_get_softc(dev);
+
+	/*
+	 * Redrive the device initialization. This is a bit of an abuse of
+	 * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to
+	 * play nice.
+	 *
+	 * We do not allow the host device to change from what was originally
+	 * negotiated beyond what the guest driver changed. MSIX state should
+	 * not change, number of virtqueues and their size remain the same, etc.
+	 * This will need to be rethought when we want to support migration.
+	 */
+
+	if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
+		vtpci_stop(dev);
+
+	/*
+	 * Quickly drive the status through ACK and DRIVER. The device
+	 * does not become usable again until vtpci_reinit_complete().
+	 */
+	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
+	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
+
+	vtpci_negotiate_features(dev, features);
+
+	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
+		error = vtpci_reinit_virtqueue(sc, idx);
+		if (error)
+			return (error);
+	}
+
+#ifdef NOTUSED
+	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
+		error = vtpci_set_host_msix_vectors(sc);
+		if (error)
+			return (error);
+	}
+#endif
+
+	return (0);
+}
+
+void
+vtpci_reinit_complete(device_t dev)
+{
+
+	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_notify_virtqueue(device_t dev, uint16_t queue)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue);
+}
+
+static uint8_t
+vtpci_get_status(device_t dev)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS));
+}
+
+static void
+vtpci_set_status(device_t dev, uint8_t status)
+{
+	struct vtpci_softc *sc;
+
+	sc = device_get_softc(dev);
+
+	if (status != VIRTIO_CONFIG_STATUS_RESET)
+		status |= vtpci_get_status(dev);
+
+	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
+}
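+
+/*
+ * Illustrative note (added for clarity; not part of the upstream FreeBSD
+ * driver): because vtpci_set_status() ORs the new value into the current
+ * status byte, the usual legacy initialization sequence accumulates bits:
+ *
+ *   vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);       status = 0x01
+ *   vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);    status = 0x03
+ *   vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); status = 0x07
+ *
+ * Only writing VIRTIO_CONFIG_STATUS_RESET (0) clears the register again.
+ */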
+
+void
+vtpci_read_dev_config(device_t dev, bus_size_t offset,
+    void *dst, int length)
+{
+	struct vtpci_softc *sc;
+	bus_size_t off;
+	uint8_t *d;
+	int size;
+
+	sc = device_get_softc(dev);
+	off = VIRTIO_PCI_CONFIG(sc) + offset;
+
+	for (d = dst; length > 0; d += size, off += size, length -= size) {
+		if (length >= 4) {
+			size = 4;
+			*(uint32_t *)d = vtpci_read_config_4(sc, off);
+		} else if (length >= 2) {
+			size = 2;
+			*(uint16_t *)d = vtpci_read_config_2(sc, off);
+		} else {
+			size = 1;
+			*d = vtpci_read_config_1(sc, off);
+		}
+	}
+}
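+
+/*
+ * Illustrative sketch (added for clarity; not part of the upstream FreeBSD
+ * driver): a network driver would typically use vtpci_read_dev_config() to
+ * fetch the MAC address from the device-specific configuration area, which
+ * starts at offset 0 for virtio-net once VIRTIO_NET_F_MAC is negotiated:
+ *
+ *   uint8_t mac[6];
+ *
+ *   if (vtpci_with_feature(dev, VIRTIO_NET_F_MAC))
+ *           vtpci_read_dev_config(dev, 0, mac, sizeof(mac));
+ */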
+
+void
+vtpci_write_dev_config(device_t dev, bus_size_t offset,
+    void *src, int length)
+{
+	struct vtpci_softc *sc;
+	bus_size_t off;
+	uint8_t *s;
+	int size;
+
+	sc = device_get_softc(dev);
+	off = VIRTIO_PCI_CONFIG(sc) + offset;
+
+	for (s = src; length > 0; s += size, off += size, length -= size) {
+		if (length >= 4) {
+			size = 4;
+			vtpci_write_config_4(sc, off, *(uint32_t *)s);
+		} else if (length >= 2) {
+			size = 2;
+			vtpci_write_config_2(sc, off, *(uint16_t *)s);
+		} else {
+			size = 1;
+			vtpci_write_config_1(sc, off, *s);
+		}
+	}
+}
+
+static void
+vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
+    uint64_t features)
+{
+	device_t dev, child;
+
+	dev = sc->vtpci_dev;
+#ifdef NOTUSED
+	child = sc->vtpci_child_dev;
+
+	if (device_is_attached(child) && bootverbose == 0)
+		return;
+#endif
+
+	virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc);
+}
+
+static void
+vtpci_probe_and_attach_child(struct vtpci_softc *sc)
+{
+	device_t dev, child;
+
+	dev = sc->vtpci_dev;
+#ifdef NOTUSED
+	child = sc->vtpci_child_dev;
+
+	if (child == NULL)
+		return;
+
+	if (device_get_state(child) != DS_NOTPRESENT)
+		return;
+
+	if (device_probe(child) != 0)
+		return;
+#endif
+	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
+#ifdef RTEMS_VIRTIO_NET
+	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+#endif
+#ifdef NOTUSED
+	if (device_attach(child) != 0) {
+		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
+		vtpci_reset(sc);
+		vtpci_release_child_resources(sc);
+		/* Reset status for future attempt. */
+		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
+	} else {
+		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+		VIRTIO_ATTACH_COMPLETED(child);
+	}
+#endif
+}
+
+#ifdef NOTUSED
+static int
+vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
+{
+	device_t dev;
+	int nmsix, cnt, required;
+
+	dev = sc->vtpci_dev;
+
+	/* Allocate an additional vector for the config changes. */
+	required = nvectors + 1;
+
+	nmsix = pci_msix_count(dev);
+	if (nmsix < required)
+		return (1);
+
+	cnt = required;
+	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
+		sc->vtpci_nmsix_resources = required;
+		return (0);
+	}
+
+	pci_release_msi(dev);
+
+	return (1);
+}
+
+static int
+vtpci_alloc_msi(struct vtpci_softc *sc)
+{
+	device_t dev;
+	int nmsi, cnt, required;
+
+	dev = sc->vtpci_dev;
+	required = 1;
+
+	nmsi = pci_msi_count(dev);
+	if (nmsi < required)
+		return (1);
+
+	cnt = required;
+	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required)
+		return (0);
+
+	pci_release_msi(dev);
+
+	return (1);
+}
+
+static int
+vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc)
+{
+	int i, nvectors, error;
+
+	if (vtpci_disable_msix != 0 ||
+	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
+		return (ENOTSUP);
+
+	for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) {
+		if (sc->vtpci_vqs[i].vtv_no_intr == 0)
+			nvectors++;
+	}
+
+	error = vtpci_alloc_msix(sc, nvectors);
+	if (error)
+		return (error);
+
+	sc->vtpci_flags |= VTPCI_FLAG_MSIX;
+
+	return (0);
+}
+
+static int
+vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc)
+{
+	int error;
+
+	if (vtpci_disable_msix != 0 ||
+	    sc->vtpci_flags & VTPCI_FLAG_NO_MSIX)
+		return (ENOTSUP);
+
+	error = vtpci_alloc_msix(sc, 1);
+	if (error)
+		return (error);
+
+	sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX;
+
+	return (0);
+}
+
+static int
+vtpci_alloc_intr_msi(struct vtpci_softc *sc)
+{
+	int error;
+
+	/* Only BHyVe supports MSI. */
+	if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI)
+		return (ENOTSUP);
+
+	error = vtpci_alloc_msi(sc);
+	if (error)
+		return (error);
+
+	sc->vtpci_flags |= VTPCI_FLAG_MSI;
+
+	return (0);
+}
+#endif
+
+static int
+vtpci_alloc_intr_legacy(struct vtpci_softc *sc)
+{
+
+	sc->vtpci_flags |= VTPCI_FLAG_LEGACY;
+
+	return (0);
+}
+
+static int
+vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags,
+    struct vtpci_interrupt *intr)
+{
+#ifdef NOTUSED
+	struct resource *irq;
+
+	irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags);
+	if (irq == NULL)
+		return (ENXIO);
+
+	intr->vti_irq = irq;
+	intr->vti_rid = rid;
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	uint8_t val8;
+	pcib_conf_read8( sc->pci_signature, PCI_INTERRUPT_LINE, &val8 );
+	intr->isr_number = val8;
+#endif
+
+	return (0);
+}
+
+static int
+vtpci_alloc_intr_resources(struct vtpci_softc *sc)
+{
+	struct vtpci_interrupt *intr;
+	int i, rid, flags, nvq_intrs, error;
+
+	rid = 0;
+#ifdef NOTUSED
+	flags = RF_ACTIVE;
+
+	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
+		flags |= RF_SHAREABLE;
+	else
+		rid = 1;
+#endif
+
+	/*
+	 * For legacy and MSI interrupts, this single resource handles all
+	 * interrupts. For MSIX, this resource is used for the configuration
+	 * changed interrupt.
+	 */
+	intr = &sc->vtpci_device_interrupt;
+	error = vtpci_alloc_interrupt(sc, rid, flags, intr);
+	if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI))
+		return (error);
+
+#ifdef NOTUSED
+	/* Subtract one for the configuration changed interrupt. */
+	nvq_intrs = sc->vtpci_nmsix_resources - 1;
+
+	intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs *
+	    sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (sc->vtpci_msix_vq_interrupts == NULL)
+		return (ENOMEM);
+
+	for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) {
+		error = vtpci_alloc_interrupt(sc, rid, flags, intr);
+		if (error)
+			return (error);
+	}
+#endif
+
+	return (0);
+}
+
+static int
+vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type)
+{
+	struct vtpci_interrupt *intr;
+	int error;
+
+	intr = &sc->vtpci_device_interrupt;
+#ifdef NOTUSED
+	error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL,
+	    vtpci_legacy_intr, sc, &intr->vti_handler);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	error = rtems_interrupt_handler_install(
+		intr->isr_number,
+		NULL,
+		RTEMS_INTERRUPT_SHARED,
+		(rtems_interrupt_handler) vtpci_legacy_intr,
+		sc);
+	intr->vti_handler = vtpci_legacy_intr;
+#endif
+
+	return (error);
+}
+
+#ifdef NOTUSED
+static int
+vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
+{
+	struct vtpci_virtqueue *vqx;
+	struct vtpci_interrupt *intr;
+	int i, error;
+
+	intr = sc->vtpci_msix_vq_interrupts;
+
+	for (i = 0; i < sc->vtpci_nvqs; i++) {
+		vqx = &sc->vtpci_vqs[i];
+
+		if (vqx->vtv_no_intr)
+			continue;
+
+		error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type,
+		    vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq,
+		    &intr->vti_handler);
+		if (error)
+			return (error);
+
+		intr++;
+	}
+
+	return (0);
+}
+
+static int
+vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type)
+{
+	device_t dev;
+	struct vtpci_interrupt *intr;
+	int error;
+
+	dev = sc->vtpci_dev;
+	intr = &sc->vtpci_device_interrupt;
+
+	error = bus_setup_intr(dev, intr->vti_irq, type, NULL,
+	    vtpci_config_intr, sc, &intr->vti_handler);
+	if (error)
+		return (error);
+
+	if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) {
+		intr = sc->vtpci_msix_vq_interrupts;
+		error = bus_setup_intr(dev, intr->vti_irq, type,
+		    vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc,
+		    &intr->vti_handler);
+	} else
+		error = vtpci_setup_pervq_msix_interrupts(sc, type);
+
+	return (error ? error : vtpci_set_host_msix_vectors(sc));
+}
+#endif
+
+static int
+vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type)
+{
+	int error;
+
+	type |= INTR_MPSAFE;
+	KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK,
+	    ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags));
+
+	error = vtpci_alloc_intr_resources(sc);
+	if (error)
+		return (error);
+
+	if (sc->vtpci_flags & VTPCI_FLAG_LEGACY)
+		error = vtpci_setup_legacy_interrupt(sc, type);
+#ifdef NOTUSED
+	else if (sc->vtpci_flags & VTPCI_FLAG_MSI)
+		error = vtpci_setup_msi_interrupt(sc, type);
+	else
+		error = vtpci_setup_msix_interrupts(sc, type);
+#endif
+
+	return (error);
+}
+
+#ifdef NOTUSED
+static int
+vtpci_register_msix_vector(struct vtpci_softc *sc, int offset,
+    struct vtpci_interrupt *intr)
+{
+	device_t dev;
+	uint16_t vector;
+
+	dev = sc->vtpci_dev;
+
+	if (intr != NULL) {
+		/* Map from guest rid to host vector. */
+		vector = intr->vti_rid - 1;
+	} else
+		vector = VIRTIO_MSI_NO_VECTOR;
+
+	vtpci_write_config_2(sc, offset, vector);
+
+	/* Read vector to determine if the host had sufficient resources. */
+	if (vtpci_read_config_2(sc, offset) != vector) {
+		device_printf(dev,
+		    "insufficient host resources for MSIX interrupts\n");
+		return (ENODEV);
+	}
+
+	return (0);
+}
+
+static int
+vtpci_set_host_msix_vectors(struct vtpci_softc *sc)
+{
+	struct vtpci_interrupt *intr, *tintr;
+	int idx, offset, error;
+
+	intr = &sc->vtpci_device_interrupt;
+	offset = VIRTIO_MSI_CONFIG_VECTOR;
+
+	error = vtpci_register_msix_vector(sc, offset, intr);
+	if (error)
+		return (error);
+
+	intr = sc->vtpci_msix_vq_interrupts;
+	offset = VIRTIO_MSI_QUEUE_VECTOR;
+
+	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
+		vtpci_select_virtqueue(sc, idx);
+
+		if (sc->vtpci_vqs[idx].vtv_no_intr)
+			tintr = NULL;
+		else
+			tintr = intr;
+
+		error = vtpci_register_msix_vector(sc, offset, tintr);
+		if (error)
+			break;
+
+		/*
+		 * For shared MSIX, all the virtqueues share the first
+		 * interrupt.
+		 */
+		if ((sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0)
+			intr++;
+	}
+
+	return (error);
+}
+#endif
+
+static int
+vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx)
+{
+	struct vtpci_virtqueue *vqx;
+	struct virtqueue *vq;
+	int error;
+	uint16_t size;
+
+	vqx = &sc->vtpci_vqs[idx];
+	vq = vqx->vtv_vq;
+
+	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));
+
+	vtpci_select_virtqueue(sc, idx);
+	size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM);
+
+	error = virtqueue_reinit(vq, size);
+	if (error)
+		return (error);
+
+	vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN,
+	    virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
+
+	return (0);
+}
+
+static void
+vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr)
+{
+	device_t dev;
+
+	dev = sc->vtpci_dev;
+
+#ifdef NOTUSED
+	if (intr->vti_handler != NULL) {
+		bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler);
+		intr->vti_handler = NULL;
+	}
+
+	if (intr->vti_irq != NULL) {
+		bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid,
+		    intr->vti_irq);
+		intr->vti_irq = NULL;
+		intr->vti_rid = -1;
+	}
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	if( intr->isr_number != 0 ){
+		rtems_interrupt_handler_remove(
+			intr->isr_number,
+			(rtems_interrupt_handler) vtpci_legacy_intr,
+			NULL);
+		intr->vti_handler = NULL;
+	}
+#endif
+}
+
+static void
+vtpci_free_interrupts(struct vtpci_softc *sc)
+{
+	struct vtpci_interrupt *intr;
+	int i, nvq_intrs;
+
+	vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt);
+
+	if (sc->vtpci_nmsix_resources != 0) {
+		nvq_intrs = sc->vtpci_nmsix_resources - 1;
+		sc->vtpci_nmsix_resources = 0;
+
+		intr = sc->vtpci_msix_vq_interrupts;
+		if (intr != NULL) {
+			for (i = 0; i < nvq_intrs; i++, intr++)
+				vtpci_free_interrupt(sc, intr);
+
+			free(sc->vtpci_msix_vq_interrupts, M_DEVBUF);
+			sc->vtpci_msix_vq_interrupts = NULL;
+		}
+	}
+
+#ifdef NOTUSED
+	if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX))
+		pci_release_msi(sc->vtpci_dev);
+#endif
+
+	sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK;
+}
+
+static void
+vtpci_free_virtqueues(struct vtpci_softc *sc)
+{
+	struct vtpci_virtqueue *vqx;
+	int idx;
+
+	for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
+		vqx = &sc->vtpci_vqs[idx];
+
+		vtpci_select_virtqueue(sc, idx);
+		vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0);
+
+		virtqueue_free(vqx->vtv_vq);
+		vqx->vtv_vq = NULL;
+	}
+
+	free(sc->vtpci_vqs, M_DEVBUF);
+	sc->vtpci_vqs = NULL;
+	sc->vtpci_nvqs = 0;
+}
+
+#ifdef NOTUSED
+static void
+vtpci_release_child_resources(struct vtpci_softc *sc)
+{
+
+	vtpci_free_interrupts(sc);
+	vtpci_free_virtqueues(sc);
+}
+#endif
+
+static void
+vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc)
+{
+	int idx;
+
+	if (sc->vtpci_flags & VTPCI_FLAG_MSIX) {
+		vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR,
+		    VIRTIO_MSI_NO_VECTOR);
+
+		for (idx = 0; idx < sc->vtpci_nvqs; idx++) {
+			vtpci_select_virtqueue(sc, idx);
+			vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR,
+			    VIRTIO_MSI_NO_VECTOR);
+		}
+	}
+
+	vtpci_free_interrupts(sc);
+}
+
+static void
+vtpci_reset(struct vtpci_softc *sc)
+{
+
+	/*
+	 * Setting the status to RESET sets the host device to
+	 * the original, uninitialized state.
+	 */
+	vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET);
+}
+
+static void
+vtpci_select_virtqueue(struct vtpci_softc *sc, int idx)
+{
+
+	vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx);
+}
+
+static void
+vtpci_legacy_intr(void *xsc)
+{
+	struct vtpci_softc *sc;
+	struct vtpci_virtqueue *vqx;
+	int i;
+	uint8_t isr;
+
+	sc = xsc;
+	vqx = &sc->vtpci_vqs[0];
+	
+	/* Reading the ISR also clears it. */
+	isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR);
+
+	if (isr & VIRTIO_PCI_ISR_CONFIG)
+		vtpci_config_intr(sc);
+
+	if (isr & VIRTIO_PCI_ISR_INTR) {
+#ifdef NOTUSED
+		for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
+			if (vqx->vtv_no_intr == 0)
+				virtqueue_intr(vqx->vtv_vq);
+		}
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	    rtems_bsdnet_event_send( sc->daemonTid, RTEMS_EVENT_1 );
+#endif
+	}
+}
+
+#ifdef NOTUSED
+static int
+vtpci_vq_shared_intr_filter(void *xsc)
+{
+	struct vtpci_softc *sc;
+	struct vtpci_virtqueue *vqx;
+	int i, rc;
+
+	rc = 0;
+	sc = xsc;
+	vqx = &sc->vtpci_vqs[0];
+
+	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
+		if (vqx->vtv_no_intr == 0)
+			rc |= virtqueue_intr_filter(vqx->vtv_vq);
+	}
+
+	return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
+}
+
+static void
+vtpci_vq_shared_intr(void *xsc)
+{
+	struct vtpci_softc *sc;
+	struct vtpci_virtqueue *vqx;
+	int i;
+
+	sc = xsc;
+	vqx = &sc->vtpci_vqs[0];
+
+	for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) {
+		if (vqx->vtv_no_intr == 0)
+			virtqueue_intr(vqx->vtv_vq);
+	}
+}
+
+static int
+vtpci_vq_intr_filter(void *xvq)
+{
+	struct virtqueue *vq;
+	int rc;
+
+	vq = xvq;
+	rc = virtqueue_intr_filter(vq);
+
+	return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
+}
+static void
+vtpci_vq_intr(void *xvq)
+{
+	struct virtqueue *vq;
+
+	vq = xvq;
+	virtqueue_intr(vq);
+}
+#endif
+
+
+static void
+vtpci_config_intr(void *xsc)
+{
+	struct vtpci_softc *sc;
+	device_t child;
+
+	sc = xsc;
+	child = sc->vtpci_child_dev;
+
+#ifdef NOTUSED
+	if (child != NULL)
+		VIRTIO_CONFIG_CHANGE(child);
+#endif
+}
+
+#endif /* __i386__ */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h
new file mode 100644
index 0000000..c34708e
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h
@@ -0,0 +1,166 @@
+/**
+ * @file virtio_pci.h
+ * @brief Header for virtio_pci.c
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+ 
+/*-
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ *  Anthony Liguori  <aliguori at us.ibm.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/pci/virtio_pci.h 238360 2012-07-11 02:57:19Z grehan $
+ */
+
+#ifndef _VIRTIO_PCI_H
+#define _VIRTIO_PCI_H
+
+#ifdef RTEMS_VIRTIO_NET
+struct vtpci_interrupt {
+	uint32_t		isr_number;
+	void			*vti_handler;
+};
+
+struct vtpci_virtqueue {
+	struct virtqueue	*vtv_vq;
+	int			 vtv_no_intr;
+};
+
+struct vtpci_softc {
+	device_t			 vtpci_dev;
+#ifdef NOTUSED
+	struct resource			*vtpci_res;
+	struct resource			*vtpci_msix_res;
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	int unit_number;
+	char *unit_name;
+
+	int pci_signature;
+	uint32_t pci_io_base;
+	rtems_id daemonTid;
+#endif
+	uint64_t			 vtpci_features;
+	uint32_t			 vtpci_flags;
+
+	/* This "bus" will only ever have one child. */
+	device_t			 vtpci_child_dev;
+	struct virtio_feature_desc	*vtpci_child_feat_desc;
+
+	int				 vtpci_nvqs;
+	struct vtpci_virtqueue		*vtpci_vqs;
+
+	/*
+	 * Ideally, each virtqueue that the driver provides a callback for will
+	 * receive its own MSIX vector. If there are not sufficient vectors
+	 * available, then attempt to have all the VQs share one vector. For
+	 * MSIX, the configuration changed notifications must be on their own
+	 * vector.
+	 *
+	 * If MSIX is not available, we will attempt to have the whole device
+	 * share one MSI vector, and then, finally, one legacy interrupt.
+	 */
+	struct vtpci_interrupt		 vtpci_device_interrupt;
+	struct vtpci_interrupt		*vtpci_msix_vq_interrupts;
+	int				 vtpci_nmsix_resources;
+};
+
+int rtems_vtpci_attach(struct rtems_bsdnet_ifconfig *config, struct vtpci_softc **xsc);
+uint64_t vtpci_negotiate_features(device_t, uint64_t);
+int	vtpci_with_feature(device_t, uint64_t);
+int	vtpci_alloc_virtqueues(device_t, int, int,
+		    struct vq_alloc_info *);
+int	vtpci_setup_intr(device_t, enum intr_type);
+void vtpci_stop(device_t);
+int	vtpci_reinit(device_t, uint64_t);
+void vtpci_reinit_complete(device_t);
+void vtpci_notify_virtqueue(device_t, uint16_t);
+void vtpci_read_dev_config(device_t, bus_size_t, void *, int);
+void vtpci_write_dev_config(device_t, bus_size_t, void *, int);
+#endif /* RTEMS_VIRTIO_NET */
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_PCI_VENDORID	0x1AF4
+#define VIRTIO_PCI_DEVICEID_MIN	0x1000
+#define VIRTIO_PCI_DEVICEID_MAX	0x103F
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION	0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES  0  /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN      8  /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM      12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL      14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY	  16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS         18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR            19 /* interrupt status register, reading
+				      * also clears the register (8, RO) */
+/* Only if MSIX is enabled: */
+#define VIRTIO_MSI_CONFIG_VECTOR  20 /* configuration change vector (16, RW) */
+#define VIRTIO_MSI_QUEUE_VECTOR   22 /* vector for selected VQ notifications
+					(16, RW) */
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR	0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG	0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR	0xFFFF
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(sc) \
+    (((sc)->vtpci_flags & VTPCI_FLAG_MSIX) ? 24 : 20)
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12
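+/*
+ * Example (illustrative, not from the upstream header): a ring placed at
+ * guest-physical address 0x00123000 is programmed into VIRTIO_PCI_QUEUE_PFN
+ * as 0x00123000 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT == 0x123.
+ */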
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN	4096
+
+#endif /* _VIRTIO_PCI_H */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio_ring.h b/c/src/lib/libbsp/i386/pc386/virtio/virtio_ring.h
new file mode 100644
index 0000000..3e780c5
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio_ring.h
@@ -0,0 +1,180 @@
+/**
+ * @file virtio_ring.h
+ * @brief Header for virtio.c
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+
+/*-
+ * Copyright Rusty Russell IBM Corporation 2007.
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/virtio_ring.h 238360 2012-07-11 02:57:19Z grehan $
+ */
+
+#ifndef VIRTIO_RING_H
+#define	VIRTIO_RING_H
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT       1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE      2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT	4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer.  It's unreliable, so it's simply an
+ * optimization.  Guest will still kick if it's out of buffers. */
+#define VRING_USED_F_NO_NOTIFY  1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer.  It's unreliable, so it's
+ * simply an optimization.  */
+#define VRING_AVAIL_F_NO_INTERRUPT      1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next". */
+struct vring_desc {
+        /* Address (guest-physical). */
+        uint64_t addr;
+        /* Length. */
+        uint32_t len;
+        /* The flags as indicated above. */
+        uint16_t flags;
+        /* We chain unused descriptors via this, too. */
+        uint16_t next;
+};
+
+struct vring_avail {
+        uint16_t flags;
+        uint16_t idx;
+        uint16_t ring[0];
+};
+
+/* uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+        /* Index of start of used descriptor chain. */
+        uint32_t id;
+        /* Total length of the descriptor chain which was written to. */
+        uint32_t len;
+};
+
+struct vring_used {
+        uint16_t flags;
+        uint16_t idx;
+        struct vring_used_elem ring[0];
+};
+
+struct vring {
+	unsigned int num;
+
+	struct vring_desc *desc;
+	struct vring_avail *avail;
+	struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which
+ * looks like this.  We assume num is a power of 2.
+ *
+ * struct vring {
+ *      // The actual descriptors (16 bytes each)
+ *      struct vring_desc desc[num];
+ *
+ *      // A ring of available descriptor heads with free-running index.
+ *      __u16 avail_flags;
+ *      __u16 avail_idx;
+ *      __u16 available[num];
+ *      __u16 used_event_idx;
+ *
+ *      // Padding to the next align boundary.
+ *      char pad[];
+ *
+ *      // A ring of used descriptor heads with free-running index.
+ *      __u16 used_flags;
+ *      __u16 used_idx;
+ *      struct vring_used_elem used[num];
+ *      __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr)	((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr)	(*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline int
+vring_size(unsigned int num, unsigned long align)
+{
+	int size;
+
+	size = num * sizeof(struct vring_desc);
+	size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)) +
+	    sizeof(uint16_t);
+	size = (size + align - 1) & ~(align - 1);
+	size += sizeof(struct vring_used) +
+	    (num * sizeof(struct vring_used_elem)) + sizeof(uint16_t);
+	return (size);
+}
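+
+/*
+ * Worked example (illustrative, not from the upstream header): for a
+ * 256-entry queue with the 4096-byte PCI alignment,
+ *   descriptors:  256 * 16        = 4096 bytes
+ *   avail ring:   4 + 256 * 2 + 2 =  518 bytes -> 4614 total
+ *   pad to 4096 boundary          -> 8192 total
+ *   used ring:    4 + 256 * 8 + 2 = 2054 bytes
+ * so vring_size(256, 4096) returns 10246 bytes.
+ */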
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+    unsigned long align)
+{
+        vr->num = num;
+        vr->desc = (struct vring_desc *) p;
+        vr->avail = (struct vring_avail *) (p +
+	    num * sizeof(struct vring_desc));
+        vr->used = (void *)
+	    (((unsigned long) &vr->avail->ring[num] + align-1) & ~(align-1));
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ *
+ * Assuming a given event_idx value from the other size, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+
+	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
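+
+/*
+ * Worked example (illustrative, not from the upstream header): with old = 3
+ * and new_idx = 8, an event is due if event_idx lies in the range we just
+ * crossed.  For event_idx = 5:
+ *   (uint16_t)(8 - 5 - 1) = 2 < (uint16_t)(8 - 3) = 5  -> notify.
+ * For event_idx = 10 (not yet reached):
+ *   (uint16_t)(8 - 10 - 1) = 65533, which is not < 5   -> no event.
+ * The unsigned arithmetic keeps the comparison correct across wraparound.
+ */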
+#endif /* VIRTIO_RING_H */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtqueue.c b/c/src/lib/libbsp/i386/pc386/virtio/virtqueue.c
new file mode 100644
index 0000000..1d07b76
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtqueue.c
@@ -0,0 +1,963 @@
+/**
+ * @file virtqueue.c
+ * @brief Implements the virtqueue interface as basically described
+ * in the original VirtIO paper.
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/virtqueue.c 255166 2013-09-03 02:26:57Z bryanv $
+ */
+
+#define VTNET_LEGACY_TX
+#define RTEMS_VIRTIO_NET
+
+#include <rtems.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include <bsp.h>
+
+#include <sys/mbuf.h>
+#include <sys/param.h>
+
+#include "virtio.h"
+#include "virtqueue.h"
+#include "virtio_ring.h"
+#include "virtio_pci.h"
+
+#define RTEMS_VIRTIO_NET
+#ifdef RTEMS_VIRTIO_NET
+static uint16_t rtems_vq_ring_enqueue_segments(struct virtqueue *vq,
+	struct vring_desc *desc, uint16_t head_idx, struct mbuf *m_head,
+	int readable, int writable);
+#endif
+
+#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
+#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002
+
+struct vq_desc_extra {
+		void		  *cookie;
+		struct vring_desc *indirect;
+		vm_paddr_t	   indirect_paddr;
+		uint16_t	   ndescs;
+};
+
+
+struct virtqueue {
+	device_t		 vq_dev;
+	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
+	uint16_t		 vq_queue_index;
+	uint16_t		 vq_nentries;
+	uint32_t		 vq_flags;
+
+	int			 vq_alignment;
+	int			 vq_ring_size;
+	void			*vq_ring_mem;
+	void			*vq_ring_mem_orig;
+	int			 vq_max_indirect_size;
+	int			 vq_indirect_mem_size;
+	virtqueue_intr_t	*vq_intrhand;
+	void			*vq_intrhand_arg;
+
+	struct vring		 vq_ring;
+	uint16_t		 vq_free_cnt;
+	uint16_t		 vq_queued_cnt;
+	/*
+	 * Head of the free chain in the descriptor table. If
+	 * there are no free descriptors, this will be set to
+	 * VQ_RING_DESC_CHAIN_END.
+	 */
+	uint16_t		 vq_desc_head_idx;
+	/*
+	 * Last consumed descriptor in the used table,
+	 * trails vq_ring.used->idx.
+	 */
+	uint16_t		 vq_used_cons_idx;
+
+	struct vq_desc_extra vq_descx[10];
+};
+
+/*
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+#define VQASSERT(_vq, _exp, _msg, ...)				\
+    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
+	##__VA_ARGS__))
+
+#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
+    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
+	"invalid ring index: %d, max: %d", (_idx),		\
+	(_vq)->vq_nentries)
+
+#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
+    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
+	VQ_RING_DESC_CHAIN_END,	"full ring terminated "		\
+	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)
+
+#ifdef NOTUSED
+static int	virtqueue_init_indirect(struct virtqueue *vq, int);
+static void	virtqueue_free_indirect(struct virtqueue *vq);
+static void	virtqueue_init_indirect_list(struct virtqueue *,
+		    struct vring_desc *);
+		    
+static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
+		    struct vring_desc *, uint16_t, struct sglist *, int, int);
+static int	vq_ring_use_indirect(struct virtqueue *, int);
+static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
+		    struct sglist *, int, int);
+#endif
+
+static void	vq_ring_init(struct virtqueue *);
+static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
+static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
+static int	vq_ring_must_notify_host(struct virtqueue *);
+static void	vq_ring_notify_host(struct virtqueue *);
+static void	vq_ring_free_chain(struct virtqueue *, uint16_t);
+
+uint64_t
+virtqueue_filter_features(uint64_t features)
+{
+	uint64_t mask;
+
+	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
+	mask |= VIRTIO_RING_F_INDIRECT_DESC;
+	mask |= VIRTIO_RING_F_EVENT_IDX;
+
+	return (features & mask);
+}
+
+int
+virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
+    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
+{
+	struct virtqueue *vq;
+	int error;
+
+	*vqp = NULL;
+	error = 0;
+
+	if (size == 0) {
+		device_printf(dev,
+		    "virtqueue %d (%s) does not exist (size is zero)\n",
+		    queue, info->vqai_name);
+		return (ENODEV);
+	} else if (!powerof2(size)) {
+		device_printf(dev,
+		    "virtqueue %d (%s) size is not a power of 2: %d\n",
+		    queue, info->vqai_name, size);
+		return (ENXIO);
+	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
+		device_printf(dev, "virtqueue %d (%s) requested too many "
+		    "indirect descriptors: %d, max %d\n",
+		    queue, info->vqai_name, info->vqai_maxindirsz,
+		    VIRTIO_MAX_INDIRECT);
+		return (EINVAL);
+	}
+
+	vq = malloc(sizeof(struct virtqueue) +
+	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (vq == NULL) {
+		device_printf(dev, "cannot allocate virtqueue\n");
+		return (ENOMEM);
+	}
+
+	vq->vq_dev = dev;
+	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
+	vq->vq_queue_index = queue;
+	vq->vq_alignment = align;
+	vq->vq_nentries = size;
+	vq->vq_free_cnt = size;
+	vq->vq_intrhand = info->vqai_intr;
+	vq->vq_intrhand_arg = info->vqai_intr_arg;
+
+	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
+		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;
+
+#ifdef NOTUSED
+	if (info->vqai_maxindirsz > 1) {
+		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
+		if (error)
+			goto fail;
+	}
+
+	vq->vq_ring_size = round_page(vring_size(size, align));
+	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
+	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	vq->vq_ring_size = vring_size(size, align);
+	vq->vq_ring_mem = malloc(vq->vq_ring_size+align, M_DEVBUF, M_NOWAIT);
+	vq->vq_ring_mem_orig = vq->vq_ring_mem;
+	if ( ( (unsigned long) vq->vq_ring_mem % align ) > 0 ) {
+		vq->vq_ring_mem =
+		(void *) ( (unsigned long) vq->vq_ring_mem +
+		( align - ( (unsigned long) vq->vq_ring_mem % align ) ) );
+	}
+#endif
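+	/*
+	 * Note on the RTEMS path above (illustrative, not upstream code):
+	 * malloc() gives no alignment guarantee, so ring_size + align bytes
+	 * are allocated and the ring start is rounded up.  For example, with
+	 * align = 4096 and malloc() returning 0x00200010, the ring is moved
+	 * to 0x00200010 + (4096 - 0x10) = 0x00201000.
+	 */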
+	if (vq->vq_ring_mem == NULL) {
+		device_printf(dev,
+		    "cannot allocate memory for virtqueue ring\n");
+		error = ENOMEM;
+		goto fail;
+	}
+
+	vq_ring_init(vq);
+	virtqueue_disable_intr(vq);
+
+	*vqp = vq;
+
+fail:
+	if (error)
+		virtqueue_free(vq);
+
+	return (error);
+}
+
+#ifdef NOTUSED
+static int
+virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
+{
+	device_t dev;
+	struct vq_desc_extra *dxp;
+	int i, size;
+
+	dev = vq->vq_dev;
+
+	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
+		/*
+		 * Indirect descriptors requested by the driver but not
+		 * negotiated. Return zero to keep the initialization
+		 * going: we'll run fine without.
+		 */
+		if (bootverbose)
+			device_printf(dev, "virtqueue %d (%s) requested "
+			    "indirect descriptors but not negotiated\n",
+			    vq->vq_queue_index, vq->vq_name);
+		return (0);
+	}
+
+	size = indirect_size * sizeof(struct vring_desc);
+	vq->vq_max_indirect_size = indirect_size;
+	vq->vq_indirect_mem_size = size;
+	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;
+
+	for (i = 0; i < vq->vq_nentries; i++) {
+		dxp = &vq->vq_descx[i];
+
+		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
+		if (dxp->indirect == NULL) {
+			device_printf(dev, "cannot allocate indirect list\n");
+			return (ENOMEM);
+		}
+
+		dxp->indirect_paddr = vtophys(dxp->indirect);
+		virtqueue_init_indirect_list(vq, dxp->indirect);
+	}
+
+	return (0);
+}
+
+static void
+virtqueue_free_indirect(struct virtqueue *vq)
+{
+	struct vq_desc_extra *dxp;
+	int i;
+
+	for (i = 0; i < vq->vq_nentries; i++) {
+		dxp = &vq->vq_descx[i];
+
+		if (dxp->indirect == NULL)
+			break;
+
+		free(dxp->indirect, M_DEVBUF);
+		dxp->indirect = NULL;
+		dxp->indirect_paddr = 0;
+	}
+
+	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
+	vq->vq_indirect_mem_size = 0;
+}
+
+static void
+virtqueue_init_indirect_list(struct virtqueue *vq,
+    struct vring_desc *indirect)
+{
+	int i;
+
+	bzero(indirect, vq->vq_indirect_mem_size);
+
+	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
+		indirect[i].next = i + 1;
+	indirect[i].next = VQ_RING_DESC_CHAIN_END;
+}
+#endif
+
+int
+virtqueue_reinit(struct virtqueue *vq, uint16_t size)
+{
+	struct vq_desc_extra *dxp;
+	int i;
+
+	if (vq->vq_nentries != size) {
+		device_printf(vq->vq_dev,
+		    "%s: '%s' changed size; old=%hu, new=%hu\n",
+		    __func__, vq->vq_name, vq->vq_nentries, size);
+		return (EINVAL);
+	}
+
+	/* Warn if the virtqueue was not properly cleaned up. */
+	if (vq->vq_free_cnt != vq->vq_nentries) {
+		device_printf(vq->vq_dev,
+		    "%s: warning '%s' virtqueue not empty, "
+		    "leaking %d entries\n", __func__, vq->vq_name,
+		    vq->vq_nentries - vq->vq_free_cnt);
+	}
+
+	vq->vq_desc_head_idx = 0;
+	vq->vq_used_cons_idx = 0;
+	vq->vq_queued_cnt = 0;
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	/* To be safe, reset all our allocated memory. */
+	bzero(vq->vq_ring_mem, vq->vq_ring_size);
+	for (i = 0; i < vq->vq_nentries; i++) {
+		dxp = &vq->vq_descx[i];
+		dxp->cookie = NULL;
+		dxp->ndescs = 0;
+#ifdef NOTUSED
+		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
+			virtqueue_init_indirect_list(vq, dxp->indirect);
+#endif
+	}
+
+	vq_ring_init(vq);
+	virtqueue_disable_intr(vq);
+
+	return (0);
+}
+
+void
+virtqueue_free(struct virtqueue *vq)
+{
+
+	if (vq->vq_free_cnt != vq->vq_nentries) {
+		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
+		    "leaking %d entries\n", vq->vq_name,
+		    vq->vq_nentries - vq->vq_free_cnt);
+	}
+
+#ifdef NOTUSED
+	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
+		virtqueue_free_indirect(vq);
+#endif
+
+	if (vq->vq_ring_mem != NULL) {
+#ifdef NOTUSED
+		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+		free(vq->vq_ring_mem_orig, M_DEVBUF);
+#endif
+		vq->vq_ring_size = 0;
+		vq->vq_ring_mem = NULL;
+	}
+
+	free(vq, M_DEVBUF);
+}
+
+vm_paddr_t
+virtqueue_paddr(struct virtqueue *vq)
+{
+
+	return (vtophys(vq->vq_ring_mem));
+}
+
+int
+virtqueue_size(struct virtqueue *vq)
+{
+
+	return (vq->vq_nentries);
+}
+
+int
+virtqueue_empty(struct virtqueue *vq)
+{
+
+	return (vq->vq_nentries == vq->vq_free_cnt);
+}
+
+int
+virtqueue_full(struct virtqueue *vq)
+{
+
+	return (vq->vq_free_cnt == 0);
+}
+
+void
+virtqueue_notify(struct virtqueue *vq)
+{
+
+	/* Ensure updated avail->idx is visible to host. */
+	mb();
+
+	if (vq_ring_must_notify_host(vq))
+		vq_ring_notify_host(vq);
+	vq->vq_queued_cnt = 0;
+}
+
+int
+virtqueue_nused(struct virtqueue *vq)
+{
+	uint16_t used_idx, nused;
+
+	used_idx = vq->vq_ring.used->idx;
+
+	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
+	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");
+
+	return (nused);
+}
+
+int
+virtqueue_intr_filter(struct virtqueue *vq)
+{
+
+	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+		return (0);
+
+	virtqueue_disable_intr(vq);
+
+	return (1);
+}
+
+void
+virtqueue_intr(struct virtqueue *vq)
+{
+	
+	vq->vq_intrhand(vq->vq_intrhand_arg);
+}
+
+int
+virtqueue_enable_intr(struct virtqueue *vq)
+{
+
+	return (vq_ring_enable_interrupt(vq, 0));
+}
+
+int
+virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
+{
+	uint16_t ndesc, avail_idx;
+
+	avail_idx = vq->vq_ring.avail->idx;
+	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);
+
+	switch (hint) {
+	case VQ_POSTPONE_SHORT:
+		ndesc = ndesc / 4;
+		break;
+	case VQ_POSTPONE_LONG:
+		ndesc = (ndesc * 3) / 4;
+		break;
+	case VQ_POSTPONE_EMPTIED:
+		break;
+	}
+
+	return (vq_ring_enable_interrupt(vq, ndesc));
+}
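+
+/*
+ * Worked example (illustrative, not part of the upstream sources): with
+ * vq_used_cons_idx = 100 and avail->idx = 164 there are 64 outstanding
+ * buffers.  VQ_POSTPONE_LONG requests ndesc = 64 * 3 / 4 = 48, so the used
+ * event index is set to 148 and the host only interrupts after it has
+ * consumed roughly 48 more buffers.
+ */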
+
+/*
+ * Note this is only considered a hint to the host.
+ */
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+
+	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
+		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
+		    vq->vq_nentries - 1;
+	} else
+		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+#ifdef NOTUSED
+int
+virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
+    int readable, int writable)
+{
+	struct vq_desc_extra *dxp;
+	int needed;
+	uint16_t head_idx, idx;
+
+	needed = readable + writable;
+
+	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
+	VQASSERT(vq, needed == sg->sg_nseg,
+	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
+	VQASSERT(vq,
+	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
+	    "too many segments to enqueue: %d, %d/%d", needed,
+	    vq->vq_nentries, vq->vq_max_indirect_size);
+
+	if (needed < 1)
+		return (EINVAL);
+	if (vq->vq_free_cnt == 0)
+		return (ENOSPC);
+
+	if (vq_ring_use_indirect(vq, needed)) {
+		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
+		return (0);
+	} else if (vq->vq_free_cnt < needed)
+		return (EMSGSIZE);
+
+	head_idx = vq->vq_desc_head_idx;
+	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
+	dxp = &vq->vq_descx[head_idx];
+
+	VQASSERT(vq, dxp->cookie == NULL,
+	    "cookie already exists for index %d", head_idx);
+	dxp->cookie = cookie;
+	dxp->ndescs = needed;
+
+	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
+	    sg, readable, writable);
+
+	vq->vq_desc_head_idx = idx;
+	vq->vq_free_cnt -= needed;
+	if (vq->vq_free_cnt == 0)
+		VQ_RING_ASSERT_CHAIN_TERM(vq);
+	else
+		VQ_RING_ASSERT_VALID_IDX(vq, idx);
+
+	vq_ring_update_avail(vq, head_idx);
+
+	return (0);
+}
+#endif
+#ifdef RTEMS_VIRTIO_NET
+int
+rtems_virtqueue_enqueue(struct virtqueue *vq, void *cookie,
+    int readable, int writable)
+{
+	struct vq_desc_extra *dxp;
+	int needed;
+	uint16_t head_idx, idx;
+
+	needed = readable + writable;
+
+	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
+	VQASSERT(vq,
+	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
+	    "too many segments to enqueue: %d, %d/%d", needed,
+	    vq->vq_nentries, vq->vq_max_indirect_size);
+
+	if (needed < 1)
+		return (EINVAL);
+	if (vq->vq_free_cnt == 0)
+		return (ENOSPC);
+
+#ifdef NOTUSED
+	if (vq_ring_use_indirect(vq, needed)) {
+		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
+		return (0);
+	} else if (vq->vq_free_cnt < needed)
+		return (EMSGSIZE);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+	if (vq->vq_free_cnt < needed)
+		return (EMSGSIZE);
+#endif
+
+	head_idx = vq->vq_desc_head_idx;
+	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
+	dxp = &vq->vq_descx[head_idx];
+
+	VQASSERT(vq, dxp->cookie == NULL,
+	    "cookie already exists for index %d", head_idx);
+	dxp->cookie = cookie;
+	dxp->ndescs = needed;
+
+	idx = rtems_vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
+	    cookie, readable, writable);
+
+	vq->vq_desc_head_idx = idx;
+	vq->vq_free_cnt -= needed;
+	if (vq->vq_free_cnt == 0)
+		VQ_RING_ASSERT_CHAIN_TERM(vq);
+	else
+		VQ_RING_ASSERT_VALID_IDX(vq, idx);
+
+	vq_ring_update_avail(vq, head_idx);
+
+	return (0);
+}
+#endif
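+
+/*
+ * Illustrative sketch (added for clarity; not part of the upstream sources):
+ * a transmit path built on rtems_virtqueue_enqueue() would count the mbuf
+ * segments and enqueue them as readable descriptors, e.g.
+ *
+ *   int nsegs = 0;
+ *   struct mbuf *m;
+ *
+ *   for (m = m_head; m != NULL; m = m->m_next)
+ *           nsegs++;
+ *   if (rtems_virtqueue_enqueue(vq, m_head, nsegs, 0) == 0)
+ *           virtqueue_notify(vq);
+ *
+ * A real transmit path also has to supply the leading virtio_net_hdr the
+ * device expects; that part is omitted here.
+ */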
+
+void *
+virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
+{
+	struct vring_used_elem *uep;
+	void *cookie;
+	uint16_t used_idx, desc_idx;
+
+	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
+		return (NULL);
+	
+	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
+	uep = &vq->vq_ring.used->ring[used_idx];
+	
+	rmb();
+	// __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
+	desc_idx = (uint16_t) uep->id;
+	if (len != NULL)
+		*len = uep->len;
+
+	vq_ring_free_chain(vq, desc_idx);
+
+	cookie = vq->vq_descx[desc_idx].cookie;
+	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
+	vq->vq_descx[desc_idx].cookie = NULL;
+	
+	return (cookie);
+}
+
+#ifdef NOTUSED
+void *
+virtqueue_poll(struct virtqueue *vq, uint32_t *len)
+{
+	void *cookie;
+
+	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
+		cpu_spinwait();
+
+	return (cookie);
+}
+#endif
+
+void *
+virtqueue_drain(struct virtqueue *vq, int *last)
+{
+	void *cookie;
+	int idx;
+
+	cookie = NULL;
+	idx = *last;
+
+	while (idx < vq->vq_nentries && cookie == NULL) {
+		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
+			vq->vq_descx[idx].cookie = NULL;
+			/* Free chain to keep free count consistent. */
+			vq_ring_free_chain(vq, idx);
+		}
+		idx++;
+	}
+
+	*last = idx;
+
+	return (cookie);
+}
+
+#ifdef NOTUSED
+void
+virtqueue_dump(struct virtqueue *vq)
+{
+
+	if (vq == NULL)
+		return;
+
+	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
+	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
+	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
+	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
+	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
+	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
+	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
+	    vq->vq_ring.used->flags);
+}
+#endif
+
+static void
+vq_ring_init(struct virtqueue *vq)
+{
+	struct vring *vr;
+	char *ring_mem;
+	int i, size;
+
+	ring_mem = vq->vq_ring_mem;
+	size = vq->vq_nentries;
+	vr = &vq->vq_ring;
+
+	vring_init(vr, size, ring_mem, vq->vq_alignment);
+
+	for (i = 0; i < size - 1; i++)
+		vr->desc[i].next = i + 1;
+	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
+static void
+vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
+{
+	uint16_t avail_idx;
+
+	/*
+	 * Place the head of the descriptor chain into the next slot and make
+	 * it usable to the host. The chain is made available now rather than
+	 * deferring to virtqueue_notify() in the hopes that if the host is
+	 * currently running on another CPU, we can keep it processing the new
+	 * descriptor.
+	 */
+	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
+	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+
+	wmb();
+	vq->vq_ring.avail->idx++;
+
+	/* Keep pending count until virtqueue_notify(). */
+	vq->vq_queued_cnt++;
+}
+
+#ifdef NOTUSED
+static uint16_t
+vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
+    uint16_t head_idx, struct sglist *sg, int readable, int writable)
+{
+	struct sglist_seg *seg;
+	struct vring_desc *dp;
+	int i, needed;
+	uint16_t idx;
+
+	needed = readable + writable;
+
+	for (i = 0, idx = head_idx, seg = sg->sg_segs;
+	     i < needed;
+	     i++, idx = dp->next, seg++) {
+		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
+		    "premature end of free desc chain");
+
+		dp = &desc[idx];
+		dp->addr = seg->ss_paddr;
+		dp->len = seg->ss_len;
+		dp->flags = 0;
+
+		if (i < needed - 1)
+			dp->flags |= VRING_DESC_F_NEXT;
+		if (i >= readable)
+			dp->flags |= VRING_DESC_F_WRITE;
+	}
+
+	return (idx);
+}
+#endif
+#ifdef RTEMS_VIRTIO_NET
+static uint16_t
+rtems_vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
+    uint16_t head_idx, struct mbuf *m_head, int readable, int writable)
+{
+	struct mbuf *m;
+	struct vring_desc *dp;
+	int i, needed;
+	uint16_t idx;
+
+	needed = readable + writable;
+
+	for (i = 0, idx = head_idx, m = m_head; i < needed;
+		i++, idx = dp->next, m=m->m_next) {
+		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
+			"premature end of free desc chain");
+
+		dp = &desc[idx];
+		dp->addr = (uint64_t) m->m_data;
+		dp->len = m->m_len;
+		dp->flags = 0;
+
+		if (i < needed - 1)
+			dp->flags |= VRING_DESC_F_NEXT;
+		if (i >= readable)
+			dp->flags |= VRING_DESC_F_WRITE;
+	}
+
+	return (idx);
+}
+#endif
+
+#ifdef NOTUSED
+static int
+vq_ring_use_indirect(struct virtqueue *vq, int needed)
+{
+
+	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
+		return (0);
+
+	if (vq->vq_max_indirect_size < needed)
+		return (0);
+
+	if (needed < 2)
+		return (0);
+
+	return (1);
+}
+
+static void
+vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
+    struct sglist *sg, int readable, int writable)
+{
+	struct vring_desc *dp;
+	struct vq_desc_extra *dxp;
+	int needed;
+	uint16_t head_idx;
+
+	needed = readable + writable;
+	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
+	    "enqueuing too many indirect descriptors");
+
+	head_idx = vq->vq_desc_head_idx;
+	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
+	dp = &vq->vq_ring.desc[head_idx];
+	dxp = &vq->vq_descx[head_idx];
+
+	VQASSERT(vq, dxp->cookie == NULL,
+	    "cookie already exists for index %d", head_idx);
+	dxp->cookie = cookie;
+	dxp->ndescs = 1;
+
+	dp->addr = dxp->indirect_paddr;
+	dp->len = needed * sizeof(struct vring_desc);
+	dp->flags = VRING_DESC_F_INDIRECT;
+
+	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
+	    sg, readable, writable);
+
+	vq->vq_desc_head_idx = dp->next;
+	vq->vq_free_cnt--;
+	if (vq->vq_free_cnt == 0)
+		VQ_RING_ASSERT_CHAIN_TERM(vq);
+	else
+		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);
+
+	vq_ring_update_avail(vq, head_idx);
+}
+#endif
+
+static int
+vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
+{
+
+	/*
+	 * Enable interrupts, making sure we get the latest index of
+	 * what's already been consumed.
+	 */
+	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
+		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
+	else
+		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+
+	mb();
+
+	/*
+	 * Enough items may have already been consumed to meet our threshold
+	 * since we last checked. Let our caller know so it processes the new
+	 * entries.
+	 */
+	if (virtqueue_nused(vq) > ndesc)
+		return (1);
+
+	return (0);
+}
+
+static int
+vq_ring_must_notify_host(struct virtqueue *vq)
+{
+	uint16_t new_idx, prev_idx, event_idx;
+
+#ifdef NOTUSED
+	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
+		new_idx = vq->vq_ring.avail->idx;
+		prev_idx = new_idx - vq->vq_queued_cnt;
+		event_idx = vring_avail_event(&vq->vq_ring);
+
+		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
+	}
+#endif
+
+	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
+}
+
+static void
+vq_ring_notify_host(struct virtqueue *vq)
+{
+
+	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
+}
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+	struct vring_desc *dp;
+	struct vq_desc_extra *dxp;
+
+	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
+	dp = &vq->vq_ring.desc[desc_idx];
+	dxp = &vq->vq_descx[desc_idx];
+
+	if (vq->vq_free_cnt == 0)
+		VQ_RING_ASSERT_CHAIN_TERM(vq);
+
+	vq->vq_free_cnt += dxp->ndescs;
+	dxp->ndescs--;
+	
+	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+		while (dp->flags & VRING_DESC_F_NEXT) {
+			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
+			dp = &vq->vq_ring.desc[dp->next];
+			dxp->ndescs--;
+		}
+	}
+	
+	VQASSERT(vq, dxp->ndescs == 0,
+	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);
+
+	/*
+	 * We must append the existing free chain, if any, to the end of
+	 * newly freed chain. If the virtqueue was completely used, then
+	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+	 */
+	dp->next = vq->vq_desc_head_idx;
+	vq->vq_desc_head_idx = desc_idx;
+}
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtqueue.h b/c/src/lib/libbsp/i386/pc386/virtio/virtqueue.h
new file mode 100644
index 0000000..9016f7d
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtqueue.h
@@ -0,0 +1,127 @@
+/**
+ * @file virqueue.h
+ * @brief 
+ */
+
+/*
+ * Authors: Jin-Hyun Kim <jinhyun at konkuk.ac.kr>, 
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>, http://sslab.konkuk.ac.kr
+ * Ported from FreeBSD to RTEMS March 16
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.NET
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/virtqueue.h 255109 2013-09-01 04:16:43Z bryanv $
+ */
+
+#ifndef _VIRTIO_VIRTQUEUE_H
+#define _VIRTIO_VIRTQUEUE_H
+
+struct virtqueue;
+#ifdef NOTUSED
+struct sglist;
+#endif
+
+/* Support for indirect buffer descriptors. */
+#define VIRTIO_RING_F_INDIRECT_DESC	(1 << 28)
+
+/* Support to suppress interrupt until specific index is reached. */
+#define VIRTIO_RING_F_EVENT_IDX		(1 << 29)
+
+/* Device callback for a virtqueue interrupt. */
+typedef void virtqueue_intr_t(void *);
+
+/*
+ * Hint on how long the next interrupt should be postponed. This is
+ * only used when the EVENT_IDX feature is negotiated.
+ */
+typedef enum {
+	VQ_POSTPONE_SHORT,
+	VQ_POSTPONE_LONG,
+	VQ_POSTPONE_EMPTIED	/* Until all available desc are used. */
+} vq_postpone_t;
+
+#define VIRTQUEUE_MAX_NAME_SZ	32
+
+/* One for each virtqueue the device wishes to allocate. */
+struct vq_alloc_info {
+	char		   vqai_name[VIRTQUEUE_MAX_NAME_SZ];
+	int		   vqai_maxindirsz;
+	virtqueue_intr_t  *vqai_intr;
+	void		  *vqai_intr_arg;
+	struct virtqueue **vqai_vq;
+};
+
+#define VQ_ALLOC_INFO_INIT(_i,_nsegs,_intr,_arg,_vqp,_str,...) do {	\
+	snprintf((_i)->vqai_name, VIRTQUEUE_MAX_NAME_SZ, _str,		\
+	    ##__VA_ARGS__);						\
+	(_i)->vqai_maxindirsz = (_nsegs);				\
+	(_i)->vqai_intr = (_intr);					\
+	(_i)->vqai_intr_arg = (_arg);					\
+	(_i)->vqai_vq = (_vqp);						\
+} while (0)
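+
+/*
+ * Usage sketch (illustrative, not part of the upstream header): a driver
+ * fills one vq_alloc_info per queue before asking the transport to allocate
+ * it, e.g. for a receive queue (callback and softc names are hypothetical):
+ *
+ *   struct vq_alloc_info info;
+ *   struct virtqueue *rx_vq;
+ *
+ *   VQ_ALLOC_INFO_INIT(&info, 0, vtnet_rx_vq_intr, sc, &rx_vq,
+ *       "%s receive", device_get_nameunit(dev));
+ *   error = vtpci_alloc_virtqueues(dev, 0, 1, &info);
+ */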
+
+uint64_t virtqueue_filter_features(uint64_t features);
+
+int	 virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
+	     int align, vm_paddr_t highaddr, struct vq_alloc_info *info,
+	     struct virtqueue **vqp);
+void	*virtqueue_drain(struct virtqueue *vq, int *last);
+void	 virtqueue_free(struct virtqueue *vq);
+int	 virtqueue_reinit(struct virtqueue *vq, uint16_t size);
+
+int	 virtqueue_intr_filter(struct virtqueue *vq);
+void	 virtqueue_intr(struct virtqueue *vq);
+int	 virtqueue_enable_intr(struct virtqueue *vq);
+int	 virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint);
+void	 virtqueue_disable_intr(struct virtqueue *vq);
+
+/* Get physical address of the virtqueue ring. */
+vm_paddr_t virtqueue_paddr(struct virtqueue *vq);
+
+int	 virtqueue_full(struct virtqueue *vq);
+int	 virtqueue_empty(struct virtqueue *vq);
+int	 virtqueue_size(struct virtqueue *vq);
+int	 virtqueue_nused(struct virtqueue *vq);
+void	 virtqueue_notify(struct virtqueue *vq);
+void	 virtqueue_dump(struct virtqueue *vq);
+
+#ifdef NOTUSED
+int	 virtqueue_enqueue(struct virtqueue *vq, void *cookie,
+	     struct sglist *sg, int readable, int writable);
+#endif
+#ifdef RTEMS_VIRTIO_NET
+int	rtems_virtqueue_enqueue(struct virtqueue *vq, void *cookie,
+	     int readable, int writable);
+#endif
+void	*virtqueue_dequeue(struct virtqueue *vq, uint32_t *len);
+void	*virtqueue_poll(struct virtqueue *vq, uint32_t *len);
+
+#endif /* _VIRTIO_VIRTQUEUE_H */
-- 
1.9.1








