[PATCH] pc386: Add virtio network driver

Jinhyun jinhyun at konkuk.ac.kr
Tue Mar 29 13:47:11 UTC 2016
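
For reference, an application would hook the driver into the legacy networking
stack through the usual configuration tables. The fragment below is only an
illustration and is not part of the patch; the interface name and the addresses
are placeholder values.

  #include <rtems/rtems_bsdnet.h>

  extern int rtems_vtnet_driver_attach( struct rtems_bsdnet_ifconfig *, int );

  static struct rtems_bsdnet_ifconfig vtnet_ifconfig = {
    .name       = "vtn1",                    /* parsed by the driver          */
    .attach     = rtems_vtnet_driver_attach, /* attach function in this patch */
    .ip_address = "10.0.2.15",               /* example address               */
    .ip_netmask = "255.255.255.0"            /* example netmask               */
  };

  struct rtems_bsdnet_config rtems_bsdnet_config = {
    .ifconfig = &vtnet_ifconfig,
    .bootp    = NULL
  };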


diff --git a/c/src/lib/libbsp/i386/pc386/Makefile.am b/c/src/lib/libbsp/i386/pc386/Makefile.am
index d9af7dd..da50c67 100644
--- a/c/src/lib/libbsp/i386/pc386/Makefile.am
+++ b/c/src/lib/libbsp/i386/pc386/Makefile.am
@@ -232,6 +232,19 @@ noinst_PROGRAMS += 3c509.rel
 3c509_rel_LDFLAGS += -Wl,--undefined=ep_board
 endif
 
+if HAS_NETWORKING
+vtnet_CPPFLAGS = -D__INSIDE_RTEMS_BSD_TCPIP_STACK__
+noinst_PROGRAMS += vtnet.rel
+vtnet_rel_SOURCES = virtio/if_vtnet.c
+vtnet_rel_SOURCES += virtio/if_vtnet.h
+vtnet_rel_SOURCES += virtio/virtio_pci.c
+vtnet_rel_SOURCES += virtio/virtio_pci.h
+vtnet_rel_SOURCES += virtio/virtio.c
+vtnet_rel_SOURCES += virtio/virtio.h
+vtnet_rel_CPPFLAGS = $(AM_CPPFLAGS) $(vtnet_CPPFLAGS)
+vtnet_rel_LDFLAGS = $(RTEMS_RELLDFLAGS)
+endif
+
 libbsp_a_LIBADD = ../../../libcpu/@RTEMS_CPU@/cache.rel
 libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/page.rel
 libbsp_a_LIBADD += ../../../libcpu/@RTEMS_CPU@/score.rel
@@ -246,6 +259,7 @@ if HAS_NETWORKING
 libbsp_a_LIBADD += ne2000.rel
 libbsp_a_LIBADD += wd8003.rel
 libbsp_a_LIBADD += 3c509.rel
+libbsp_a_LIBADD += vtnet.rel
 endif
 
 EXTRA_DIST += HOWTO
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c
new file mode 100644
index 0000000..4fcd92c
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.c
@@ -0,0 +1,1032 @@
+/**
+ * @file if_vtnet.c
+ * @brief Driver for virtio network devices
+ */
+
+/*
+ * Copyright (c) 2016 Jin-Hyun Kim <jinhyun at konkuk.ac.kr>
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>
+ * Ported from FreeBSD to RTEMS March 16, http://sslab.konkuk.ac.kr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rtems.h>
+#include <rtems/rtems_bsdnet.h>
+
+#include <bsp.h>
+#include <bsp/irq.h>
+
+#include <sys/mbuf.h>
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+
+#include <net/if.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <pcibios.h>
+
+#include "virtio.h"
+#include "virtio_pci.h"
+#include "if_vtnet.h"
+
+static struct vtnet_softc vtnet_softc;
+static rtems_interval     vtnet_ticksPerSecond;
+
+int rtems_vtnet_driver_attach(
+  struct rtems_bsdnet_ifconfig *config,
+  int                           attaching
+)
+{
+  struct vtnet_softc *sc;
+  int                 ret;
+
+  printk( "rtems_vtnet_driver_attach start\n" );
+
+  sc = &vtnet_softc;
+  memset( sc, 0, sizeof( struct vtnet_softc ) );
+
+  /* Set up timing values */
+  vtnet_ticksPerSecond = rtems_clock_get_ticks_per_second();
+
+  printk( "\tAttaching virtio pci...\n" );
+  ret = vtpci_attach( config, &sc->vtpci_softc );
+
+  if ( ret ) {
+    printk( "vtpci_attach fail...\n" );
+  }
+
+  /* Alloc virtqueues */
+  printk( "\tAllocating virtqueues...\n" );
+  ret = vtnet_alloc_virtqueues( sc );
+
+  if ( ret ) {
+    printk( "vtnet_alloc_virtqueues fail...\n" );
+  }
+
+  /* Setup interrupt */
+  printk( "\tSetup interrupt...\n" );
+  ret = vtnet_setup_intr( &sc->vtpci_softc );
+
+  if ( ret ) {
+    printk( "vtnet_setup_intr fail...\n" );
+  }
+
+  /* Setup interface */
+  printk( "\tSetup interface...\n" );
+  ret = vtnet_setup_interface( sc, config );
+
+  if ( ret ) {
+    printk( "vtnet_setup_interface fail...\n" );
+  }
+
+  printk( "rtems_vtnet_driver_attach end\n" );
+
+  return 0;
+}
+
+static int vtnet_alloc_virtqueues( struct vtnet_softc *sc )
+{
+  uint16_t val16;
+  int      size, error;
+
+  /* Init virtio_net_hdr */
+  memset( &sc->vtnet_net_hdr, 0, sizeof( struct virtio_net_hdr ) );
+
+  /* Select virtqueue 0 */
+  vtpci_io_write_2( &sc->vtpci_softc,
+    VIRTIO_PCI_QUEUE_SEL,
+    VIRTIO_PCI_QUEUE_SEL_RX );
+  val16 = vtpci_io_read_2( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_NUM );
+
+  /* Allocate the virtqueue structure; the vring memory itself is set up in
+     vtnet_alloc_virtqueue() */
+  sc->vtnet_rxvq_size = val16;
+  size = sizeof( struct virtqueue ) + val16 * sizeof( struct vq_desc_extra );
+  sc->vtnet_rxvq = malloc( size, M_DEVBUF, M_NOWAIT );
+  memset( sc->vtnet_rxvq, 0, size );
+
+  error =
+    vtnet_alloc_virtqueue( sc->vtnet_rxvq, VIRTIO_PCI_QUEUE_SEL_RX, val16 );
+
+  if ( error ) {
+    return error;
+  }
+
+  /* Select virtqueue 1 */
+  vtpci_io_write_2( &sc->vtpci_softc,
+    VIRTIO_PCI_QUEUE_SEL,
+    VIRTIO_PCI_QUEUE_SEL_TX );
+  val16 = vtpci_io_read_2( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_NUM );
+
+  /* Allocate the virtqueue structure; the vring memory itself is set up in
+     vtnet_alloc_virtqueue() */
+  sc->vtnet_txvq_size = val16;
+  size = sizeof( struct virtqueue ) + val16 * sizeof( struct vq_desc_extra );
+  sc->vtnet_txvq = malloc( size, M_DEVBUF, M_NOWAIT );
+  memset( sc->vtnet_txvq, 0, size );
+
+  error =
+    vtnet_alloc_virtqueue( sc->vtnet_txvq, VIRTIO_PCI_QUEUE_SEL_TX, val16 );
+
+  if ( error ) {
+    return error;
+  }
+
+  return 0;
+}
+
+static int vtnet_alloc_virtqueue(
+  struct virtqueue *vq,
+  uint16_t          queue_num,
+  uint16_t          queue_size
+)
+{
+  unsigned long align;
+  void         *mem;
+  int           size;
+
+  vq->vq_queue_index = queue_num;
+  vq->vq_alignment = VIRTIO_PCI_VRING_ALIGN;
+  vq->vq_nentries = queue_size;
+  vq->vq_free_cnt = queue_size;
+  vq->vq_max_indirect_size = VTNET_RX_MIN_SEGS;
+
+  align = vq->vq_alignment;
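+  /* Vring layout: descriptor table, available ring (plus used_event word),
+   * then the used ring (plus avail_event word) aligned up to vq_alignment. */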
+  size = queue_size * sizeof( struct vring_desc );
+  size += sizeof( struct vring_avail ) + ( queue_size * sizeof( uint16_t ) ) +
+          sizeof( uint16_t );
+
+  size = ( size + align - 1 ) & ~( vq->vq_alignment - 1 );
+  size += sizeof( struct vring_used ) +
+          ( queue_size * sizeof( struct vring_used_elem ) ) +
+          sizeof( uint16_t );
+
+  mem = (void *) malloc( size + align, M_DEVBUF, M_NOWAIT );
+
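+  /* malloc() gives no 4 KiB alignment guarantee, so over-allocate and round
+   * the pointer up to the vring alignment by hand. */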
+  if ( ( (unsigned long) mem % align ) > 0 ) {
+    mem =
+      (void *) ( (unsigned long) mem +
+                 ( align - ( (unsigned long) mem % align ) ) );
+  }
+
+  vq->vq_ring_size = size;
+  vq->vq_ring_mem = mem;
+  memset( vq->vq_ring_mem, 0, size );
+
+  virtqueue_disable_intr( vq );
+
+  return 0;
+}
+
+int vtnet_setup_intr( struct vtpci_softc *sc )
+{
+  uint8_t val8;
+  int     ret;
+
+  /* Get the interrupt line from PCI configuration space */
+  pcib_conf_read8( sc->pci_signature, PCI_INTERRUPT_LINE, &val8 );
+  ret = rtems_interrupt_handler_install(
+    val8,
+    NULL,
+    RTEMS_INTERRUPT_SHARED,
+    (rtems_interrupt_handler) vtnet_intr,
+    NULL );
+
+  return ret;
+}
+
+static int vtnet_setup_interface(
+  struct vtnet_softc           *sc,
+  struct rtems_bsdnet_ifconfig *config
+)
+{
+  struct ifnet *ifp;
+  int           mtu;
+
+  ifp = &sc->arpcom.ac_if;
+  ifp->if_softc = (void *) &vtnet_softc;
+
+  if ( config->mtu ) {
+    mtu = config->mtu;
+  } else {
+    mtu = ETHERMTU;
+  }
+
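+  /* The Ethernet address is hard-coded for now instead of being read from
+   * the virtio device configuration space. */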
+  sc->arpcom.ac_enaddr[ 0 ] = 0x08;
+  sc->arpcom.ac_enaddr[ 1 ] = 0x00;
+  sc->arpcom.ac_enaddr[ 2 ] = 0x27;
+  sc->arpcom.ac_enaddr[ 3 ] = 0x98;
+  sc->arpcom.ac_enaddr[ 4 ] = 0xe7;
+  sc->arpcom.ac_enaddr[ 5 ] = 0x0f;
+
+  ifp->if_softc = sc;
+  ifp->if_unit = sc->vtpci_softc.unit_number;
+  ifp->if_name = sc->vtpci_softc.unit_name;
+  ifp->if_mtu = mtu;
+  ifp->if_init = vtnet_init;
+  ifp->if_ioctl = vtnet_ioctl;
+  ifp->if_start = vtnet_start;
+  ifp->if_output = ether_output;
+  ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
+  ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
+
+  if_attach( ifp );
+  ether_ifattach( ifp );
+
+  return 0;
+}
+
+static void vtnet_init( void *xsc )
+{
+  rtems_interrupt_level level;
+  struct vtnet_softc   *sc;
+  struct ifnet         *ifp;
+  struct mbuf          *m;
+  uint32_t              val32;
+  uint16_t              val16;
+  uint8_t               val8;
+  int                   last;
+
+  sc = xsc;
+  ifp = &sc->arpcom.ac_if;
+
+  rtems_interrupt_disable( level );
+
+  if ( ifp->if_flags & IFF_RUNNING ) {
+    rtems_interrupt_enable( level );
+
+    return;
+  }
+
+  printk( "vtnet_init start\n" );
+
+  /* vtnet_stop */
+  ifp->if_flags &= ~IFF_RUNNING;
+  sc->vtnet_link_active = 0;
+  vtnet_callout_stop( sc );
+
+  /* vtnet_disable_interrupts */
+  virtqueue_disable_intr( sc->vtnet_rxvq );
+  virtqueue_disable_intr( sc->vtnet_txvq );
+
+  /* vtpci_stop */
+  vtpci_io_write_1( &sc->vtpci_softc,
+    VIRTIO_PCI_STATUS,
+    VIRTIO_CONFIG_STATUS_RESET );
+
+  /* vtnet_drain_rxtx_queues */
+  last = 0;
+
+  while ( ( m = virtqueue_drain( sc->vtnet_rxvq, &last ) ) != NULL ) {
+    m_freem( m );
+  }
+
+  last = 0;
+
+  while ( ( m = virtqueue_drain( sc->vtnet_txvq, &last ) ) != NULL ) {
+    m_freem( m );
+  }
+
+  /* vtpci_reinit */
+  val8 = vtpci_io_read_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS );
+
+  if ( val8 != VIRTIO_CONFIG_STATUS_RESET ) {
+    /* vtpci_stop */
+    vtpci_io_write_1( &sc->vtpci_softc,
+      VIRTIO_PCI_STATUS,
+      VIRTIO_CONFIG_STATUS_RESET );
+  }
+
+  /* Set the ACKNOWLEDGE and DRIVER status bits */
+  val8 = vtpci_io_read_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS );
+  val8 |= VIRTIO_CONFIG_STATUS_ACK;
+  vtpci_io_write_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS, val8 );
+  val8 = vtpci_io_read_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS );
+  val8 |= VIRTIO_CONFIG_STATUS_DRIVER;
+  vtpci_io_write_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS, val8 );
+
+  vtnet_negotiate_features( sc );
+
+  /* vtpci_init_virtqueue */
+  vtnet_init_virtqueue( sc, sc->vtnet_rxvq );
+  vtnet_init_virtqueue( sc, sc->vtnet_txvq );
+
+  vtpci_io_write_2( &sc->vtpci_softc,
+    VIRTIO_PCI_QUEUE_SEL,
+    VIRTIO_PCI_QUEUE_SEL_RX );
+  val16 = vtpci_io_read_2( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_NUM );
+
+  if ( !val16 || vtpci_io_read_4( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_PFN ) ) {
+    printk( "\tRXQ already exists!\n" );
+  }
+
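+  /* Hand the RX ring to the host: its physical address is written as a page
+   * frame number, i.e. shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT (12). */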
+  vtpci_io_write_4( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_PFN,
+    ( (uint32_t) sc->vtnet_rxvq->vq_ring_mem >>
+      VIRTIO_PCI_QUEUE_ADDR_SHIFT ) );
+
+  vtpci_io_write_2( &sc->vtpci_softc,
+    VIRTIO_PCI_QUEUE_SEL,
+    VIRTIO_PCI_QUEUE_SEL_TX );
+  val16 = vtpci_io_read_2( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_NUM );
+
+  if ( !val16 || vtpci_io_read_4( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_PFN ) ) {
+    printk( "\tTXQ already exists!\n" );
+  }
+
+  vtpci_io_write_4( &sc->vtpci_softc, VIRTIO_PCI_QUEUE_PFN,
+    ( (uint32_t) sc->vtnet_txvq->vq_ring_mem >>
+      VIRTIO_PCI_QUEUE_ADDR_SHIFT ) );
+
+  /* vtnet_init_rx_queues */
+  sc->vtnet_rx_clsize = MCLBYTES;
+  vtnet_rxq_populate( sc );
+
+  /* vtnet_init_tx_queues */
+  sc->vtnet_tx_watchdog = 0;
+
+  vq_ring_enable_interrupt( sc->vtnet_rxvq, 0 );
+  vq_ring_enable_interrupt( sc->vtnet_txvq, 0 );
+  ifp->if_flags |= IFF_RUNNING;
+
+  /* virtio_reinit_complete */
+  val8 = vtpci_io_read_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS );
+  val8 |= VIRTIO_CONFIG_STATUS_DRIVER_OK;
+  vtpci_io_write_1( &sc->vtpci_softc, VIRTIO_PCI_STATUS, val8 );
+
+  /* vtnet_update_link_status; */
+  sc->vtnet_link_active = 1;
+
+  /* start vtnet_daemon */
+  if ( sc->daemonTid == 0 ) {
+    sc->daemonTid = rtems_bsdnet_newproc( "VTNd",
+      VTNET_DAEMON_STKSIZE,
+      vtnet_daemon,
+      sc );
+  }
+
+  /* start vtnet_tick */
+  sc->stat_ch = vtnet_timeout_running;
+  /* TODO: timeout routine is not working */
+  /* timeout(vtnet_tick, sc, hz); */
+
+  printk( "vtnet_init end\n" );
+
+  rtems_interrupt_enable( level );
+}
+
+static int vtnet_rxq_eof( struct vtnet_softc *sc )
+{
+  struct virtqueue    *vq;
+  struct ifnet        *ifp;
+  struct mbuf         *m;
+  struct ether_header *eh;
+  uint32_t             len;
+  int                  deq, count;
+
+  vq = sc->vtnet_rxvq;
+  ifp = &sc->arpcom.ac_if;
+  len = 0;
+  deq = 0;
+  count = VTNET_RX_PROCESS_LIMIT;
+
+  while ( count-- > 0 ) {
+    m = virtqueue_dequeue( vq, &len );
+
+    if ( m == NULL ) {
+      break;
+    }
+
+    deq++;
+
+    if ( len < sc->vtnet_hdr_size ) {
+      vtnet_rxq_enqueue( sc, m );
+      continue;
+    }
+
+    if ( vtnet_rxq_replace( sc, m, len ) != 0 ) {
+      vtnet_rxq_enqueue( sc, m );
+      continue;
+    }
+
+    m->m_pkthdr.len = len;
+    m->m_pkthdr.rcvif = ifp;
+    m_adj( m, sizeof( struct virtio_net_hdr ) );
+
+    /* vtnet_rxq_input */
+    eh = mtod( m, struct ether_header * );
+    m_adj( m, sizeof( struct ether_header ) );
+    m->m_pkthdr.len = m->m_len;
+
+    ether_input( ifp, eh, m );
+
+    if ( ( ifp->if_flags & IFF_RUNNING ) == 0 ) {
+      break;
+    }
+  }
+
+  if ( deq > 0 ) {
+    vtpci_notify( &sc->vtpci_softc, vq );
+  }
+
+  return ( count > 0 ? 0 : EAGAIN );
+}
+
+static int vtnet_rxq_enqueue(
+  struct vtnet_softc *sc,
+  struct mbuf        *m_head
+)
+{
+  struct virtqueue     *vq;
+  struct vring_desc    *desc, *dp;
+  struct vq_desc_extra *dxp;
+  struct mbuf          *m;
+  uint16_t              head_idx, idx;
+  uint8_t               segs;
+
+  vq = sc->vtnet_rxvq;
+  desc = vq->vq_ring.desc;
+
+  if ( vq->vq_free_cnt == 0 ) {
+    return ENOSPC;
+  }
+
+  segs = 0;
+
+  for ( m = m_head; m != NULL; m = m->m_next ) {
+    segs++;
+  }
+
+  if ( segs == 0 ) {
+    return EINVAL;
+  }
+
+  if ( vq->vq_free_cnt < segs ) {
+    return EMSGSIZE;
+  }
+
+  head_idx = vq->vq_desc_head_idx;
+  dxp = &vq->vq_descx[ head_idx ];
+  dxp->cookie = m_head;
+  dxp->ndescs = segs;
+
+  for ( m = m_head, idx = head_idx; m != NULL;
+        m = m->m_next, idx = dp->next ) {
+    dp = &desc[ idx ];
+    dp->addr = (uint32_t) m->m_data;
+    dp->len = m->m_len;
+    dp->flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
+  }
+
+  dp->flags &= ~( VRING_DESC_F_NEXT );
+
+  vq->vq_desc_head_idx = idx;
+  vq->vq_free_cnt -= segs;
+
+  vq_ring_update_avail( vq, head_idx );
+
+  return 0;
+}
+
+static int vtnet_rxq_replace(
+  struct vtnet_softc *sc,
+  struct mbuf        *m,
+  int                 len
+)
+{
+  struct mbuf *m_new;
+  int          error;
+
+  if ( m->m_next == NULL ) {
+    if ( m->m_len < len ) {
+      return EINVAL;
+    }
+
+    m_new = vtnet_rxq_alloc_mbuf( sc, 1, NULL );
+
+    if ( m_new == NULL ) {
+      return ENOBUFS;
+    }
+
+    error = vtnet_rxq_enqueue( sc, m_new );
+
+    if ( error ) {
+      m_freem( m_new );
+    } else {
+      m->m_len = len;
+    }
+  } else {
+    error = vtnet_rxq_replace_nomgr( sc, m, len );
+  }
+
+  return error;
+}
+
+static struct mbuf *vtnet_rxq_alloc_mbuf(
+  struct vtnet_softc *sc,
+  int                 nbufs,
+  struct mbuf       **m_tailp
+)
+{
+  struct mbuf *m_head, *m_tail, *m;
+  int          i;
+
+  MGETHDR( m_head, M_DONTWAIT, MT_DATA );
+
+  if ( m_head == NULL ) {
+    m_freem( m_head );
+
+    return NULL;
+  }
+
+  MCLGET( m_head, M_DONTWAIT );
+
+  if ( ( m_head->m_flags & M_EXT ) == 0 ) {
+    m_freem( m_head );
+
+    return NULL;
+  }
+
+  m_head->m_len = sc->vtnet_rx_clsize;
+  m_tail = m_head;
+
+  for ( i = 1; i < nbufs; i++ ) {
+    MGETHDR( m, M_DONTWAIT, MT_DATA );
+
+    if ( m == NULL ) {
+      m_freem( m_head );
+
+      return NULL;
+    }
+
+    MCLGET( m, M_DONTWAIT );
+
+    if ( ( m->m_flags & M_EXT ) == 0 ) {
+      m_freem( m_head );
+      m_freem( m );
+
+      return NULL;
+    }
+
+    m->m_len = sc->vtnet_rx_clsize;
+    m_tail->m_next = m;
+    m_tail = m;
+  }
+
+  if ( m_tailp != NULL ) {
+    *m_tailp = m_tail;
+  }
+
+  return m_head;
+}
+
+static int vtnet_rxq_replace_nomgr(
+  struct vtnet_softc *sc,
+  struct mbuf        *m0,
+  int                 len0
+)
+{
+  struct mbuf *m, *m_prev, *m_new, *m_tail;
+  int          len, clsize, nreplace, error;
+
+  clsize = sc->vtnet_rx_clsize;
+  m_prev = NULL;
+  m_tail = NULL;
+  nreplace = 0;
+  m = m0;
+  len = len0;
+
+  while ( len > 0 ) {
+    if ( m == NULL ) {
+      return EMSGSIZE;
+    }
+
+    m->m_len = ( m->m_len > len ) ? len : m->m_len;
+    len -= m->m_len;
+
+    m_prev = m;
+    m = m->m_next;
+    nreplace++;
+  }
+
+  m_new = vtnet_rxq_alloc_mbuf( sc, nreplace, &m_tail );
+
+  if ( m_new == NULL ) {
+    m_prev->m_len = clsize;
+
+    return ENOBUFS;
+  }
+
+  if ( m_prev->m_next != NULL ) {
+    m_tail->m_next = m_prev->m_next;
+    m_prev->m_next = NULL;
+  }
+
+  error = vtnet_rxq_enqueue( sc, m_new );
+
+  if ( error ) {
+    if ( m_tail->m_next != NULL ) {
+      m_prev->m_next = m_tail->m_next;
+      m_tail->m_next = NULL;
+    }
+
+    m_prev->m_len = clsize;
+    m_freem( m_new );
+  }
+
+  return error;
+}
+
+static void vtnet_start( struct ifnet *ifp )
+{
+  rtems_interrupt_level level;
+  struct vtnet_softc   *sc;
+  struct virtqueue     *vq;
+  struct mbuf          *m;
+  int                   enq, error;
+
+  sc = &vtnet_softc;
+  vq = sc->vtnet_txvq;
+  enq = 0;
+
+  rtems_interrupt_disable( level );
+
+  if ( ( ifp->if_flags & IFF_RUNNING ) == 0 || sc->vtnet_link_active == 0 ) {
+    rtems_interrupt_enable( level );
+
+    return;
+  }
+
+  vtnet_txq_eof( sc );
+
+  while ( ifp->if_snd.ifq_head != NULL ) {
+    IF_DEQUEUE( &ifp->if_snd, m );
+
+    if ( m == NULL ) {
+      break;
+    }
+
+    if ( ( error = vtnet_txq_enqueue( sc, m ) ) != 0 ) {
+      if ( m != NULL ) {
+        IF_PREPEND( &ifp->if_snd, m );
+      }
+
+      break;
+    }
+
+    enq++;
+  }
+
+  if ( enq > 0 ) {
+    vtpci_notify( &sc->vtpci_softc, vq );
+    sc->vtnet_tx_watchdog = VTNET_TX_TIMEOUT;
+  }
+
+  rtems_interrupt_enable( level );
+}
+
+static void vtnet_txq_eof( struct vtnet_softc *sc )
+{
+  struct virtqueue *vq;
+  struct mbuf      *m;
+
+  vq = sc->vtnet_txvq;
+
+  while ( ( m = virtqueue_dequeue( vq, NULL ) ) != NULL ) {
+    m_freem( m );
+  }
+
+  if ( vq->vq_nentries == vq->vq_free_cnt ) {
+    sc->vtnet_tx_watchdog = 0;
+  }
+}
+
+static int vtnet_txq_enqueue(
+  struct vtnet_softc *sc,
+  struct mbuf        *m_head
+)
+{
+  struct virtqueue     *vq;
+  struct vring_desc    *desc, *dp;
+  struct vq_desc_extra *dxp;
+  struct mbuf          *m;
+  uint16_t              head_idx;
+  uint8_t               segs;
+
+  vq = sc->vtnet_txvq;
+  desc = vq->vq_ring.desc;
+
+  if ( vq->vq_free_cnt == 0 ) {
+    return ENOSPC;
+  }
+
+  segs = 1;
+
+  for ( m = m_head; m != NULL; m = m->m_next ) {
+    segs++;
+  }
+
+  if ( segs == 1 ) {
+    return EINVAL;
+  }
+
+  if ( vq->vq_free_cnt < segs ) {
+    return EMSGSIZE;
+  }
+
+  head_idx = vq->vq_desc_head_idx;
+  dxp = &vq->vq_descx[ head_idx ];
+  dxp->cookie = m_head;
+  dxp->ndescs = segs;
+
+  /* First desc of chain must be vtnet tx hdr */
+  dp = &desc[ head_idx ];
+  dp->addr = (uint32_t) &sc->vtnet_net_hdr;
+  dp->len = sizeof( struct virtio_net_hdr );
+  dp->flags |= VRING_DESC_F_NEXT;
+
+  /* Link the remaining mbufs into the descriptor chain */
+  for ( m = m_head; m != NULL; m = m->m_next ) {
+    dp = &desc[ dp->next ];
+    dp->addr = mtod( m, uint32_t );
+    dp->len = m->m_len;
+    dp->flags |= VRING_DESC_F_NEXT;
+  }
+
+  dp->flags &= ~( VRING_DESC_F_NEXT );
+
+  vq->vq_desc_head_idx = dp->next;
+  vq->vq_free_cnt -= segs;
+
+  vq_ring_update_avail( vq, head_idx );
+
+  return 0;
+}
+
+static void vtnet_negotiate_features( struct vtnet_softc *sc )
+{
+  uint32_t host_features;
+  uint32_t guest_features;
+
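+  /* Read the host's feature bits, keep only those this driver supports, and
+   * write the result back as the guest features. */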
+  host_features =
+    vtpci_io_read_4( &sc->vtpci_softc, VIRTIO_PCI_HOST_FEATURES );
+  guest_features = host_features & 0xfffff;
+  guest_features &= VTNET_FEATURES;
+
+  vtpci_io_write_4( &sc->vtpci_softc, VIRTIO_PCI_GUEST_FEATURES,
+    guest_features );
+  guest_features =
+    vtpci_io_read_4( &sc->vtpci_softc, VIRTIO_PCI_GUEST_FEATURES );
+
+  printk( "\tHost_features:\t0x%08x\n", host_features );
+  printk( "\tGuest_features:\t0x%08x\n", guest_features );
+
+  sc->vtnet_hdr_size = sizeof( struct virtio_net_hdr ) + ETHER_HDR_LEN;
+}
+
+static void vtnet_callout_stop( struct vtnet_softc *sc )
+{
+  if ( sc->stat_ch == vtnet_timeout_running ) {
+    sc->stat_ch = vtnet_timeout_stop_rq;
+
+    while ( sc->stat_ch != vtnet_timeout_stopped ) {
+      rtems_bsdnet_semaphore_release();
+      rtems_task_wake_after( vtnet_ticksPerSecond );
+      rtems_bsdnet_semaphore_obtain();
+    }
+  }
+}
+
+static void vtnet_callout_reset( struct vtnet_softc *sc )
+{
+  if ( sc->stat_ch == vtnet_timeout_running ) {
+    timeout( vtnet_tick, sc, hz );
+  } else if ( sc->stat_ch == vtnet_timeout_stop_rq ) {
+    sc->stat_ch = vtnet_timeout_stopped;
+  }
+}
+
+static int vtnet_init_virtqueue(
+  struct vtnet_softc *sc,
+  struct virtqueue   *vq
+)
+{
+  struct vq_desc_extra *dxp;
+  int                   i;
+
+  vq->vq_desc_head_idx = 0;
+  vq->vq_used_cons_idx = 0;
+  vq->vq_queued_cnt = 0;
+  vq->vq_free_cnt = vq->vq_nentries;
+
+  memset( vq->vq_ring_mem, 0, vq->vq_ring_size );
+
+  for ( i = 0; i < vq->vq_nentries; i++ ) {
+    dxp = &vq->vq_descx[ i ];
+    dxp->cookie = NULL;
+    dxp->ndescs = 0;
+    dxp->indirect = NULL;
+  }
+
+  vq_ring_init( vq );
+  virtqueue_disable_intr( vq );
+
+  return 0;
+}
+
+static int vtnet_rxq_populate( struct vtnet_softc *sc )
+{
+  struct virtqueue *rxvq;
+  struct mbuf      *m;
+  int               nbufs, error = 0;
+
+  rxvq = sc->vtnet_rxvq;
+
+  for ( nbufs = 0; rxvq->vq_free_cnt; nbufs++ ) {
+    m = vtnet_rxq_alloc_mbuf( sc, 1, NULL );
+
+    if ( m == NULL ) {
+      return ENOBUFS;
+    }
+
+    error = vtnet_rxq_enqueue( sc, m );
+
+    if ( error ) {
+      m_freem( m );
+    }
+  }
+
+  if ( nbufs > 0 ) {
+    vtpci_notify( &sc->vtpci_softc, rxvq );
+
+    if ( error == EMSGSIZE ) {
+      error = 0;
+    }
+  }
+
+  return error;
+}
+
+static int vtnet_ioctl(
+  struct ifnet   *ifp,
+  ioctl_command_t cmd,
+  caddr_t         data
+)
+{
+  int error;
+
+  error = 0;
+
+  switch ( cmd ) {
+    case SIOCSIFMTU:
+      /* TODO: vtnet_change_mtu */
+      break;
+    case SIOCSIFFLAGS:
+      vtnet_init( ifp->if_softc );
+      break;
+    case SIOCADDMULTI:
+    case SIOCDELMULTI:
+      /* TODO: vtnet_rx_filter_mac */
+      break;
+    default:
+      error = ether_ioctl( ifp, cmd, data );
+      break;
+  }
+
+  return error;
+}
+
+static void vtnet_tick( void *xsc )
+{
+  struct vtnet_softc *sc;
+  struct ifnet       *ifp;
+  int                 timedout;
+
+  sc = &vtnet_softc;
+  ifp = &sc->arpcom.ac_if;
+
+  timedout = vtnet_watchdog( sc );
+
+  if ( timedout != 0 ) {
+    ifp->if_flags &= ~IFF_RUNNING;
+    vtnet_init( sc );
+  } else {
+    vtnet_callout_reset( sc );
+  }
+}
+
+static int vtnet_watchdog( struct vtnet_softc *sc )
+{
+  if ( sc->vtnet_tx_watchdog == 0 || --sc->vtnet_tx_watchdog ) {
+    return 0;
+  }
+
+  return 1;
+}
+
+static rtems_isr vtnet_intr( rtems_vector_number v )
+{
+  struct vtnet_softc *sc;
+  struct ifnet       *ifp;
+  uint8_t             isr;
+
+  sc = &vtnet_softc;
+  ifp = &sc->arpcom.ac_if;
+
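+  /* On the legacy virtio PCI interface, reading the ISR register also
+   * acknowledges (clears) the interrupt. */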
+  isr = vtpci_io_read_1( &sc->vtpci_softc, VIRTIO_PCI_ISR );
+
+  if ( isr & VIRTIO_PCI_ISR_INTR ) {
+    if ( ( ifp->if_flags & IFF_RUNNING ) == 0 ) {
+      return;
+    }
+
+    rtems_bsdnet_event_send( sc->daemonTid, RTEMS_EVENT_1 );
+  } else if ( isr & VIRTIO_PCI_ISR_CONFIG ) {
+    /*
+     *	TODO: Handling of config interrupt
+     */
+  }
+}
+
+static void vtnet_daemon( void *xsc )
+{
+  struct vtnet_softc *sc;
+  struct ifnet       *ifp;
+  struct virtqueue   *rxvq;
+  struct virtqueue   *txvq;
+  int                 more, tries;
+  rtems_event_set     events;
+
+  sc = &vtnet_softc;
+  ifp = &sc->arpcom.ac_if;
+  rxvq = sc->vtnet_rxvq;
+  txvq = sc->vtnet_txvq;
+
+  while ( 1 ) {
+    rtems_bsdnet_event_receive( RTEMS_EVENT_1,
+      RTEMS_WAIT | RTEMS_EVENT_ANY,
+      RTEMS_NO_TIMEOUT,
+      &events );
+
+    /* tx intr */
+    tries = 0;
+againtx:
+    vtnet_txq_eof( sc );
+
+    if ( ifp->if_snd.ifq_head != NULL ) {
+      vtnet_start( ifp );
+    }
+
+    if ( virtqueue_postpone_intr( txvq, VQ_POSTPONE_LONG ) != 0 ) {
+      virtqueue_disable_intr( txvq );
+
+      if ( tries++ < VTNET_INTR_DISABLE_RETRIES ) {
+        goto againtx;
+      }
+    }
+
+    /* rx intr */
+    tries = 0;
+againrx:
+    more = vtnet_rxq_eof( sc );
+
+    if ( more || vq_ring_enable_interrupt( rxvq, 0 ) != 0 ) {
+      if ( !more ) {
+        virtqueue_disable_intr( rxvq );
+      }
+
+      if ( tries++ < VTNET_INTR_DISABLE_RETRIES ) {
+        goto againrx;
+      }
+    }
+  }
+}
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.h b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.h
new file mode 100644
index 0000000..7f81bad
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/if_vtnet.h
@@ -0,0 +1,181 @@
+/**
+ * @file if_vtnet.h
+ * @brief Header for if_vtnet.c
+ */
+
+/*
+ * Copyright (c) 2016 Jin-Hyun Kim <jinhyun at konkuk.ac.kr>
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>
+ * Ported from FreeBSD to RTEMS March 16, http://sslab.konkuk.ac.kr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*-
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/network/virtio_net.h 255111 2013-09-01 04:23:54Z bryanv $
+ */
+
+#ifndef _IF_VTNET_H_
+#define _IF_VTNET_H_
+
+/* The feature bitmap for virtio net */
+#define VIRTIO_NET_F_CSUM 0x00001       /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 0x00002 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MAC 0x00020        /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO 0x00040        /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4 0x00080 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 0x00100 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 0x00200  /* Guest can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_GUEST_UFO 0x00400  /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 0x00800  /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 0x01000  /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 0x02000   /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 0x04000   /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 0x08000  /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 0x10000     /* virtio_net_config.status available */
+#define VIRTIO_NET_F_CTRL_VQ 0x20000    /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 0x40000    /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 0x80000  /* Control channel VLAN filtering */
+
+#define VTNET_FEATURES \
+  ( VIRTIO_NET_F_CSUM | \
+    VIRTIO_NET_F_MAC | \
+    VIRTIO_NET_F_GSO | \
+    VIRTIO_NET_F_HOST_TSO4 | \
+    VIRTIO_NET_F_HOST_TSO6 | \
+    VIRTIO_NET_F_HOST_ECN | \
+    VIRTIO_NET_F_HOST_UFO )
+
+#define VTNET_RX_MIN_SEGS 2
+#define VTNET_RX_PROCESS_LIMIT 512
+#define VTNET_TX_TIMEOUT 5
+#define VTNET_INTR_DISABLE_RETRIES 4
+
+#define VTNET_DAEMON_STKSIZE 4096
+
+struct virtio_net_hdr {
+  uint8_t flags;
+  uint8_t gso_type;
+  uint16_t hdr_len;
+  uint16_t gso_size;
+  uint16_t csum_start;
+  uint16_t csum_offset;
+};
+
+struct vtnet_tx_header {
+  struct virtio_net_hdr hdr;
+  struct mbuf *vth_mbuf;
+};
+
+struct vtnet_softc {
+  struct arpcom arpcom;
+
+  struct vtpci_softc vtpci_softc;
+  struct virtqueue *vtnet_rxvq;
+  struct virtqueue *vtnet_txvq;
+
+  rtems_id daemonTid;
+
+  struct virtio_net_hdr vtnet_net_hdr;
+  int vtnet_rxvq_size;
+  int vtnet_txvq_size;
+
+  int vtnet_link_active;
+  int vtnet_hdr_size;
+  int vtnet_rx_clsize;
+  int vtnet_tx_watchdog;
+
+  enum { vtnet_timeout_stopped, vtnet_timeout_running, vtnet_timeout_stop_rq }
+  stat_ch;
+};
+
+static int vtnet_alloc_virtqueues( struct vtnet_softc *sc );
+static int vtnet_alloc_virtqueue(
+  struct virtqueue *vq,
+  uint16_t          num,
+  uint16_t          queue_size
+);
+
+static int vtnet_setup_interface(
+  struct vtnet_softc           *sc,
+  struct rtems_bsdnet_ifconfig *config
+);
+static void vtnet_init( void *xsc );
+static void vtnet_negotiate_features( struct vtnet_softc *sc );
+static int vtnet_init_virtqueue(
+  struct vtnet_softc *sc,
+  struct virtqueue   *vq
+);
+static void vtnet_start( struct ifnet *ifp );
+static int vtnet_ioctl(
+  struct ifnet   *ifp,
+  ioctl_command_t cmd,
+  caddr_t         data
+);
+
+static int vtnet_setup_intr( struct vtpci_softc *sc );
+static rtems_isr vtnet_intr( rtems_vector_number v );
+static void vtnet_daemon( void *xsc );
+
+static void vtnet_tick( void *xsc );
+static int vtnet_watchdog( struct vtnet_softc *sc );
+static void vtnet_callout_stop( struct vtnet_softc *sc );
+static void vtnet_callout_reset( struct vtnet_softc *sc );
+
+static int vtnet_rxq_populate( struct vtnet_softc *sc );
+static int vtnet_rxq_eof( struct vtnet_softc *sc );
+static int vtnet_rxq_enqueue(
+  struct vtnet_softc *sc,
+  struct mbuf        *m_head
+);
+static int vtnet_rxq_replace(
+  struct vtnet_softc *sc,
+  struct mbuf        *m,
+  int                 len
+);
+static struct mbuf *vtnet_rxq_alloc_mbuf(
+  struct vtnet_softc *sc,
+  int                 nbufs,
+  struct mbuf       **m_tailp
+);
+static int vtnet_rxq_replace_nomgr(
+  struct vtnet_softc *sc,
+  struct mbuf        *m0,
+  int                 len0
+);
+
+static void vtnet_txq_eof( struct vtnet_softc *sc );
+static int vtnet_txq_enqueue(
+  struct vtnet_softc *sc,
+  struct mbuf        *m_head
+);
+
+#endif /* _IF_VTNET_H_ */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio.c b/c/src/lib/libbsp/i386/pc386/virtio/virtio.c
new file mode 100644
index 0000000..85251fa
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio.c
@@ -0,0 +1,220 @@
+/**
+ * @file virtio.c
+ * @brief Functions for virtqueue, vring
+ */
+
+/*
+ * Copyright (c) 2016 Jin-Hyun Kim <jinhyun at konkuk.ac.kr>
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>
+ * Ported from FreeBSD to RTEMS March 16, http://sslab.konkuk.ac.kr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems.h>
+
+#include "virtio.h"
+
+void *virtqueue_dequeue(
+  struct virtqueue *vq,
+  uint32_t         *len
+)
+{
+  struct vring_used_elem *uep;
+  void                   *cookie;
+  uint16_t                used_idx, desc_idx;
+
+  if ( vq->vq_used_cons_idx == vq->vq_ring.used->idx ) {
+    return NULL;
+  }
+
+  used_idx = vq->vq_used_cons_idx++ & ( vq->vq_nentries - 1 );
+  uep = &vq->vq_ring.used->ring[ used_idx ];
+
+  rmb();
+
+  desc_idx = (uint16_t) uep->id;
+
+  if ( len != NULL ) {
+    *len = uep->len;
+  }
+
+  vq_ring_free_chain( vq, desc_idx );
+
+  cookie = vq->vq_descx[ desc_idx ].cookie;
+  vq->vq_descx[ desc_idx ].cookie = NULL;
+
+  return cookie;
+}
+
+void *virtqueue_drain(
+  struct virtqueue *vq,
+  int              *last
+)
+{
+  void *cookie;
+  int   idx;
+
+  cookie = NULL;
+  idx = *last;
+
+  while ( idx < vq->vq_nentries && cookie == NULL ) {
+    if ( ( cookie = vq->vq_descx[ idx ].cookie ) != NULL ) {
+      vq->vq_descx[ idx ].cookie = NULL;
+      vq_ring_free_chain( vq, idx );
+    }
+
+    idx++;
+  }
+
+  *last = idx;
+
+  return cookie;
+}
+
+void virtqueue_disable_intr( struct virtqueue *vq )
+{
+  vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+int virtqueue_postpone_intr(
+  struct virtqueue *vq,
+  vq_postpone_t     hint
+)
+{
+  uint16_t ndesc, avail_idx;
+
+  avail_idx = vq->vq_ring.avail->idx;
+  ndesc = (uint16_t) ( avail_idx - vq->vq_used_cons_idx );
+
+  switch ( hint ) {
+    case VQ_POSTPONE_SHORT:
+      ndesc = ndesc / 4;
+      break;
+    case VQ_POSTPONE_LONG:
+      ndesc = ( ndesc * 3 ) / 4;
+      break;
+    case VQ_POSTPONE_EMPTIED:
+      break;
+  }
+
+  return vq_ring_enable_interrupt( vq, ndesc );
+}
+
+void vq_ring_init( struct virtqueue *vq )
+{
+  struct vring *vr;
+  uint8_t      *ring_mem;
+  int           i, size;
+
+  ring_mem = vq->vq_ring_mem;
+  size = vq->vq_nentries;
+  vr = &vq->vq_ring;
+
+  vr->num = size;
+  vr->desc = (struct vring_desc *) ring_mem;
+  vr->avail =
+    (struct vring_avail *) ( ring_mem + size * sizeof( struct vring_desc ) );
+  vr->used = (void *)
+             ( ( (unsigned long) &vr->avail->ring[ size ] + vq->vq_alignment -
+                 1 ) & ~( vq->vq_alignment - 1 ) );
+
+  for ( i = 0; i < size - 1; i++ ) {
+    vr->desc[ i ].next = i + 1;
+  }
+
+  vr->desc[ i ].next = VQ_RING_DESC_CHAIN_END;
+}
+
+void vq_ring_free_chain(
+  struct virtqueue *vq,
+  uint16_t          desc_idx
+)
+{
+  struct vring_desc    *dp;
+  struct vq_desc_extra *dxp;
+
+  dp = &vq->vq_ring.desc[ desc_idx ];
+  dxp = &vq->vq_descx[ desc_idx ];
+
+  vq->vq_free_cnt += dxp->ndescs;
+  dxp->ndescs--;
+
+  while ( dp->flags & VRING_DESC_F_NEXT ) {
+    dp = &vq->vq_ring.desc[ dp->next ];
+    dxp->ndescs--;
+  }
+
+  dp->next = vq->vq_desc_head_idx;
+  vq->vq_desc_head_idx = desc_idx;
+}
+
+void vq_ring_update_avail(
+  struct virtqueue *vq,
+  uint16_t          desc_idx
+)
+{
+  uint16_t avail_idx;
+
+  avail_idx = vq->vq_ring.avail->idx & ( vq->vq_nentries - 1 );
+  vq->vq_ring.avail->ring[ avail_idx ] = desc_idx;
+
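+  /* Make the descriptor writes visible to the host before publishing the new
+   * available index. */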
+  wmb();
+
+  vq->vq_ring.avail->idx++;
+  vq->vq_queued_cnt++;
+}
+
+int vq_ring_must_notify_host( struct virtqueue *vq )
+{
+  return ( ( vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY ) == 0 );
+}
+
+int vq_ring_enable_interrupt(
+  struct virtqueue *vq,
+  uint16_t          ndesc
+)
+{
+  uint16_t used_idx, nused;
+
+  vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+
+  mb();
+
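+  /* Report whether more than ndesc used entries are already pending so the
+   * caller can process them instead of waiting for an interrupt. */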
+  used_idx = vq->vq_ring.used->idx;
+  nused = (uint16_t) ( used_idx - vq->vq_used_cons_idx );
+
+  if ( nused > ndesc ) {
+    return 1;
+  }
+
+  return 0;
+}
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio.h b/c/src/lib/libbsp/i386/pc386/virtio/virtio.h
new file mode 100644
index 0000000..47a61e8
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio.h
@@ -0,0 +1,167 @@
+/**
+ * @file virtio.h
+ * @brief Header for virtio.c
+ */
+
+/*
+ * Copyright (c) 2016 Jin-Hyun Kim <jinhyun at konkuk.ac.kr>
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>
+ * Ported from FreeBSD to RTEMS March 16, http://sslab.konkuk.ac.kr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*-
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/virtio.h 252708 2013-07-04 17:59:09Z bryanv $
+ */
+
+#ifndef _VIRTIO_H_
+#define _VIRTIO_H_
+
+#define mb() asm volatile ( "lock; addl $0,0(%%esp) " ::: "memory" )
+#define rmb() mb()
+#define wmb() asm volatile ( "lock; addl $0, (%%esp)" ::: "memory", "cc" )
+
+#define vring_used_event( vr ) ( ( vr )->avail->ring[ ( vr )->num ] )
+#define vring_avail_event( vr ) ( *(uint16_t *) &( vr )->used->ring[ ( vr )-> \
+                                                                     num ] )
+#define vring_need_event( event_idx, new_idx, old ) \
+  ( (uint16_t) ( new_idx - event_idx - 1 ) < (uint16_t) ( new_idx - old ) )
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_VENDOR_ID 0x1AF4
+#define VIRTIO_DEVICE_ID_MIN 0x1000
+#define VIRTIO_DEVICE_ID_MAX 0x1040
+
+#define VRING_DESC_F_NEXT 1
+#define VRING_DESC_F_WRITE 2
+#define VRING_DESC_F_INDIRECT 4
+#define VRING_USED_F_NO_NOTIFY 1
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+#define VQ_RING_DESC_CHAIN_END 32768
+
+typedef enum {
+  VQ_POSTPONE_SHORT,
+  VQ_POSTPONE_LONG,
+  VQ_POSTPONE_EMPTIED
+} vq_postpone_t;
+
+struct vring_desc {
+  uint64_t addr;
+  uint32_t len;
+  uint16_t flags;
+  uint16_t next;
+};
+
+struct vring_avail {
+  uint16_t flags;
+  uint16_t idx;
+  uint16_t ring[];
+};
+
+struct vring_used_elem {
+  uint32_t id;
+  uint32_t len;
+};
+
+struct vring_used {
+  uint16_t flags;
+  uint16_t idx;
+  struct vring_used_elem ring[];
+};
+
+struct vring {
+  unsigned int num;
+  struct vring_desc *desc;
+  struct vring_avail *avail;
+  struct vring_used *used;
+};
+
+struct vq_desc_extra {
+  void *cookie;
+  struct vring_desc *indirect;
+  uint32_t indirect_paddr;
+  uint16_t ndescs;
+};
+
+struct virtqueue {
+  uint16_t vq_queue_index;
+  uint16_t vq_nentries;
+  uint32_t vq_flags;
+
+  uint16_t vq_free_cnt;
+  uint16_t vq_queued_cnt;
+
+  int vq_alignment;
+  int vq_ring_size;
+
+  struct vring vq_ring;
+  void *vq_ring_mem;
+  int vq_max_indirect_size;
+  int vq_indirect_mem_size;
+
+  uint16_t vq_desc_head_idx;
+  uint16_t vq_used_cons_idx;
+
+  struct vq_desc_extra vq_descx[ 0 ];
+};
+
+void *virtqueue_dequeue(
+  struct virtqueue *vq,
+  uint32_t         *len
+);
+void *virtqueue_drain(
+  struct virtqueue *vq,
+  int              *last
+);
+void virtqueue_disable_intr( struct virtqueue *vq );
+int virtqueue_postpone_intr(
+  struct virtqueue *vq,
+  vq_postpone_t     hint
+);
+
+void vq_ring_init( struct virtqueue *vq );
+void vq_ring_free_chain(
+  struct virtqueue *vq,
+  uint16_t          desc_idx
+);
+void vq_ring_update_avail(
+  struct virtqueue *vq,
+  uint16_t          desc_idx
+);
+int vq_ring_must_notify_host( struct virtqueue *vq );
+int vq_ring_enable_interrupt(
+  struct virtqueue *vq,
+  uint16_t          ndesc
+);
+
+#endif /* _VIRTIO_H_ */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c
new file mode 100644
index 0000000..8ba9365
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.c
@@ -0,0 +1,176 @@
+/**
+ * @file virtio_pci.c
+ * @brief Driver for the virtio PCI interface
+ */
+
+/*
+ * Copyright (c) 2016 Jin-Hyun Kim <jinhyun at konkuk.ac.kr>
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>
+ * Ported from FreeBSD to RTEMS March 16, http://sslab.konkuk.ac.kr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*-
+ * Copyright (c) 2011, Bryan Venteicher <bryanv at FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Driver for the VirtIO PCI interface. */
+
+#ifdef __i386__
+
+#include <rtems.h>
+#include <rtems/rtems_bsdnet.h>
+#include <bsp.h>
+
+#include <pcibios.h>
+
+#include "virtio.h"
+#include "virtio_pci.h"
+
+__inline uint8_t vtpci_io_read_1(
+  struct vtpci_softc *sc,
+  int                 reg
+)
+{
+  uint8_t val;
+
+  inport_byte( sc->pci_io_base + reg, val );
+
+  return val;
+}
+__inline uint16_t vtpci_io_read_2(
+  struct vtpci_softc *sc,
+  int                 reg
+)
+{
+  uint16_t val;
+
+  inport_word( sc->pci_io_base + reg, val );
+
+  return val;
+}
+__inline uint32_t vtpci_io_read_4(
+  struct vtpci_softc *sc,
+  int                 reg
+)
+{
+  uint32_t val;
+
+  inport_long( sc->pci_io_base + reg, val );
+
+  return val;
+}
+__inline void vtpci_io_write_1(
+  struct vtpci_softc *sc,
+  int                 reg,
+  uint8_t             val
+)
+{
+  outport_byte( sc->pci_io_base + reg, val );
+}
+__inline void vtpci_io_write_2(
+  struct vtpci_softc *sc,
+  int                 reg,
+  uint16_t            val
+)
+{
+  outport_word( sc->pci_io_base + reg, val );
+}
+__inline void vtpci_io_write_4(
+  struct vtpci_softc *sc,
+  int                 reg,
+  uint32_t            val
+)
+{
+  outport_long( sc->pci_io_base + reg, val );
+}
+
+int vtpci_attach(
+  struct rtems_bsdnet_ifconfig *config,
+  struct vtpci_softc           *sc
+)
+{
+  int      i, ret;
+  uint8_t  val8;
+  uint16_t val16;
+  uint32_t val32;
+
+  /* Parse NIC_NAME & Init structures */
+  if ( ( sc->unit_number =
+           rtems_bsdnet_parse_driver_name( config, &sc->unit_name ) ) < 0 ) {
+    return 0;
+  }
+
+  /* Find device on pci bus */
+  {
+    int pbus, pdev, pfun;
+
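+    /* Scan the whole legacy virtio device ID range rather than a single
+     * fixed network device ID. */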
+    for ( i = VIRTIO_DEVICE_ID_MIN; i < VIRTIO_DEVICE_ID_MAX; i++ ) {
+      ret = pci_find_device( VIRTIO_VENDOR_ID, i, sc->unit_number,
+        &pbus, &pdev, &pfun );
+
+      if ( ret == PCIB_ERR_SUCCESS ) {
+        sc->pci_signature = PCIB_DEVSIG_MAKE( pbus, pdev, pfun );
+        break;
+      }
+    }
+  }
+
+  /* Enable bus mastering */
+  pcib_conf_read16( sc->pci_signature, PCI_COMMAND, &val16 );
+  val16 |= PCI_COMMAND_MASTER;
+  pcib_conf_write16( sc->pci_signature, PCI_COMMAND, val16 );
+
+  /* Set latency timer */
+  pcib_conf_read8( sc->pci_signature, PCI_LATENCY_TIMER, &val8 );
+  val8 |= 0x00;
+  pcib_conf_write8( sc->pci_signature, PCI_LATENCY_TIMER, val8 );
+
+  /* Get IO Address */
+  pcib_conf_read32( sc->pci_signature, PCI_BASE_ADDRESS_0, &val32 );
+  val32 &= PCI_BASE_ADDRESS_IO_MASK;
+  sc->pci_io_base = val32;
+
+  return 0;
+}
+
+void vtpci_notify(
+  struct vtpci_softc *sc,
+  struct virtqueue   *vq
+)
+{
+  mb();
+
+  if ( vq_ring_must_notify_host( vq ) ) {
+    vtpci_io_write_2( sc, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index );
+  }
+
+  vq->vq_queued_cnt = 0;
+}
+
+#endif /* __i386__ */
diff --git a/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h
new file mode 100644
index 0000000..7d0db9e
--- /dev/null
+++ b/c/src/lib/libbsp/i386/pc386/virtio/virtio_pci.h
@@ -0,0 +1,131 @@
+/**
+ * @file virtio_pci.h
+ * @brief Header for virtio_pci.c
+ */
+
+/*
+ * Copyright (c) 2016 Jin-Hyun Kim <jinhyun at konkuk.ac.kr>
+ *   and Hyun-Wook Jin <jinh at konkuk.ac.kr>
+ * Ported from FreeBSD to RTEMS March 16, http://sslab.konkuk.ac.kr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/*-
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ *  Anthony Liguori  <aliguori at us.ibm.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/10.0.0/sys/dev/virtio/pci/virtio_pci.h 238360 2012-07-11 02:57:19Z grehan $
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+/* VirtIO Header, located in BAR 0. */
+#define VIRTIO_PCI_HOST_FEATURES 0   /* host's supported features (32bit, RO) */
+#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8       /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12      /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14      /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16   /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18         /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19            /* interrupt status register; reading also clears it (8, RO) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+#define VIRTIO_PCI_QUEUE_SEL_RX 0
+#define VIRTIO_PCI_QUEUE_SEL_TX 1
+
+#define VIRTIO_PCI_ISR_INTR 0x1
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* How many bits to shift physical queue address written to QUEUE_PFN */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+struct vtpci_softc {
+  int unit_number;
+  char *unit_name;
+
+  int pci_signature;
+  uint32_t pci_io_base;
+};
+
+__inline uint8_t vtpci_io_read_1(
+  struct vtpci_softc *sc,
+  int                 reg
+);
+__inline uint16_t vtpci_io_read_2(
+  struct vtpci_softc *sc,
+  int                 reg
+);
+__inline uint32_t vtpci_io_read_4(
+  struct vtpci_softc *sc,
+  int                 reg
+);
+__inline void vtpci_io_write_1(
+  struct vtpci_softc *sc,
+  int                 reg,
+  uint8_t             val
+);
+__inline void vtpci_io_write_2(
+  struct vtpci_softc *sc,
+  int                 reg,
+  uint16_t            val
+);
+__inline void vtpci_io_write_4(
+  struct vtpci_softc *sc,
+  int                 reg,
+  uint32_t            val
+);
+int vtpci_attach(
+  struct rtems_bsdnet_ifconfig *config,
+  struct vtpci_softc           *sc
+);
+void vtpci_notify(
+  struct vtpci_softc *sc,
+  struct virtqueue   *vq
+);
+
+#endif /* _VIRTIO_PCI_H_ */




