/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <md5.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"

#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	32

/*
 * PCI config-space register offsets
 */
#define VTNET_R_CFG0	24
#define VTNET_R_CFG1	25
#define VTNET_R_CFG2	26
#define VTNET_R_CFG3	27
#define VTNET_R_CFG4	28
#define VTNET_R_CFG5	29
#define VTNET_R_CFG6	30
#define VTNET_R_CFG7	31
#define VTNET_R_MAX	31

#define VTNET_REGSZ	(VTNET_R_MAX + 1)

#define VTNET_S_HOSTCAPS      \
  ( 0x00000020 |	/* host supplies MAC */ \
    0x00008000 |	/* host can merge Rx buffers */ \
    0x00010000 )	/* config status available */
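
/*
 * These correspond to the legacy virtio feature bits VIRTIO_NET_F_MAC
 * (bit 5), VIRTIO_NET_F_MRG_RXBUF (bit 15) and VIRTIO_NET_F_STATUS
 * (bit 16).
 */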

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2

#define VTNET_MAXQ	3

static int use_msix = 1;

struct vring_hqueue {
	/* Internal state */
	uint16_t	hq_size;
	uint16_t	hq_cur_aidx;		/* trails behind 'avail_idx' */

	/* Host-context pointers to the queue */
	struct virtio_desc *hq_dtable;
	uint16_t	*hq_avail_flags;
	uint16_t	*hq_avail_idx;		/* monotonically increasing */
	uint16_t	*hq_avail_ring;

	uint16_t	*hq_used_flags;
	uint16_t	*hq_used_idx;		/* monotonically increasing */
	struct virtio_used *hq_used_ring;
};

/*
 * Fixed network header size
 */
struct virtio_net_rxhdr {
	uint8_t		vrh_flags;
	uint8_t		vrh_gso_type;
	uint16_t	vrh_hdr_len;
	uint16_t	vrh_gso_size;
	uint16_t	vrh_csum_start;
	uint16_t	vrh_csum_offset;
	uint16_t	vrh_bufs;
} __packed;
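
/*
 * This layout matches struct virtio_net_hdr_mrg_rxbuf from the virtio
 * spec: the trailing vrh_bufs ("num_buffers") field is present because
 * VTNET_S_HOSTCAPS advertises mergeable rx buffers.
 */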

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) printf params
#define WPRINTF(params) printf params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct pci_devinst *vsc_pi;
	pthread_mutex_t vsc_mtx;
	struct mevent	*vsc_mevp;

	int		vsc_curq;
	int		vsc_status;
	int		vsc_isr;
	int		vsc_tapfd;
	int		vsc_rx_ready;
	int		resetting;

	uint32_t	vsc_features;
	uint8_t		vsc_macaddr[6];

	uint64_t	vsc_pfn[VTNET_MAXQ];
	struct vring_hqueue vsc_hq[VTNET_MAXQ];
	uint16_t	vsc_msix_table_idx[VTNET_MAXQ];

	pthread_mutex_t	rx_mtx;
	int		rx_in_progress;

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;
};
#define	vtnet_ctx(sc)	((sc)->vsc_pi->pi_vmctx)

/*
 * Return the size of the IO BAR that maps the virtio header and the
 * device-specific region. The size varies depending on whether MSI-X
 * is enabled or not.
 */
static uint64_t
pci_vtnet_iosize(struct pci_devinst *pi)
{
	if (pci_msix_enabled(pi))
		return (VTNET_REGSZ);
	else
		return (VTNET_REGSZ - (VTCFG_R_CFG1 - VTCFG_R_MSIX));
}

/*
 * Return the number of available descriptors in the vring taking care
 * of the 16-bit index wraparound.
 */
static int
hq_num_avail(struct vring_hqueue *hq)
{
	uint16_t ndesc;

	/*
	 * We're just computing (a-b) mod 2^16
	 *
	 * The only glitch here is that in standard C,
	 * uint16_t promotes to (signed) int when int has
	 * more than 16 bits (pretty much always now), so
	 * we have to force it back to unsigned.
	 */
	ndesc = (unsigned)*hq->hq_avail_idx - (unsigned)hq->hq_cur_aidx;

	assert(ndesc <= hq->hq_size);

	return (ndesc);
}
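
/*
 * A worked example of the wraparound arithmetic above: if the guest's
 * avail index has wrapped to 3 while hq_cur_aidx is still 65534, the
 * subtraction yields 3 - 65534, and truncating the result back to
 * uint16_t gives (3 - 65534) mod 65536 = 5, i.e. the five entries at
 * ring positions 65534, 65535, 0, 1 and 2.
 */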

static int
pci_vtnet_qsize(int qnum)
{
	/* XXX no ctl queue currently */
	if (qnum == VTNET_CTLQ) {
		return (0);
	}

	/* XXX fixed currently. Maybe different for tx/rx/ctl */
	return (VTNET_RINGSZ);
}

static void
pci_vtnet_ring_reset(struct pci_vtnet_softc *sc, int ring)
{
	struct vring_hqueue *hq;

	assert(ring < VTNET_MAXQ);

	hq = &sc->vsc_hq[ring];

	/*
	 * Reset all soft state
	 */
	hq->hq_cur_aidx = 0;
}

/*
 * If the transmit thread is active then stall until it is done.
 */
static void
pci_vtnet_txwait(struct pci_vtnet_softc *sc)
{
	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * If the receive thread is active then stall until it is done.
 */
static void
pci_vtnet_rxwait(struct pci_vtnet_softc *sc)
{
	pthread_mutex_lock(&sc->rx_mtx);
	while (sc->rx_in_progress) {
		pthread_mutex_unlock(&sc->rx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->rx_mtx);
	}
	pthread_mutex_unlock(&sc->rx_mtx);
}
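
/*
 * Both waiters poll under the lock with a short sleep instead of using
 * a condition variable; this is simple and acceptable here because the
 * wait only happens on the rare device-reset path.
 */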

static void
pci_vtnet_update_status(struct pci_vtnet_softc *sc, uint32_t value)
{
	if (value == 0) {
		DPRINTF(("vtnet: device reset requested !\n"));

		sc->resetting = 1;

		/*
		 * Wait for the transmit and receive threads to finish
		 * their processing.
		 */
		pci_vtnet_txwait(sc);
		pci_vtnet_rxwait(sc);

		sc->vsc_rx_ready = 0;
		pci_vtnet_ring_reset(sc, VTNET_RXQ);
		pci_vtnet_ring_reset(sc, VTNET_TXQ);

		sc->resetting = 0;
	}

	sc->vsc_status = value;
}

/*
 * Called to send a buffer chain out to the tap device
 */
static void
pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		 int len)
{
	char pad[60];

	if (sc->vsc_tapfd == -1)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov. It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		memset(pad, 0, 60 - len);
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) writev(sc->vsc_tapfd, iov, iovcnt);
}
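
/*
 * The 60-byte floor is the minimum Ethernet frame size (64 bytes)
 * minus the 4-byte FCS, which is appended further down the stack.
 */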

/*
 * Called when there is read activity on the tap file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 *
 * MP note: the dummybuf is only used for discarding frames, so there
 * is no need for it to be per-vtnet or locked.
 */
static uint8_t dummybuf[2048];

static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct virtio_desc *vd;
	struct virtio_used *vu;
	struct vring_hqueue *hq;
	struct virtio_net_rxhdr *vrx;
	uint8_t *buf;
	int i, len, ndescs;
	int didx, uidx, aidx;	/* descriptor, avail and used index */

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/* Drop the packet and try later. */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Calculate the number of available rx buffers
	 */
	hq = &sc->vsc_hq[VTNET_RXQ];
	ndescs = hq_num_avail(hq);
	if (ndescs == 0) {
		/* Drop the packet and try later */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	aidx = hq->hq_cur_aidx;
	uidx = *hq->hq_used_idx;
	for (i = 0; i < ndescs; i++) {
		/*
		 * 'aidx' indexes into an array of descriptor indexes
		 */
		didx = hq->hq_avail_ring[aidx % hq->hq_size];
		assert(didx >= 0 && didx < hq->hq_size);

		vd = &hq->hq_dtable[didx];

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = paddr_guest2host(vtnet_ctx(sc), vd->vd_addr, vd->vd_len);
		buf = (uint8_t *)(vrx + 1);

		len = read(sc->vsc_tapfd, buf,
			   vd->vd_len - sizeof(struct virtio_net_rxhdr));
		if (len < 0 && errno == EWOULDBLOCK)
			break;

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers, which is always 1 without TSO
		 * support.
		 */
		memset(vrx, 0, sizeof(struct virtio_net_rxhdr));
		vrx->vrh_bufs = 1;

		/*
		 * Write this descriptor into the used ring
		 */
		vu = &hq->hq_used_ring[uidx % hq->hq_size];
		vu->vu_idx = didx;
		vu->vu_tlen = len + sizeof(struct virtio_net_rxhdr);
		uidx++;
		aidx++;
	}

	/*
	 * Update the used pointer, and signal an interrupt if allowed
	 */
	*hq->hq_used_idx = uidx;
	hq->hq_cur_aidx = aidx;

	if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
		if (use_msix) {
			pci_generate_msix(sc->vsc_pi,
					  sc->vsc_msix_table_idx[VTNET_RXQ]);
		} else {
			sc->vsc_isr |= 1;
			pci_generate_msi(sc->vsc_pi, 0);
		}
	}
}
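
/*
 * VRING_AVAIL_F_NO_INTERRUPT is set by the guest when it does not want
 * to be interrupted for every used-ring update; the flag is advisory,
 * so checking it once after the batch above is sufficient.
 */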

static void
pci_vtnet_tap_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	sc->rx_in_progress = 1;
	pci_vtnet_tap_rx(sc);
	sc->rx_in_progress = 0;
	pthread_mutex_unlock(&sc->rx_mtx);
}

static void
pci_vtnet_ping_rxq(struct pci_vtnet_softc *sc)
{
	/*
	 * A qnotify means that the rx process can now begin
	 */
	if (sc->vsc_rx_ready == 0) {
		sc->vsc_rx_ready = 1;
	}
}

static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vring_hqueue *hq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct virtio_desc *vd;
	struct virtio_used *vu;
	int i, plen, tlen;
	int uidx, aidx, didx;

	uidx = *hq->hq_used_idx;
	aidx = hq->hq_cur_aidx;
	didx = hq->hq_avail_ring[aidx % hq->hq_size];
	assert(didx >= 0 && didx < hq->hq_size);

	vd = &hq->hq_dtable[didx];

	/*
	 * Run through the chain of descriptors, ignoring the
	 * first header descriptor. However, include the header
	 * length in the total length that will be put into the
	 * used queue.
	 */
	tlen = vd->vd_len;
	vd = &hq->hq_dtable[vd->vd_next];

	for (i = 0, plen = 0;
	     i < VTNET_MAXSEGS;
	     i++, vd = &hq->hq_dtable[vd->vd_next]) {
		iov[i].iov_base = paddr_guest2host(vtnet_ctx(sc),
						   vd->vd_addr, vd->vd_len);
		iov[i].iov_len = vd->vd_len;
		plen += vd->vd_len;
		tlen += vd->vd_len;
		if ((vd->vd_flags & VRING_DESC_F_NEXT) == 0)
			break;
	}
	assert(i < VTNET_MAXSEGS);

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, i + 1));
	pci_vtnet_tap_tx(sc, iov, i + 1, plen);

	/*
	 * Return this chain back to the host
	 */
	vu = &hq->hq_used_ring[uidx % hq->hq_size];
	vu->vu_idx = didx;
	vu->vu_tlen = tlen;
	hq->hq_cur_aidx = aidx + 1;
	*hq->hq_used_idx = uidx + 1;
}
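
/*
 * Note the two lengths above: 'plen' is the payload handed to the tap
 * (the virtio header descriptor is skipped), while 'tlen' also counts
 * the header and is what gets reported back in the used-ring entry.
 */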

static void
pci_vtnet_ping_txq(struct pci_vtnet_softc *sc)
{
	struct vring_hqueue *hq = &sc->vsc_hq[VTNET_TXQ];
	int ndescs;

	/*
	 * Calculate number of ring entries to process
	 */
	ndescs = hq_num_avail(hq);
	if (ndescs == 0)
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = (struct pci_vtnet_softc *)param;
	struct vring_hqueue *hq;
	int i, ndescs, error;

	hq = &sc->vsc_hq[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);
	pthread_mutex_unlock(&sc->tx_mtx);

	for (;;) {
		pthread_mutex_lock(&sc->tx_mtx);
		for (;;) {
			if (sc->resetting)
				ndescs = 0;
			else
				ndescs = hq_num_avail(hq);

			if (ndescs != 0)
				break;

			/*
			 * Generate an interrupt if able
			 */
			if ((*hq->hq_avail_flags &
			     VRING_AVAIL_F_NO_INTERRUPT) == 0) {
				if (use_msix) {
					pci_generate_msix(sc->vsc_pi,
					    sc->vsc_msix_table_idx[VTNET_TXQ]);
				} else {
					sc->vsc_isr |= 1;
					pci_generate_msi(sc->vsc_pi, 0);
				}
			}

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through all the entries, placing them into
			 * iovecs and sending when an end-of-packet is found
			 */
			for (i = 0; i < ndescs; i++)
				pci_vtnet_proctx(sc, hq);

			ndescs = hq_num_avail(hq);
		} while (ndescs != 0);
	}
}
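
/*
 * A single tx thread services the queue. The guest is interrupted only
 * once the ring has been drained, which batches completions and keeps
 * vmexit/interrupt traffic down under load.
 */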

static void
pci_vtnet_ping_ctlq(struct pci_vtnet_softc *sc)
{
	DPRINTF(("vtnet: control qnotify!\n\r"));
}

static void
pci_vtnet_ring_init(struct pci_vtnet_softc *sc, uint64_t pfn)
{
	struct vring_hqueue *hq;
	int qnum = sc->vsc_curq;

	assert(qnum < VTNET_MAXQ);

	sc->vsc_pfn[qnum] = pfn << VRING_PFN;

	/*
	 * Set up host pointers to the various parts of the
	 * queue
	 */
	hq = &sc->vsc_hq[qnum];
	hq->hq_size = pci_vtnet_qsize(qnum);

	hq->hq_dtable = paddr_guest2host(vtnet_ctx(sc), pfn << VRING_PFN,
					 vring_size(hq->hq_size));
	hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size);
	hq->hq_avail_idx = hq->hq_avail_flags + 1;
	hq->hq_avail_ring = hq->hq_avail_flags + 2;
	hq->hq_used_flags = (uint16_t *)roundup2((uintptr_t)hq->hq_avail_ring +
						 hq->hq_size * sizeof(uint16_t),
						 VRING_ALIGN);
	hq->hq_used_idx = hq->hq_used_flags + 1;
	hq->hq_used_ring = (struct virtio_used *)(hq->hq_used_flags + 2);

	/*
	 * Initialize queue indexes
	 */
	hq->hq_cur_aidx = 0;
}
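
/*
 * A sketch of the legacy (split) vring layout being mapped above,
 * assuming VRING_ALIGN is the 4K alignment from the legacy spec:
 *
 *   desc table: hq_size * sizeof(struct virtio_desc)
 *   avail ring: flags (2b), idx (2b), ring (hq_size * 2b)
 *   ... padding to VRING_ALIGN ...
 *   used ring:  flags (2b), idx (2b), ring (hq_size * sizeof(virtio_used))
 */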

static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	const char *env_msi;

	sc = malloc(sizeof(struct pci_vtnet_softc));
	memset(sc, 0, sizeof(struct pci_vtnet_softc));

	pi->pi_arg = sc;
	sc->vsc_pi = pi;

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	/*
	 * Use MSI if set by user
	 */
	if ((env_msi = getenv("BHYVE_USE_MSI")) != NULL) {
		if (strcasecmp(env_msi, "yes") == 0)
			use_msix = 0;
	}

	/*
	 * Attempt to open the tap device
	 */
	sc->vsc_tapfd = -1;
	if (opts != NULL) {
		char tbuf[80];

		strcpy(tbuf, "/dev/");
		strlcat(tbuf, opts, sizeof(tbuf));

		sc->vsc_tapfd = open(tbuf, O_RDWR);
		if (sc->vsc_tapfd == -1) {
			WPRINTF(("open of tap device %s failed\n", tbuf));
		} else {
			/*
			 * Set non-blocking and register for read
			 * notifications with the event loop
			 */
			int opt = 1;
			if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
				WPRINTF(("tap device O_NONBLOCK failed\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}

			sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
						  EVF_READ,
						  pci_vtnet_tap_callback,
						  sc);
			if (sc->vsc_mevp == NULL) {
				WPRINTF(("Could not register event\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}
		}
	}

	/*
	 * The MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the vm name. The slot/func number is
	 * prepended to this for slots other than 1:0, so that
	 * a bootloader can netboot from the equivalent of slot 1.
	 */
	if (pi->pi_slot == 1 && pi->pi_func == 0) {
		strncpy(nstr, vmname, sizeof(nstr));
	} else {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);
	}

	MD5Init(&mdctx);
	MD5Update(&mdctx, nstr, strlen(nstr));
	MD5Final(digest, &mdctx);

	sc->vsc_macaddr[0] = 0x00;
	sc->vsc_macaddr[1] = 0xa0;
	sc->vsc_macaddr[2] = 0x98;
	sc->vsc_macaddr[3] = digest[0];
	sc->vsc_macaddr[4] = digest[1];
	sc->vsc_macaddr[5] = digest[2];
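
	/*
	 * For example, a device in slot 2, function 0 of a VM named
	 * "vm1" hashes the string "2-0-vm1", yielding a stable,
	 * per-slot MAC of the form 00:a0:98:xx:xx:xx.
	 */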

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);

	if (use_msix) {
		/* MSI-X support */
		int i;

		for (i = 0; i < VTNET_MAXQ; i++)
			sc->vsc_msix_table_idx[i] = VIRTIO_MSI_NO_VECTOR;

		/*
		 * BAR 1 used to map MSI-X table and PBA
		 */
		if (pci_emul_add_msixcap(pi, VTNET_MAXQ, 1))
			return (1);
	} else {
		/* MSI support */
		pci_emul_add_msicap(pi, 1);
	}

	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VTNET_REGSZ);

	sc->resetting = 0;

	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "%s vtnet%d tx", vmname, pi->pi_slot);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}

/*
 * Function pointer array to handle queue notifications
 */
static void (*pci_vtnet_qnotify[VTNET_MAXQ])(struct pci_vtnet_softc *) = {
	pci_vtnet_ping_rxq,
	pci_vtnet_ping_txq,
	pci_vtnet_ping_ctlq
};

static uint64_t
vtnet_adjust_offset(struct pci_devinst *pi, uint64_t offset)
{
	/*
	 * Device specific offsets used by the guest change based on
	 * whether the MSI-X capability is enabled or not.
	 */
	if (!pci_msix_enabled(pi)) {
		if (offset >= VTCFG_R_MSIX)
			return (offset + (VTCFG_R_CFG1 - VTCFG_R_MSIX));
	}

	return (offset);
}
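
/*
 * Concretely (assuming the VTCFG_R_* values from bhyve's virtio.h of
 * this era, where VTCFG_R_MSIX is 20 and VTCFG_R_CFG1 is 24): with
 * MSI-X disabled there are no vector registers, so a guest access at
 * offset 20 or above is shifted up by 4 bytes to land on the same
 * internal register it would hit in the MSI-X layout.
 */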

static void
pci_vtnet_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
		int baridx, uint64_t offset, int size, uint64_t value)
{
	struct pci_vtnet_softc *sc = pi->pi_arg;
	void *ptr;

	if (use_msix) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			pci_emul_msix_twrite(pi, offset, size, value);
			return;
		}
	}

	assert(baridx == 0);

	if (offset + size > pci_vtnet_iosize(pi)) {
		DPRINTF(("vtnet_write: 2big, offset %ld size %d\n",
			 offset, size));
		return;
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = vtnet_adjust_offset(pi, offset);

	switch (offset) {
	case VTCFG_R_GUESTCAP:
		assert(size == 4);
		sc->vsc_features = value & VTNET_S_HOSTCAPS;
		break;
	case VTCFG_R_PFN:
		assert(size == 4);
		pci_vtnet_ring_init(sc, value);
		break;
	case VTCFG_R_QSEL:
		assert(size == 2);
		assert(value < VTNET_MAXQ);
		sc->vsc_curq = value;
		break;
	case VTCFG_R_QNOTIFY:
		assert(size == 2);
		assert(value < VTNET_MAXQ);
		(*pci_vtnet_qnotify[value])(sc);
		break;
	case VTCFG_R_STATUS:
		assert(size == 1);
		pci_vtnet_update_status(sc, value);
		break;
	case VTCFG_R_CFGVEC:
		assert(size == 2);
		sc->vsc_msix_table_idx[VTNET_CTLQ] = value;
		break;
	case VTCFG_R_QVEC:
		assert(size == 2);
		assert(sc->vsc_curq != VTNET_CTLQ);
		sc->vsc_msix_table_idx[sc->vsc_curq] = value;
		break;
	case VTNET_R_CFG0:
	case VTNET_R_CFG1:
	case VTNET_R_CFG2:
	case VTNET_R_CFG3:
	case VTNET_R_CFG4:
	case VTNET_R_CFG5:
		/*
		 * The driver is allowed to change the MAC address
		 */
		assert((size + offset) <= (VTNET_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VTNET_R_CFG0];
		if (size == 1) {
			*(uint8_t *)ptr = value;
		} else if (size == 2) {
			*(uint16_t *)ptr = value;
		} else {
			*(uint32_t *)ptr = value;
		}
		break;
	case VTCFG_R_HOSTCAP:
	case VTCFG_R_QNUM:
	case VTCFG_R_ISR:
	case VTNET_R_CFG6:
	case VTNET_R_CFG7:
		DPRINTF(("vtnet: write to readonly reg %ld\n\r", offset));
		break;
	default:
		DPRINTF(("vtnet: unknown i/o write offset %ld\n\r", offset));
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);
}

static uint64_t
pci_vtnet_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	       int baridx, uint64_t offset, int size)
{
	struct pci_vtnet_softc *sc = pi->pi_arg;
	void *ptr;
	uint64_t value;

	if (use_msix) {
		if (baridx == pci_msix_table_bar(pi) ||
		    baridx == pci_msix_pba_bar(pi)) {
			return (pci_emul_msix_tread(pi, offset, size));
		}
	}

	assert(baridx == 0);

	if (offset + size > pci_vtnet_iosize(pi)) {
		DPRINTF(("vtnet_read: 2big, offset %ld size %d\n",
			 offset, size));
		return (0);
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = vtnet_adjust_offset(pi, offset);

	switch (offset) {
	case VTCFG_R_HOSTCAP:
		assert(size == 4);
		value = VTNET_S_HOSTCAPS;
		break;
	case VTCFG_R_GUESTCAP:
		assert(size == 4);
		value = sc->vsc_features;	/* XXX never read ? */
		break;
	case VTCFG_R_PFN:
		assert(size == 4);
		value = sc->vsc_pfn[sc->vsc_curq] >> VRING_PFN;
		break;
	case VTCFG_R_QNUM:
		assert(size == 2);
		value = pci_vtnet_qsize(sc->vsc_curq);
		break;
	case VTCFG_R_QSEL:
		assert(size == 2);
		value = sc->vsc_curq;	/* XXX never read ? */
		break;
	case VTCFG_R_QNOTIFY:
		assert(size == 2);
		value = sc->vsc_curq;	/* XXX never read ? */
		break;
	case VTCFG_R_STATUS:
		assert(size == 1);
		value = sc->vsc_status;
		break;
	case VTCFG_R_ISR:
		assert(size == 1);
		value = sc->vsc_isr;
		sc->vsc_isr = 0;	/* a read clears this flag */
		break;
	case VTCFG_R_CFGVEC:
		assert(size == 2);
		value = sc->vsc_msix_table_idx[VTNET_CTLQ];
		break;
	case VTCFG_R_QVEC:
		assert(size == 2);
		assert(sc->vsc_curq != VTNET_CTLQ);
		value = sc->vsc_msix_table_idx[sc->vsc_curq];
		break;
	case VTNET_R_CFG0:
	case VTNET_R_CFG1:
	case VTNET_R_CFG2:
	case VTNET_R_CFG3:
	case VTNET_R_CFG4:
	case VTNET_R_CFG5:
		assert((size + offset) <= (VTNET_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VTNET_R_CFG0];
		if (size == 1) {
			value = *(uint8_t *)ptr;
		} else if (size == 2) {
			value = *(uint16_t *)ptr;
		} else {
			value = *(uint32_t *)ptr;
		}
		break;
	case VTNET_R_CFG6:
		value = 0x01;	/* XXX link always up */
		break;
	case VTNET_R_CFG7:
		value = 0;	/* XXX link status in LSB */
		break;
	default:
		DPRINTF(("vtnet: unknown i/o read offset %ld\n\r", offset));
		value = 0;
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);

	return (value);
}

struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	pci_vtnet_write,
	.pe_barread =	pci_vtnet_read
};
PCI_EMUL_SET(pci_de_vnet);
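
/*
 * PCI_EMUL_SET places pci_de_vnet in a linker set that bhyve scans at
 * startup, so the device can be attached by name from the command
 * line; assuming the usual slot syntax of this era, something like:
 *
 *	bhyve -s 1:0,virtio-net,tap0 ... vmname
 */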