/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/ioctl.h>
#define VTNET_RINGSZ	256

#define VTNET_MAXSEGS	32

/*
 * Queue numbers: receive, transmit, and the (unimplemented) control queue
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2
#define VTNET_MAXQ	3
/*
 * PCI config-space register offsets
 */
#define VTNET_R_CFG0	24
#define VTNET_R_CFG1	25
#define VTNET_R_CFG2	26
#define VTNET_R_CFG3	27
#define VTNET_R_CFG4	28
#define VTNET_R_CFG5	29
#define VTNET_R_CFG6	30
#define VTNET_R_CFG7	31
#define VTNET_R_MAX	31

#define VTNET_REGSZ	(VTNET_R_MAX + 1)
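/*
 * Note: per the BAR read/write handlers below, registers CFG0-CFG5 hold
 * the 6-byte MAC address and CFG6/CFG7 report link status.
 */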
#define VTNET_S_HOSTCAPS		\
	( 0x00000020 |	/* host supplies MAC */		\
	  0x00008000 |	/* host can merge Rx buffers */	\
	  0x00010000 )	/* config status available */
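/*
 * For reference, these bit positions appear to correspond to the virtio
 * spec feature names VIRTIO_NET_F_MAC (bit 5), VIRTIO_NET_F_MRG_RXBUF
 * (bit 15) and VIRTIO_NET_F_STATUS (bit 16); those names come from the
 * spec and are not defined in this file.
 */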
static int use_msix = 1;
struct vring_hqueue {
	/* Internal state */
	uint16_t	hq_size;
	uint16_t	hq_cur_aidx;		/* trails behind 'avail_idx' */

	/* Host-context pointers to the queue */
	struct virtio_desc *hq_dtable;
	uint16_t	*hq_avail_flags;
	uint16_t	*hq_avail_idx;		/* monotonically increasing */
	uint16_t	*hq_avail_ring;

	uint16_t	*hq_used_flags;
	uint16_t	*hq_used_idx;		/* monotonically increasing */
	struct virtio_used *hq_used_ring;
};
/*
 * Fixed network header size
 */
struct virtio_net_rxhdr {
	uint8_t		vrh_flags;
	uint8_t		vrh_gso_type;
	uint16_t	vrh_hdr_len;
	uint16_t	vrh_gso_size;
	uint16_t	vrh_csum_start;
	uint16_t	vrh_csum_offset;
	uint16_t	vrh_bufs;
} __packed;
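/*
 * This layout follows the virtio spec's network header, including the
 * trailing buffer count (vrh_bufs here, num_buffers in the spec) that
 * is present when mergeable rx buffers are negotiated.
 */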
static int pci_vtnet_debug;
#define DPRINTF(params) do { if (pci_vtnet_debug) printf params; } while (0)
#define WPRINTF(params) printf params
struct pci_vtnet_softc {
	struct pci_devinst *vsc_pi;
	pthread_mutex_t vsc_mtx;
	struct mevent	*vsc_mevp;

	int		vsc_curq;
	int		vsc_status;
	int		vsc_isr;
	int		vsc_tapfd;
	int		vsc_rx_ready;
	int		vsc_rxpend;

	uint32_t	vsc_features;
	uint8_t		vsc_macaddr[6];

	uint64_t	vsc_pfn[VTNET_MAXQ];
	struct vring_hqueue vsc_hq[VTNET_MAXQ];
	uint16_t	vsc_msix_table_idx[VTNET_MAXQ];
};
/*
 * Return the size of the IO BAR that maps the virtio header and the
 * device-specific region. The size varies depending on whether MSI-X
 * is enabled or not.
 */
static uint64_t
pci_vtnet_iosize(struct pci_devinst *pi)
{
	if (pci_msix_enabled(pi))
		return (VTNET_REGSZ);
	else
		return (VTNET_REGSZ - (VTCFG_R_CFG1 - VTCFG_R_MSIX));
}
/*
 * Return the number of available descriptors in the vring, taking care
 * of the 16-bit index wraparound.
 */
static int
hq_num_avail(struct vring_hqueue *hq)
{
	uint16_t ndesc;

	/*
	 * We're just computing (a - b) modulo 2^16.
	 *
	 * The only glitch here is that in standard C,
	 * uint16_t promotes to (signed) int when int has
	 * more than 16 bits (pretty much always now), so
	 * we have to force it back to unsigned.
	 */
	ndesc = (unsigned)*hq->hq_avail_idx - (unsigned)hq->hq_cur_aidx;

	assert(ndesc <= hq->hq_size);

	return (ndesc);
}
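/*
 * Worked example of the wraparound: if the guest has published
 * avail_idx = 3 after wrapping while hq_cur_aidx is still 0xfffe,
 * then (3 - 0xfffe) mod 2^16 = 5, i.e. five descriptors are pending.
 * The assignment to a uint16_t performs the mod 2^16 truncation.
 */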
static int
pci_vtnet_qsize(int qnum)
{
	/* XXX no ctl queue currently */
	if (qnum == VTNET_CTLQ) {
		return (0);
	}

	/* XXX fixed currently. Maybe different for tx/rx/ctl */
	return (VTNET_RINGSZ);
}
static void
pci_vtnet_ring_reset(struct pci_vtnet_softc *sc, int ring)
{
	struct vring_hqueue *hq;

	assert(ring < VTNET_MAXQ);

	hq = &sc->vsc_hq[ring];

	/*
	 * Reset all soft state
	 */
	hq->hq_cur_aidx = 0;
}
static void
pci_vtnet_update_status(struct pci_vtnet_softc *sc, uint32_t value)
{
	if (value == 0) {
		DPRINTF(("vtnet: device reset requested !\n"));
		pci_vtnet_ring_reset(sc, VTNET_RXQ);
		pci_vtnet_ring_reset(sc, VTNET_TXQ);
		sc->vsc_rx_ready = 0;
	}

	sc->vsc_status = value;
}
/*
 * Called to send a buffer chain out to the tap device
 */
static void
pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		 int len)
{
	char pad[60];

	if (sc->vsc_tapfd == -1)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov. It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		memset(pad, 0, 60 - len);
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}

	(void) writev(sc->vsc_tapfd, iov, iovcnt);
}
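/*
 * The 60-byte floor corresponds to the minimum Ethernet frame size of
 * 64 bytes less the 4-byte FCS, which is not carried across the tap.
 */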
/*
 * Called when there is read activity on the tap file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 * MP note: the dummybuf is only used for discarding frames, so there
 * is no need for it to be per-vtnet or locked.
 */
static uint8_t dummybuf[2048];
static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct virtio_desc *vd;
	struct virtio_used *vu;
	struct vring_hqueue *hq;
	struct virtio_net_rxhdr *vrx;
	uint8_t *buf;
	int i, len, ndescs;
	int didx, uidx, aidx;	/* descriptor, avail and used index */

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up.
	 */
	if (sc->vsc_rx_ready == 0) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Calculate the number of available rx buffers
	 */
	hq = &sc->vsc_hq[VTNET_RXQ];

	ndescs = hq_num_avail(hq);

	if (ndescs == 0) {
		/*
		 * Need to wait for host notification to read
		 */
		if (sc->vsc_rxpend == 0) {
			WPRINTF(("vtnet: no rx descriptors !\n"));
			sc->vsc_rxpend = 1;
		}

		/*
		 * Drop the packet and try later
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	aidx = hq->hq_cur_aidx;
	uidx = *hq->hq_used_idx;
	for (i = 0; i < ndescs; i++) {
		/*
		 * 'aidx' indexes into the array of descriptor indexes
		 */
		didx = hq->hq_avail_ring[aidx % hq->hq_size];
		assert(didx >= 0 && didx < hq->hq_size);

		vd = &hq->hq_dtable[didx];

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = paddr_guest2host(vd->vd_addr, vd->vd_len);
		buf = (uint8_t *)(vrx + 1);

		len = read(sc->vsc_tapfd, buf,
			   vd->vd_len - sizeof(struct virtio_net_rxhdr));

		if (len < 0 && errno == EWOULDBLOCK) {
			break;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers, which is always 1 without TSO
		 * support.
		 */
		memset(vrx, 0, sizeof(struct virtio_net_rxhdr));
		vrx->vrh_bufs = 1;

		/*
		 * Write this descriptor into the used ring
		 */
		vu = &hq->hq_used_ring[uidx % hq->hq_size];
		vu->vu_idx = didx;
		vu->vu_tlen = len + sizeof(struct virtio_net_rxhdr);
		uidx++;
		aidx++;
	}

	/*
	 * Update the used pointer, and signal an interrupt if allowed
	 */
	*hq->hq_used_idx = uidx;
	hq->hq_cur_aidx = aidx;

	if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
		if (use_msix) {
			pci_generate_msix(sc->vsc_pi,
			    sc->vsc_msix_table_idx[VTNET_RXQ]);
		} else {
			sc->vsc_isr |= 1;
			pci_generate_msi(sc->vsc_pi, 0);
		}
	}
}
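/*
 * Interrupt delivery note: the guest suppresses interrupts by setting
 * VRING_AVAIL_F_NO_INTERRUPT in the avail ring flags. With MSI-X the
 * per-queue table vector is fired directly; with legacy MSI the ISR
 * status bit is set and, as the read handler below shows, cleared when
 * the guest reads it.
 */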
static void
pci_vtnet_tap_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->vsc_mtx);
	pci_vtnet_tap_rx(sc);
	pthread_mutex_unlock(&sc->vsc_mtx);
}
static void
pci_vtnet_ping_rxq(struct pci_vtnet_softc *sc)
{
	/*
	 * A qnotify means that the rx process can now begin
	 */
	if (sc->vsc_rx_ready == 0) {
		sc->vsc_rx_ready = 1;
	}

	/*
	 * If the rx queue was empty, attempt to receive a
	 * packet that was previously blocked due to no rx bufs
	 * available
	 */
	if (sc->vsc_rxpend) {
		WPRINTF(("vtnet: rx resumed\n\r"));
		sc->vsc_rxpend = 0;
		pci_vtnet_tap_rx(sc);
	}
}
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vring_hqueue *hq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct virtio_desc *vd;
	struct virtio_used *vu;
	int i, plen, tlen;
	int uidx, aidx, didx;

	uidx = *hq->hq_used_idx;
	aidx = hq->hq_cur_aidx;
	didx = hq->hq_avail_ring[aidx % hq->hq_size];
	assert(didx >= 0 && didx < hq->hq_size);

	vd = &hq->hq_dtable[didx];

	/*
	 * Run through the chain of descriptors, ignoring the
	 * first header descriptor. However, include the header
	 * length in the total length that will be put into the
	 * used ring.
	 */
	tlen = vd->vd_len;
	vd = &hq->hq_dtable[vd->vd_next];

	for (i = 0, plen = 0;
	     i < VTNET_MAXSEGS;
	     i++, vd = &hq->hq_dtable[vd->vd_next]) {
		iov[i].iov_base = paddr_guest2host(vd->vd_addr, vd->vd_len);
		iov[i].iov_len = vd->vd_len;
		plen += vd->vd_len;
		tlen += vd->vd_len;
		if ((vd->vd_flags & VRING_DESC_F_NEXT) == 0)
			break;
	}
	assert(i < VTNET_MAXSEGS);

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, i + 1));
	pci_vtnet_tap_tx(sc, iov, i + 1, plen);

	/*
	 * Return this chain back to the host
	 */
	vu = &hq->hq_used_ring[uidx % hq->hq_size];
	vu->vu_idx = didx;
	vu->vu_tlen = tlen;
	hq->hq_cur_aidx = aidx + 1;
	*hq->hq_used_idx = uidx + 1;

	/*
	 * Generate an interrupt if able
	 */
	if ((*hq->hq_avail_flags & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
		if (use_msix) {
			pci_generate_msix(sc->vsc_pi,
			    sc->vsc_msix_table_idx[VTNET_TXQ]);
		} else {
			sc->vsc_isr |= 1;
			pci_generate_msi(sc->vsc_pi, 0);
		}
	}
}
static void
pci_vtnet_ping_txq(struct pci_vtnet_softc *sc)
{
	struct vring_hqueue *hq = &sc->vsc_hq[VTNET_TXQ];
	int i;
	int ndescs;

	/*
	 * Calculate number of ring entries to process
	 */
	ndescs = hq_num_avail(hq);

	if (ndescs == 0)
		return;

	/*
	 * Run through all the entries, placing them into iovecs and
	 * sending when an end-of-packet is found
	 */
	for (i = 0; i < ndescs; i++)
		pci_vtnet_proctx(sc, hq);
}
static void
pci_vtnet_ping_ctlq(struct pci_vtnet_softc *sc)
{
	DPRINTF(("vtnet: control qnotify!\n\r"));
}
static void
pci_vtnet_ring_init(struct pci_vtnet_softc *sc, uint64_t pfn)
{
	struct vring_hqueue *hq;
	int qnum = sc->vsc_curq;

	assert(qnum < VTNET_MAXQ);

	sc->vsc_pfn[qnum] = pfn << VRING_PFN;

	/*
	 * Set up host pointers to the various parts of the
	 * queue
	 */
	hq = &sc->vsc_hq[qnum];
	hq->hq_size = pci_vtnet_qsize(qnum);

	hq->hq_dtable = paddr_guest2host(pfn << VRING_PFN,
					 vring_size(hq->hq_size));
	hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size);
	hq->hq_avail_idx = hq->hq_avail_flags + 1;
	hq->hq_avail_ring = hq->hq_avail_flags + 2;
	hq->hq_used_flags = (uint16_t *)roundup2(
	    (uintptr_t)(hq->hq_avail_ring + hq->hq_size), VRING_ALIGN);
	hq->hq_used_idx = hq->hq_used_flags + 1;
	hq->hq_used_ring = (struct virtio_used *)(hq->hq_used_flags + 2);

	/*
	 * Initialize queue indexes
	 */
	hq->hq_cur_aidx = 0;
}
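/*
 * For reference, the legacy virtio ring layout being mapped above is,
 * in guest-contiguous memory:
 *
 *	struct virtio_desc	dtable[size];	(descriptor table)
 *	uint16_t		avail_flags, avail_idx, avail_ring[size];
 *	  ... padding up to VRING_ALIGN ...
 *	uint16_t		used_flags, used_idx;
 *	struct virtio_used	used_ring[size];
 *
 * which is what the pointer arithmetic in pci_vtnet_ring_init() walks.
 */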
static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tbuf[80];
	struct pci_vtnet_softc *sc;
	const char *env_msi;
	int i;

	sc = malloc(sizeof(struct pci_vtnet_softc));
	memset(sc, 0, sizeof(struct pci_vtnet_softc));

	pi->pi_arg = sc;
	sc->vsc_pi = pi;

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	/*
	 * Use MSI if set by user
	 */
	if ((env_msi = getenv("BHYVE_USE_MSI")) != NULL) {
		if (strcasecmp(env_msi, "yes") == 0)
			use_msix = 0;
	}

	/*
	 * Attempt to open the tap device
	 */
	sc->vsc_tapfd = -1;
	if (opts != NULL) {
		int opt = 1;

		strcpy(tbuf, "/dev/");
		strlcat(tbuf, opts, sizeof(tbuf));

		sc->vsc_tapfd = open(tbuf, O_RDWR);
		if (sc->vsc_tapfd == -1) {
			WPRINTF(("open of tap device %s failed\n", tbuf));
		} else {
			/*
			 * Set non-blocking and register for read
			 * notifications with the event loop
			 */
			if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
				WPRINTF(("tap device O_NONBLOCK failed\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}

			sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
						  EVF_READ,
						  pci_vtnet_tap_callback,
						  sc);
			if (sc->vsc_mevp == NULL) {
				WPRINTF(("Could not register event\n"));
				close(sc->vsc_tapfd);
				sc->vsc_tapfd = -1;
			}
		}
	}

	/*
	 * The MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the vm name. The slot/func number is
	 * prepended to this for slots other than 1:0, so that
	 * a bootloader can netboot from the equivalent of slot 1.
	 */
	if (pi->pi_slot == 1 && pi->pi_func == 0) {
		strlcpy(nstr, vmname, sizeof(nstr));
	} else {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);
	}

	MD5Init(&mdctx);
	MD5Update(&mdctx, nstr, strlen(nstr));
	MD5Final(digest, &mdctx);

	sc->vsc_macaddr[0] = 0x00;
	sc->vsc_macaddr[1] = 0xa0;
	sc->vsc_macaddr[2] = 0x98;
	sc->vsc_macaddr[3] = digest[0];
	sc->vsc_macaddr[4] = digest[1];
	sc->vsc_macaddr[5] = digest[2];

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);

	if (use_msix) {
		/* Mark every queue's vector as unassigned to start with */
		for (i = 0; i < VTNET_MAXQ; i++)
			sc->vsc_msix_table_idx[i] = VIRTIO_MSI_NO_VECTOR;

		/*
		 * BAR 1 used to map MSI-X table and PBA
		 */
		if (pci_emul_add_msixcap(pi, VTNET_MAXQ, 1))
			return (1);
	} else {
		pci_emul_add_msicap(pi, 1);
	}

	pci_emul_alloc_bar(pi, 0, PCIBAR_IO, VTNET_REGSZ);

	return (0);
}
/*
 * Function pointer array to handle queue notifications
 */
static void (*pci_vtnet_qnotify[VTNET_MAXQ])(struct pci_vtnet_softc *) = {
	pci_vtnet_ping_rxq,
	pci_vtnet_ping_txq,
	pci_vtnet_ping_ctlq
};
static uint64_t
vtnet_adjust_offset(struct pci_devinst *pi, uint64_t offset)
{
	/*
	 * Device specific offsets used by the guest change based on
	 * whether the MSI-X capability is enabled or not
	 */
	if (!pci_msix_enabled(pi)) {
		if (offset >= VTCFG_R_MSIX)
			return (offset + (VTCFG_R_CFG1 - VTCFG_R_MSIX));
	}

	return (offset);
}
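/*
 * Worked example, assuming the usual legacy virtio register values of
 * VTCFG_R_MSIX = 20 and VTCFG_R_CFG1 = 24 (defined in virtio.h, not in
 * this file): with MSI-X disabled, a guest access at offset 20 (the
 * first device-specific byte) is remapped to 20 + (24 - 20) = 24, i.e.
 * VTNET_R_CFG0, the first MAC address byte.
 */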
static void
pci_vtnet_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
		int baridx, uint64_t offset, int size, uint64_t value)
{
	struct pci_vtnet_softc *sc = pi->pi_arg;
	void *ptr;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		pci_emul_msix_twrite(pi, offset, size, value);
		return;
	}

	if (offset + size > pci_vtnet_iosize(pi)) {
		DPRINTF(("vtnet_write: 2big, offset %ld size %d\n",
			 offset, size));
		return;
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = vtnet_adjust_offset(pi, offset);

	switch (offset) {
	case VTCFG_R_GUESTCAP:
		sc->vsc_features = value & VTNET_S_HOSTCAPS;
		break;
	case VTCFG_R_PFN:
		pci_vtnet_ring_init(sc, value);
		break;
	case VTCFG_R_QSEL:
		assert(value < VTNET_MAXQ);
		sc->vsc_curq = value;
		break;
	case VTCFG_R_QNOTIFY:
		assert(value < VTNET_MAXQ);
		(*pci_vtnet_qnotify[value])(sc);
		break;
	case VTCFG_R_STATUS:
		pci_vtnet_update_status(sc, value);
		break;
	case VTCFG_R_CFGVEC:
		sc->vsc_msix_table_idx[VTNET_CTLQ] = value;
		break;
	case VTCFG_R_QVEC:
		assert(sc->vsc_curq != VTNET_CTLQ);
		sc->vsc_msix_table_idx[sc->vsc_curq] = value;
		break;
	case VTNET_R_CFG0:
	case VTNET_R_CFG1:
	case VTNET_R_CFG2:
	case VTNET_R_CFG3:
	case VTNET_R_CFG4:
	case VTNET_R_CFG5:
		assert((size + offset) <= (VTNET_R_CFG5 + 1));
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_macaddr[offset - VTNET_R_CFG0];
		if (size == 1) {
			*(uint8_t *) ptr = value;
		} else if (size == 2) {
			*(uint16_t *) ptr = value;
		} else {
			*(uint32_t *) ptr = value;
		}
		break;
	case VTCFG_R_HOSTCAP:
	case VTCFG_R_QNUM:
	case VTCFG_R_ISR:
	case VTNET_R_CFG6:
	case VTNET_R_CFG7:
		DPRINTF(("vtnet: write to readonly reg %ld\n\r", offset));
		break;
	default:
		DPRINTF(("vtnet: unknown i/o write offset %ld\n\r", offset));
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);
}
static uint64_t
pci_vtnet_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
	       int baridx, uint64_t offset, int size)
{
	struct pci_vtnet_softc *sc = pi->pi_arg;
	void *ptr;
	uint64_t value;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		return (pci_emul_msix_tread(pi, offset, size));
	}

	if (offset + size > pci_vtnet_iosize(pi)) {
		DPRINTF(("vtnet_read: 2big, offset %ld size %d\n",
			 offset, size));
		return (0);
	}

	pthread_mutex_lock(&sc->vsc_mtx);

	offset = vtnet_adjust_offset(pi, offset);

	switch (offset) {
	case VTCFG_R_HOSTCAP:
		value = VTNET_S_HOSTCAPS;
		break;
	case VTCFG_R_GUESTCAP:
		value = sc->vsc_features; /* XXX never read ? */
		break;
	case VTCFG_R_PFN:
		value = sc->vsc_pfn[sc->vsc_curq] >> VRING_PFN;
		break;
	case VTCFG_R_QNUM:
		value = pci_vtnet_qsize(sc->vsc_curq);
		break;
	case VTCFG_R_QSEL:
		value = sc->vsc_curq; /* XXX never read ? */
		break;
	case VTCFG_R_QNOTIFY:
		value = sc->vsc_curq; /* XXX never read ? */
		break;
	case VTCFG_R_STATUS:
		value = sc->vsc_status;
		break;
	case VTCFG_R_ISR:
		value = sc->vsc_isr;
		sc->vsc_isr = 0;	/* a read clears this flag */
		break;
	case VTCFG_R_CFGVEC:
		value = sc->vsc_msix_table_idx[VTNET_CTLQ];
		break;
	case VTCFG_R_QVEC:
		assert(sc->vsc_curq != VTNET_CTLQ);
		value = sc->vsc_msix_table_idx[sc->vsc_curq];
		break;
	case VTNET_R_CFG0:
	case VTNET_R_CFG1:
	case VTNET_R_CFG2:
	case VTNET_R_CFG3:
	case VTNET_R_CFG4:
	case VTNET_R_CFG5:
		assert((size + offset) <= (VTNET_R_CFG5 + 1));
		ptr = &sc->vsc_macaddr[offset - VTNET_R_CFG0];
		if (size == 1) {
			value = *(uint8_t *) ptr;
		} else if (size == 2) {
			value = *(uint16_t *) ptr;
		} else {
			value = *(uint32_t *) ptr;
		}
		break;
	case VTNET_R_CFG6:
		value = 0x01;	/* XXX link always up */
		break;
	case VTNET_R_CFG7:
		value = 0;	/* XXX link status in LSB */
		break;
	default:
		DPRINTF(("vtnet: unknown i/o read offset %ld\n\r", offset));
		value = 0;
		break;
	}

	pthread_mutex_unlock(&sc->vsc_mtx);

	return (value);
}
struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	pci_vtnet_write,
	.pe_barread =	pci_vtnet_read
};
PCI_EMUL_SET(pci_de_vnet);
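/*
 * PCI_EMUL_SET (built on <sys/linker_set.h>) adds pci_de_vnet to the
 * linker set of PCI device emulations, which the bhyve core scans at
 * startup to find an emulation by its .pe_emu name.
 */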