/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <machine/atomic.h>
#include <net/ethernet.h>
#ifndef NETMAP_WITH_LIBS
#define NETMAP_WITH_LIBS
#endif
#include <net/netmap_user.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <md5.h>
#include <pthread.h>
#include <pthread_np.h>
#include <sysexits.h>

#include "bhyverun.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"

#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	256

/*
 * Host capabilities.  Note that we only offer a few of these.
 */
#define	VIRTIO_NET_F_CSUM	(1 <<  0) /* host handles partial cksum */
#define	VIRTIO_NET_F_GUEST_CSUM	(1 <<  1) /* guest handles partial cksum */
#define	VIRTIO_NET_F_MAC	(1 <<  5) /* host supplies MAC */
#define	VIRTIO_NET_F_GSO_DEPREC	(1 <<  6) /* deprecated: host handles GSO */
#define	VIRTIO_NET_F_GUEST_TSO4	(1 <<  7) /* guest can rcv TSOv4 */
#define	VIRTIO_NET_F_GUEST_TSO6	(1 <<  8) /* guest can rcv TSOv6 */
#define	VIRTIO_NET_F_GUEST_ECN	(1 <<  9) /* guest can rcv TSO with ECN */
#define	VIRTIO_NET_F_GUEST_UFO	(1 << 10) /* guest can rcv UFO */
#define	VIRTIO_NET_F_HOST_TSO4	(1 << 11) /* host can rcv TSOv4 */
#define	VIRTIO_NET_F_HOST_TSO6	(1 << 12) /* host can rcv TSOv6 */
#define	VIRTIO_NET_F_HOST_ECN	(1 << 13) /* host can rcv TSO with ECN */
#define	VIRTIO_NET_F_HOST_UFO	(1 << 14) /* host can rcv UFO */
#define	VIRTIO_NET_F_MRG_RXBUF	(1 << 15) /* host can merge RX buffers */
#define	VIRTIO_NET_F_STATUS	(1 << 16) /* config status field available */
#define	VIRTIO_NET_F_CTRL_VQ	(1 << 17) /* control channel available */
#define	VIRTIO_NET_F_CTRL_RX	(1 << 18) /* control channel RX mode support */
#define	VIRTIO_NET_F_CTRL_VLAN	(1 << 19) /* control channel VLAN filtering */
#define	VIRTIO_NET_F_GUEST_ANNOUNCE \
				(1 << 21) /* guest can send gratuitous pkts */

#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)
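
/*
 * Illustrative sketch (not part of the device model): the feature set the
 * guest ends up with is the intersection of the capabilities advertised
 * above and whatever the driver acks during negotiation, e.g.
 *
 *	uint64_t negotiated = VTNET_S_HOSTCAPS & guest_features;
 *
 * A guest that never acks VIRTIO_NET_F_MRG_RXBUF therefore falls back to
 * the shorter, non-merged rx header (see pci_vtnet_neg_features()).
 */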

/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
} __packed;

/*
 * Queue definitions.
 */
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3

/*
 * Fixed network header size
 */
struct virtio_net_rxhdr {
	uint8_t		vrh_flags;
	uint8_t		vrh_gso_type;
	uint16_t	vrh_hdr_len;
	uint16_t	vrh_gso_size;
	uint16_t	vrh_csum_start;
	uint16_t	vrh_csum_offset;
	uint16_t	vrh_bufs;
} __packed;
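
/*
 * Illustrative layout (assuming merged rx buffers were negotiated): each
 * guest rx buffer chain begins with this 12-byte header, followed
 * immediately by the ethernet frame:
 *
 *	[ virtio_net_rxhdr (12 bytes) ][ ethernet frame ... ]
 *
 * Without VIRTIO_NET_F_MRG_RXBUF the trailing vrh_bufs field is absent and
 * only 10 header bytes precede the frame.
 */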

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) printf params
#define WPRINTF(params) printf params
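
/*
 * Usage sketch: both macros take a parenthesized argument list, so a single
 * macro parameter can carry a variable number of printf arguments, e.g.
 *
 *	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, n));
 *
 * Note the doubled parentheses: "params" expands to the complete printf
 * argument list.
 */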

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;
	struct mevent	*vsc_mevp;

	int		vsc_tapfd;
	struct nm_desc	*vsc_nmd;

	int		vsc_rx_ready;
	volatile int	resetting;	/* set and checked outside lock */

	uint64_t	vsc_features;	/* negotiated features */

	struct virtio_net_config vsc_config;

	pthread_mutex_t	rx_mtx;
	int		rx_in_progress;
	int		rx_vhdrlen;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	void (*pci_vtnet_rx)(struct pci_vtnet_softc *sc);
	void (*pci_vtnet_tx)(struct pci_vtnet_softc *sc, struct iovec *iov,
			     int iovcnt, int len);
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);

static struct virtio_consts vtnet_vi_consts = {
	"vtnet",		/* our name */
	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
	sizeof(struct virtio_net_config), /* config reg size */
	pci_vtnet_reset,	/* reset */
	NULL,			/* device-wide qnotify -- not used */
	pci_vtnet_cfgread,	/* read PCI config */
	pci_vtnet_cfgwrite,	/* write PCI config */
	pci_vtnet_neg_features,	/* apply negotiated features */
	VTNET_S_HOSTCAPS,	/* our capabilities */
};

/*
 * If the transmit thread is active then stall until it is done.
 */
static void
pci_vtnet_txwait(struct pci_vtnet_softc *sc)
{

	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * If the receive thread is active then stall until it is done.
 */
static void
pci_vtnet_rxwait(struct pci_vtnet_softc *sc)
{

	pthread_mutex_lock(&sc->rx_mtx);
	while (sc->rx_in_progress) {
		pthread_mutex_unlock(&sc->rx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->rx_mtx);
	}
	pthread_mutex_unlock(&sc->rx_mtx);
}

static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !\n"));

	sc->resetting = 1;

	/*
	 * Wait for the transmit and receive threads to finish their
	 * tasks.
	 */
	pci_vtnet_txwait(sc);
	pci_vtnet_rxwait(sc);

	sc->vsc_rx_ready = 0;
	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);

	/* now reset rings, MSI-X vectors, and negotiated capabilities */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
}

/*
 * Called to send a buffer chain out to the tap device
 */
static void
pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		 int len)
{
	static char pad[60]; /* all zero bytes */

	if (sc->vsc_tapfd == -1)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov.  It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) writev(sc->vsc_tapfd, iov, iovcnt);
}

/*
 * Called when there is read activity on the tap file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 * MP note: the dummybuf is only used for discarding frames, so there
 * is no need for it to be per-vtnet or locked.
 */
static uint8_t dummybuf[2048];

static __inline struct iovec *
rx_iov_trim(struct iovec *iov, int *niov, int tlen)
{
	struct iovec *riov;

	/* XXX short-cut: assume first segment is >= tlen */
	assert(iov[0].iov_len >= tlen);

	iov[0].iov_len -= tlen;
	if (iov[0].iov_len == 0) {
		assert(*niov > 1);
		*niov -= 1;
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen);
		riov = &iov[0];
	}

	return (riov);
}
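
/*
 * Worked example (illustrative): given iov = { {hdr, 12}, {data, 1500} }
 * and tlen = 12 (the rx header length), the first segment shrinks to zero,
 * so *niov drops from 2 to 1 and the returned riov points at iov[1]; the
 * header bytes at "hdr" are filled in separately by the caller.
 */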

static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;
	uint16_t idx;

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = readv(sc->vsc_tapfd, riov, n);

		if (len < 0 && errno == EWOULDBLOCK) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Return the unused chain and
			 * interrupt if needed/appropriate.
			 */
			vq_retchain(vq);
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, idx, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
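
/*
 * Note (illustrative): because each frame lands in a single descriptor
 * chain, vrh_bufs is always 1 here.  A device that split one frame across
 * N chains would instead store N in the first chain's header and release
 * all N chains to the guest.
 */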

static __inline int
pci_vtnet_netmap_writev(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int r, i;
	int len = 0;

	for (r = nmd->cur_tx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_TXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_tx_ring)
				r = nmd->first_tx_ring;
			if (r == nmd->cur_tx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);

		for (i = 0; i < iovcnt; i++) {
			if (len + iov[i].iov_len > 2048)
				break;
			memcpy(&buf[len], iov[i].iov_base, iov[i].iov_len);
			len += iov[i].iov_len;
		}
		ring->slot[cur].len = len;
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_tx_ring = r;
		ioctl(nmd->fd, NIOCTXSYNC, NULL);
		break;
	}

	return (len);
}
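
/*
 * Note (illustrative): the copy loop above assumes the default 2048-byte
 * netmap buffer size; any iovec segments that would overflow that bound
 * are silently dropped rather than split across slots.
 */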

static __inline int
pci_vtnet_netmap_readv(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int len = 0;
	int i = 0;
	int r;

	for (r = nmd->cur_rx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_RXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;
		size_t left;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_rx_ring)
				r = nmd->first_rx_ring;
			if (r == nmd->cur_rx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);
		left = ring->slot[cur].len;

		for (i = 0; i < iovcnt && left > 0; i++) {
			if (iov[i].iov_len > left)
				iov[i].iov_len = left;
			memcpy(iov[i].iov_base, &buf[len], iov[i].iov_len);
			len += iov[i].iov_len;
			left -= iov[i].iov_len;
		}
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_rx_ring = r;
		ioctl(nmd->fd, NIOCRXSYNC, NULL);
		break;
	}
	for (; i < iovcnt; i++)
		iov[i].iov_len = 0;

	return (len);
}

/*
 * Called to send a buffer chain out to the vale port
 */
static void
pci_vtnet_netmap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
		    int len)
{
	static char pad[60]; /* all zero bytes */

	if (sc->vsc_nmd == NULL)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov.  It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) pci_vtnet_netmap_writev(sc->vsc_nmd, iov, iovcnt);
}

static void
pci_vtnet_netmap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;
	uint16_t idx;

	/*
	 * Should never be called without a valid netmap descriptor
	 */
	assert(sc->vsc_nmd != NULL);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = pci_vtnet_netmap_readv(sc->vsc_nmd, riov, n);

		if (len == 0) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Return the unused chain and
			 * interrupt if needed/appropriate.
			 */
			vq_retchain(vq);
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, idx, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}

static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	sc->rx_in_progress = 1;
	sc->pci_vtnet_rx(sc);
	sc->rx_in_progress = 0;
	pthread_mutex_unlock(&sc->rx_mtx);
}

static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin
	 */
	if (sc->vsc_rx_ready == 0) {
		sc->vsc_rx_ready = 1;
		vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
	}
}

static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	int i, n;
	int plen, tlen;
	uint16_t idx;

	/*
	 * Obtain chain of descriptors.  The first one is
	 * really the header descriptor, so we need to sum
	 * up two lengths: packet length and transfer length.
	 */
	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
	assert(n >= 1 && n <= VTNET_MAXSEGS);
	plen = 0;
	tlen = iov[0].iov_len;
	for (i = 1; i < n; i++) {
		plen += iov[i].iov_len;
		tlen += iov[i].iov_len;
	}

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, n));
	sc->pci_vtnet_tx(sc, &iov[1], n - 1, plen);

	/* chain is processed, release it and set tlen */
	vq_relchain(vq, idx, tlen);
}
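
/*
 * Worked example (illustrative): for a chain made of a 10-byte virtio-net
 * header plus data segments of 1000 and 500 bytes, plen = 1500 (the frame
 * bytes handed to the backend) while tlen = 1510 (everything consumed from
 * the chain, which is what vq_relchain() reports back to the guest).
 */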

static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq->vq_used->vu_flags &= ~VRING_USED_F_NO_NOTIFY;
			mb();
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * descriptor is found.
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, 1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}
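
/*
 * Design note (illustrative): clearing VRING_USED_F_NO_NOTIFY before
 * sleeping re-enables guest kicks, and the mb() plus re-check of
 * vq_has_descs() closes the window where the guest could enqueue a packet
 * after the flag is cleared but before this thread blocks on tx_cond.
 */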

#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!\n\r"));
}
#endif

static int
pci_vtnet_parsemac(char *mac_str, uint8_t *mac_addr)
{
	struct ether_addr *ea;
	char *tmpstr;
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	tmpstr = strsep(&mac_str, "=");

	if ((mac_str != NULL) && (!strcmp(tmpstr, "mac"))) {
		ea = ether_aton(mac_str);

		if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
		    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
			fprintf(stderr, "Invalid MAC %s\n", mac_str);
			return (EINVAL);
		} else
			memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
	}

	return (0);
}
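
/*
 * Usage sketch: the option tail handed to this function looks like
 * "mac=58:9c:fc:00:12:34"; strsep() splits it at '=' so tmpstr is "mac"
 * and mac_str is the address itself.  Multicast addresses (odd first
 * octet) and the all-zero address are rejected.
 */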

static void
pci_vtnet_tap_setup(struct pci_vtnet_softc *sc, char *devname)
{
	char tbuf[80];
	int opt = 1;
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
#endif

	strcpy(tbuf, "/dev/");
	strlcat(tbuf, devname, sizeof(tbuf));

	sc->pci_vtnet_rx = pci_vtnet_tap_rx;
	sc->pci_vtnet_tx = pci_vtnet_tap_tx;

	sc->vsc_tapfd = open(tbuf, O_RDWR);
	if (sc->vsc_tapfd == -1) {
		WPRINTF(("open of tap device %s failed\n", tbuf));
		return;
	}

	/*
	 * Set non-blocking and register for read
	 * notifications with the event loop
	 */
	if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
		WPRINTF(("tap device O_NONBLOCK failed\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
	}

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
	if (cap_rights_limit(sc->vsc_tapfd, &rights) == -1 && errno != ENOSYS)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
	}
}

static void
pci_vtnet_netmap_setup(struct pci_vtnet_softc *sc, char *ifname)
{
	sc->pci_vtnet_rx = pci_vtnet_netmap_rx;
	sc->pci_vtnet_tx = pci_vtnet_netmap_tx;

	sc->vsc_nmd = nm_open(ifname, NULL, 0, 0);
	if (sc->vsc_nmd == NULL) {
		WPRINTF(("open of netmap device %s failed\n", ifname));
		return;
	}

	sc->vsc_mevp = mevent_add(sc->vsc_nmd->fd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		nm_close(sc->vsc_nmd);
		sc->vsc_nmd = NULL;
	}
}

static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	char *devname;
	char *vtopts;
	int mac_provided;

	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified
	 */
	mac_provided = 0;
	sc->vsc_tapfd = -1;
	sc->vsc_nmd = NULL;
	if (opts != NULL) {
		int err;

		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		if (vtopts != NULL) {
			err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac);
			if (err != 0) {
				free(devname);
				return (err);
			}
			mac_provided = 1;
		}

		if (strncmp(devname, "vale", 4) == 0)
			pci_vtnet_netmap_setup(sc, devname);
		if (strncmp(devname, "tap", 3) == 0 ||
		    strncmp(devname, "vmnet", 5) == 0)
			pci_vtnet_tap_setup(sc, devname);

		free(devname);
	}

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and dev name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);

		MD5Init(&mdctx);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);

		sc->vsc_config.mac[0] = 0x00;
		sc->vsc_config.mac[1] = 0xa0;
		sc->vsc_config.mac[2] = 0x98;
		sc->vsc_config.mac[3] = digest[0];
		sc->vsc_config.mac[4] = digest[1];
		sc->vsc_config.mac[5] = digest[2];
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is up if we managed to open tap device or vale port. */
	sc->vsc_config.status = (opts == NULL || sc->vsc_tapfd >= 0 ||
	    sc->vsc_nmd != NULL);

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}
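
/*
 * Usage sketch (assuming a host interface named "tap0" exists): the device
 * is instantiated from the bhyve command line, e.g.
 *
 *	bhyve -s 2:0,virtio-net,tap0,mac=58:9c:fc:00:00:01 ... vmname
 *
 * A "vale" port name (e.g. "vale0:1") selects the netmap backend instead;
 * omitting "mac=" falls back to the 00:a0:98 OUI derivation above.
 */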

static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < 6) {
		assert(offset + size <= 6);
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d\n\r", offset));
	}

	return (0);
}

static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (!(sc->vsc_features & VIRTIO_NET_F_MRG_RXBUF)) {
		sc->rx_merge = 0;
		/* non-merge rx header is 2 bytes shorter */
		sc->rx_vhdrlen -= 2;
	}
}
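
/*
 * Arithmetic note: sizeof(struct virtio_net_rxhdr) is 12, so rx_vhdrlen
 * becomes 10 when the guest declines VIRTIO_NET_F_MRG_RXBUF; the two bytes
 * dropped are exactly the trailing vrh_bufs field, which is only
 * meaningful with merged rx buffers.
 */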

struct pci_devemu pci_de_vnet = {
	.pe_emu =	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read
};
PCI_EMUL_SET(pci_de_vnet);