/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <machine/vmm_snapshot.h>
#include <net/ethernet.h>
#include <net/if.h> /* IFNAMSIZ */

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"
#include "net_utils.h"
#include "net_backends.h"
#include "iov.h"
#define VTNET_RINGSZ	1024

#define VTNET_MAXSEGS	256

#define VTNET_MAX_PKT_LEN	(65536 + 64)

#define VTNET_MIN_MTU	ETHERMIN
#define VTNET_MAX_MTU	65535
#define VTNET_S_HOSTCAPS      \
  ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | \
    VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)
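
/*
 * Note that VTNET_S_HOSTCAPS only covers what this frontend implements by
 * itself; backend-dependent capabilities are OR-ed in at init time,
 * roughly as follows (see pci_vtnet_init() below):
 *
 *	sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
 *	    netbe_get_cap(sc->vsc_be);
 */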
/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
	uint16_t max_virtqueue_pairs;
	uint16_t mtu;
} __packed;
#define VTNET_RXQ	0
#define VTNET_TXQ	1
#define VTNET_CTLQ	2	/* NB: not yet supported */

#define VTNET_MAXQ	3
static int pci_vtnet_debug;
#define DPRINTF(params) if (pci_vtnet_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params
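
/*
 * The double parentheses let a single macro argument carry a whole
 * printf-style argument list, e.g.:
 *
 *	DPRINTF(("vtnet: device reset requested !"));
 *	WPRINTF(("netbe_recv: expected %zd bytes, got %zd", plen, rlen));
 */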
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;

	net_backend_t	*vsc_be;

	bool    features_negotiated;	/* protected by rx_mtx */

	int		resetting;	/* protected by tx_mtx */

	uint64_t	vsc_features;	/* negotiated features */

	pthread_mutex_t	rx_mtx;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;
	pthread_t	tx_tid;

	unsigned int	vhdrlen;	/* frontend virtio-net header length */
	unsigned int	be_vhdrlen;	/* backend virtio-net header length */

	struct virtio_net_config vsc_config;
	struct virtio_consts vsc_consts;
};
static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);
#ifdef BHYVE_SNAPSHOT
static void pci_vtnet_pause(void *);
static void pci_vtnet_resume(void *);
static int pci_vtnet_snapshot(void *, struct vm_snapshot_meta *);
#endif
static struct virtio_consts vtnet_vi_consts = {
	"vtnet",		/* our name */
	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
	sizeof(struct virtio_net_config), /* config reg size */
	pci_vtnet_reset,	/* reset */
	NULL,			/* device-wide qnotify -- not used */
	pci_vtnet_cfgread,	/* read PCI config */
	pci_vtnet_cfgwrite,	/* write PCI config */
	pci_vtnet_neg_features,	/* apply negotiated features */
	VTNET_S_HOSTCAPS,	/* our capabilities */
#ifdef BHYVE_SNAPSHOT
	pci_vtnet_pause,	/* pause rx/tx threads */
	pci_vtnet_resume,	/* resume rx/tx threads */
	pci_vtnet_snapshot,	/* save / restore device state */
#endif
};
static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/*
	 * Make sure receive operation is disabled at least until we
	 * re-negotiate the features, since receive operation depends
	 * on the value of sc->rx_merge and the header length, which
	 * are both set in pci_vtnet_neg_features().
	 * Receive operation will be enabled again once the guest adds
	 * the first receive buffers and kicks us.
	 */
	sc->features_negotiated = false;
	netbe_rx_disable(sc->vsc_be);

	/* Set sc->resetting and give a chance to the TX thread to stop. */
	pthread_mutex_lock(&sc->tx_mtx);
	sc->resetting = 1;
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}

	/*
	 * Now reset rings, MSI-X vectors, and negotiated capabilities.
	 * Do that with the TX lock held, since we need to reset
	 * sc->resetting.
	 */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
	pthread_mutex_unlock(&sc->tx_mtx);
	pthread_mutex_unlock(&sc->rx_mtx);
}
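
/*
 * Lock ordering note: pci_vtnet_reset() (and pci_vtnet_pause() below)
 * acquires rx_mtx before tx_mtx. New code that needs both locks should
 * follow the same order to avoid deadlocks.
 */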
static __inline struct iovec *
iov_trim_hdr(struct iovec *iov, int *iovcnt, unsigned int hlen)
{
	struct iovec *riov;

	if (iov[0].iov_len < hlen) {
		/*
		 * Not enough header space in the first fragment.
		 * That's not ok for us.
		 */
		return (NULL);
	}

	iov[0].iov_len -= hlen;
	if (iov[0].iov_len == 0) {
		*iovcnt -= 1;
		if (*iovcnt == 0) {
			/*
			 * Only space for the header. That's not
			 * enough for us.
			 */
			return (NULL);
		}
		riov = &iov[1];
	} else {
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + hlen);
		riov = &iov[0];
	}

	return (riov);
}
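
/*
 * Worked example (hypothetical sizes): with hlen = 10 and a chain
 * iov[] = { {b0, 10}, {b1, 1500} }, the first fragment holds exactly the
 * header, so *iovcnt drops to 1 and &iov[1] is returned; with
 * iov[] = { {b0, 12}, ... }, iov[0].iov_base is instead advanced by 10
 * and iov[0].iov_len shrinks to 2.
 */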
struct virtio_mrg_rxbuf_info {
	uint16_t idx;
	uint16_t pad;
	uint32_t len;
};
static void
pci_vtnet_rx(struct pci_vtnet_softc *sc)
{
	int prepend_hdr_len = sc->vhdrlen - sc->be_vhdrlen;
	struct virtio_mrg_rxbuf_info info[VTNET_MAXSEGS];
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct vqueue_info *vq;
	struct vi_req req;

	vq = &sc->vsc_queues[VTNET_RXQ];

	/* Features must be negotiated */
	if (!sc->features_negotiated) {
		return;
	}

	for (;;) {
		struct virtio_net_rxhdr *hdr;
		struct iovec *riov;
		uint32_t riov_bytes, ulen;
		int riov_len, n_chains;
		ssize_t rlen, plen;

		plen = netbe_peek_recvlen(sc->vsc_be);
		if (plen <= 0) {
			/*
			 * No more packets (plen == 0), or backend errored
			 * (plen < 0). Interrupt if needed and stop.
			 */
			vq_endchains(vq, /*used_all_avail=*/0);
			return;
		}
		plen += prepend_hdr_len;

		/*
		 * Get a descriptor chain to store the next ingress
		 * packet. In case of mergeable rx buffers, get as
		 * many chains as necessary in order to make room
		 * for plen bytes.
		 */
		riov_bytes = 0;
		riov_len = 0;
		riov = iov;
		n_chains = 0;
		do {
			int n = vq_getchain(vq, riov, VTNET_MAXSEGS - riov_len,
			    &req);
			info[n_chains].idx = req.idx;

			if (n == 0) {
				/*
				 * No rx buffers. Enable RX kicks and double
				 * check.
				 */
				vq_kick_enable(vq);
				if (!vq_has_descs(vq)) {
					/*
					 * Still no buffers. Return the unused
					 * chains (if any), interrupt if needed
					 * (including for NOTIFY_ON_EMPTY), and
					 * disable the backend until the next
					 * kick.
					 */
					vq_retchains(vq, n_chains);
					vq_endchains(vq, /*used_all_avail=*/1);
					netbe_rx_disable(sc->vsc_be);
					return;
				}

				/* More rx buffers found, so keep going. */
				vq_kick_disable(vq);
				continue;
			}
			assert(n >= 1 && riov_len + n <= VTNET_MAXSEGS);
			riov_len += n;
			if (!sc->rx_merge) {
				n_chains = 1;
				break;
			}
			info[n_chains].len = (uint32_t)count_iov(riov, n);
			riov_bytes += info[n_chains].len;
			riov += n;
			n_chains++;
		} while (riov_bytes < plen && riov_len < VTNET_MAXSEGS);

		riov = iov;
		hdr = riov[0].iov_base;
		if (prepend_hdr_len > 0) {
			/*
			 * The frontend uses a virtio-net header, but the
			 * backend does not. We need to prepend a zeroed
			 * header.
			 */
			riov = iov_trim_hdr(riov, &riov_len, prepend_hdr_len);
			if (riov == NULL) {
				/*
				 * The first collected chain is nonsensical,
				 * as it is not even enough to store the
				 * virtio-net header. Just drop it.
				 */
				vq_relchain(vq, info[0].idx, 0);
				vq_retchains(vq, n_chains - 1);
				continue;
			}
			memset(hdr, 0, prepend_hdr_len);
		}

		rlen = netbe_recv(sc->vsc_be, riov, riov_len);
		if (rlen != plen - prepend_hdr_len) {
			/*
			 * If this happens it means there is something
			 * wrong with the backend (e.g., some other
			 * process is stealing our packets).
			 */
			WPRINTF(("netbe_recv: expected %zd bytes, "
			    "got %zd", plen - prepend_hdr_len, rlen));
			vq_retchains(vq, n_chains);
			continue;
		}

		ulen = (uint32_t)plen;

		/*
		 * Publish the used buffers to the guest, reporting the
		 * number of bytes that we wrote.
		 */
		if (!sc->rx_merge) {
			vq_relchain(vq, info[0].idx, ulen);
		} else {
			uint32_t iolen;
			int i = 0;

			do {
				iolen = info[i].len;
				if (iolen > ulen) {
					iolen = ulen;
				}
				vq_relchain_prepare(vq, info[i].idx, iolen);
				ulen -= iolen;
				i++;
			} while (ulen > 0);

			hdr->vrh_bufs = i;
			vq_relchain_publish(vq);
			assert(i == n_chains);
		}
	}
}
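
/*
 * Worked example for the mergeable-buffers path (hypothetical sizes):
 * for a packet of plen = 3000 bytes (virtio-net header included) and
 * guest chains of 2048 bytes each, the collection loop above gathers two
 * chains (info[0].len = info[1].len = 2048, riov_bytes = 4096 >= plen).
 * After a successful netbe_recv(), chain 0 is published with iolen 2048,
 * chain 1 with the remaining 952 bytes, and hdr->vrh_bufs is set to 2 so
 * the guest knows how many chains make up this packet.
 */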
/*
 * Called when there is read activity on the backend file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 */
static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	pci_vtnet_rx(sc);
	pthread_mutex_unlock(&sc->rx_mtx);
}
/* Called on RX kick. */
static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin.
	 * Enable RX only if features are negotiated.
	 */
	pthread_mutex_lock(&sc->rx_mtx);
	if (!sc->features_negotiated) {
		pthread_mutex_unlock(&sc->rx_mtx);
		return;
	}

	vq_kick_disable(vq);
	netbe_rx_enable(sc->vsc_be);
	pthread_mutex_unlock(&sc->rx_mtx);
}
/* TX virtqueue processing, called by the TX thread. */
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	struct iovec *siov = iov;
	struct vi_req req;
	ssize_t len;
	int n;

	/*
	 * Obtain chain of descriptors. The first descriptor also
	 * contains the virtio-net header.
	 */
	n = vq_getchain(vq, iov, VTNET_MAXSEGS, &req);
	assert(n >= 1 && n <= VTNET_MAXSEGS);

	if (sc->vhdrlen != sc->be_vhdrlen) {
		/*
		 * The frontend uses a virtio-net header, but the backend
		 * does not. We simply strip the header and ignore it, as
		 * it should be zero-filled.
		 */
		siov = iov_trim_hdr(siov, &n, sc->vhdrlen);
	}

	if (siov == NULL) {
		/* The chain is nonsensical. Just drop it. */
		len = 0;
	} else {
		len = netbe_send(sc->vsc_be, siov, n);
		if (len < 0) {
			/*
			 * If send failed, report that 0 bytes
			 * were read.
			 */
			len = 0;
		}
	}

	/*
	 * Return the processed chain to the guest, reporting
	 * the number of bytes that we read.
	 */
	vq_relchain(vq, req.idx, len);
}
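
/*
 * Design note: backend errors are not propagated to the guest. A failed
 * netbe_send() still consumes the chain, exactly as if the packet had
 * been dropped on the wire, which matches the usual Ethernet semantics.
 */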
/* Called on TX kick. */
static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq_kick_disable(vq);
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}
/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq_kick_enable(vq);
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq_kick_disable(vq);
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, /*used_all_avail=*/1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}
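
/*
 * Sketch of the kick/thread handshake implemented above:
 * pci_vtnet_ping_txq() disables further TX kicks and signals tx_cond;
 * this thread clears tx_in_progress only while blocked on the condition
 * variable, so pci_vtnet_reset() can wait for tx_in_progress to reach
 * zero (under tx_mtx) to know that the thread is parked.
 */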
#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!"));
}
#endif
static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_vtnet_softc *sc;
	const char *value;
	char tname[MAXCOMLEN + 1];
	unsigned long mtu = ETHERMTU;
	int err;

	/*
	 * Allocate data structures for further virtio initializations.
	 * sc also contains a copy of vtnet_vi_consts, since capabilities
	 * change depending on the backend.
	 */
	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	sc->vsc_consts = vtnet_vi_consts;
	pthread_mutex_init(&sc->vsc_mtx, NULL);

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	value = get_config_value_node(nvl, "mac");
	if (value != NULL) {
		err = net_parsemac(value, sc->vsc_config.mac);
		if (err) {
			free(sc);
			return (err);
		}
	} else
		net_genmac(pi, sc->vsc_config.mac);

	value = get_config_value_node(nvl, "mtu");
	if (value != NULL) {
		err = net_parsemtu(value, &mtu);
		if (err) {
			free(sc);
			return (err);
		}

		if (mtu < VTNET_MIN_MTU || mtu > VTNET_MAX_MTU) {
			err = EINVAL;
			errno = EINVAL;
			free(sc);
			return (err);
		}
		sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MTU;
	}
	sc->vsc_config.mtu = mtu;

	/* Permit interfaces without a configured backend. */
	if (get_config_value_node(nvl, "backend") != NULL) {
		err = netbe_init(&sc->vsc_be, nvl, pci_vtnet_rx_callback, sc);
		if (err) {
			free(sc);
			return (err);
		}
	}

	sc->vsc_consts.vc_hv_caps |= VIRTIO_NET_F_MRG_RXBUF |
	    netbe_get_cap(sc->vsc_be);

	/*
	 * Since we do not actually support multiqueue,
	 * set the maximum virtqueue pairs to 1.
	 */
	sc->vsc_config.max_virtqueue_pairs = 1;

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is always up. */
	sc->vsc_config.status = 1;

	vi_softc_linkup(&sc->vsc_vs, &sc->vsc_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) {
		free(sc);
		return (1);
	}

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 0;
	sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
	 */
	sc->tx_in_progress = 0;
	pthread_mutex_init(&sc->tx_mtx, NULL);
	pthread_cond_init(&sc->tx_cond, NULL);
	pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc);
	snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot,
	    pi->pi_func);
	pthread_set_name_np(sc->tx_tid, tname);

	return (0);
}
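
/*
 * Configuration example (assuming a tap(4) backend on the host; the
 * exact option spelling depends on the bhyve version in use):
 *
 *	bhyve ... -s 2:0,virtio-net,tap0,mac=58:9c:fc:00:00:01 ...
 *
 * "mac" and "mtu" are the optional config-node values parsed above; when
 * "mac" is omitted, net_genmac() derives a stable address from the
 * device's PCI address.
 */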
static int
pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	if (offset < (int)sizeof(sc->vsc_config.mac)) {
		assert(offset + size <= (int)sizeof(sc->vsc_config.mac));
		/*
		 * The driver is allowed to change the MAC address
		 */
		ptr = &sc->vsc_config.mac[offset];
		memcpy(ptr, &value, size);
	} else {
		/* silently ignore other writes */
		DPRINTF(("vtnet: write to readonly reg %d", offset));
	}

	return (0);
}
static int
pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtnet_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vsc_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}
static void
pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtnet_softc *sc = vsc;

	sc->vsc_features = negotiated_features;

	if (negotiated_features & VIRTIO_NET_F_MRG_RXBUF) {
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr);
		sc->rx_merge = 1;
	} else {
		/*
		 * Without mergeable rx buffers, virtio-net header is 2
		 * bytes shorter than sizeof(struct virtio_net_rxhdr).
		 */
		sc->vhdrlen = sizeof(struct virtio_net_rxhdr) - 2;
		sc->rx_merge = 0;
	}

	/* Tell the backend to enable some capabilities it has advertised. */
	netbe_set_cap(sc->vsc_be, negotiated_features, sc->vhdrlen);
	sc->be_vhdrlen = netbe_get_vnet_hdr_len(sc->vsc_be);
	assert(sc->be_vhdrlen == 0 || sc->be_vhdrlen == sc->vhdrlen);

	pthread_mutex_lock(&sc->rx_mtx);
	sc->features_negotiated = true;
	pthread_mutex_unlock(&sc->rx_mtx);
}
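
/*
 * Layout note: the trailing 16-bit vrh_bufs field of struct
 * virtio_net_rxhdr (the count of merged buffers, set in pci_vtnet_rx())
 * is only present on the wire when VIRTIO_NET_F_MRG_RXBUF has been
 * negotiated, which is where the "- 2" above comes from.
 */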
#ifdef BHYVE_SNAPSHOT
static void
pci_vtnet_pause(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device pause requested !\n"));

	/* Acquire the RX lock to block RX processing. */
	pthread_mutex_lock(&sc->rx_mtx);

	/* Wait for the transmit thread to finish its processing. */
	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
}
static void
pci_vtnet_resume(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device resume requested !\n"));

	pthread_mutex_unlock(&sc->tx_mtx);
	/* The RX lock should have been acquired in vtnet_pause. */
	pthread_mutex_unlock(&sc->rx_mtx);
}
static int
pci_vtnet_snapshot(void *vsc, struct vm_snapshot_meta *meta)
{
	int ret;
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device snapshot requested !\n"));

	/*
	 * Queues and consts should have been saved by the more generic
	 * vi_pci_snapshot function. We need to save only our features and
	 * config.
	 */

	SNAPSHOT_VAR_OR_LEAVE(sc->vsc_features, meta, ret, done);

	/* Force reapply negotiated features at restore time */
	if (meta->op == VM_SNAPSHOT_RESTORE) {
		pci_vtnet_neg_features(sc, sc->vsc_features);
		netbe_rx_enable(sc->vsc_be);
	}

	SNAPSHOT_VAR_OR_LEAVE(sc->vsc_config, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->rx_merge, meta, ret, done);

	SNAPSHOT_VAR_OR_LEAVE(sc->vhdrlen, meta, ret, done);
	SNAPSHOT_VAR_OR_LEAVE(sc->be_vhdrlen, meta, ret, done);

done:
	return (ret);
}
#endif
static const struct pci_devemu pci_de_vnet = {
	.pe_emu = 	"virtio-net",
	.pe_init =	pci_vtnet_init,
	.pe_legacy_config = netbe_legacy_config,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read,
#ifdef BHYVE_SNAPSHOT
	.pe_snapshot =	vi_pci_snapshot,
	.pe_pause =	vi_pci_pause,
	.pe_resume =	vi_pci_resume,
#endif
};
PCI_EMUL_SET(pci_de_vnet);
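
/*
 * PCI_EMUL_SET() registers pci_de_vnet in a linker set (see
 * <sys/linker_set.h>), which bhyve's PCI emulation walks at startup to
 * discover the available device models, including this "virtio-net" one.
 */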