/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/uma.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>

#include "virtio_if.h"
static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct vtnet_softc *);
static void	vtnet_config_change_task(void *, int);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

#ifdef DEVICE_POLLING
static poll_handler_t vtnet_poll;
#endif

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *, int);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *);
static void	vtnet_start(struct ifnet *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *, int);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static void	vtnet_rx_filter(struct vtnet_softc *sc);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);

/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay is that
 * the m_free'ing of transmitted mbufs may be delayed until
 * the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION
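
/*
 * The Tx path allocates one of these headers for every outbound mbuf
 * from this UMA zone; vtnet_modevent() creates the zone at MOD_LOAD
 * and destroys it at MOD_UNLOAD.
 */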
static uma_zone_t vtnet_tx_header_zone;
static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},

	{ 0, NULL }
};
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	{ 0, 0 }
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
		    sizeof(struct vtnet_tx_header),
		    NULL, NULL, NULL, NULL, 0, 0);
		break;
	case MOD_QUIESCE:
	case MOD_UNLOAD:
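		/*
		 * Any Tx headers still allocated mean an interface is
		 * still using the zone, so refuse to quiesce or unload.
		 */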
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		else if (type == MOD_UNLOAD) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
static int
vtnet_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int tx_size, error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	VTNET_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_MTX(sc), 0);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);

	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) {
			sc->vtnet_mac_filter = malloc(
			    sizeof(struct vtnet_mac_filter), M_DEVBUF,
			    M_NOWAIT | M_ZERO);
			if (sc->vtnet_mac_filter == NULL) {
				device_printf(dev,
				    "cannot allocate mac filter table\n");
				error = ENOMEM;
				goto fail;
			}

			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		}

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
	}

	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;

	tx_size = virtqueue_size(sc->vtnet_tx_vq);
	sc->vtnet_tx_size = tx_size;
	IFQ_SET_MAXLEN(&ifp->if_snd, tx_size - 1);
	ifp->if_snd.ifq_drv_maxlen = tx_size - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		ifp->if_capabilities |= IFCAP_LINKSTATE;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	TASK_INIT(&sc->vtnet_rx_intr_task, 0, vtnet_rx_intr_task, sc);
	TASK_INIT(&sc->vtnet_tx_intr_task, 0, vtnet_tx_intr_task, sc);
	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	sc->vtnet_tq = taskqueue_create_fast("vtnet_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtnet_tq);
	if (sc->vtnet_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	taskqueue_start_threads(&sc->vtnet_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		taskqueue_free(sc->vtnet_tq);
		sc->vtnet_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		VTNET_LOCK(sc);
		if (vtnet_set_promisc(sc, 0) != 0) {
			ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		VTNET_UNLOCK(sc);
	} else
		ifp->if_flags |= IFF_PROMISC;

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}
static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	KASSERT(mtx_initialized(VTNET_MTX(sc)),
	    ("vtnet mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		VTNET_LOCK(sc);
		vtnet_stop(sc);
		VTNET_UNLOCK(sc);

		callout_drain(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_fast, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_tq != NULL) {
		taskqueue_drain(sc->vtnet_tq, &sc->vtnet_rx_intr_task);
		taskqueue_drain(sc->vtnet_tq, &sc->vtnet_tx_intr_task);
		taskqueue_free(sc->vtnet_tq);
		sc->vtnet_tq = NULL;
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (sc->vtnet_mac_filter != NULL) {
		free(sc->vtnet_mac_filter, M_DEVBUF);
		sc->vtnet_mac_filter = NULL;
	}

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	ifmedia_removeall(&sc->vtnet_media);
	VTNET_LOCK_DESTROY(sc);

	return (0);
}
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_UNLOCK(sc);

	return (0);
}
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	VTNET_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_UNLOCK(sc);

	return (0);
}
static int
vtnet_shutdown(device_t dev)
{

	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue_fast(taskqueue_fast, &sc->vtnet_cfgchg_task);

	return (1);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding
	 * checksum offload feature is also negotiated.
	 */
	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
#ifdef VTNET_TX_INTR_MODERATION
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
#endif
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0 &&
	    virtio_with_feature(dev, VTNET_LRO_FEATURES)) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the vtnet_rx_header. This requires up to 34 descriptors
		 * when using MCLBYTES clusters. If we do not have indirect
		 * descriptors, LRO is disabled since the virtqueue will not
		 * be able to contain very many receive buffers.
		 */
		if (virtio_with_feature(dev,
		    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
			device_printf(dev,
			    "LRO disabled due to lack of both mergeable "
			    "buffers and indirect descriptors\n");

			sc->vtnet_features = virtio_negotiate_features(dev,
			    features & ~VTNET_LRO_FEATURES);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs, rxsegs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else
		rxsegs = 0;

	VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs = 3;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		virtio_read_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	} else {
		/* Generate random locally administered unicast address. */
		sc->vtnet_hwaddr[0] = 0xB2;
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);

		vtnet_set_hwaddr(sc);
	}
}
static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	virtio_write_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}
static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	if ((ifp->if_capenable & IFCAP_LINKSTATE) == 0)
		return (1);

	status = virtio_read_dev_config_2(dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}
static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	int link;

	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if_link_state_change(ifp, LINK_STATE_UP);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vtnet_start_locked(ifp);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif
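
	/*
	 * The timer is armed by vtnet_start_locked() whenever frames are
	 * enqueued and cleared by vtnet_txeof() once the Tx virtqueue
	 * drains. Note the side effect: the decrement below only falls
	 * through to the reset when the timer expires.
	 */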
	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vtnet_init_locked(sc);
}
static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	VTNET_LOCK(sc);
	vtnet_update_link_status(sc);
	VTNET_UNLOCK(sc);
}
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			VTNET_LOCK(sc);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			VTNET_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VTNET_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		VTNET_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTNET_LOCK(sc);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING))
			vtnet_rx_filter_mac(sc);
		VTNET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		reinit = 0;

#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vtnet_poll, ifp);
				if (error)
					break;

				VTNET_LOCK(sc);
				vtnet_disable_rx_intr(sc);
				vtnet_disable_tx_intr(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				VTNET_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);

				/* Enable interrupts even in error case. */
				VTNET_LOCK(sc);
				vtnet_enable_tx_intr(sc);
				vtnet_enable_rx_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VTNET_UNLOCK(sc);
			}
		}
#endif
		VTNET_LOCK(sc);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vtnet_init_locked(sc);
		}
		VLAN_CAPABILITIES(ifp);

		VTNET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VTNET_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}
static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}
static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}
static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}
static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{

	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}
#ifdef DEVICE_POLLING
static int
vtnet_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vtnet_softc *sc;
	int rx_done;

	sc = ifp->if_softc;
	rx_done = 0;

	VTNET_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (cmd == POLL_AND_CHECK_STATUS)
			vtnet_update_link_status(sc);

		if (virtqueue_nused(sc->vtnet_rx_vq) > 0)
			vtnet_rxeof(sc, count, &rx_done);

		vtnet_txeof(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vtnet_start_locked(ifp);
	}
	VTNET_UNLOCK(sc);

	return (rx_done);
}
#endif /* DEVICE_POLLING */
static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 1; i < nbufs; i++) {
			m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}
static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ||
	    m->m_next == NULL, ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued, so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}
static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}
static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}
static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}
static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ||
	    m->m_next == NULL, ("chained Rx mbuf without LRO_NOMRG"));

	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}
static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
 * Two variants of vtnet_rx_csum() follow; only one may be compiled in.
 * This guard selects the alternative, offset-based version further below.
 */
#ifdef notyet
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct udphdr *udp;
	int ip_offset, csum_start, csum_offset, hlen;
	uint16_t eth_type;
	uint8_t ip_proto;

	/*
	 * Convert the VirtIO checksum interface to FreeBSD's interface.
	 * The host only provides us with the offset at which to start
	 * checksumming, and the offset from that to place the completed
	 * checksum. While this maps well with how Linux does checksums,
	 * for FreeBSD, we must parse the received packet in order to set
	 * the appropriate CSUM_* flags.
	 */

	/*
	 * Every mbuf added to the receive virtqueue is always at least
	 * MCLBYTES big, so assume something is amiss if the first mbuf
	 * does not contain both the Ethernet and protocol headers.
	 */
	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset)
			return (1);
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip))
			return (1);

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		/* Sanity check the IP header. */
		if (ip->ip_v != IPVERSION)
			return (1);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			return (1);
		if (ntohs(ip->ip_len) < hlen)
			return (1);
		if (ntohs(ip->ip_len) != (m->m_pkthdr.len - ip_offset))
			return (1);

		ip_proto = ip->ip_p;
		csum_start = ip_offset + hlen;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr))
			return (1);

		/*
		 * XXX FreeBSD does not handle any IPv6 checksum offloading
		 * at the moment.
		 */

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/* XXX Assume no extension headers are present. */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Assume checksum begins right after the IP header. */
	if (hdr->csum_start != csum_start) {
		sc->vtnet_stats.rx_csum_bad_start++;
		return (1);
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, th_sum);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, uh_sum);
		break;
	case IPPROTO_SCTP:
		csum_offset = offsetof(struct sctphdr, checksum);
		break;
	default:
		sc->vtnet_stats.rx_csum_bad_ipproto++;
		return (1);
	}

	if (hdr->csum_offset != csum_offset) {
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	/*
	 * The IP header checksum is almost certainly valid but I'm
	 * uncertain if that is guaranteed.
	 *
	 * m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
	 */

	switch (ip_proto) {
	case IPPROTO_UDP:
		if (m->m_len < csum_start + sizeof(struct udphdr))
			return (1);

		udp = (struct udphdr *)(mtod(m, uint8_t *) + csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case IPPROTO_TCP:
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case IPPROTO_SCTP:
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}
#endif
/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;
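
	/*
	 * csum_start + csum_offset locates the end of the checksum field;
	 * the frame must be at least this long, with the bytes resident
	 * in the first mbuf, for the header inspection below.
	 */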
	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case offsetof(struct sctphdr, checksum):
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}
static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;
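
	/*
	 * The host used nbufs descriptors for this frame. The first mbuf
	 * was already dequeued by vtnet_rxeof(), so collect the remaining
	 * nbufs - 1 mbufs and append them to m_head.
	 */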
	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len > len)
			m->m_len = len;

		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	VTNET_LOCK_ASSERT(sc);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		VTNET_UNLOCK(sc);
		rx_npkts++;
		(*ifp->if_input)(ifp, m);
		VTNET_LOCK(sc);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}
static void
vtnet_rx_intr_task(void *arg, int pending)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VTNET_UNLOCK(sc);
		return;
	}
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
		VTNET_UNLOCK(sc);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

	VTNET_UNLOCK(sc);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		taskqueue_enqueue_fast(sc->vtnet_tq,
		    &sc->vtnet_rx_intr_task);
	}
}
static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_rx_intr_task);

	return (1);
}
static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	VTNET_LOCK_ASSERT(sc);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	if (deq > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}
static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;
	M_ASSERTPKTHDR(m);

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
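		/*
		 * hdr_len covers every header the host must replicate in
		 * each segment: Ethernet, IP, and TCP including options.
		 */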
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->flags |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}
static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int collapsed, error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;
	collapsed = 0;

	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("cannot add header to sglist"));
again:
	error = sglist_append_mbuf(&sg, m);
	if (error) {
		if (collapsed)
			goto fail;

		m = m_collapse(m, M_DONTWAIT, VTNET_MAX_TX_SEGS - 1);
		if (m == NULL)
			goto fail;

		*m_head = m;
		collapsed = 1;
		goto again;
	}

	txhdr->vth_mbuf = m;

	return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));

fail:
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}
static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	m = *m_head;

	txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
	if (txhdr == NULL) {
		m_freem(m);
		*m_head = NULL;
		return (ENOMEM);
	}

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;

	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);

fail:
	if (error)
		uma_zfree(vtnet_tx_header_zone, txhdr);

	return (error);
}
static void
vtnet_start(struct ifnet *ifp)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	VTNET_LOCK(sc);
	vtnet_start_locked(ifp);
	VTNET_UNLOCK(sc);
}
static void
vtnet_start_locked(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	VTNET_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0))
		return;
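
	/*
	 * With VTNET_TX_INTR_MODERATION, Tx completion interrupts stay
	 * disabled; reclaim completed entries here once the virtqueue
	 * is at least half full.
	 */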
#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (virtqueue_full(vq)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq);
		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}
static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	VTNET_LOCK_ASSERT(sc);
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_rx_vq);
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	vtnet_watchdog(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}
static void
vtnet_tx_intr_task(void *arg, int pending)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VTNET_UNLOCK(sc);
		return;
	}
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
		VTNET_UNLOCK(sc);
		return;
	}

	vtnet_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vtnet_start_locked(ifp);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
		VTNET_UNLOCK(sc);
		taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task);
		return;
	}

	VTNET_UNLOCK(sc);
}
static int
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_tx_intr(sc);
	taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task);

	return (1);
}
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter. Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}
static int
vtnet_reinit(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	uint64_t features;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */
	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	return (virtio_reinit(sc->vtnet_dev, features));
}
static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		vtnet_disable_rx_intr(sc);
		vtnet_disable_tx_intr(sc);
	} else
#endif
	{
		vtnet_enable_rx_intr(sc);
		vtnet_enable_tx_intr(sc);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}
static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	VTNET_LOCK(sc);
	vtnet_init_locked(sc);
	VTNET_UNLOCK(sc);
}
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the VTNET_MTX leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues. We do not support sharing both a Vq and config
	 * changed notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}
static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct virtio_net_ctrl_hdr hdr;
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t onoff, ack;
	int error;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
		return (ENOTSUP);

	error = 0;

	hdr.class = VIRTIO_NET_CTRL_RX;
	hdr.cmd = cmd;
	onoff = !!on;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding Rx filter message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr;
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	filter = sc->vtnet_mac_filter;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;
	error = 0;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	/* Unicast MAC addresses: */
	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	if_addr_runlock(ifp);

	if (ucnt >= VTNET_MAX_MAC_ENTRIES) {
		promisc = 1;
		filter->vmf_unicast.nentries = 0;

		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt >= VTNET_MAX_MAC_ENTRIES) {
		allmulti = 1;
		filter->vmf_multicast.nentries = 0;

		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc && allmulti)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("error adding MAC filtering message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc)
		if (vtnet_set_promisc(sc, 1) != 0)
			if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti)
		if (vtnet_set_allmulti(sc, 1) != 0)
			if_printf(ifp, "cannot enable all-multicast mode\n");
}
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct virtio_net_ctrl_hdr hdr;
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;

	hdr.class = VIRTIO_NET_CTRL_VLAN;
	hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	ack = VIRTIO_NET_ERR;
	error = 0;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding VLAN control message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	device_t dev;
	uint32_t w, mask;
	uint16_t tag;
	int i, nvlans, error;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	dev = sc->vtnet_dev;
	nvlans = sc->vtnet_nvlans;
	error = 0;

	/* Enable filtering for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
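		/*
		 * Walk the set bits of the 32-bit shadow word; bit j of
		 * word i corresponds to VLAN ID i * 32 + j.
		 */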
		for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) {
			if ((w & mask) != 0) {
				w &= ~mask;
				nvlans--;
				if (vtnet_exec_vlan_filter(sc, 1, tag) != 0)
					error++;
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
	if (error)
		device_printf(dev, "cannot restore VLAN filter table\n");
}
static void
vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	if ((tag == 0) || (tag > 4095))
		return;

	ifp = sc->vtnet_ifp;
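	/* One bit per VLAN ID: idx selects the 32-bit word, bit the position. */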
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	VTNET_LOCK(sc);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		if (vtnet_exec_vlan_filter(sc, add, tag) != 0) {
			device_printf(sc->vtnet_dev,
			    "cannot %s VLAN %d %s the host filter table\n",
			    add ? "add" : "remove", tag,
			    add ? "to" : "from");
		}
	}

	VTNET_UNLOCK(sc);
}
static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_set_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_set_vlan_filter(arg, 0, tag);
}
static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}
static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	VTNET_LOCK(sc);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
	VTNET_UNLOCK(sc);
}
static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start",
	    CTLFLAG_RD, &stats->rx_csum_bad_start,
	    "Received checksum offloaded buffer with incorrect start offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}
static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{

	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{

#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_tx_vq);
}