/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>

#include <vm/uma.h>

#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/pfil.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"

#include "opt_inet.h"
#include "opt_inet6.h"
static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_attach_completed(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static void	vtnet_setup_features(struct vtnet_softc *);
static int	vtnet_init_rxq(struct vtnet_softc *, int);
static int	vtnet_init_txq(struct vtnet_softc *, int);
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
static void	vtnet_free_rx_filters(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);

static int	vtnet_rxq_populate(struct vtnet_rxq *);
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
		vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
		    struct mbuf *, int);
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxq_eof(struct vtnet_rxq *);
static void	vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
static void	vtnet_rx_vq_intr(void *);
static void	vtnet_rxq_tq_intr(void *, int);

static int	vtnet_txq_below_threshold(struct vtnet_txq *);
static int	vtnet_txq_notify(struct vtnet_txq *);
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
		    int *, int *, int *);
static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
		    int, struct virtio_net_hdr *);
static struct mbuf *
		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
#ifdef VTNET_LEGACY_TX
static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
static void	vtnet_start(struct ifnet *);
#else
static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vtnet_txq_tq_deferred(void *, int);
#endif
static void	vtnet_txq_start(struct vtnet_txq *);
static void	vtnet_txq_tq_intr(void *, int);
static int	vtnet_txq_eof(struct vtnet_txq *);
static void	vtnet_tx_vq_intr(void *);
static void	vtnet_tx_start_all(struct vtnet_softc *);

#ifndef VTNET_LEGACY_TX
static void	vtnet_qflush(struct ifnet *);
#endif

static int	vtnet_watchdog(struct vtnet_txq *);
static void	vtnet_accum_stats(struct vtnet_softc *,
		    struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void	vtnet_tick(void *);

static void	vtnet_start_taskqueues(struct vtnet_softc *);
static void	vtnet_free_taskqueues(struct vtnet_softc *);
static void	vtnet_drain_taskqueues(struct vtnet_softc *);

static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void	vtnet_stop_rendezvous(struct vtnet_softc *);
static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_rx_filters(struct vtnet_softc *);
static int	vtnet_init_rx_queues(struct vtnet_softc *);
static int	vtnet_init_tx_queues(struct vtnet_softc *);
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *, int);
static void	vtnet_init(void *);

static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
static void	vtnet_rx_filter(struct vtnet_softc *);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);

static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_rxq *);
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_txq *);
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void	vtnet_setup_sysctl(struct vtnet_softc *);

static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int	vtnet_txq_enable_intr(struct vtnet_txq *);
static void	vtnet_txq_disable_intr(struct vtnet_txq *);
static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_interrupts(struct vtnet_softc *);
static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_interrupts(struct vtnet_softc *);

static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);
DEBUGNET_DEFINE(vtnet);

static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VirtIO Net driver parameters");
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
    0, "Disables TCP Segmentation Offload");
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
    0, "Disables TCP Large Receive Offload");
static int vtnet_mq_disable = 0;
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
    0, "Disables multiqueue support");
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Sets the maximum number of multiqueue pairs");
static int vtnet_rx_process_limit = 512;
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Limits the number of RX segments processed in a single pass");
static uma_zone_t vtnet_tx_header_zone;

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM, "TxChecksum" },
	{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
	{ VIRTIO_NET_F_MAC, "MacAddress" },
	{ VIRTIO_NET_F_GSO, "TxAllGSO" },
	{ VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
	{ VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
	{ VIRTIO_NET_F_GUEST_ECN, "RxECN" },
	{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
	{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
	{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
	{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
	{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
	{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
	{ VIRTIO_NET_F_STATUS, "Status" },
	{ VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
	{ VIRTIO_NET_F_CTRL_RX, "RxMode" },
	{ VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
	{ VIRTIO_NET_F_MQ, "Multiqueue" },
	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },

	{ 0, NULL }
};
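/*
 * The table above only maps feature bits to human-readable names; it is
 * handed to virtio_set_feature_desc() in vtnet_attach() so the negotiated
 * feature set can be pretty-printed. It has no effect on negotiation
 * itself.
 */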
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe, vtnet_probe),
	DEVMETHOD(device_attach, vtnet_attach),
	DEVMETHOD(device_detach, vtnet_detach),
	DEVMETHOD(device_suspend, vtnet_suspend),
	DEVMETHOD(device_resume, vtnet_resume),
	DEVMETHOD(device_shutdown, vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif /* DEV_NETMAP */

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

VIRTIO_SIMPLE_PNPTABLE(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, vtnet);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, vtnet);
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error = 0;
	static int loaded = 0;

	switch (type) {
	case MOD_LOAD:
		if (loaded++ == 0) {
			vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
			    sizeof(struct vtnet_tx_header),
			    NULL, NULL, NULL, NULL, 0, 0);
#ifdef DEBUGNET
			/*
			 * We need to allocate from this zone in the transmit path, so ensure
			 * that we have at least one item per header available.
			 * XXX add a separate zone like we do for mbufs? otherwise we may alloc
			 * buffers which cannot be used.
			 */
			uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
			uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
#endif
		}
		break;
	case MOD_QUIESCE:
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		break;
	case MOD_UNLOAD:
		if (--loaded == 0) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
static int
vtnet_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, vtnet));
}
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);

	VTNET_CORE_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);

	vtnet_setup_sysctl(sc);
	vtnet_setup_features(sc);

	error = vtnet_alloc_rx_filters(sc);
	if (error) {
		device_printf(dev, "cannot allocate Rx filters\n");
		goto fail;
	}

	error = vtnet_alloc_rxtx_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		/* BMV: This will crash if during boot! */
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

#ifdef DEV_NETMAP
	vtnet_netmap_attach(sc);
#endif /* DEV_NETMAP */

	vtnet_start_taskqueues(sc);

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}
static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		VTNET_CORE_LOCK(sc);
		vtnet_stop(sc);
		VTNET_CORE_UNLOCK(sc);

		callout_drain(&sc->vtnet_tick_ch);
		vtnet_drain_taskqueues(sc);

		ether_ifdetach(ifp);
	}

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */

	vtnet_free_taskqueues(sc);

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	vtnet_free_rxtx_queues(sc);
	vtnet_free_rx_filters(sc);

	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	VTNET_CORE_LOCK_DESTROY(sc);

	return (0);
}
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc, 0);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}
static int
vtnet_attach_completed(device_t dev)
{

	vtnet_attach_disable_promisc(device_get_softc(dev));

	return (0);
}
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
	}
	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
		mask |= VTNET_TSO_FEATURES;
	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
		mask |= VTNET_LRO_FEATURES;
#ifndef VTNET_LEGACY_TX
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
		mask |= VIRTIO_NET_F_MQ;
#else
	mask |= VIRTIO_NET_F_MQ;
#endif

	features = VTNET_FEATURES & ~mask;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO net header. This requires up to 34
		 * descriptors with MCLBYTES clusters. If we do not have
		 * indirect descriptors, LRO is disabled since the virtqueue
		 * will not contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}
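/*
 * With VTNET_FLAG_LRO_NOMRG set, every receive buffer must be an mbuf
 * chain large enough for a maximum-sized TCP packet (roughly 64KB, hence
 * the ~34 MCLBYTES clusters mentioned above), which is why indirect
 * descriptors are required: the whole chain then occupies only a single
 * slot in the receive virtqueue.
 */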
static void
vtnet_setup_features(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_with_feature(dev, VIRTIO_F_VERSION_1))
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
	else
		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
	} else
		sc->vtnet_max_vq_pairs = 1;

	if (sc->vtnet_max_vq_pairs > 1) {
		/*
		 * Limit the maximum number of queue pairs to the lower of
		 * the number of CPUs and the configured maximum.
		 * The actual number of queues that get used may be less.
		 */
		int max;

		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
			if (max > mp_ncpus)
				max = mp_ncpus;
			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
			if (max > 1) {
				sc->vtnet_requested_vq_pairs = max;
				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
			}
		}
	}
}
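/*
 * Note that vtnet_hdr_size is derived from the negotiated features: hosts
 * with mergeable buffers or VIRTIO_F_VERSION_1 use the larger header that
 * includes num_buffers, and that same size is later used when building
 * both the receive and transmit scatter-gather lists.
 */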
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
	struct vtnet_rxq *rxq;

	rxq = &sc->vtnet_rxqs[id];

	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);

	rxq->vtnrx_sc = sc;
	rxq->vtnrx_id = id;

	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
	if (rxq->vtnrx_sg == NULL)
		return (ENOMEM);

	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);

	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
	struct vtnet_txq *txq;

	txq = &sc->vtnet_txqs[id];

	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);

	txq->vtntx_sc = sc;
	txq->vtntx_id = id;

	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
	if (txq->vtntx_sg == NULL)
		return (ENOMEM);

#ifndef VTNET_LEGACY_TX
	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vtntx_mtx);
	if (txq->vtntx_br == NULL)
		return (ENOMEM);

	TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &txq->vtntx_tq);
	if (txq->vtntx_tq == NULL)
		return (ENOMEM);

	return (0);
}
static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
	int i, npairs, error;

	npairs = sc->vtnet_max_vq_pairs;

	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
		return (ENOMEM);

	for (i = 0; i < npairs; i++) {
		error = vtnet_init_rxq(sc, i);
		if (error)
			return (error);
		error = vtnet_init_txq(sc, i);
		if (error)
			return (error);
	}

	vtnet_setup_queue_sysctl(sc);

	return (0);
}
static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{

	rxq->vtnrx_sc = NULL;
	rxq->vtnrx_id = -1;

	if (rxq->vtnrx_sg != NULL) {
		sglist_free(rxq->vtnrx_sg);
		rxq->vtnrx_sg = NULL;
	}

	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
		mtx_destroy(&rxq->vtnrx_mtx);
}
static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{

	txq->vtntx_sc = NULL;
	txq->vtntx_id = -1;

	if (txq->vtntx_sg != NULL) {
		sglist_free(txq->vtntx_sg);
		txq->vtntx_sg = NULL;
	}

#ifndef VTNET_LEGACY_TX
	if (txq->vtntx_br != NULL) {
		buf_ring_free(txq->vtntx_br, M_DEVBUF);
		txq->vtntx_br = NULL;
	}
#endif

	if (mtx_initialized(&txq->vtntx_mtx) != 0)
		mtx_destroy(&txq->vtntx_mtx);
}
static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	if (sc->vtnet_rxqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
		free(sc->vtnet_rxqs, M_DEVBUF);
		sc->vtnet_rxqs = NULL;
	}

	if (sc->vtnet_txqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
		free(sc->vtnet_txqs, M_DEVBUF);
		sc->vtnet_txqs = NULL;
	}
}
static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_mac_filter == NULL)
			return (ENOMEM);
	}

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_vlan_filter == NULL)
			return (ENOMEM);
	}

	return (0);
}
static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_mac_filter != NULL) {
		free(sc->vtnet_mac_filter, M_DEVBUF);
		sc->vtnet_mac_filter = NULL;
	}

	if (sc->vtnet_vlan_filter != NULL) {
		free(sc->vtnet_vlan_filter, M_DEVBUF);
		sc->vtnet_vlan_filter = NULL;
	}
}
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info *info;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, idx, flags, nvqs, error;

	dev = sc->vtnet_dev;
	flags = 0;

	nvqs = sc->vtnet_max_vq_pairs * 2;
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		nvqs++;

	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
	if (info == NULL)
		return (ENOMEM);

	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx + 1], sc->vtnet_tx_nsegs,
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
	}

	/*
	 * Enable interrupt binding if this is multiqueue. This only matters
	 * when per-vq MSIX is available.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
		flags |= 0;

	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
	free(info, M_TEMP);

	return (error);
}
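/*
 * The resulting virtqueue layout mirrors the VirtIO spec: queues alternate
 * receive0/transmit0, receive1/transmit1, and so on, with the control
 * virtqueue, when negotiated, allocated last.
 */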
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct pfil_head_args pa;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_KNOWSEPOCH;
	ifp->if_init = vtnet_init;
	ifp->if_ioctl = vtnet_ioctl;
	ifp->if_get_counter = vtnet_get_counter;
#ifndef VTNET_LEGACY_TX
	ifp->if_transmit = vtnet_txq_mq_start;
	ifp->if_qflush = vtnet_qflush;
#else
	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
	ifp->if_start = vtnet_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
	ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		ifp->if_capabilities |= IFCAP_LINKSTATE;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
		} else {
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
				ifp->if_capabilities |= IFCAP_TSO4;
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
				ifp->if_capabilities |= IFCAP_TSO6;
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
		}

		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	vtnet_set_rx_process_limit(sc);
	vtnet_set_tx_intr_threshold(sc);

	DEBUGNET_SET(ifp, vtnet);

	pa.pa_version = PFIL_VERSION;
	pa.pa_flags = PFIL_IN;
	pa.pa_type = PFIL_TYPE_ETHERNET;
	pa.pa_headname = ifp->if_xname;
	sc->vtnet_pfil = pfil_head_register(&pa);

	return (0);
}
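/*
 * The pfil(9) head registered above lets inbound filtering run directly
 * on the receive buffer (see vtnet_rxq_eof()), potentially dropping a
 * frame before a replacement mbuf is even allocated for the ring.
 */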
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
		return (EINVAL);

	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
	    new_mtu;

	/*
	 * Based on the new MTU (and hence frame size) determine which
	 * cluster size is most appropriate for the receive queues.
	 */
	if (frame_size <= MCLBYTES) {
		clsize = MCLBYTES;
	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		/* Avoid going past 9K jumbos. */
		if (frame_size > MJUM9BYTES)
			return (EINVAL);
		clsize = MJUM9BYTES;
	} else
		clsize = MJUMPAGESIZE;

	ifp->if_mtu = new_mtu;
	sc->vtnet_rx_new_clsize = clsize;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc, 0);
	}

	return (0);
}
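/*
 * For example, with the default 1500 byte MTU the full frame (MTU plus
 * Ethernet/VLAN header plus the VirtIO header) fits in an MCLBYTES (2K)
 * cluster. A 9000 byte MTU needs MJUM9BYTES clusters without mergeable
 * buffers, or MJUMPAGESIZE clusters when the host can merge buffers.
 */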
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifp->if_mtu != ifr->ifr_mtu) {
			VTNET_CORE_LOCK(sc);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			VTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VTNET_CORE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else {
					ifp->if_flags |= IFF_PROMISC;
					if ((ifp->if_flags ^ sc->vtnet_if_flags)
					    & IFF_ALLMULTI)
						error = ENOTSUP;
				}
			}
		} else
			vtnet_init_locked(sc, 0);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
			break;
		VTNET_CORE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vtnet_rx_filter_mac(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		VTNET_CORE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;

		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
		    IFCAP_VLAN_HWFILTER)) {
			/* These Rx features require us to renegotiate. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_RXCSUM_IPV6)
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
			if (mask & IFCAP_VLAN_HWFILTER)
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		} else
			reinit = 0;

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vtnet_init_locked(sc, 0);
		}

		VTNET_CORE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	int nbufs, error;

#ifdef DEV_NETMAP
	error = vtnet_netmap_rxq_populate(rxq);
	if (error >= 0)
		return (error);
#endif /* DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	error = ENOSPC;

	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
		error = vtnet_rxq_new_buf(rxq);
		if (error)
			break;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);
		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;
#ifdef DEV_NETMAP
	struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp),
	    rxq->vtnrx_id, NR_RX);
#else  /* !DEV_NETMAP */
	void *kring = NULL;
#endif /* !DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL) {
		if (kring == NULL)
			m_freem(m);
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_clsize;

	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));

	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	/* Allocate the rest of the chain. */
	for (i = 1; i < nbufs; i++) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
		if (m == NULL)
			goto fail;

		m->m_len = clsize;
		m_tail->m_next = m;
		m_tail = m;
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}
/*
 * Slow path for when LRO without mergeable buffers is negotiated.
 */
static int
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
    int len0)
{
	struct vtnet_softc *sc;
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	sc = rxq->vtnrx_sc;
	clsize = sc->vtnet_rx_clsize;

	m_prev = NULL;
	m_tail = NULL;
	nreplace = 0;

	m = m0;
	len = len0;

	/*
	 * Since these mbuf chains are so large, we avoid allocating an
	 * entire replacement chain if possible. When the received frame
	 * did not consume the entire chain, the unused mbufs are moved
	 * to the replacement chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received a frame
		 * larger than the chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		/* We always allocate the same cluster size. */
		KASSERT(m->m_len == clsize,
		    ("%s: mbuf size %d is not the cluster size %d",
		    __func__, m->m_len, clsize));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
	    sc->vtnet_rx_nmbufs));

	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move any unused mbufs from the received chain onto the end
	 * of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}
static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
	struct vtnet_softc *sc;
	struct mbuf *m_new;
	int error;

	sc = rxq->vtnrx_sc;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
	    ("%s: chained mbuf without LRO_NOMRG", __func__));

	if (m->m_next == NULL) {
		/* Fast-path for the common case of just one mbuf. */
		if (m->m_len < len)
			return (EINVAL);

		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
		if (m_new == NULL)
			return (ENOBUFS);

		error = vtnet_rxq_enqueue_buf(rxq, m_new);
		if (error) {
			/*
			 * The new mbuf is supposed to be an identical
			 * copy of the one just dequeued so this is an
			 * unexpected error.
			 */
			sc->vtnet_stats.rx_enq_replacement_failed++;
			m_freem(m_new);
		} else
			m->m_len = len;
	} else
		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);

	return (error);
}
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	struct vtnet_rx_header *rxhdr;
	uint8_t *mdata;
	int offset, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;
	mdata = mtod(m, uint8_t *);

	VTNET_RXQ_LOCK_ASSERT(rxq);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
	KASSERT(m->m_len == sc->vtnet_rx_clsize,
	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
	    sc->vtnet_rx_clsize));

	sglist_reset(sg);
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		MPASS(sc->vtnet_hdr_size == sizeof(rxhdr->vrh_uhdr.hdr) ||
		    sc->vtnet_hdr_size == sizeof(rxhdr->vrh_uhdr.mhdr));
		rxhdr = (struct vtnet_rx_header *) mdata;
		sglist_append(sg, &rxhdr->vrh_uhdr, sc->vtnet_hdr_size);
		offset = sizeof(struct vtnet_rx_header);
	} else
		offset = 0;

	sglist_append(sg, mdata + offset, m->m_len - offset);
	if (m->m_next != NULL) {
		error = sglist_append_mbuf(sg, m->m_next);
		MPASS(error == 0);
	}

	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);

	return (error);
}
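/*
 * The scatter-gather list built above is always header first, then frame
 * data: for legacy (non-mergeable) hosts the header comes from the
 * vtnet_rx_header pad at the front of the cluster, while mergeable hosts
 * simply receive the header inline at the start of the buffer.
 */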
static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
	struct vtnet_softc *sc;
	struct mbuf *m;
	int error;

	sc = rxq->vtnrx_sc;

	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m);
	if (error)
		m_freem(m);

	return (error);
}
/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	sc = rxq->vtnrx_sc;

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
	 */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	return (0);
}
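/*
 * The switch above works only because the checksum field offsets happen
 * to differ between the protocols: uh_sum sits at offset 6 of the UDP
 * header while th_sum sits at offset 16 of the TCP header, so csum_offset
 * alone identifies the L4 protocol.
 */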
static int
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int offset, proto;

	sc = rxq->vtnrx_sc;

	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip;
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
			return (1);
		ip = (struct ip *)(m->m_data + ip_start);
		proto = ip->ip_p;
		offset = ip_start + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len < ip_start +
		    sizeof(struct ip6_hdr)))
			return (1);
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
		if (__predict_false(offset < 0))
			return (1);
		break;
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	switch (proto) {
	case IPPROTO_TCP:
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_UDP:
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		/*
		 * For the remaining protocols, FreeBSD does not support
		 * checksum offloading, so the checksum will be recomputed.
		 */
#if 0
		if_printf(sc->vtnet_ifp, "%s: cksum offload of unsupported "
		    "protocol eth_type=%#x proto=%d csum_start=%d "
		    "csum_offset=%d\n", __func__, eth_type, proto,
		    hdr->csum_start, hdr->csum_offset);
#endif
		break;
	}

	return (0);
}
/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 * a bit.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	uint16_t eth_type;
	int offset, error;

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		/* BMV: We should handle nested VLAN tags too. */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
	else
		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);

	return (error);
}
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
{
	struct mbuf *m;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
		if (m == NULL)
			break;
		vtnet_rxq_discard_buf(rxq, m);
	}
}
static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be successful
	 * since it was just dequeued.
	 */
	error = vtnet_rxq_enqueue_buf(rxq, m);
	KASSERT(error == 0,
	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
}
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			goto fail;
		}

		if (vtnet_rxq_new_buf(rxq) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ether_header *eh;

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		eh = mtod(m, struct ether_header *);
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			vtnet_vlan_tag_remove(m);
			/*
			 * With the 802.1Q header removed, update the
			 * checksum starting location accordingly.
			 */
			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
		}
	}

	m->m_pkthdr.flowid = rxq->vtnrx_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

	/*
	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
	 * distinction that Linux does. Need to reevaluate if performing
	 * offloading for the NEEDS_CSUM case is really appropriate.
	 */
	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
	    VIRTIO_NET_HDR_F_DATA_VALID)) {
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
			rxq->vtnrx_stats.vrxs_csum++;
		else
			rxq->vtnrx_stats.vrxs_csum_failed++;
	}

	rxq->vtnrx_stats.vrxs_ipackets++;
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;

	VTNET_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m);
	VTNET_RXQ_LOCK(rxq);
}
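/*
 * The queue lock is intentionally dropped around if_input() above: the
 * packet may travel far up the network stack, and holding the Rx lock
 * there could cause long lock hold times or lock-order reversals.
 */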
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
	struct virtio_net_hdr lhdr, *hdr;
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *mr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, count;
	pfil_return_t pfil;
	bool pfil_done;

	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	ifp = sc->vtnet_ifp;
	hdr = &lhdr;
	deq = 0;
	count = sc->vtnet_rx_process_limit;

	VTNET_RXQ_LOCK_ASSERT(rxq);

	while (count-- > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			vtnet_rxq_discard_buf(rxq, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad inserted between the header
			 * and the actual start of the frame. This includes
			 * the unused num_buffers when using a legacy device.
			 */
			len += adjsz - sc->vtnet_hdr_size;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		/*
		 * If we have enough data in first mbuf, run it through
		 * pfil as a memory buffer before dequeueing the rest.
		 */
		if (PFIL_HOOKED_IN(sc->vtnet_pfil) &&
		    len - adjsz >= ETHER_HDR_LEN + max_protohdr) {
			pfil = pfil_run_hooks(sc->vtnet_pfil,
			    m->m_data + adjsz, ifp,
			    (len - adjsz) | PFIL_MEMPTR | PFIL_IN, NULL);
			switch (pfil) {
			case PFIL_REALLOCED:
				mr = pfil_mem2mbuf(m->m_data + adjsz);
				vtnet_rxq_input(rxq, mr, hdr);
				/* FALLTHROUGH */
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				vtnet_rxq_discard_buf(rxq, m);
				if (nbufs > 1)
					vtnet_rxq_discard_merged_bufs(rxq,
					    nbufs);
				continue;
			default:
				KASSERT(pfil == PFIL_PASS,
				    ("Filter returned %d!\n", pfil));
			}
			pfil_done = true;
		} else
			pfil_done = false;

		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			/* Dequeue the rest of chain. */
			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
				continue;
		}

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the header is at the beginning of the
		 * mbuf data. We no longer need num_buffers, so always use a
		 * regular header.
		 *
		 * BMV: Is this memcpy() expensive? We know the mbuf data is
		 * still valid even after the m_adj().
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (PFIL_HOOKED_IN(sc->vtnet_pfil) && pfil_done == false) {
			pfil = pfil_run_hooks(sc->vtnet_pfil, &m, ifp, PFIL_IN,
			    NULL);
			switch (pfil) {
			case PFIL_DROPPED:
			case PFIL_CONSUMED:
				continue;
			default:
				KASSERT(pfil == PFIL_PASS,
				    ("Filter returned %d!\n", pfil));
			}
		}

		vtnet_rxq_input(rxq, m, hdr);

		/* Must recheck after dropping the Rx lock. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq);

	return (count > 0 ? 0 : EAGAIN);
}
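/*
 * Returning EAGAIN above signals vtnet_rx_vq_process() that the process
 * limit was exhausted while work remained, so the queue is retried or
 * rescheduled on its taskqueue rather than re-enabling interrupts
 * immediately.
 */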
static void
vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;
#ifdef DEV_NETMAP
	int nmirq;
#endif /* DEV_NETMAP */

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_rxq_disable_intr(rxq);
		return;
	}

	VTNET_RXQ_LOCK(rxq);

#ifdef DEV_NETMAP
	/*
	 * We call netmap_rx_irq() under lock to prevent concurrent calls.
	 * This is not necessary to serialize the access to the RX vq, but
	 * rather to avoid races that may happen if this interface is
	 * attached to a VALE switch, which would cause received packets
	 * to stall in the RX queue (nm_kr_tryget() could find the kring
	 * busy when called from netmap_bwrap_intr_notify()).
	 */
	nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more);
	if (nmirq != NM_IRQ_PASS) {
		VTNET_RXQ_UNLOCK(rxq);
		if (nmirq == NM_IRQ_RESCHED) {
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
		}
		return;
	}
#endif /* DEV_NETMAP */

again:
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		/*
		 * This is an occasional condition or race (when !more),
		 * so retry a few times before scheduling the taskqueue.
		 */
		if (tries-- > 0)
			goto again;

		rxq->vtnrx_stats.vrxs_rescheduled++;
		VTNET_RXQ_UNLOCK(rxq);
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	} else
		VTNET_RXQ_UNLOCK(rxq);
}
static void
vtnet_rx_vq_intr(void *xrxq)
{
	struct vtnet_rxq *rxq;

	rxq = xrxq;
	vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES);
}

static void
vtnet_rxq_tq_intr(void *xrxq, int pending)
{
	struct vtnet_rxq *rxq;

	rxq = xrxq;
	vtnet_rx_vq_process(rxq, 0);
}
static int
vtnet_txq_below_threshold(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;

	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
}
static int
vtnet_txq_notify(struct vtnet_txq *txq)
{
	struct virtqueue *vq;

	vq = txq->vtntx_vq;

	txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
	virtqueue_notify(vq);

	if (vtnet_txq_enable_intr(txq) == 0)
		return (0);

	/*
	 * Drain frames that were completed since last checked. If this
	 * causes the queue to go above the threshold, the caller should
	 * continue transmitting.
	 */
	if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
		virtqueue_disable_intr(vq);
		return (1);
	}

	return (0);
}
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;
#ifdef DEV_NETMAP
	struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp),
	    txq->vtntx_id, NR_TX);
#else  /* !DEV_NETMAP */
	void *kring = NULL;
#endif /* !DEV_NETMAP */

	vq = txq->vtntx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		if (kring == NULL) {
			m_freem(txhdr->vth_mbuf);
			uma_zfree(vtnet_tx_header_zone, txhdr);
		}
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
}
/*
 * BMV: Much of this can go away once we finally have offsets in
 * the mbuf packet header. Bug andre@.
 */
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
    int *etype, int *proto, int *start)
{
	struct vtnet_softc *sc;
	struct ether_vlan_header *evh;
	int offset;

	sc = txq->vtntx_sc;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		sc->vtnet_stats.tx_csum_bad_ethtype++;
		return (EINVAL);
	}

	return (0);
}
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
    int offset, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct vtnet_softc *sc;
	struct tcphdr *tcp, tcphdr;

	sc = txq->vtntx_sc;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = offset + (tcp->th_off << 2);
	hdr->gso_size = m->m_pkthdr.tso_segsz;
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
	    VIRTIO_NET_HDR_GSO_TCPV6;

	if (tcp->th_flags & TH_CWR) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
		 * ECN support is not on a per-interface basis, but globally via
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
		 */
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(sc->vtnet_ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	txq->vtntx_stats.vtxs_tso++;

	return (0);
}
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int flags, etype, csum_start, proto, error;

	sc = txq->vtntx_sc;
	flags = m->m_pkthdr.csum_flags;

	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		txq->vtntx_stats.vtxs_csum++;
	}

	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf. */
			sc->vtnet_stats.tx_tso_not_tcp++;
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		    __func__, m, flags));

		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct sglist *sg;
	struct mbuf *m;
	int error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	sg = txq->vtntx_sg;
	m = *m_head;

	sglist_reset(sg);
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg->sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}
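/*
 * Only a single m_defrag() attempt is made when the mbuf chain has more
 * segments than the transmit sglist can hold; a second sglist failure
 * drops the packet and is accounted in tx_defrag_failed.
 */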
static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	m = *m_head;
	M_ASSERTPKTHDR(m);

	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
	if (txhdr == NULL) {
		m_freem(m);
		*m_head = NULL;
		return (ENOMEM);
	}

	/*
	 * Always use the non-mergeable header, regardless if the feature
	 * was negotiated. For transmit, num_buffers is always zero. The
	 * vtnet_hdr_size is used to enqueue the correct header size.
	 */
	hdr = &txhdr->vth_uhdr.hdr;

	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
		m = vtnet_txq_offload(txq, m, hdr);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
	if (error == 0)
		return (0);

fail:
	uma_zfree(vtnet_tx_header_zone, txhdr);

	return (error);
}
#ifdef VTNET_LEGACY_TX

static void
vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int tries, enq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	tries = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0)
		return;

	vtnet_txq_eof(txq);

again:
	enq = 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (virtqueue_full(vq))
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
			if (m0 != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}
}

static void
vtnet_start(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;

	sc = ifp->if_softc;
	txq = &sc->vtnet_txqs[0];

	VTNET_TXQ_LOCK(txq);
	vtnet_start_locked(txq, ifp);
	VTNET_TXQ_UNLOCK(txq);
}

#else /* !VTNET_LEGACY_TX */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct buf_ring *br;
	struct ifnet *ifp;
	int enq, tries, error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	br = txq->vtntx_br;
	ifp = sc->vtnet_ifp;
	tries = 0;
	error = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	vtnet_txq_eof(txq);

again:
	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (virtqueue_full(vq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);

		enq++;
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}

	return (0);
}
static int
vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	int i, npairs, error;

	sc = ifp->if_softc;
	npairs = sc->vtnet_act_vq_pairs;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;

	txq = &sc->vtnet_txqs[i];

	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
		error = vtnet_txq_mq_start_locked(txq, m);
		VTNET_TXQ_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->vtntx_br, m);
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
	}

	return (error);
}
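/*
 * Transmit queue selection above: flows carrying a hash stick to one
 * queue (flowid modulo the active pairs); otherwise the current CPU
 * picks one. If the queue lock is busy, the frame is buffered in the
 * per-queue buf_ring and drained by the deferred task, so callers never
 * block here.
 */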
static void
vtnet_txq_tq_deferred(void *xtxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;

	txq = xtxq;
	sc = txq->vtntx_sc;

	VTNET_TXQ_LOCK(txq);
	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
	VTNET_TXQ_UNLOCK(txq);
}

#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

#ifdef VTNET_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vtnet_start_locked(txq, ifp);
#else
	if (!drbr_empty(ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
#endif
}
static void
vtnet_txq_tq_intr(void *xtxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct ifnet *ifp;

	txq = xtxq;
	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

	VTNET_TXQ_LOCK(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}

	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);

	VTNET_TXQ_UNLOCK(txq);
}
2539 vtnet_txq_eof(struct vtnet_txq *txq)
2541 struct virtqueue *vq;
2542 struct vtnet_tx_header *txhdr;
2548 VTNET_TXQ_LOCK_ASSERT(txq);
2550 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2551 m = txhdr->vth_mbuf;
2554 txq->vtntx_stats.vtxs_opackets++;
2555 txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
2556 if (m->m_flags & M_MCAST)
2557 txq->vtntx_stats.vtxs_omcasts++;
2560 uma_zfree(vtnet_tx_header_zone, txhdr);
2563 if (virtqueue_empty(vq))
2564 txq->vtntx_watchdog = 0;
2570 vtnet_tx_vq_intr(void *xtxq)
2572 struct vtnet_softc *sc;
2573 struct vtnet_txq *txq;
2578 ifp = sc->vtnet_ifp;
2580 if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
2582 * Ignore this interrupt. Either this is a spurious interrupt
2583	 * or multiqueue without per-VQ MSIX, so every queue needs to
2584	 * be polled (a brain dead configuration we could try harder to handle).
2587 vtnet_txq_disable_intr(txq);
2592 if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
2594 #endif /* DEV_NETMAP */
2596 VTNET_TXQ_LOCK(txq);
2598 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2599 VTNET_TXQ_UNLOCK(txq);
2604 vtnet_txq_start(txq);
2606 VTNET_TXQ_UNLOCK(txq);
2610 vtnet_tx_start_all(struct vtnet_softc *sc)
2612 struct vtnet_txq *txq;
2615 VTNET_CORE_LOCK_ASSERT(sc);
2617 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2618 txq = &sc->vtnet_txqs[i];
2620 VTNET_TXQ_LOCK(txq);
2621 vtnet_txq_start(txq);
2622 VTNET_TXQ_UNLOCK(txq);
2626 #ifndef VTNET_LEGACY_TX
2628 vtnet_qflush(struct ifnet *ifp)
2630 struct vtnet_softc *sc;
2631 struct vtnet_txq *txq;
2637 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2638 txq = &sc->vtnet_txqs[i];
2640 VTNET_TXQ_LOCK(txq);
2641 while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
2643 VTNET_TXQ_UNLOCK(txq);
2651 vtnet_watchdog(struct vtnet_txq *txq)
2655 ifp = txq->vtntx_sc->vtnet_ifp;
2657 VTNET_TXQ_LOCK(txq);
2658 if (txq->vtntx_watchdog == 1) {
2660 * Only drain completed frames if the watchdog is about to
2661 * expire. If any frames were drained, there may be enough
2662 * free descriptors now available to transmit queued frames.
2663 * In that case, the timer will immediately be decremented
2664	 * below, but the timeout is generous enough that this should not happen.
2667 if (vtnet_txq_eof(txq) != 0)
2668 vtnet_txq_start(txq);
2671 if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
2672 VTNET_TXQ_UNLOCK(txq);
2675 VTNET_TXQ_UNLOCK(txq);
2677 if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
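/*
 * Editorial sketch (not driver code): lifecycle of the vtntx_watchdog
 * counter handled above. It is assumed to be armed when frames are
 * enqueued, cleared once the virtqueue drains (see vtnet_txq_eof()),
 * and counted down here once per second from vtnet_tick(); reaching
 * zero means the host stopped consuming our frames. VTNET_TX_TIMEOUT
 * is assumed to be the arming value.
 */
#if 0
	txq->vtntx_watchdog = VTNET_TX_TIMEOUT;	/* armed after an enqueue */
	/* ... later, in the completion path ... */
	if (virtqueue_empty(vq))
		txq->vtntx_watchdog = 0;	/* disarmed once drained */
#endif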
2682 vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
2683 struct vtnet_txq_stats *txacc)
2686 bzero(rxacc, sizeof(struct vtnet_rxq_stats));
2687 bzero(txacc, sizeof(struct vtnet_txq_stats));
2689 for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2690 struct vtnet_rxq_stats *rxst;
2691 struct vtnet_txq_stats *txst;
2693 rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
2694 rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
2695 rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
2696 rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
2697 rxacc->vrxs_csum += rxst->vrxs_csum;
2698 rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
2699 rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
2701 txst = &sc->vtnet_txqs[i].vtntx_stats;
2702 txacc->vtxs_opackets += txst->vtxs_opackets;
2703 txacc->vtxs_obytes += txst->vtxs_obytes;
2704 txacc->vtxs_csum += txst->vtxs_csum;
2705 txacc->vtxs_tso += txst->vtxs_tso;
2706 txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
2711 vtnet_get_counter(if_t ifp, ift_counter cnt)
2713 struct vtnet_softc *sc;
2714 struct vtnet_rxq_stats rxaccum;
2715 struct vtnet_txq_stats txaccum;
2717 sc = if_getsoftc(ifp);
2718 vtnet_accum_stats(sc, &rxaccum, &txaccum);
2721 case IFCOUNTER_IPACKETS:
2722 return (rxaccum.vrxs_ipackets);
2723 case IFCOUNTER_IQDROPS:
2724 return (rxaccum.vrxs_iqdrops);
2725 case IFCOUNTER_IERRORS:
2726 return (rxaccum.vrxs_ierrors);
2727 case IFCOUNTER_OPACKETS:
2728 return (txaccum.vtxs_opackets);
2729 #ifndef VTNET_LEGACY_TX
2730 case IFCOUNTER_OBYTES:
2731 return (txaccum.vtxs_obytes);
2732 case IFCOUNTER_OMCASTS:
2733 return (txaccum.vtxs_omcasts);
2736 return (if_get_counter_default(ifp, cnt));
2741 vtnet_tick(void *xsc)
2743 struct vtnet_softc *sc;
2748 ifp = sc->vtnet_ifp;
2751 VTNET_CORE_LOCK_ASSERT(sc);
2753 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
2754 timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
2756 if (timedout != 0) {
2757 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2758 vtnet_init_locked(sc, 0);
2760 callout_schedule(&sc->vtnet_tick_ch, hz);
2764 vtnet_start_taskqueues(struct vtnet_softc *sc)
2767 struct vtnet_rxq *rxq;
2768 struct vtnet_txq *txq;
2771 dev = sc->vtnet_dev;
2774 * Errors here are very difficult to recover from - we cannot
2775 * easily fail because, if this is during boot, we will hang
2776 * when freeing any successfully started taskqueues because
2777 * the scheduler isn't up yet.
2779 * Most drivers just ignore the return value - it only fails
2780 * with ENOMEM so an error is not likely.
2782 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2783 rxq = &sc->vtnet_rxqs[i];
2784 error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2785 "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
2787 device_printf(dev, "failed to start rx taskq %d\n",
2791 txq = &sc->vtnet_txqs[i];
2792 error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
2793 "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
2795 device_printf(dev, "failed to start tx taskq %d\n",
2802 vtnet_free_taskqueues(struct vtnet_softc *sc)
2804 struct vtnet_rxq *rxq;
2805 struct vtnet_txq *txq;
2808 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2809 rxq = &sc->vtnet_rxqs[i];
2810 if (rxq->vtnrx_tq != NULL) {
2811 taskqueue_free(rxq->vtnrx_tq);
2812 rxq->vtnrx_tq = NULL;
2815 txq = &sc->vtnet_txqs[i];
2816 if (txq->vtntx_tq != NULL) {
2817 taskqueue_free(txq->vtntx_tq);
2818 txq->vtntx_tq = NULL;
2824 vtnet_drain_taskqueues(struct vtnet_softc *sc)
2826 struct vtnet_rxq *rxq;
2827 struct vtnet_txq *txq;
2830 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2831 rxq = &sc->vtnet_rxqs[i];
2832 if (rxq->vtnrx_tq != NULL)
2833 taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2835 txq = &sc->vtnet_txqs[i];
2836 if (txq->vtntx_tq != NULL) {
2837 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
2838 #ifndef VTNET_LEGACY_TX
2839 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
2846 vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
2848 struct vtnet_rxq *rxq;
2849 struct vtnet_txq *txq;
2852 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2853 rxq = &sc->vtnet_rxqs[i];
2854 vtnet_rxq_free_mbufs(rxq);
2856 txq = &sc->vtnet_txqs[i];
2857 vtnet_txq_free_mbufs(txq);
2862 vtnet_stop_rendezvous(struct vtnet_softc *sc)
2864 struct vtnet_rxq *rxq;
2865 struct vtnet_txq *txq;
2869	 * Lock and unlock the per-queue mutex so we know the stop
2870 * state is visible. Doing only the active queues should be
2871 * sufficient, but it does not cost much extra to do all the
2872 * queues. Note we hold the core mutex here too.
2874 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2875 rxq = &sc->vtnet_rxqs[i];
2876 VTNET_RXQ_LOCK(rxq);
2877 VTNET_RXQ_UNLOCK(rxq);
2879 txq = &sc->vtnet_txqs[i];
2880 VTNET_TXQ_LOCK(txq);
2881 VTNET_TXQ_UNLOCK(txq);
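/*
 * Editorial note: the empty lock/unlock pairs above are a rendezvous,
 * not a critical section. Because the queue interrupt handlers run
 * under the same per-queue mutex, acquiring each mutex here cannot
 * succeed until any handler already inside it has finished, and
 * releasing it makes the stopped state visible to later acquirers.
 */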
2886 vtnet_stop(struct vtnet_softc *sc)
2891 dev = sc->vtnet_dev;
2892 ifp = sc->vtnet_ifp;
2894 VTNET_CORE_LOCK_ASSERT(sc);
2896 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2897 sc->vtnet_link_active = 0;
2898 callout_stop(&sc->vtnet_tick_ch);
2900 /* Only advisory. */
2901 vtnet_disable_interrupts(sc);
2904 /* Stop any pending txsync/rxsync and disable them. */
2905 netmap_disable_all_rings(ifp);
2906 #endif /* DEV_NETMAP */
2909 * Stop the host adapter. This resets it to the pre-initialized
2910	 * state. It will not generate any interrupts until after it is reinitialized.
2914 vtnet_stop_rendezvous(sc);
2916 /* Free any mbufs left in the virtqueues. */
2917 vtnet_drain_rxtx_queues(sc);
2921 vtnet_virtio_reinit(struct vtnet_softc *sc)
2928 dev = sc->vtnet_dev;
2929 ifp = sc->vtnet_ifp;
2930 features = sc->vtnet_features;
2934 mask |= IFCAP_RXCSUM;
2937 mask |= IFCAP_RXCSUM_IPV6;
2941 * Re-negotiate with the host, removing any disabled receive
2942 * features. Transmit features are disabled only on our side
2943 * via if_capenable and if_hwassist.
2946 if (ifp->if_capabilities & mask) {
2948 * We require both IPv4 and IPv6 offloading to be enabled
2949	 * in order to negotiate it: VirtIO does not distinguish between the two.
2952 if ((ifp->if_capenable & mask) != mask)
2953 features &= ~VIRTIO_NET_F_GUEST_CSUM;
2956 if (ifp->if_capabilities & IFCAP_LRO) {
2957 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2958 features &= ~VTNET_LRO_FEATURES;
2961 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2962 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2963 features &= ~VIRTIO_NET_F_CTRL_VLAN;
2966 error = virtio_reinit(dev, features);
2968 device_printf(dev, "virtio reinit error %d\n", error);
2974 vtnet_init_rx_filters(struct vtnet_softc *sc)
2978 ifp = sc->vtnet_ifp;
2980 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2981 /* Restore promiscuous and all-multicast modes. */
2982 vtnet_rx_filter(sc);
2983 /* Restore filtered MAC addresses. */
2984 vtnet_rx_filter_mac(sc);
2987 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2988 vtnet_rx_filter_vlan(sc);
2992 vtnet_init_rx_queues(struct vtnet_softc *sc)
2995 struct vtnet_rxq *rxq;
2996 int i, clsize, error;
2998 dev = sc->vtnet_dev;
3001	 * Use the new cluster size if one has been set (via an MTU
3002 * change). Otherwise, use the standard 2K clusters.
3004	 * BMV: It might make sense to use page-sized clusters as
3005 * the default (depending on the features negotiated).
3007 if (sc->vtnet_rx_new_clsize != 0) {
3008 clsize = sc->vtnet_rx_new_clsize;
3009 sc->vtnet_rx_new_clsize = 0;
3013 sc->vtnet_rx_clsize = clsize;
3014 sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
3016 KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
3017 sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
3018 ("%s: too many rx mbufs %d for %d segments", __func__,
3019 sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
3021 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3022 rxq = &sc->vtnet_rxqs[i];
3024 /* Hold the lock to satisfy asserts. */
3025 VTNET_RXQ_LOCK(rxq);
3026 error = vtnet_rxq_populate(rxq);
3027 VTNET_RXQ_UNLOCK(rxq);
3031 "cannot allocate mbufs for Rx queue %d\n", i);
3040 vtnet_init_tx_queues(struct vtnet_softc *sc)
3042 struct vtnet_txq *txq;
3045 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3046 txq = &sc->vtnet_txqs[i];
3047 txq->vtntx_watchdog = 0;
3049 netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
3050 #endif /* DEV_NETMAP */
3057 vtnet_init_rxtx_queues(struct vtnet_softc *sc)
3061 error = vtnet_init_rx_queues(sc);
3065 error = vtnet_init_tx_queues(sc);
3073 vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
3078 dev = sc->vtnet_dev;
3080 if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
3081 sc->vtnet_act_vq_pairs = 1;
3085 npairs = sc->vtnet_requested_vq_pairs;
3087 if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3089 "cannot set active queue pairs to %d\n", npairs);
3093 sc->vtnet_act_vq_pairs = npairs;
3097 vtnet_reinit(struct vtnet_softc *sc)
3102 ifp = sc->vtnet_ifp;
3104 /* Use the current MAC address. */
3105 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3106 vtnet_set_hwaddr(sc);
3108 vtnet_set_active_vq_pairs(sc);
3110 ifp->if_hwassist = 0;
3111 if (ifp->if_capenable & IFCAP_TXCSUM)
3112 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3113 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3114 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
3115 if (ifp->if_capenable & IFCAP_TSO4)
3116 ifp->if_hwassist |= CSUM_IP_TSO;
3117 if (ifp->if_capenable & IFCAP_TSO6)
3118 ifp->if_hwassist |= CSUM_IP6_TSO;
3120 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3121 vtnet_init_rx_filters(sc);
3123 error = vtnet_init_rxtx_queues(sc);
3127 vtnet_enable_interrupts(sc);
3128 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3134 vtnet_init_locked(struct vtnet_softc *sc, int init_mode)
3139 dev = sc->vtnet_dev;
3140 ifp = sc->vtnet_ifp;
3142 VTNET_CORE_LOCK_ASSERT(sc);
3144 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3150	/* Once stopped, we can update the netmap flags if necessary. */
3151 switch (init_mode) {
3152 case VTNET_INIT_NETMAP_ENTER:
3153 nm_set_native_flags(NA(ifp));
3155 case VTNET_INIT_NETMAP_EXIT:
3156 nm_clear_native_flags(NA(ifp));
3159 #endif /* DEV_NETMAP */
3161 /* Reinitialize with the host. */
3162 if (vtnet_virtio_reinit(sc) != 0)
3165 if (vtnet_reinit(sc) != 0)
3168 virtio_reinit_complete(dev);
3170 vtnet_update_link_status(sc);
3171 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3174 /* Re-enable txsync/rxsync. */
3175 netmap_enable_all_rings(ifp);
3176 #endif /* DEV_NETMAP */
3185 vtnet_init(void *xsc)
3187 struct vtnet_softc *sc;
3191 VTNET_CORE_LOCK(sc);
3192 vtnet_init_locked(sc, 0);
3193 VTNET_CORE_UNLOCK(sc);
3197 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3199 struct virtqueue *vq;
3201 vq = sc->vtnet_ctrl_vq;
3204	 * The control virtqueue is only polled and therefore it should already be empty.
3207 KASSERT(virtqueue_empty(vq),
3208 ("%s: ctrl vq %p not empty", __func__, vq));
3212 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
3213 struct sglist *sg, int readable, int writable)
3215 struct virtqueue *vq;
3217 vq = sc->vtnet_ctrl_vq;
3219 VTNET_CORE_LOCK_ASSERT(sc);
3220 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3221 ("%s: CTRL_VQ feature not negotiated", __func__));
3223 if (!virtqueue_empty(vq))
3225 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3229 * Poll for the response, but the command is likely already
3230 * done when we return from the notify.
3232 virtqueue_notify(vq);
3233 virtqueue_poll(vq, NULL);
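/*
 * Editorial sketch (not driver code): shape of a control virtqueue
 * command as enqueued by vtnet_exec_ctrl_cmd() above. Following the
 * VirtIO specification, all segments but the last are device-readable
 * and the final ack byte is device-writable; since this queue is
 * polled, the ack is valid once virtqueue_poll() returns.
 */
#if 0
	/* Segment 0 (readable):      struct virtio_net_ctrl_hdr hdr; */
	/* Segments 1..n-2 (readable): command-specific payload        */
	/* Segment n-1 (writable):     uint8_t ack;                    */
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
#endif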
3237 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3239 struct virtio_net_ctrl_hdr hdr __aligned(2);
3240 struct sglist_seg segs[3];
3245 hdr.class = VIRTIO_NET_CTRL_MAC;
3246 hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3247 ack = VIRTIO_NET_ERR;
3249 sglist_init(&sg, 3, segs);
3251 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3252 error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3253 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3254 KASSERT(error == 0 && sg.sg_nseg == 3,
3255 ("%s: error %d adding set MAC msg to sglist", __func__, error));
3257 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3259 return (ack == VIRTIO_NET_OK ? 0 : EIO);
3263 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
3265 struct sglist_seg segs[3];
3268 struct virtio_net_ctrl_hdr hdr;
3270 struct virtio_net_ctrl_mq mq;
3276 s.hdr.class = VIRTIO_NET_CTRL_MQ;
3277 s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3278 s.mq.virtqueue_pairs = npairs;
3279 s.ack = VIRTIO_NET_ERR;
3281 sglist_init(&sg, 3, segs);
3283 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3284 error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3285 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3286 KASSERT(error == 0 && sg.sg_nseg == 3,
3287 ("%s: error %d adding MQ message to sglist", __func__, error));
3289 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3291 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3295 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3297 struct sglist_seg segs[3];
3300 struct virtio_net_ctrl_hdr hdr;
3308 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3309 ("%s: CTRL_RX feature not negotiated", __func__));
3311 s.hdr.class = VIRTIO_NET_CTRL_RX;
3314 s.ack = VIRTIO_NET_ERR;
3316 sglist_init(&sg, 3, segs);
3318 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3319 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3320 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3321 KASSERT(error == 0 && sg.sg_nseg == 3,
3322 ("%s: error %d adding Rx message to sglist", __func__, error));
3324 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3326 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3330 vtnet_set_promisc(struct vtnet_softc *sc, int on)
3333 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3337 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3340 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3344 * The device defaults to promiscuous mode for backwards compatibility.
3345 * Turn it off at attach time if possible.
3348 vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3352 ifp = sc->vtnet_ifp;
3354 VTNET_CORE_LOCK(sc);
3355 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3356 ifp->if_flags |= IFF_PROMISC;
3357 } else if (vtnet_set_promisc(sc, 0) != 0) {
3358 ifp->if_flags |= IFF_PROMISC;
3359 device_printf(sc->vtnet_dev,
3360 "cannot disable default promiscuous mode\n");
3362 VTNET_CORE_UNLOCK(sc);
3366 vtnet_rx_filter(struct vtnet_softc *sc)
3371 dev = sc->vtnet_dev;
3372 ifp = sc->vtnet_ifp;
3374 VTNET_CORE_LOCK_ASSERT(sc);
3376 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3377 device_printf(dev, "cannot %s promiscuous mode\n",
3378 ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3380 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3381 device_printf(dev, "cannot %s all-multicast mode\n",
3382 ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3386 vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt)
3388 struct vtnet_softc *sc = arg;
3390 if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
3393 if (ucnt < VTNET_MAX_MAC_ENTRIES)
3395 &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt],
3402 vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
3404 struct vtnet_mac_filter *filter = arg;
3406 if (mcnt < VTNET_MAX_MAC_ENTRIES)
3407 bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt],
3414 vtnet_rx_filter_mac(struct vtnet_softc *sc)
3416 struct virtio_net_ctrl_hdr hdr __aligned(2);
3417 struct vtnet_mac_filter *filter;
3418 struct sglist_seg segs[4];
3421 bool promisc, allmulti;
3426 ifp = sc->vtnet_ifp;
3427 filter = sc->vtnet_mac_filter;
3429 VTNET_CORE_LOCK_ASSERT(sc);
3430 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3431 ("%s: CTRL_RX feature not negotiated", __func__));
3433 /* Unicast MAC addresses: */
3434 ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc);
3435 promisc = (ucnt > VTNET_MAX_MAC_ENTRIES);
3438 filter->vmf_unicast.nentries = 0;
3439 if_printf(ifp, "more than %d MAC addresses assigned, "
3440 "falling back to promiscuous mode\n",
3441 VTNET_MAX_MAC_ENTRIES);
3443 filter->vmf_unicast.nentries = ucnt;
3445 /* Multicast MAC addresses: */
3446 mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter);
3447 allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES);
3450 filter->vmf_multicast.nentries = 0;
3451 if_printf(ifp, "more than %d multicast MAC addresses "
3452 "assigned, falling back to all-multicast mode\n",
3453 VTNET_MAX_MAC_ENTRIES);
3455 filter->vmf_multicast.nentries = mcnt;
3457 if (promisc && allmulti)
3460 hdr.class = VIRTIO_NET_CTRL_MAC;
3461 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3462 ack = VIRTIO_NET_ERR;
3464 sglist_init(&sg, 4, segs);
3466 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3467 error |= sglist_append(&sg, &filter->vmf_unicast,
3468 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3469 error |= sglist_append(&sg, &filter->vmf_multicast,
3470 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3471 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3472 KASSERT(error == 0 && sg.sg_nseg == 4,
3473 ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3475 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3477 if (ack != VIRTIO_NET_OK)
3478 if_printf(ifp, "error setting host MAC filter table\n");
3481 if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
3482 if_printf(ifp, "cannot enable promiscuous mode\n");
3483 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
3484 if_printf(ifp, "cannot enable all-multicast mode\n");
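/*
 * Editorial sketch (not driver code): the MAC_TABLE_SET command built
 * above carries two tables back to back, unicast then multicast. Each
 * table is a 32-bit entry count followed by that many 6-byte MAC
 * addresses, which is why each sglist segment above appends
 * sizeof(uint32_t) plus nentries * ETHER_ADDR_LEN bytes. The struct
 * name below is illustrative only.
 */
#if 0
struct example_mac_table {
	uint32_t nentries;			/* number of addresses */
	uint8_t	 macs[][ETHER_ADDR_LEN];	/* nentries addresses */
};
#endif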
3488 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3490 struct sglist_seg segs[3];
3493 struct virtio_net_ctrl_hdr hdr;
3501 s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3502 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3504 s.ack = VIRTIO_NET_ERR;
3506 sglist_init(&sg, 3, segs);
3508 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3509 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3510 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3511 KASSERT(error == 0 && sg.sg_nseg == 3,
3512 ("%s: error %d adding VLAN message to sglist", __func__, error));
3514 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3516 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3520 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3526 VTNET_CORE_LOCK_ASSERT(sc);
3527 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3528 ("%s: VLAN_FILTER feature not negotiated", __func__));
3530 /* Enable the filter for each configured VLAN. */
3531 for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3532 w = sc->vtnet_vlan_filter[i];
3534 while ((bit = ffs(w) - 1) != -1) {
3536 tag = sizeof(w) * CHAR_BIT * i + bit;
3538 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
3539 device_printf(sc->vtnet_dev,
3540 "cannot enable VLAN %d filter\n", tag);
3547 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3552 ifp = sc->vtnet_ifp;
3553 idx = (tag >> 5) & 0x7F;
3556 if (tag == 0 || tag > 4095)
3559 VTNET_CORE_LOCK(sc);
3562 sc->vtnet_vlan_filter[idx] |= (1 << bit);
3564 sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3566 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3567 ifp->if_drv_flags & IFF_DRV_RUNNING &&
3568 vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3569 device_printf(sc->vtnet_dev,
3570 "cannot %s VLAN %d %s the host filter table\n",
3571 add ? "add" : "remove", tag, add ? "to" : "from");
3574 VTNET_CORE_UNLOCK(sc);
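/*
 * Editorial sketch (not driver code): the VLAN filter is a bitmap of
 * all 4096 tags stored in 32-bit words, so a tag selects word
 * (tag >> 5) and bit (tag & 0x1F) within it. For example, VLAN 100
 * maps to word 3, bit 4.
 */
#if 0
	idx = (tag >> 5) & 0x7F;	/* 4096 tags / 32 bits = 128 words */
	bit = tag & 0x1F;		/* bit position inside the word */
	sc->vtnet_vlan_filter[idx] |= (1 << bit);
#endif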
3578 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3581 if (ifp->if_softc != arg)
3584 vtnet_update_vlan_filter(arg, 1, tag);
3588 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3591 if (ifp->if_softc != arg)
3594 vtnet_update_vlan_filter(arg, 0, tag);
3598 vtnet_is_link_up(struct vtnet_softc *sc)
3604 dev = sc->vtnet_dev;
3605 ifp = sc->vtnet_ifp;
3607 if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3608 status = VIRTIO_NET_S_LINK_UP;
3610 status = virtio_read_dev_config_2(dev,
3611 offsetof(struct virtio_net_config, status));
3613 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3617 vtnet_update_link_status(struct vtnet_softc *sc)
3622 ifp = sc->vtnet_ifp;
3624 VTNET_CORE_LOCK_ASSERT(sc);
3625 link = vtnet_is_link_up(sc);
3627 /* Notify if the link status has changed. */
3628 if (link != 0 && sc->vtnet_link_active == 0) {
3629 sc->vtnet_link_active = 1;
3630 if_link_state_change(ifp, LINK_STATE_UP);
3631 } else if (link == 0 && sc->vtnet_link_active != 0) {
3632 sc->vtnet_link_active = 0;
3633 if_link_state_change(ifp, LINK_STATE_DOWN);
3638 vtnet_ifmedia_upd(struct ifnet *ifp)
3640 struct vtnet_softc *sc;
3641 struct ifmedia *ifm;
3644 ifm = &sc->vtnet_media;
3646 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3653 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3655 struct vtnet_softc *sc;
3659 ifmr->ifm_status = IFM_AVALID;
3660 ifmr->ifm_active = IFM_ETHER;
3662 VTNET_CORE_LOCK(sc);
3663 if (vtnet_is_link_up(sc) != 0) {
3664 ifmr->ifm_status |= IFM_ACTIVE;
3665 ifmr->ifm_active |= VTNET_MEDIATYPE;
3667 ifmr->ifm_active |= IFM_NONE;
3668 VTNET_CORE_UNLOCK(sc);
3672 vtnet_set_hwaddr(struct vtnet_softc *sc)
3677 dev = sc->vtnet_dev;
3679 if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3680 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3681 device_printf(dev, "unable to set MAC address\n");
3682 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3683 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3684 virtio_write_dev_config_1(dev,
3685 offsetof(struct virtio_net_config, mac) + i,
3686 sc->vtnet_hwaddr[i]);
3692 vtnet_get_hwaddr(struct vtnet_softc *sc)
3697 dev = sc->vtnet_dev;
3699 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3701 * Generate a random locally administered unicast address.
3703 * It would be nice to generate the same MAC address across
3704 * reboots, but it seems all the hosts currently available
3705 * support the MAC feature, so this isn't too important.
3707 sc->vtnet_hwaddr[0] = 0xB2;
3708 arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3709 vtnet_set_hwaddr(sc);
3713 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3714 sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3715 offsetof(struct virtio_net_config, mac) + i);
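/*
 * Editorial note: why 0xB2 above is a valid first octet for a random
 * locally administered unicast address. Bit 0 of the first octet is
 * the multicast bit and must be clear; bit 1 is the locally
 * administered bit and must be set. 0xB2 is 1011 0010 in binary, so
 * both conditions hold.
 */
#if 0
	uint8_t octet0 = 0xB2;
	/* (octet0 & 0x01) == 0: unicast, not multicast. */
	/* (octet0 & 0x02) != 0: locally administered, not vendor OUI. */
#endif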
3720 vtnet_vlan_tag_remove(struct mbuf *m)
3722 struct ether_vlan_header *evh;
3724 evh = mtod(m, struct ether_vlan_header *);
3725 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
3726 m->m_flags |= M_VLANTAG;
3728 /* Strip the 802.1Q header. */
3729 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
3730 ETHER_HDR_LEN - ETHER_TYPE_LEN);
3731 m_adj(m, ETHER_VLAN_ENCAP_LEN);
3735 vtnet_set_rx_process_limit(struct vtnet_softc *sc)
3739 limit = vtnet_tunable_int(sc, "rx_process_limit",
3740 vtnet_rx_process_limit);
3743 sc->vtnet_rx_process_limit = limit;
3747 vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
3751 size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
3754 * The Tx interrupt is disabled until the queue free count falls
3755 * below our threshold. Completed frames are drained from the Tx
3756 * virtqueue before transmitting new frames and in the watchdog
3757 * callout, so the frequency of Tx interrupts is greatly reduced,
3758	 * at the cost of not freeing mbufs as quickly as they otherwise would be.
3761 * N.B. We assume all the Tx queues are the same size.
3766	 * Without indirect descriptors, leave enough room for the largest
3767	 * number of segments we handle.
3769 if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3770 thresh < sc->vtnet_tx_nsegs)
3771 thresh = sc->vtnet_tx_nsegs;
3773 sc->vtnet_tx_intr_thresh = thresh;
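/*
 * Editorial sketch (not driver code): roughly how the threshold stored
 * above is consumed. vtnet_txq_below_threshold() compares the virtqueue
 * free count against vtnet_tx_intr_thresh, and vtnet_txq_enable_intr()
 * (see below) only re-arms the interrupt, with postponement, when the
 * queue is that full; otherwise the interrupt stays disabled and
 * completions are reaped in the transmit path.
 */
#if 0
	if (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh)	/* filling up */
		return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
	return (0);	/* plenty of room: leave the interrupt disabled */
#endif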
3777 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3778 struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3780 struct sysctl_oid *node;
3781 struct sysctl_oid_list *list;
3782 struct vtnet_rxq_stats *stats;
3785 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
3786 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3787 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
3788 list = SYSCTL_CHILDREN(node);
3790 stats = &rxq->vtnrx_stats;
3792 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3793 &stats->vrxs_ipackets, "Receive packets");
3794 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3795 &stats->vrxs_ibytes, "Receive bytes");
3796 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3797 &stats->vrxs_iqdrops, "Receive drops");
3798 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3799 &stats->vrxs_ierrors, "Receive errors");
3800 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3801 &stats->vrxs_csum, "Receive checksum offloaded");
3802 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3803 &stats->vrxs_csum_failed, "Receive checksum offload failed");
3804 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3805 &stats->vrxs_rescheduled,
3806 "Receive interrupt handler rescheduled");
3810 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
3811 struct sysctl_oid_list *child, struct vtnet_txq *txq)
3813 struct sysctl_oid *node;
3814 struct sysctl_oid_list *list;
3815 struct vtnet_txq_stats *stats;
3818 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
3819 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3820 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
3821 list = SYSCTL_CHILDREN(node);
3823 stats = &txq->vtntx_stats;
3825 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3826 &stats->vtxs_opackets, "Transmit packets");
3827 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3828 &stats->vtxs_obytes, "Transmit bytes");
3829 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3830 &stats->vtxs_omcasts, "Transmit multicasts");
3831 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3832 &stats->vtxs_csum, "Transmit checksum offloaded");
3833 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3834 &stats->vtxs_tso, "Transmit segmentation offloaded");
3835 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3836 &stats->vtxs_rescheduled,
3837 "Transmit interrupt handler rescheduled");
3841 vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
3844 struct sysctl_ctx_list *ctx;
3845 struct sysctl_oid *tree;
3846 struct sysctl_oid_list *child;
3849 dev = sc->vtnet_dev;
3850 ctx = device_get_sysctl_ctx(dev);
3851 tree = device_get_sysctl_tree(dev);
3852 child = SYSCTL_CHILDREN(tree);
3854 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3855 vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3856 vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3861 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
3862 struct sysctl_oid_list *child, struct vtnet_softc *sc)
3864 struct vtnet_statistics *stats;
3865 struct vtnet_rxq_stats rxaccum;
3866 struct vtnet_txq_stats txaccum;
3868 vtnet_accum_stats(sc, &rxaccum, &txaccum);
3870 stats = &sc->vtnet_stats;
3871 stats->rx_csum_offloaded = rxaccum.vrxs_csum;
3872 stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
3873 stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
3874 stats->tx_csum_offloaded = txaccum.vtxs_csum;
3875 stats->tx_tso_offloaded = txaccum.vtxs_tso;
3876 stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
3878 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
3879 CTLFLAG_RD, &stats->mbuf_alloc_failed,
3880 "Mbuf cluster allocation failures");
3882 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
3883 CTLFLAG_RD, &stats->rx_frame_too_large,
3884 "Received frame larger than the mbuf chain");
3885 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
3886 CTLFLAG_RD, &stats->rx_enq_replacement_failed,
3887 "Enqueuing the replacement receive mbuf failed");
3888 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
3889 CTLFLAG_RD, &stats->rx_mergeable_failed,
3890 "Mergeable buffers receive failures");
3891 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
3892 CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
3893 "Received checksum offloaded buffer with unsupported "
3895 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
3896 CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
3897 "Received checksum offloaded buffer with incorrect IP protocol");
3898 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
3899 CTLFLAG_RD, &stats->rx_csum_bad_offset,
3900 "Received checksum offloaded buffer with incorrect offset");
3901 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
3902 CTLFLAG_RD, &stats->rx_csum_bad_proto,
3903 "Received checksum offloaded buffer with incorrect protocol");
3904 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
3905 CTLFLAG_RD, &stats->rx_csum_failed,
3906 "Received buffer checksum offload failed");
3907 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
3908 CTLFLAG_RD, &stats->rx_csum_offloaded,
3909 "Received buffer checksum offload succeeded");
3910 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
3911 CTLFLAG_RD, &stats->rx_task_rescheduled,
3912 "Times the receive interrupt task rescheduled itself");
3914 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
3915 CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
3916 "Aborted transmit of checksum offloaded buffer with unknown "
3918 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
3919 CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
3920 "Aborted transmit of TSO buffer with unknown Ethernet type");
3921 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3922 CTLFLAG_RD, &stats->tx_tso_not_tcp,
3923 "Aborted transmit of TSO buffer with non TCP protocol");
3924 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3925 CTLFLAG_RD, &stats->tx_defragged,
3926 "Transmit mbufs defragged");
3927 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
3928 CTLFLAG_RD, &stats->tx_defrag_failed,
3929 "Aborted transmit of buffer because defrag failed");
3930 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
3931 CTLFLAG_RD, &stats->tx_csum_offloaded,
3932 "Offloaded checksum of transmitted buffer");
3933 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
3934 CTLFLAG_RD, &stats->tx_tso_offloaded,
3935 "Segmentation offload of transmitted buffer");
3936 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
3937 CTLFLAG_RD, &stats->tx_task_rescheduled,
3938 "Times the transmit interrupt task rescheduled itself");
3942 vtnet_setup_sysctl(struct vtnet_softc *sc)
3945 struct sysctl_ctx_list *ctx;
3946 struct sysctl_oid *tree;
3947 struct sysctl_oid_list *child;
3949 dev = sc->vtnet_dev;
3950 ctx = device_get_sysctl_ctx(dev);
3951 tree = device_get_sysctl_tree(dev);
3952 child = SYSCTL_CHILDREN(tree);
3954 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3955 CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3956 "Maximum number of supported virtqueue pairs");
3957 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
3958 CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
3959 "Requested number of virtqueue pairs");
3960 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3961 CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3962 "Number of active virtqueue pairs");
3964 vtnet_setup_stat_sysctl(ctx, child, sc);
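/*
 * Editorial note: the nodes registered above land under the device's
 * sysctl tree and can be read from userland, for example:
 *
 *	sysctl dev.vtnet.0.act_vq_pairs
 *	sysctl dev.vtnet.0.rxq0.ipackets
 */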
3968 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3971 return (virtqueue_enable_intr(rxq->vtnrx_vq));
3975 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
3978 virtqueue_disable_intr(rxq->vtnrx_vq);
3982 vtnet_txq_enable_intr(struct vtnet_txq *txq)
3984 struct virtqueue *vq;
3988 if (vtnet_txq_below_threshold(txq) != 0)
3989 return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
3992 * The free count is above our threshold. Keep the Tx interrupt
3993 * disabled until the queue is fuller.
3999 vtnet_txq_disable_intr(struct vtnet_txq *txq)
4002 virtqueue_disable_intr(txq->vtntx_vq);
4006 vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
4010 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4011 vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
4015 vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
4019 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4020 vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
4024 vtnet_enable_interrupts(struct vtnet_softc *sc)
4027 vtnet_enable_rx_interrupts(sc);
4028 vtnet_enable_tx_interrupts(sc);
4032 vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
4036 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4037 vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
4041 vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
4045 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4046 vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
4050 vtnet_disable_interrupts(struct vtnet_softc *sc)
4053 vtnet_disable_rx_interrupts(sc);
4054 vtnet_disable_tx_interrupts(sc);
4058 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
4062 snprintf(path, sizeof(path),
4063 "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
4064 TUNABLE_INT_FETCH(path, &def);
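/*
 * Editorial note: the per-device tunables fetched above are keyed by
 * unit number and can be set from loader.conf(5), for example:
 *
 *	hw.vtnet.0.rx_process_limit="512"
 */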
4071 vtnet_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
4073 struct vtnet_softc *sc;
4075 sc = if_getsoftc(ifp);
4077 VTNET_CORE_LOCK(sc);
4078 *nrxr = sc->vtnet_max_vq_pairs;
4079 *ncl = DEBUGNET_MAX_IN_FLIGHT;
4080 *clsize = sc->vtnet_rx_clsize;
4081 VTNET_CORE_UNLOCK(sc);
4085 vtnet_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
4090 vtnet_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
4092 struct vtnet_softc *sc;
4093 struct vtnet_txq *txq;
4096 sc = if_getsoftc(ifp);
4097 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4101 txq = &sc->vtnet_txqs[0];
4102 error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
4104 (void)vtnet_txq_notify(txq);
4109 vtnet_debugnet_poll(struct ifnet *ifp, int count)
4111 struct vtnet_softc *sc;
4114 sc = if_getsoftc(ifp);
4115 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4119 (void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4120 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4121 (void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4124 #endif /* DEBUGNET */