/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <machine/smp.h>

#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"

#include "opt_inet.h"
#include "opt_inet6.h"
static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_attach_completed(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static void	vtnet_setup_features(struct vtnet_softc *);
static int	vtnet_init_rxq(struct vtnet_softc *, int);
static int	vtnet_init_txq(struct vtnet_softc *, int);
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
static void	vtnet_free_rx_filters(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);

static int	vtnet_rxq_populate(struct vtnet_rxq *);
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
		vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
		    struct mbuf *, int);
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxq_eof(struct vtnet_rxq *);
static void	vtnet_rx_vq_intr(void *);
static void	vtnet_rxq_tq_intr(void *, int);

static int	vtnet_txq_below_threshold(struct vtnet_txq *);
static int	vtnet_txq_notify(struct vtnet_txq *);
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
		    int *, int *, int *);
static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
		    int, struct virtio_net_hdr *);
static struct mbuf *
		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
#ifdef VTNET_LEGACY_TX
static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
static void	vtnet_start(struct ifnet *);
#else
static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vtnet_txq_tq_deferred(void *, int);
#endif
static void	vtnet_txq_start(struct vtnet_txq *);
static void	vtnet_txq_tq_intr(void *, int);
static int	vtnet_txq_eof(struct vtnet_txq *);
static void	vtnet_tx_vq_intr(void *);
static void	vtnet_tx_start_all(struct vtnet_softc *);

#ifndef VTNET_LEGACY_TX
static void	vtnet_qflush(struct ifnet *);
#endif
static int	vtnet_watchdog(struct vtnet_txq *);
static void	vtnet_accum_stats(struct vtnet_softc *,
		    struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void	vtnet_tick(void *);

static void	vtnet_start_taskqueues(struct vtnet_softc *);
static void	vtnet_free_taskqueues(struct vtnet_softc *);
static void	vtnet_drain_taskqueues(struct vtnet_softc *);

static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void	vtnet_stop_rendezvous(struct vtnet_softc *);
static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_rx_filters(struct vtnet_softc *);
static int	vtnet_init_rx_queues(struct vtnet_softc *);
static int	vtnet_init_tx_queues(struct vtnet_softc *);
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
static void	vtnet_rx_filter(struct vtnet_softc *);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);

static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_rxq *);
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_txq *);
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void	vtnet_setup_sysctl(struct vtnet_softc *);

static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int	vtnet_txq_enable_intr(struct vtnet_txq *);
static void	vtnet_txq_disable_intr(struct vtnet_txq *);
static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_interrupts(struct vtnet_softc *);
static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_interrupts(struct vtnet_softc *);

static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);

DEBUGNET_DEFINE(vtnet);
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VTNET driver parameters");

static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");

static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
    0, "Disables TCP Segmentation Offload");

static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
    0, "Disables TCP Large Receive Offload");

static int vtnet_mq_disable = 0;
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
    0, "Disables Multi Queue support");

static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");

static int vtnet_rx_process_limit = 512;
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Limits the number of RX segments processed in a single pass");
static uma_zone_t vtnet_tx_header_zone;

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM, "TxChecksum" },
	{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
	{ VIRTIO_NET_F_MAC, "MacAddress" },
	{ VIRTIO_NET_F_GSO, "TxAllGSO" },
	{ VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
	{ VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
	{ VIRTIO_NET_F_GUEST_ECN, "RxECN" },
	{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
	{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
	{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
	{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
	{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
	{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
	{ VIRTIO_NET_F_STATUS, "Status" },
	{ VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
	{ VIRTIO_NET_F_CTRL_RX, "RxMode" },
	{ VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
	{ VIRTIO_NET_F_MQ, "Multiqueue" },
	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },

	{ 0, NULL }
};
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe, vtnet_probe),
	DEVMETHOD(device_attach, vtnet_attach),
	DEVMETHOD(device_detach, vtnet_detach),
	DEVMETHOD(device_suspend, vtnet_suspend),
	DEVMETHOD(device_resume, vtnet_resume),
	DEVMETHOD(device_shutdown, vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif /* DEV_NETMAP */
static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

VIRTIO_SIMPLE_PNPTABLE(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, vtnet);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, vtnet);
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error = 0;
	static int loaded = 0;

	switch (type) {
	case MOD_LOAD:
		if (loaded++ == 0)
			vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
				sizeof(struct vtnet_tx_header),
				NULL, NULL, NULL, NULL, 0, 0);
		break;
	case MOD_QUIESCE:
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		break;
	case MOD_UNLOAD:
		if (--loaded == 0) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
static int
vtnet_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, vtnet));
}
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);

	VTNET_CORE_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);

	vtnet_setup_sysctl(sc);
	vtnet_setup_features(sc);

	error = vtnet_alloc_rx_filters(sc);
	if (error) {
		device_printf(dev, "cannot allocate Rx filters\n");
		goto fail;
	}

	error = vtnet_alloc_rxtx_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		/* BMV: This will crash if called during boot! */
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

#ifdef DEV_NETMAP
	vtnet_netmap_attach(sc);
#endif /* DEV_NETMAP */

	vtnet_start_taskqueues(sc);

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}
static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		VTNET_CORE_LOCK(sc);
		vtnet_stop(sc);
		VTNET_CORE_UNLOCK(sc);

		callout_drain(&sc->vtnet_tick_ch);
		vtnet_drain_taskqueues(sc);

		ether_ifdetach(ifp);
	}

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */

	vtnet_free_taskqueues(sc);

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	vtnet_free_rxtx_queues(sc);
	vtnet_free_rx_filters(sc);

	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	VTNET_CORE_LOCK_DESTROY(sc);

	return (0);
}
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}
static int
vtnet_attach_completed(device_t dev)
{
	vtnet_attach_disable_promisc(device_get_softc(dev));

	return (0);
}
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
	}
	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
		mask |= VTNET_TSO_FEATURES;
	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
		mask |= VTNET_LRO_FEATURES;
#ifndef VTNET_LEGACY_TX
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
		mask |= VIRTIO_NET_F_MQ;
#else
	mask |= VIRTIO_NET_F_MQ;
#endif

	features = VTNET_FEATURES & ~mask;
	sc->vtnet_features = virtio_negotiate_features(dev, features);
	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO header. This requires up to 34 descriptors
		 * with MCLBYTES clusters. If we do not have indirect
		 * descriptors, LRO is disabled since the virtqueue will not
		 * contain very many receive buffers.
		 */
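		/*
		 * Worked example of the count above (editor's note): a
		 * maximum TCP packet is 65535 bytes, so with the Ethernet
		 * and VirtIO headers a frame needs roughly 65550 / 2048
		 * (MCLBYTES) = 33 clusters, plus one descriptor for the
		 * header: about 34 descriptors per receive buffer.
		 */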
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}
static void
vtnet_setup_features(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
	else
		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
	} else
		sc->vtnet_max_vq_pairs = 1;

	if (sc->vtnet_max_vq_pairs > 1) {
		/*
		 * Limit the maximum number of queue pairs to the lower of
		 * the number of CPUs and the configured maximum.
		 * The actual number of queues that get used may be less.
		 */
		int max;

		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
			if (max > mp_ncpus)
				max = mp_ncpus;
			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
			if (max > 1) {
				sc->vtnet_requested_vq_pairs = max;
				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
			}
		}
	}
}
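
/*
 * Illustrative example (editor's note, not from the original source): a
 * device advertising max_virtqueue_pairs = 8 on a 4-CPU guest with the
 * default hw.vtnet.mq_max_pairs ends up requesting 4 queue pairs, since
 * the count is clamped to the lower of the CPU count and the configured
 * maximum.
 */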
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
	struct vtnet_rxq *rxq;

	rxq = &sc->vtnet_rxqs[id];

	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);

	rxq->vtnrx_sc = sc;
	rxq->vtnrx_id = id;

	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
	if (rxq->vtnrx_sg == NULL)
		return (ENOMEM);

	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);

	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
	struct vtnet_txq *txq;

	txq = &sc->vtnet_txqs[id];

	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);

	txq->vtntx_sc = sc;
	txq->vtntx_id = id;

	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
	if (txq->vtntx_sg == NULL)
		return (ENOMEM);

#ifndef VTNET_LEGACY_TX
	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vtntx_mtx);
	if (txq->vtntx_br == NULL)
		return (ENOMEM);

	TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &txq->vtntx_tq);
	if (txq->vtntx_tq == NULL)
		return (ENOMEM);

	return (0);
}
static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
	int i, npairs, error;

	npairs = sc->vtnet_max_vq_pairs;

	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
		return (ENOMEM);

	for (i = 0; i < npairs; i++) {
		error = vtnet_init_rxq(sc, i);
		if (error)
			return (error);
		error = vtnet_init_txq(sc, i);
		if (error)
			return (error);
	}

	vtnet_setup_queue_sysctl(sc);

	return (0);
}
static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{

	rxq->vtnrx_sc = NULL;
	rxq->vtnrx_id = -1;

	if (rxq->vtnrx_sg != NULL) {
		sglist_free(rxq->vtnrx_sg);
		rxq->vtnrx_sg = NULL;
	}

	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
		mtx_destroy(&rxq->vtnrx_mtx);
}

static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{

	txq->vtntx_sc = NULL;
	txq->vtntx_id = -1;

	if (txq->vtntx_sg != NULL) {
		sglist_free(txq->vtntx_sg);
		txq->vtntx_sg = NULL;
	}

#ifndef VTNET_LEGACY_TX
	if (txq->vtntx_br != NULL) {
		buf_ring_free(txq->vtntx_br, M_DEVBUF);
		txq->vtntx_br = NULL;
	}
#endif

	if (mtx_initialized(&txq->vtntx_mtx) != 0)
		mtx_destroy(&txq->vtntx_mtx);
}

static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	if (sc->vtnet_rxqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
		free(sc->vtnet_rxqs, M_DEVBUF);
		sc->vtnet_rxqs = NULL;
	}

	if (sc->vtnet_txqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
		free(sc->vtnet_txqs, M_DEVBUF);
		sc->vtnet_txqs = NULL;
	}
}
static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_mac_filter == NULL)
			return (ENOMEM);
	}

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_vlan_filter == NULL)
			return (ENOMEM);
	}

	return (0);
}

static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_mac_filter != NULL) {
		free(sc->vtnet_mac_filter, M_DEVBUF);
		sc->vtnet_mac_filter = NULL;
	}

	if (sc->vtnet_vlan_filter != NULL) {
		free(sc->vtnet_vlan_filter, M_DEVBUF);
		sc->vtnet_vlan_filter = NULL;
	}
}
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info *info;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, idx, flags, nvqs, error;

	dev = sc->vtnet_dev;
	flags = 0;

	nvqs = sc->vtnet_max_vq_pairs * 2;
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		nvqs++;

	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
	if (info == NULL)
		return (ENOMEM);

	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
	}

	/*
	 * Enable interrupt binding if this is multiqueue. This only matters
	 * when per-vq MSIX is available.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
		flags |= 0;

	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
	free(info, M_TEMP);

	return (error);
}
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_ioctl = vtnet_ioctl;
	ifp->if_get_counter = vtnet_get_counter;
#ifndef VTNET_LEGACY_TX
	ifp->if_transmit = vtnet_txq_mq_start;
	ifp->if_qflush = vtnet_qflush;
#else
	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
	ifp->if_start = vtnet_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
	ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif
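
	/*
	 * Editor's note: in the multiqueue (default) configuration,
	 * if_transmit bypasses the legacy if_snd queue entirely; each
	 * packet is hashed onto a per-queue buf_ring in
	 * vtnet_txq_mq_start() below. The legacy path instead bounds
	 * if_snd by the transmit virtqueue size.
	 */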
	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		ifp->if_capabilities |= IFCAP_LINKSTATE;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
		} else {
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
				ifp->if_capabilities |= IFCAP_TSO4;
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
				ifp->if_capabilities |= IFCAP_TSO6;
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
		}

		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	vtnet_set_rx_process_limit(sc);
	vtnet_set_tx_intr_threshold(sc);

	DEBUGNET_SET(ifp, vtnet);

	return (0);
}
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
		return (EINVAL);

	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
	    new_mtu;

	/*
	 * Based on the new MTU (and hence frame size) determine which
	 * cluster size is most appropriate for the receive queues.
	 */
	if (frame_size <= MCLBYTES) {
		clsize = MCLBYTES;
	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		/* Avoid going past 9K jumbos. */
		if (frame_size > MJUM9BYTES)
			return (EINVAL);
		clsize = MJUM9BYTES;
	} else
		clsize = MJUMPAGESIZE;
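
	/*
	 * Example of the selection above (editor's note): the default
	 * 1500-byte MTU fits in an MCLBYTES (2KB) cluster. A 9000-byte
	 * MTU exceeds MCLBYTES, so non-mergeable setups, which must hold
	 * the whole frame in one buffer chain, step up to MJUM9BYTES,
	 * while mergeable receive buffers can stay at MJUMPAGESIZE and
	 * span the frame across several buffers.
	 */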
	ifp->if_mtu = new_mtu;
	sc->vtnet_rx_new_clsize = clsize;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifp->if_mtu != ifr->ifr_mtu) {
			VTNET_CORE_LOCK(sc);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			VTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VTNET_CORE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else {
					ifp->if_flags |= IFF_PROMISC;
					if ((ifp->if_flags ^ sc->vtnet_if_flags)
					    & IFF_ALLMULTI)
						error = ENOTSUP;
				}
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
			break;
		VTNET_CORE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vtnet_rx_filter_mac(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		VTNET_CORE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;

		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
		    IFCAP_VLAN_HWFILTER)) {
			/* These Rx features require us to renegotiate. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_RXCSUM_IPV6)
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
			if (mask & IFCAP_VLAN_HWFILTER)
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		} else
			reinit = 0;

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vtnet_init_locked(sc);
		}

		VTNET_CORE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	int nbufs, error;

#ifdef DEV_NETMAP
	error = vtnet_netmap_rxq_populate(rxq);
	if (error >= 0)
		return (error);
#endif /* DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	error = ENOSPC;

	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
		error = vtnet_rxq_new_buf(rxq);
		if (error)
			break;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);
		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;
#ifdef DEV_NETMAP
	int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX,
	    rxq->vtnrx_id);
#else  /* !DEV_NETMAP */
	int netmap_bufs = 0;
#endif /* !DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL) {
		if (!netmap_bufs)
			m_freem(m);
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_clsize;

	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));

	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	/* Allocate the rest of the chain. */
	for (i = 1; i < nbufs; i++) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
		if (m == NULL)
			goto fail;
		m->m_len = clsize;
		m_tail->m_next = m;
		m_tail = m;
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);
	return (NULL);
}
/*
 * Slow path for when LRO without mergeable buffers is negotiated.
 */
static int
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
    int len0)
{
	struct vtnet_softc *sc;
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	sc = rxq->vtnrx_sc;
	clsize = sc->vtnet_rx_clsize;

	m_prev = NULL;
	m_tail = NULL;
	nreplace = 0;

	m = m0;
	len = len0;

	/*
	 * Since these mbuf chains are so large, we avoid allocating an
	 * entire replacement chain if possible. When the received frame
	 * did not consume the entire chain, the unused mbufs are moved
	 * to the replacement chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received a frame
		 * larger than the chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		/* We always allocate the same cluster size. */
		KASSERT(m->m_len == clsize,
		    ("%s: mbuf size %d is not the cluster size %d",
		    __func__, m->m_len, clsize));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
	    sc->vtnet_rx_nmbufs));

	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move any unused mbufs from the received chain onto the end
	 * of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}
static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
	struct vtnet_softc *sc;
	struct mbuf *m_new;
	int error;

	sc = rxq->vtnrx_sc;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
	    ("%s: chained mbuf without LRO_NOMRG", __func__));

	if (m->m_next == NULL) {
		/* Fast-path for the common case of just one mbuf. */
		if (m->m_len < len)
			return (EINVAL);

		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
		if (m_new == NULL)
			return (ENOBUFS);

		error = vtnet_rxq_enqueue_buf(rxq, m_new);
		if (error) {
			/*
			 * The new mbuf is supposed to be an identical
			 * copy of the one just dequeued so this is an
			 * unexpected error.
			 */
			sc->vtnet_stats.rx_enq_replacement_failed++;
			m_freem(m_new);
		} else
			m->m_len = len;
	} else
		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);

	return (error);
}
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	struct vtnet_rx_header *rxhdr;
	uint8_t *mdata;
	int offset, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;
	mdata = mtod(m, uint8_t *);

	VTNET_RXQ_LOCK_ASSERT(rxq);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
	KASSERT(m->m_len == sc->vtnet_rx_clsize,
	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
	    sc->vtnet_rx_clsize));

	sglist_reset(sg);
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
		rxhdr = (struct vtnet_rx_header *) mdata;
		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
		offset = sizeof(struct vtnet_rx_header);
	} else
		offset = 0;

	sglist_append(sg, mdata + offset, m->m_len - offset);
	if (m->m_next != NULL) {
		error = sglist_append_mbuf(sg, m->m_next);
		MPASS(error == 0);
	}

	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);

	return (error);
}
static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
	struct vtnet_softc *sc;
	struct mbuf *m;
	int error;

	sc = rxq->vtnrx_sc;

	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m);
	if (error)
		m_freem(m);

	return (error);
}
/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	sc = rxq->vtnrx_sc;

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
	 */
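	/*
	 * Editor's note on why the offsets "happen to be different":
	 * uh_sum sits at byte 6 of the UDP header, while th_sum sits at
	 * byte 16 of the TCP header, so the switch below can tell the two
	 * protocols apart without parsing the packet itself.
	 */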
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	return (0);
}
static int
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int offset, proto;

	sc = rxq->vtnrx_sc;

	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip;
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
			return (1);
		ip = (struct ip *)(m->m_data + ip_start);
		proto = ip->ip_p;
		offset = ip_start + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len < ip_start +
		    sizeof(struct ip6_hdr)))
			return (1);
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
		if (__predict_false(offset < 0))
			return (1);
		break;
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	switch (proto) {
	case IPPROTO_TCP:
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_UDP:
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	default:
		/*
		 * For the remaining protocols, FreeBSD does not support
		 * checksum offloading, so the checksum will be recomputed.
		 */
#if 0
		if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
		    "protocol eth_type=%#x proto=%d csum_start=%d "
		    "csum_offset=%d\n", eth_type, proto,
		    hdr->csum_start, hdr->csum_offset);
#endif
		break;
	}

	return (0);
}
/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 * a bit.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	uint16_t eth_type;
	int offset, error;

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		/* BMV: We should handle nested VLAN tags too. */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
	else
		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);

	return (error);
}
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
{
	struct mbuf *m;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
		if (m == NULL)
			break;
		vtnet_rxq_discard_buf(rxq, m);
	}
}

static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be successful
	 * since it was just dequeued.
	 */
	error = vtnet_rxq_enqueue_buf(rxq, m);
	KASSERT(error == 0,
	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
}
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			goto fail;
		}

		if (vtnet_rxq_new_buf(rxq) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ether_header *eh;

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		eh = mtod(m, struct ether_header *);
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			vtnet_vlan_tag_remove(m);
			/*
			 * With the 802.1Q header removed, update the
			 * checksum starting location accordingly.
			 */
			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
		}
	}

	m->m_pkthdr.flowid = rxq->vtnrx_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

	/*
	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
	 * distinction that Linux does. Need to reevaluate if performing
	 * offloading for the NEEDS_CSUM case is really appropriate.
	 */
	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
	    VIRTIO_NET_HDR_F_DATA_VALID)) {
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
			rxq->vtnrx_stats.vrxs_csum++;
		else
			rxq->vtnrx_stats.vrxs_csum_failed++;
	}

	rxq->vtnrx_stats.vrxs_ipackets++;
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;

	VTNET_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m);
	VTNET_RXQ_LOCK(rxq);
}
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
	struct virtio_net_hdr lhdr, *hdr;
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, count;

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;
	vq = rxq->vtnrx_vq;
	deq = 0;
	count = sc->vtnet_rx_process_limit;

	VTNET_RXQ_LOCK_ASSERT(rxq);

	while (count-- > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			vtnet_rxq_discard_buf(rxq, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad inserted between the header
			 * and the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			/* Dequeue the rest of the chain. */
			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
				continue;
		}

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the header is at the beginning of the
		 * mbuf data. We no longer need num_buffers, so always use a
		 * regular virtio_net_hdr.
		 *
		 * BMV: Is this memcpy() expensive? We know the mbuf data is
		 * still valid even after the m_adj().
		 */
		hdr = &lhdr;
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		vtnet_rxq_input(rxq, m, hdr);

		/* Must recheck after dropping the Rx lock. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq);

	return (count > 0 ? 0 : EAGAIN);
}
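
/*
 * Editor's note: EAGAIN above means the process limit
 * (hw.vtnet.rx_process_limit) was exhausted with work still pending.
 * The interrupt and taskqueue handlers below treat any non-zero return
 * as "more work remains" and reschedule the queue's taskqueue instead of
 * looping in interrupt context indefinitely.
 */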
static void
vtnet_rx_vq_intr(void *xrxq)
{
	struct vtnet_softc *sc;
	struct vtnet_rxq *rxq;
	struct ifnet *ifp;
	int tries, more;

	rxq = xrxq;
	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;
	tries = 0;

	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_rxq_disable_intr(rxq);
		return;
	}

#ifdef DEV_NETMAP
	if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS)
		return;
#endif /* DEV_NETMAP */

	VTNET_RXQ_LOCK(rxq);

again:
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		/*
		 * This is an occasional condition or race (when !more),
		 * so retry a few times before scheduling the taskqueue.
		 */
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
			goto again;

		VTNET_RXQ_UNLOCK(rxq);
		rxq->vtnrx_stats.vrxs_rescheduled++;
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	} else
		VTNET_RXQ_UNLOCK(rxq);
}
static void
vtnet_rxq_tq_intr(void *xrxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_rxq *rxq;
	struct ifnet *ifp;
	int more;

	rxq = xrxq;
	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	VTNET_RXQ_LOCK(rxq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		rxq->vtnrx_stats.vrxs_rescheduled++;
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	}

	VTNET_RXQ_UNLOCK(rxq);
}
static int
vtnet_txq_below_threshold(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;

	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
}

static int
vtnet_txq_notify(struct vtnet_txq *txq)
{
	struct virtqueue *vq;

	vq = txq->vtntx_vq;

	txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
	virtqueue_notify(vq);

	if (vtnet_txq_enable_intr(txq) == 0)
		return (0);

	/*
	 * Drain frames that were completed since last checked. If this
	 * causes the queue to go above the threshold, the caller should
	 * continue transmitting.
	 */
	if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
		virtqueue_disable_intr(vq);
		return (1);
	}

	return (0);
}
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;
#ifdef DEV_NETMAP
	int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX,
	    txq->vtntx_id);
#else  /* !DEV_NETMAP */
	int netmap_bufs = 0;
#endif /* !DEV_NETMAP */

	vq = txq->vtntx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		if (!netmap_bufs)
			m_freem(txhdr->vth_mbuf);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
}
/*
 * BMV: Much of this can go away once we finally have offsets in
 * the mbuf packet header. Bug andre@.
 */
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
    int *etype, int *proto, int *start)
{
	struct vtnet_softc *sc;
	struct ether_vlan_header *evh;
	int offset;

	sc = txq->vtntx_sc;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		sc->vtnet_stats.tx_csum_bad_ethtype++;
		return (EINVAL);
	}

	return (0);
}
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
    int offset, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct vtnet_softc *sc;
	struct tcphdr *tcp, tcphdr;

	sc = txq->vtntx_sc;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = offset + (tcp->th_off << 2);
	hdr->gso_size = m->m_pkthdr.tso_segsz;
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
	    VIRTIO_NET_HDR_GSO_TCPV6;

	if (tcp->th_flags & TH_CWR) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
		 * ECN support is not on a per-interface basis, but globally via
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
		 */
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(sc->vtnet_ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	txq->vtntx_stats.vtxs_tso++;

	return (0);
}
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int flags, etype, csum_start, proto, error;

	sc = txq->vtntx_sc;
	flags = m->m_pkthdr.csum_flags;

	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		txq->vtntx_stats.vtxs_csum++;
	}

	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf. */
			sc->vtnet_stats.tx_tso_not_tcp++;
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		    __func__, m, flags));

		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct sglist *sg;
	struct mbuf *m;
	int error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	sg = txq->vtntx_sg;
	m = *m_head;

	sglist_reset(sg);
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg->sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}
static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	m = *m_head;
	M_ASSERTPKTHDR(m);

	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
	if (txhdr == NULL) {
		m_freem(m);
		*m_head = NULL;
		return (ENOMEM);
	}

	/*
	 * Always use the non-mergeable header, regardless of whether the
	 * feature was negotiated. For transmit, num_buffers is always zero.
	 * The vtnet_hdr_size is used to enqueue the correct header size.
	 */
	hdr = &txhdr->vth_uhdr.hdr;

	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
		m = vtnet_txq_offload(txq, m, hdr);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
	if (error == 0)
		return (0);

fail:
	uma_zfree(vtnet_tx_header_zone, txhdr);

	return (error);
}
#ifdef VTNET_LEGACY_TX

static void
vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int tries, enq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	tries = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0)
		return;

	vtnet_txq_eof(txq);

again:
	enq = 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (virtqueue_full(vq))
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
			if (m0 != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}
}

static void
vtnet_start(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;

	sc = ifp->if_softc;
	txq = &sc->vtnet_txqs[0];

	VTNET_TXQ_LOCK(txq);
	vtnet_start_locked(txq, ifp);
	VTNET_TXQ_UNLOCK(txq);
}
#else /* !VTNET_LEGACY_TX */

static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct buf_ring *br;
	struct ifnet *ifp;
	int enq, tries, error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	br = txq->vtntx_br;
	ifp = sc->vtnet_ifp;
	tries = 0;
	error = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	vtnet_txq_eof(txq);

again:
	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (virtqueue_full(vq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}

		drbr_advance(ifp, br);
		enq++;
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}

	return (0);
}
static int
vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	int i, npairs, error;

	sc = ifp->if_softc;
	npairs = sc->vtnet_act_vq_pairs;

	/* Check whether the flowid is set. */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;
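
	/*
	 * For example (editor's note), with 4 active queue pairs a flow
	 * with flowid 6 is pinned to txq 2 (6 % 4); without a flowid, the
	 * transmitting CPU picks the ring. TRYLOCK below keeps the common
	 * case free of lock contention by deferring to the taskqueue when
	 * the queue is busy.
	 */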
	txq = &sc->vtnet_txqs[i];

	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
		error = vtnet_txq_mq_start_locked(txq, m);
		VTNET_TXQ_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->vtntx_br, m);
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
	}

	return (error);
}

static void
vtnet_txq_tq_deferred(void *xtxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;

	txq = xtxq;
	sc = txq->vtntx_sc;

	VTNET_TXQ_LOCK(txq);
	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
	VTNET_TXQ_UNLOCK(txq);
}

#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

#ifdef VTNET_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vtnet_start_locked(txq, ifp);
#else
	if (!drbr_empty(ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
#endif
}

static void
vtnet_txq_tq_intr(void *xtxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct ifnet *ifp;

	txq = xtxq;
	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

	VTNET_TXQ_LOCK(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}

	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);

	VTNET_TXQ_UNLOCK(txq);
}
static int
vtnet_txq_eof(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	struct mbuf *m;
	int deq;

	vq = txq->vtntx_vq;
	deq = 0;
	VTNET_TXQ_LOCK_ASSERT(txq);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		m = txhdr->vth_mbuf;
		deq++;

		txq->vtntx_stats.vtxs_opackets++;
		txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
		if (m->m_flags & M_MCAST)
			txq->vtntx_stats.vtxs_omcasts++;

		m_freem(m);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	if (virtqueue_empty(vq))
		txq->vtntx_watchdog = 0;

	return (deq);
}
static void
vtnet_tx_vq_intr(void *xtxq)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct ifnet *ifp;

	txq = xtxq;
	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

	if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_txq_disable_intr(txq);
		return;
	}

#ifdef DEV_NETMAP
	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
		return;
#endif /* DEV_NETMAP */

	VTNET_TXQ_LOCK(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}

	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);

	VTNET_TXQ_UNLOCK(txq);
}
static void
vtnet_tx_start_all(struct vtnet_softc *sc)
{
	struct vtnet_txq *txq;
	int i;

	VTNET_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		txq = &sc->vtnet_txqs[i];

		VTNET_TXQ_LOCK(txq);
		vtnet_txq_start(txq);
		VTNET_TXQ_UNLOCK(txq);
	}
}

#ifndef VTNET_LEGACY_TX
static void
vtnet_qflush(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct mbuf *m;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		txq = &sc->vtnet_txqs[i];

		VTNET_TXQ_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
			m_freem(m);
		VTNET_TXQ_UNLOCK(txq);
	}
}
#endif /* !VTNET_LEGACY_TX */
2579 vtnet_watchdog(struct vtnet_txq *txq)
2583 ifp = txq->vtntx_sc->vtnet_ifp;
2585 VTNET_TXQ_LOCK(txq);
2586 if (txq->vtntx_watchdog == 1) {
2588 * Only drain completed frames if the watchdog is about to
2589 * expire. If any frames were drained, there may be enough
2590 * free descriptors now available to transmit queued frames.
2591 * In that case, the timer will immediately be decremented
2592 * below, but the timeout is generous enough that this should not happen.
2595 if (vtnet_txq_eof(txq) != 0)
2596 vtnet_txq_start(txq);
2599 if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
2600 VTNET_TXQ_UNLOCK(txq);
2603 VTNET_TXQ_UNLOCK(txq);
2605 if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
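/*
 * Illustrative note, not part of the driver: vtntx_watchdog is a simple
 * countdown in seconds. It is armed when frames are enqueued, zeroed by
 * vtnet_txq_eof() once the virtqueue drains, and decremented here once per
 * tick; reaching zero while frames remain outstanding is reported as a
 * timeout.
 */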
2610 vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
2611 struct vtnet_txq_stats *txacc)
2614 bzero(rxacc, sizeof(struct vtnet_rxq_stats));
2615 bzero(txacc, sizeof(struct vtnet_txq_stats));
2617 for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2618 struct vtnet_rxq_stats *rxst;
2619 struct vtnet_txq_stats *txst;
2621 rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
2622 rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
2623 rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
2624 rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
2625 rxacc->vrxs_csum += rxst->vrxs_csum;
2626 rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
2627 rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
2629 txst = &sc->vtnet_txqs[i].vtntx_stats;
2630 txacc->vtxs_opackets += txst->vtxs_opackets;
2631 txacc->vtxs_obytes += txst->vtxs_obytes;
2632 txacc->vtxs_csum += txst->vtxs_csum;
2633 txacc->vtxs_tso += txst->vtxs_tso;
2634 txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
2639 vtnet_get_counter(if_t ifp, ift_counter cnt)
2641 struct vtnet_softc *sc;
2642 struct vtnet_rxq_stats rxaccum;
2643 struct vtnet_txq_stats txaccum;
2645 sc = if_getsoftc(ifp);
2646 vtnet_accum_stats(sc, &rxaccum, &txaccum);
2649 case IFCOUNTER_IPACKETS:
2650 return (rxaccum.vrxs_ipackets);
2651 case IFCOUNTER_IQDROPS:
2652 return (rxaccum.vrxs_iqdrops);
2653 case IFCOUNTER_IERRORS:
2654 return (rxaccum.vrxs_ierrors);
2655 case IFCOUNTER_OPACKETS:
2656 return (txaccum.vtxs_opackets);
2657 #ifndef VTNET_LEGACY_TX
2658 case IFCOUNTER_OBYTES:
2659 return (txaccum.vtxs_obytes);
2660 case IFCOUNTER_OMCASTS:
2661 return (txaccum.vtxs_omcasts);
2664 return (if_get_counter_default(ifp, cnt));
2669 vtnet_tick(void *xsc)
2671 struct vtnet_softc *sc;
2676 ifp = sc->vtnet_ifp;
2679 VTNET_CORE_LOCK_ASSERT(sc);
2681 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
2682 timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
2684 if (timedout != 0) {
2685 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2686 vtnet_init_locked(sc);
2688 callout_schedule(&sc->vtnet_tick_ch, hz);
2692 vtnet_start_taskqueues(struct vtnet_softc *sc)
2695 struct vtnet_rxq *rxq;
2696 struct vtnet_txq *txq;
2699 dev = sc->vtnet_dev;
2702 * Errors here are very difficult to recover from - we cannot
2703 * easily fail because, if this is during boot, we will hang
2704 * when freeing any successfully started taskqueues because
2705 * the scheduler isn't up yet.
2707 * Most drivers just ignore the return value - it can only fail
2708 * with ENOMEM, so an error is not likely.
2710 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2711 rxq = &sc->vtnet_rxqs[i];
2712 error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2713 "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
2715 device_printf(dev, "failed to start rx taskq %d\n",
2719 txq = &sc->vtnet_txqs[i];
2720 error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
2721 "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
2723 device_printf(dev, "failed to start tx taskq %d\n",
2730 vtnet_free_taskqueues(struct vtnet_softc *sc)
2732 struct vtnet_rxq *rxq;
2733 struct vtnet_txq *txq;
2736 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2737 rxq = &sc->vtnet_rxqs[i];
2738 if (rxq->vtnrx_tq != NULL) {
2739 taskqueue_free(rxq->vtnrx_tq);
2740 rxq->vtnrx_tq = NULL;
2743 txq = &sc->vtnet_txqs[i];
2744 if (txq->vtntx_tq != NULL) {
2745 taskqueue_free(txq->vtntx_tq);
2746 txq->vtntx_tq = NULL;
2752 vtnet_drain_taskqueues(struct vtnet_softc *sc)
2754 struct vtnet_rxq *rxq;
2755 struct vtnet_txq *txq;
2758 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2759 rxq = &sc->vtnet_rxqs[i];
2760 if (rxq->vtnrx_tq != NULL)
2761 taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2763 txq = &sc->vtnet_txqs[i];
2764 if (txq->vtntx_tq != NULL) {
2765 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
2766 #ifndef VTNET_LEGACY_TX
2767 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
2774 vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
2776 struct vtnet_rxq *rxq;
2777 struct vtnet_txq *txq;
2780 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2781 rxq = &sc->vtnet_rxqs[i];
2782 vtnet_rxq_free_mbufs(rxq);
2784 txq = &sc->vtnet_txqs[i];
2785 vtnet_txq_free_mbufs(txq);
2790 vtnet_stop_rendezvous(struct vtnet_softc *sc)
2792 struct vtnet_rxq *rxq;
2793 struct vtnet_txq *txq;
2797 * Lock and unlock the per-queue mutex so we know the stop
2798 * state is visible. Doing only the active queues should be
2799 * sufficient, but it does not cost much extra to do all the
2800 * queues. Note we hold the core mutex here too.
2802 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2803 rxq = &sc->vtnet_rxqs[i];
2804 VTNET_RXQ_LOCK(rxq);
2805 VTNET_RXQ_UNLOCK(rxq);
2807 txq = &sc->vtnet_txqs[i];
2808 VTNET_TXQ_LOCK(txq);
2809 VTNET_TXQ_UNLOCK(txq);
2814 vtnet_stop(struct vtnet_softc *sc)
2819 dev = sc->vtnet_dev;
2820 ifp = sc->vtnet_ifp;
2822 VTNET_CORE_LOCK_ASSERT(sc);
2824 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2825 sc->vtnet_link_active = 0;
2826 callout_stop(&sc->vtnet_tick_ch);
2828 /* Only advisory. */
2829 vtnet_disable_interrupts(sc);
2832 * Stop the host adapter. This resets it to the pre-initialized
2833 * state. It will not generate any interrupts until after it is reinitialized.
2837 vtnet_stop_rendezvous(sc);
2839 /* Free any mbufs left in the virtqueues. */
2840 vtnet_drain_rxtx_queues(sc);
2844 vtnet_virtio_reinit(struct vtnet_softc *sc)
2851 dev = sc->vtnet_dev;
2852 ifp = sc->vtnet_ifp;
2853 features = sc->vtnet_features;
2857 mask |= IFCAP_RXCSUM;
2860 mask |= IFCAP_RXCSUM_IPV6;
2864 * Re-negotiate with the host, removing any disabled receive
2865 * features. Transmit features are disabled only on our side
2866 * via if_capenable and if_hwassist.
2869 if (ifp->if_capabilities & mask) {
2871 * We require both IPv4 and IPv6 offloading to be enabled
2872 * in order to negotiate it: VirtIO does not distinguish between the two.
2875 if ((ifp->if_capenable & mask) != mask)
2876 features &= ~VIRTIO_NET_F_GUEST_CSUM;
2879 if (ifp->if_capabilities & IFCAP_LRO) {
2880 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2881 features &= ~VTNET_LRO_FEATURES;
2884 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2885 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2886 features &= ~VIRTIO_NET_F_CTRL_VLAN;
2889 error = virtio_reinit(dev, features);
2891 device_printf(dev, "virtio reinit error %d\n", error);
2897 vtnet_init_rx_filters(struct vtnet_softc *sc)
2901 ifp = sc->vtnet_ifp;
2903 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2904 /* Restore promiscuous and all-multicast modes. */
2905 vtnet_rx_filter(sc);
2906 /* Restore filtered MAC addresses. */
2907 vtnet_rx_filter_mac(sc);
2910 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2911 vtnet_rx_filter_vlan(sc);
2915 vtnet_init_rx_queues(struct vtnet_softc *sc)
2918 struct vtnet_rxq *rxq;
2919 int i, clsize, error;
2921 dev = sc->vtnet_dev;
2924 * Use the new cluster size if one has been set (via an MTU
2925 * change). Otherwise, use the standard 2K clusters.
2927 * BMV: It might make sense to use page-sized clusters as
2928 * the default (depending on the features negotiated).
2930 if (sc->vtnet_rx_new_clsize != 0) {
2931 clsize = sc->vtnet_rx_new_clsize;
2932 sc->vtnet_rx_new_clsize = 0;
2936 sc->vtnet_rx_clsize = clsize;
2937 sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
2939 KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
2940 sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2941 ("%s: too many rx mbufs %d for %d segments", __func__,
2942 sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2944 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2945 rxq = &sc->vtnet_rxqs[i];
2947 /* Hold the lock to satisfy asserts. */
2948 VTNET_RXQ_LOCK(rxq);
2949 error = vtnet_rxq_populate(rxq);
2950 VTNET_RXQ_UNLOCK(rxq);
2954 "cannot allocate mbufs for Rx queue %d\n", i);
2963 vtnet_init_tx_queues(struct vtnet_softc *sc)
2965 struct vtnet_txq *txq;
2968 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2969 txq = &sc->vtnet_txqs[i];
2970 txq->vtntx_watchdog = 0;
2977 vtnet_init_rxtx_queues(struct vtnet_softc *sc)
2981 error = vtnet_init_rx_queues(sc);
2985 error = vtnet_init_tx_queues(sc);
2993 vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
2998 dev = sc->vtnet_dev;
3000 if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
3001 sc->vtnet_act_vq_pairs = 1;
3005 npairs = sc->vtnet_requested_vq_pairs;
3007 if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3009 "cannot set active queue pairs to %d\n", npairs);
3013 sc->vtnet_act_vq_pairs = npairs;
3017 vtnet_reinit(struct vtnet_softc *sc)
3022 ifp = sc->vtnet_ifp;
3024 /* Use the current MAC address. */
3025 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3026 vtnet_set_hwaddr(sc);
3028 vtnet_set_active_vq_pairs(sc);
3030 ifp->if_hwassist = 0;
3031 if (ifp->if_capenable & IFCAP_TXCSUM)
3032 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3033 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3034 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
3035 if (ifp->if_capenable & IFCAP_TSO4)
3036 ifp->if_hwassist |= CSUM_IP_TSO;
3037 if (ifp->if_capenable & IFCAP_TSO6)
3038 ifp->if_hwassist |= CSUM_IP6_TSO;
3040 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3041 vtnet_init_rx_filters(sc);
3043 error = vtnet_init_rxtx_queues(sc);
3047 vtnet_enable_interrupts(sc);
3048 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3054 vtnet_init_locked(struct vtnet_softc *sc)
3059 dev = sc->vtnet_dev;
3060 ifp = sc->vtnet_ifp;
3062 VTNET_CORE_LOCK_ASSERT(sc);
3064 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3069 /* Reinitialize with the host. */
3070 if (vtnet_virtio_reinit(sc) != 0)
3073 if (vtnet_reinit(sc) != 0)
3076 virtio_reinit_complete(dev);
3078 vtnet_update_link_status(sc);
3079 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3088 vtnet_init(void *xsc)
3090 struct vtnet_softc *sc;
3094 VTNET_CORE_LOCK(sc);
3095 vtnet_init_locked(sc);
3096 VTNET_CORE_UNLOCK(sc);
3100 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3102 struct virtqueue *vq;
3104 vq = sc->vtnet_ctrl_vq;
3107 * The control virtqueue is only polled and therefore it should already be empty.
3110 KASSERT(virtqueue_empty(vq),
3111 ("%s: ctrl vq %p not empty", __func__, vq));
3115 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
3116 struct sglist *sg, int readable, int writable)
3118 struct virtqueue *vq;
3120 vq = sc->vtnet_ctrl_vq;
3122 VTNET_CORE_LOCK_ASSERT(sc);
3123 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3124 ("%s: CTRL_VQ feature not negotiated", __func__));
3126 if (!virtqueue_empty(vq))
3128 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3132 * Poll for the response, but the command is likely already
3133 * done when we return from the notify.
3135 virtqueue_notify(vq);
3136 virtqueue_poll(vq, NULL);
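/*
 * Illustrative sketch, not part of the driver: every control command below
 * uses the same descriptor layout. The header and payload segments are
 * host-readable and a single trailing ack byte is host-writable, which is
 * why callers pass "sg.sg_nseg - 1" readable segments and 1 writable
 * segment. The "payload" variable is a made-up stand-in for a command's
 * argument.
 */
#if 0
	struct sglist_seg segs[3];
	struct sglist sg;
	struct virtio_net_ctrl_hdr hdr;	/* readable: class and command */
	uint8_t payload;		/* readable: command argument  */
	uint8_t ack = VIRTIO_NET_ERR;	/* writable: host's status     */

	sglist_init(&sg, 3, segs);
	sglist_append(&sg, &hdr, sizeof(hdr));
	sglist_append(&sg, &payload, sizeof(payload));
	sglist_append(&sg, &ack, sizeof(ack));
	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
	/* Success iff ack == VIRTIO_NET_OK. */
#endif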
3140 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3142 struct virtio_net_ctrl_hdr hdr __aligned(2);
3143 struct sglist_seg segs[3];
3148 hdr.class = VIRTIO_NET_CTRL_MAC;
3149 hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3150 ack = VIRTIO_NET_ERR;
3152 sglist_init(&sg, 3, segs);
3154 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3155 error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3156 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3157 KASSERT(error == 0 && sg.sg_nseg == 3,
3158 ("%s: error %d adding set MAC msg to sglist", __func__, error));
3160 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3162 return (ack == VIRTIO_NET_OK ? 0 : EIO);
3166 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
3168 struct sglist_seg segs[3];
3171 struct virtio_net_ctrl_hdr hdr;
3173 struct virtio_net_ctrl_mq mq;
3179 s.hdr.class = VIRTIO_NET_CTRL_MQ;
3180 s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3181 s.mq.virtqueue_pairs = npairs;
3182 s.ack = VIRTIO_NET_ERR;
3184 sglist_init(&sg, 3, segs);
3186 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3187 error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3188 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3189 KASSERT(error == 0 && sg.sg_nseg == 3,
3190 ("%s: error %d adding MQ message to sglist", __func__, error));
3192 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3194 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
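/*
 * Illustrative note, not part of the driver: bundling the header, the
 * command payload, and the ack byte into one on-stack structure keeps all
 * three sglist segments valid for the duration of the polled command,
 * without any allocation in this path.
 */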
3198 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3200 struct sglist_seg segs[3];
3203 struct virtio_net_ctrl_hdr hdr;
3211 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3212 ("%s: CTRL_RX feature not negotiated", __func__));
3214 s.hdr.class = VIRTIO_NET_CTRL_RX;
3217 s.ack = VIRTIO_NET_ERR;
3219 sglist_init(&sg, 3, segs);
3221 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3222 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3223 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3224 KASSERT(error == 0 && sg.sg_nseg == 3,
3225 ("%s: error %d adding Rx message to sglist", __func__, error));
3227 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3229 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3233 vtnet_set_promisc(struct vtnet_softc *sc, int on)
3236 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3240 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3243 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3247 * The device defaults to promiscuous mode for backwards compatibility.
3248 * Turn it off at attach time if possible.
3251 vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3255 ifp = sc->vtnet_ifp;
3257 VTNET_CORE_LOCK(sc);
3258 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3259 ifp->if_flags |= IFF_PROMISC;
3260 } else if (vtnet_set_promisc(sc, 0) != 0) {
3261 ifp->if_flags |= IFF_PROMISC;
3262 device_printf(sc->vtnet_dev,
3263 "cannot disable default promiscuous mode\n");
3265 VTNET_CORE_UNLOCK(sc);
3269 vtnet_rx_filter(struct vtnet_softc *sc)
3274 dev = sc->vtnet_dev;
3275 ifp = sc->vtnet_ifp;
3277 VTNET_CORE_LOCK_ASSERT(sc);
3279 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3280 device_printf(dev, "cannot %s promiscuous mode\n",
3281 ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3283 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3284 device_printf(dev, "cannot %s all-multicast mode\n",
3285 ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3289 vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt)
3291 struct vtnet_softc *sc = arg;
3293 if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
3296 if (ucnt < VTNET_MAX_MAC_ENTRIES)
3298 &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt],
3305 vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
3307 struct vtnet_mac_filter *filter = arg;
3309 if (mcnt < VTNET_MAX_MAC_ENTRIES)
3310 bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt],
3317 vtnet_rx_filter_mac(struct vtnet_softc *sc)
3319 struct virtio_net_ctrl_hdr hdr __aligned(2);
3320 struct vtnet_mac_filter *filter;
3321 struct sglist_seg segs[4];
3324 bool promisc, allmulti;
3329 ifp = sc->vtnet_ifp;
3330 filter = sc->vtnet_mac_filter;
3332 VTNET_CORE_LOCK_ASSERT(sc);
3333 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3334 ("%s: CTRL_RX feature not negotiated", __func__));
3336 /* Unicast MAC addresses: */
3337 ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc);
3338 promisc = (ucnt > VTNET_MAX_MAC_ENTRIES);
3341 filter->vmf_unicast.nentries = 0;
3342 if_printf(ifp, "more than %d MAC addresses assigned, "
3343 "falling back to promiscuous mode\n",
3344 VTNET_MAX_MAC_ENTRIES);
3346 filter->vmf_unicast.nentries = ucnt;
3348 /* Multicast MAC addresses: */
3349 mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter);
3350 allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES);
3353 filter->vmf_multicast.nentries = 0;
3354 if_printf(ifp, "more than %d multicast MAC addresses "
3355 "assigned, falling back to all-multicast mode\n",
3356 VTNET_MAX_MAC_ENTRIES);
3358 filter->vmf_multicast.nentries = mcnt;
3360 if (promisc && allmulti)
3363 hdr.class = VIRTIO_NET_CTRL_MAC;
3364 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3365 ack = VIRTIO_NET_ERR;
3367 sglist_init(&sg, 4, segs);
3369 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3370 error |= sglist_append(&sg, &filter->vmf_unicast,
3371 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3372 error |= sglist_append(&sg, &filter->vmf_multicast,
3373 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3374 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3375 KASSERT(error == 0 && sg.sg_nseg == 4,
3376 ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3378 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3380 if (ack != VIRTIO_NET_OK)
3381 if_printf(ifp, "error setting host MAC filter table\n");
3384 if (promisc && vtnet_set_promisc(sc, 1) != 0)
3385 if_printf(ifp, "cannot enable promiscuous mode\n");
3386 if (allmulti && vtnet_set_allmulti(sc, 1) != 0)
3387 if_printf(ifp, "cannot enable all-multicast mode\n");
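/*
 * Illustrative sketch, not part of the driver: each MAC table segment above
 * follows the virtio wire format, a 32-bit entry count followed by that
 * many packed 6-byte addresses, hence the segment length of
 * sizeof(uint32_t) + nentries * ETHER_ADDR_LEN. The struct name below is
 * made up for illustration.
 */
#if 0
struct example_mac_table {
	uint32_t nentries;		  /* count of addresses that follow */
	uint8_t  macs[][ETHER_ADDR_LEN];  /* packed 6-byte MAC addresses    */
};
#endif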
3391 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3393 struct sglist_seg segs[3];
3396 struct virtio_net_ctrl_hdr hdr;
3404 s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3405 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3407 s.ack = VIRTIO_NET_ERR;
3409 sglist_init(&sg, 3, segs);
3411 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3412 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3413 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3414 KASSERT(error == 0 && sg.sg_nseg == 3,
3415 ("%s: error %d adding VLAN message to sglist", __func__, error));
3417 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3419 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3423 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3429 VTNET_CORE_LOCK_ASSERT(sc);
3430 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3431 ("%s: VLAN_FILTER feature not negotiated", __func__));
3433 /* Enable the filter for each configured VLAN. */
3434 for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3435 w = sc->vtnet_vlan_filter[i];
3437 while ((bit = ffs(w) - 1) != -1) {
3439 tag = sizeof(w) * CHAR_BIT * i + bit;
3441 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
3442 device_printf(sc->vtnet_dev,
3443 "cannot enable VLAN %d filter\n", tag);
3450 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3455 ifp = sc->vtnet_ifp;
3456 idx = (tag >> 5) & 0x7F;
3459 if (tag == 0 || tag > 4095)
3462 VTNET_CORE_LOCK(sc);
3465 sc->vtnet_vlan_filter[idx] |= (1 << bit);
3467 sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3469 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3470 ifp->if_drv_flags & IFF_DRV_RUNNING &&
3471 vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3472 device_printf(sc->vtnet_dev,
3473 "cannot %s VLAN %d %s the host filter table\n",
3474 add ? "add" : "remove", tag, add ? "to" : "from");
3477 VTNET_CORE_UNLOCK(sc);
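/*
 * Illustrative sketch, not part of the driver: the VLAN filter is a
 * 4096-bit array stored as 32-bit words, so a tag selects word "tag >> 5"
 * and bit "tag & 0x1F". For example, VLAN 100 maps to word 3, bit 4
 * (100 == 3 * 32 + 4).
 */
#if 0
	idx = (tag >> 5) & 0x7F;	/* word index, 0..127   */
	bit = tag & 0x1F;		/* bit within that word */
#endif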
3481 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3484 if (ifp->if_softc != arg)
3487 vtnet_update_vlan_filter(arg, 1, tag);
3491 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3494 if (ifp->if_softc != arg)
3497 vtnet_update_vlan_filter(arg, 0, tag);
3501 vtnet_is_link_up(struct vtnet_softc *sc)
3507 dev = sc->vtnet_dev;
3508 ifp = sc->vtnet_ifp;
3510 if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3511 status = VIRTIO_NET_S_LINK_UP;
3513 status = virtio_read_dev_config_2(dev,
3514 offsetof(struct virtio_net_config, status));
3516 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3520 vtnet_update_link_status(struct vtnet_softc *sc)
3525 ifp = sc->vtnet_ifp;
3527 VTNET_CORE_LOCK_ASSERT(sc);
3528 link = vtnet_is_link_up(sc);
3530 /* Notify if the link status has changed. */
3531 if (link != 0 && sc->vtnet_link_active == 0) {
3532 sc->vtnet_link_active = 1;
3533 if_link_state_change(ifp, LINK_STATE_UP);
3534 } else if (link == 0 && sc->vtnet_link_active != 0) {
3535 sc->vtnet_link_active = 0;
3536 if_link_state_change(ifp, LINK_STATE_DOWN);
3541 vtnet_ifmedia_upd(struct ifnet *ifp)
3543 struct vtnet_softc *sc;
3544 struct ifmedia *ifm;
3547 ifm = &sc->vtnet_media;
3549 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3556 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3558 struct vtnet_softc *sc;
3562 ifmr->ifm_status = IFM_AVALID;
3563 ifmr->ifm_active = IFM_ETHER;
3565 VTNET_CORE_LOCK(sc);
3566 if (vtnet_is_link_up(sc) != 0) {
3567 ifmr->ifm_status |= IFM_ACTIVE;
3568 ifmr->ifm_active |= VTNET_MEDIATYPE;
3570 ifmr->ifm_active |= IFM_NONE;
3571 VTNET_CORE_UNLOCK(sc);
3575 vtnet_set_hwaddr(struct vtnet_softc *sc)
3580 dev = sc->vtnet_dev;
3582 if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3583 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3584 device_printf(dev, "unable to set MAC address\n");
3585 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3586 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3587 virtio_write_dev_config_1(dev,
3588 offsetof(struct virtio_net_config, mac) + i,
3589 sc->vtnet_hwaddr[i]);
3595 vtnet_get_hwaddr(struct vtnet_softc *sc)
3600 dev = sc->vtnet_dev;
3602 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3604 * Generate a random locally administered unicast address.
3606 * It would be nice to generate the same MAC address across
3607 * reboots, but it seems all the hosts currently available
3608 * support the MAC feature, so this isn't too important.
3610 sc->vtnet_hwaddr[0] = 0xB2;
3611 arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3612 vtnet_set_hwaddr(sc);
3616 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3617 sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3618 offsetof(struct virtio_net_config, mac) + i);
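/*
 * Illustrative note, not part of the driver: 0xB2 (1011 0010) makes the
 * generated address unicast and locally administered, since bit 0 of the
 * first octet (group/multicast) is clear and bit 1 (locally administered)
 * is set.
 */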
3623 vtnet_vlan_tag_remove(struct mbuf *m)
3625 struct ether_vlan_header *evh;
3627 evh = mtod(m, struct ether_vlan_header *);
3628 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
3629 m->m_flags |= M_VLANTAG;
3631 /* Strip the 802.1Q header. */
3632 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
3633 ETHER_HDR_LEN - ETHER_TYPE_LEN);
3634 m_adj(m, ETHER_VLAN_ENCAP_LEN);
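/*
 * Illustrative sketch, not part of the driver: the bcopy() above slides the
 * 12 bytes of destination and source MACs (ETHER_HDR_LEN - ETHER_TYPE_LEN)
 * forward over the 4-byte encapsulation, and m_adj() then trims the
 * now-stale leading bytes:
 *
 *	before:	[dst][src][0x8100][tci][type][payload]
 *	after:	[dst][src][type][payload]
 */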
3638 vtnet_set_rx_process_limit(struct vtnet_softc *sc)
3642 limit = vtnet_tunable_int(sc, "rx_process_limit",
3643 vtnet_rx_process_limit);
3646 sc->vtnet_rx_process_limit = limit;
3650 vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
3654 size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
3657 * The Tx interrupt is disabled until the queue free count falls
3658 * below our threshold. Completed frames are drained from the Tx
3659 * virtqueue before transmitting new frames and in the watchdog
3660 * callout, so the frequency of Tx interrupts is greatly reduced,
3661 * at the cost of not freeing mbufs as quickly as they otherwise would be.
3664 * N.B. We assume all the Tx queues are the same size.
3669 * Without indirect descriptors, leave enough room for the maximum
3670 * number of segments we handle.
3672 if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3673 thresh < sc->vtnet_tx_nsegs)
3674 thresh = sc->vtnet_tx_nsegs;
3676 sc->vtnet_tx_intr_thresh = thresh;
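/*
 * Illustrative sketch, not part of the driver: assuming the usual
 * quarter-of-the-ring threshold, a 256-entry Tx virtqueue leaves its
 * interrupt disabled until fewer than 64 entries remain free; the values
 * below are made-up examples.
 */
#if 0
	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);	/* e.g. 256 */
	thresh = size / 4;					/* e.g. 64  */
#endif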
3680 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3681 struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3683 struct sysctl_oid *node;
3684 struct sysctl_oid_list *list;
3685 struct vtnet_rxq_stats *stats;
3688 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
3689 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3690 CTLFLAG_RD, NULL, "Receive Queue");
3691 list = SYSCTL_CHILDREN(node);
3693 stats = &rxq->vtnrx_stats;
3695 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3696 &stats->vrxs_ipackets, "Receive packets");
3697 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3698 &stats->vrxs_ibytes, "Receive bytes");
3699 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3700 &stats->vrxs_iqdrops, "Receive drops");
3701 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3702 &stats->vrxs_ierrors, "Receive errors");
3703 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3704 &stats->vrxs_csum, "Receive checksum offloaded");
3705 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3706 &stats->vrxs_csum_failed, "Receive checksum offload failed");
3707 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3708 &stats->vrxs_rescheduled,
3709 "Receive interrupt handler rescheduled");
3713 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
3714 struct sysctl_oid_list *child, struct vtnet_txq *txq)
3716 struct sysctl_oid *node;
3717 struct sysctl_oid_list *list;
3718 struct vtnet_txq_stats *stats;
3721 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
3722 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3723 CTLFLAG_RD, NULL, "Transmit Queue");
3724 list = SYSCTL_CHILDREN(node);
3726 stats = &txq->vtntx_stats;
3728 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3729 &stats->vtxs_opackets, "Transmit packets");
3730 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3731 &stats->vtxs_obytes, "Transmit bytes");
3732 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3733 &stats->vtxs_omcasts, "Transmit multicasts");
3734 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3735 &stats->vtxs_csum, "Transmit checksum offloaded");
3736 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3737 &stats->vtxs_tso, "Transmit segmentation offloaded");
3738 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3739 &stats->vtxs_rescheduled,
3740 "Transmit interrupt handler rescheduled");
3744 vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
3747 struct sysctl_ctx_list *ctx;
3748 struct sysctl_oid *tree;
3749 struct sysctl_oid_list *child;
3752 dev = sc->vtnet_dev;
3753 ctx = device_get_sysctl_ctx(dev);
3754 tree = device_get_sysctl_tree(dev);
3755 child = SYSCTL_CHILDREN(tree);
3757 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3758 vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3759 vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3764 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
3765 struct sysctl_oid_list *child, struct vtnet_softc *sc)
3767 struct vtnet_statistics *stats;
3768 struct vtnet_rxq_stats rxaccum;
3769 struct vtnet_txq_stats txaccum;
3771 vtnet_accum_stats(sc, &rxaccum, &txaccum);
3773 stats = &sc->vtnet_stats;
3774 stats->rx_csum_offloaded = rxaccum.vrxs_csum;
3775 stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
3776 stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
3777 stats->tx_csum_offloaded = txaccum.vtxs_csum;
3778 stats->tx_tso_offloaded = txaccum.vtxs_tso;
3779 stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
3781 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
3782 CTLFLAG_RD, &stats->mbuf_alloc_failed,
3783 "Mbuf cluster allocation failures");
3785 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
3786 CTLFLAG_RD, &stats->rx_frame_too_large,
3787 "Received frame larger than the mbuf chain");
3788 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
3789 CTLFLAG_RD, &stats->rx_enq_replacement_failed,
3790 "Enqueuing the replacement receive mbuf failed");
3791 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
3792 CTLFLAG_RD, &stats->rx_mergeable_failed,
3793 "Mergeable buffers receive failures");
3794 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
3795 CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
3796 "Received checksum offloaded buffer with unsupported "
3798 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
3799 CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
3800 "Received checksum offloaded buffer with incorrect IP protocol");
3801 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
3802 CTLFLAG_RD, &stats->rx_csum_bad_offset,
3803 "Received checksum offloaded buffer with incorrect offset");
3804 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
3805 CTLFLAG_RD, &stats->rx_csum_bad_proto,
3806 "Received checksum offloaded buffer with incorrect protocol");
3807 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
3808 CTLFLAG_RD, &stats->rx_csum_failed,
3809 "Received buffer checksum offload failed");
3810 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
3811 CTLFLAG_RD, &stats->rx_csum_offloaded,
3812 "Received buffer checksum offload succeeded");
3813 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
3814 CTLFLAG_RD, &stats->rx_task_rescheduled,
3815 "Times the receive interrupt task rescheduled itself");
3817 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
3818 CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
3819 "Aborted transmit of checksum offloaded buffer with unknown "
3821 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
3822 CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
3823 "Aborted transmit of TSO buffer with unknown Ethernet type");
3824 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3825 CTLFLAG_RD, &stats->tx_tso_not_tcp,
3826 "Aborted transmit of TSO buffer with non TCP protocol");
3827 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3828 CTLFLAG_RD, &stats->tx_defragged,
3829 "Transmit mbufs defragged");
3830 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
3831 CTLFLAG_RD, &stats->tx_defrag_failed,
3832 "Aborted transmit of buffer because defrag failed");
3833 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
3834 CTLFLAG_RD, &stats->tx_csum_offloaded,
3835 "Offloaded checksum of transmitted buffer");
3836 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
3837 CTLFLAG_RD, &stats->tx_tso_offloaded,
3838 "Segmentation offload of transmitted buffer");
3839 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
3840 CTLFLAG_RD, &stats->tx_task_rescheduled,
3841 "Times the transmit interrupt task rescheduled itself");
3845 vtnet_setup_sysctl(struct vtnet_softc *sc)
3848 struct sysctl_ctx_list *ctx;
3849 struct sysctl_oid *tree;
3850 struct sysctl_oid_list *child;
3852 dev = sc->vtnet_dev;
3853 ctx = device_get_sysctl_ctx(dev);
3854 tree = device_get_sysctl_tree(dev);
3855 child = SYSCTL_CHILDREN(tree);
3857 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3858 CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3859 "Maximum number of supported virtqueue pairs");
3860 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
3861 CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
3862 "Requested number of virtqueue pairs");
3863 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3864 CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3865 "Number of active virtqueue pairs");
3867 vtnet_setup_stat_sysctl(ctx, child, sc);
3871 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3874 return (virtqueue_enable_intr(rxq->vtnrx_vq));
3878 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
3881 virtqueue_disable_intr(rxq->vtnrx_vq);
3885 vtnet_txq_enable_intr(struct vtnet_txq *txq)
3887 struct virtqueue *vq;
3891 if (vtnet_txq_below_threshold(txq) != 0)
3892 return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
3895 * The free count is above our threshold. Keep the Tx interrupt
3896 * disabled until the queue is fuller.
3902 vtnet_txq_disable_intr(struct vtnet_txq *txq)
3905 virtqueue_disable_intr(txq->vtntx_vq);
3909 vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3913 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3914 vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
3918 vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
3922 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3923 vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
3927 vtnet_enable_interrupts(struct vtnet_softc *sc)
3930 vtnet_enable_rx_interrupts(sc);
3931 vtnet_enable_tx_interrupts(sc);
3935 vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
3939 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3940 vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3944 vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
3948 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3949 vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3953 vtnet_disable_interrupts(struct vtnet_softc *sc)
3956 vtnet_disable_rx_interrupts(sc);
3957 vtnet_disable_tx_interrupts(sc);
3961 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
3965 snprintf(path, sizeof(path),
3966 "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
3967 TUNABLE_INT_FETCH(path, &def);
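/*
 * Illustrative note, not part of the driver: because the knob is fetched
 * from the kernel environment, a loader.conf(5) line such as the made-up
 * example below overrides the default for unit 0 only:
 *
 *	hw.vtnet.0.rx_process_limit="256"
 */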
3974 vtnet_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
3976 struct vtnet_softc *sc;
3978 sc = if_getsoftc(ifp);
3980 VTNET_CORE_LOCK(sc);
3981 *nrxr = sc->vtnet_max_vq_pairs;
3982 *ncl = DEBUGNET_MAX_IN_FLIGHT;
3983 *clsize = sc->vtnet_rx_clsize;
3984 VTNET_CORE_UNLOCK(sc);
3987 * We need to allocate from this zone in the transmit path, so ensure
3988 * that we have at least one item per header available.
3989 * XXX add a separate zone like we do for mbufs? otherwise we may alloc
3992 uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
3993 uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2);
3997 vtnet_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
4002 vtnet_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
4004 struct vtnet_softc *sc;
4005 struct vtnet_txq *txq;
4008 sc = if_getsoftc(ifp);
4009 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4013 txq = &sc->vtnet_txqs[0];
4014 error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
4016 (void)vtnet_txq_notify(txq);
4021 vtnet_debugnet_poll(struct ifnet *ifp, int count)
4023 struct vtnet_softc *sc;
4026 sc = if_getsoftc(ifp);
4027 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4031 (void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4032 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4033 (void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4036 #endif /* DEBUGNET */