/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>

#include <vm/uma.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>
#include <netinet/netdump/netdump.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>

#include "virtio_if.h"

#include "opt_inet.h"
#include "opt_inet6.h"
static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_attach_completed(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static void	vtnet_setup_features(struct vtnet_softc *);
static int	vtnet_init_rxq(struct vtnet_softc *, int);
static int	vtnet_init_txq(struct vtnet_softc *, int);
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
static void	vtnet_free_rx_filters(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);

static int	vtnet_rxq_populate(struct vtnet_rxq *);
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
		vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
		    struct mbuf *, int);
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxq_eof(struct vtnet_rxq *);
static void	vtnet_rx_vq_intr(void *);
static void	vtnet_rxq_tq_intr(void *, int);

static int	vtnet_txq_below_threshold(struct vtnet_txq *);
static int	vtnet_txq_notify(struct vtnet_txq *);
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
		    int *, int *, int *);
static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
		    int, struct virtio_net_hdr *);
static struct mbuf *
		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
#ifdef VTNET_LEGACY_TX
static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
static void	vtnet_start(struct ifnet *);
#else
static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vtnet_txq_tq_deferred(void *, int);
#endif
static void	vtnet_txq_start(struct vtnet_txq *);
static void	vtnet_txq_tq_intr(void *, int);
static int	vtnet_txq_eof(struct vtnet_txq *);
static void	vtnet_tx_vq_intr(void *);
static void	vtnet_tx_start_all(struct vtnet_softc *);

#ifndef VTNET_LEGACY_TX
static void	vtnet_qflush(struct ifnet *);
#endif

static int	vtnet_watchdog(struct vtnet_txq *);
static void	vtnet_accum_stats(struct vtnet_softc *,
		    struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void	vtnet_tick(void *);

static void	vtnet_start_taskqueues(struct vtnet_softc *);
static void	vtnet_free_taskqueues(struct vtnet_softc *);
static void	vtnet_drain_taskqueues(struct vtnet_softc *);

static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void	vtnet_stop_rendezvous(struct vtnet_softc *);
static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_rx_filters(struct vtnet_softc *);
static int	vtnet_init_rx_queues(struct vtnet_softc *);
static int	vtnet_init_tx_queues(struct vtnet_softc *);
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
static void	vtnet_rx_filter(struct vtnet_softc *);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);

static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_rxq *);
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_txq *);
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void	vtnet_setup_sysctl(struct vtnet_softc *);

static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int	vtnet_txq_enable_intr(struct vtnet_txq *);
static void	vtnet_txq_disable_intr(struct vtnet_txq *);
static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_interrupts(struct vtnet_softc *);
static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_interrupts(struct vtnet_softc *);

static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);
NETDUMP_DEFINE(vtnet);

/* Tunables. */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0,
    "VTNET driver parameters");
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
    0, "Disables TCP Segmentation Offload");
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
    0, "Disables TCP Large Receive Offload");
static int vtnet_mq_disable = 0;
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
    0, "Disables multiqueue support");
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Sets the maximum number of multiqueue pairs");
static int vtnet_rx_process_limit = 512;
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Limits the number of RX segments processed in a single pass");

static uma_zone_t vtnet_tx_header_zone;
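
/*
 * Each transmitted packet is prefixed by a struct vtnet_tx_header that
 * carries the virtio_net_hdr handed to the host. Headers are carved from
 * this UMA zone in vtnet_txq_encap() and released once the host has
 * consumed the descriptor in vtnet_txq_eof() (or when the queue is
 * drained in vtnet_txq_free_mbufs()).
 */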
static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},

	{ 0, NULL }
};
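
/*
 * This table only provides human-readable names for the feature bits
 * (registered via virtio_set_feature_desc() in vtnet_attach() for boot
 * and sysctl reporting); the actual negotiation happens in
 * vtnet_negotiate_features() below.
 */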
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
	DEVMETHOD(virtio_config_change,	vtnet_config_change),

	DEVMETHOD_END
};

#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif /* DEV_NETMAP */
static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error = 0;
	static int loaded = 0;

	switch (type) {
	case MOD_LOAD:
		if (loaded++ == 0)
			vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
			    sizeof(struct vtnet_tx_header),
			    NULL, NULL, NULL, NULL, 0, 0);
		break;
	case MOD_QUIESCE:
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		break;
	case MOD_SHUTDOWN:
		if (--loaded == 0) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_UNLOAD:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
static int
vtnet_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);

	VTNET_CORE_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);

	vtnet_setup_sysctl(sc);
	vtnet_setup_features(sc);

	error = vtnet_alloc_rx_filters(sc);
	if (error) {
		device_printf(dev, "cannot allocate Rx filters\n");
		goto fail;
	}

	error = vtnet_alloc_rxtx_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		/* BMV: This will crash if during boot! */
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

#ifdef DEV_NETMAP
	vtnet_netmap_attach(sc);
#endif /* DEV_NETMAP */

	vtnet_start_taskqueues(sc);

	return (0);

fail:
	vtnet_detach(dev);

	return (error);
}
static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		VTNET_CORE_LOCK(sc);
		vtnet_stop(sc);
		VTNET_CORE_UNLOCK(sc);

		callout_drain(&sc->vtnet_tick_ch);
		vtnet_drain_taskqueues(sc);

		ether_ifdetach(ifp);
	}

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif /* DEV_NETMAP */

	vtnet_free_taskqueues(sc);

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	vtnet_free_rxtx_queues(sc);
	vtnet_free_rx_filters(sc);

	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	VTNET_CORE_LOCK_DESTROY(sc);

	return (0);
}
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static int
vtnet_shutdown(device_t dev)
{

	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}
static int
vtnet_attach_completed(device_t dev)
{

	vtnet_attach_disable_promisc(device_get_softc(dev));

	return (0);
}
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
	}
	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
		mask |= VTNET_TSO_FEATURES;
	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
		mask |= VTNET_LRO_FEATURES;
#ifndef VTNET_LEGACY_TX
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
		mask |= VIRTIO_NET_F_MQ;
#else
	mask |= VIRTIO_NET_F_MQ;
#endif

	features = VTNET_FEATURES & ~mask;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO header. This requires up to 34 descriptors
		 * with MCLBYTES clusters. If we do not have indirect
		 * descriptors, LRO is disabled since the virtqueue will not
		 * contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}
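
/*
 * Note the double negotiation above: if LRO must be dropped because
 * neither mergeable buffers nor indirect descriptors are available, the
 * features are re-negotiated with VTNET_LRO_FEATURES cleared so the host
 * and guest agree on the final feature set.
 */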
static void
vtnet_setup_features(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
	else
		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
	} else
		sc->vtnet_max_vq_pairs = 1;

	if (sc->vtnet_max_vq_pairs > 1) {
		int max;

		/*
		 * Limit the maximum number of queue pairs to the lower of
		 * the number of CPUs and the configured maximum.
		 * The actual number of queues that get used may be less.
		 */
		max = vtnet_tunable_int(sc, "mq_max_pairs",
		    vtnet_mq_max_pairs);
		if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
			if (max > mp_ncpus)
				max = mp_ncpus;
			if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
				max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
			if (max > 1) {
				sc->vtnet_requested_vq_pairs = max;
				sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
			}
		}
	}
}
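
/*
 * The device config space is only consulted for max_virtqueue_pairs when
 * both VIRTIO_NET_F_MQ and a control virtqueue were negotiated; the MQ
 * command that activates additional queue pairs is issued over the
 * control virtqueue (see vtnet_ctrl_mq_cmd()).
 */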
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
	struct vtnet_rxq *rxq;

	rxq = &sc->vtnet_rxqs[id];

	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);

	rxq->vtnrx_sc = sc;
	rxq->vtnrx_id = id;

	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
	if (rxq->vtnrx_sg == NULL)
		return (ENOMEM);

	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);

	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}
static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
	struct vtnet_txq *txq;

	txq = &sc->vtnet_txqs[id];

	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);

	txq->vtntx_sc = sc;
	txq->vtntx_id = id;

	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
	if (txq->vtntx_sg == NULL)
		return (ENOMEM);

#ifndef VTNET_LEGACY_TX
	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vtntx_mtx);
	if (txq->vtntx_br == NULL)
		return (ENOMEM);

	TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &txq->vtntx_tq);
	if (txq->vtntx_tq == NULL)
		return (ENOMEM);

	return (0);
}
static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
	int i, npairs, error;

	npairs = sc->vtnet_max_vq_pairs;

	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
		return (ENOMEM);

	for (i = 0; i < npairs; i++) {
		error = vtnet_init_rxq(sc, i);
		if (error)
			return (error);
		error = vtnet_init_txq(sc, i);
		if (error)
			return (error);
	}

	vtnet_setup_queue_sysctl(sc);

	return (0);
}
static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{

	rxq->vtnrx_sc = NULL;
	rxq->vtnrx_id = -1;

	if (rxq->vtnrx_sg != NULL) {
		sglist_free(rxq->vtnrx_sg);
		rxq->vtnrx_sg = NULL;
	}

	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
		mtx_destroy(&rxq->vtnrx_mtx);
}
static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{

	txq->vtntx_sc = NULL;
	txq->vtntx_id = -1;

	if (txq->vtntx_sg != NULL) {
		sglist_free(txq->vtntx_sg);
		txq->vtntx_sg = NULL;
	}

#ifndef VTNET_LEGACY_TX
	if (txq->vtntx_br != NULL) {
		buf_ring_free(txq->vtntx_br, M_DEVBUF);
		txq->vtntx_br = NULL;
	}
#endif

	if (mtx_initialized(&txq->vtntx_mtx) != 0)
		mtx_destroy(&txq->vtntx_mtx);
}
static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	if (sc->vtnet_rxqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
		free(sc->vtnet_rxqs, M_DEVBUF);
		sc->vtnet_rxqs = NULL;
	}

	if (sc->vtnet_txqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
		free(sc->vtnet_txqs, M_DEVBUF);
		sc->vtnet_txqs = NULL;
	}
}
static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_mac_filter == NULL)
			return (ENOMEM);
	}

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_vlan_filter == NULL)
			return (ENOMEM);
	}

	return (0);
}
static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_mac_filter != NULL) {
		free(sc->vtnet_mac_filter, M_DEVBUF);
		sc->vtnet_mac_filter = NULL;
	}

	if (sc->vtnet_vlan_filter != NULL) {
		free(sc->vtnet_vlan_filter, M_DEVBUF);
		sc->vtnet_vlan_filter = NULL;
	}
}
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info *info;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, idx, flags, nvqs, error;

	dev = sc->vtnet_dev;
	flags = 0;

	nvqs = sc->vtnet_max_vq_pairs * 2;
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		nvqs++;

	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
	if (info == NULL)
		return (ENOMEM);

	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
	}

	/*
	 * Enable interrupt binding if this is multiqueue. This only matters
	 * when per-vq MSIX is available.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
		flags |= 0;

	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
	free(info, M_TEMP);

	return (error);
}
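
/*
 * The virtqueues above are laid out in interleaved order - rx0, tx0,
 * rx1, tx1, ... - with the control virtqueue, when negotiated, placed
 * last. This matches the queue index layout defined by the VirtIO
 * network device specification.
 */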
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);	/* Approx. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_ioctl = vtnet_ioctl;
	ifp->if_get_counter = vtnet_get_counter;
#ifndef VTNET_LEGACY_TX
	ifp->if_transmit = vtnet_txq_mq_start;
	ifp->if_qflush = vtnet_qflush;
#else
	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
	ifp->if_start = vtnet_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
	ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		ifp->if_capabilities |= IFCAP_LINKSTATE;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
		} else {
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
				ifp->if_capabilities |= IFCAP_TSO4;
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
				ifp->if_capabilities |= IFCAP_TSO6;
			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
		}

		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	vtnet_set_rx_process_limit(sc);
	vtnet_set_tx_intr_threshold(sc);

	NETDUMP_SET(ifp, vtnet);

	return (0);
}
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
		return (EINVAL);

	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
	    new_mtu;

	/*
	 * Based on the new MTU (and hence frame size) determine which
	 * cluster size is most appropriate for the receive queues.
	 */
	if (frame_size <= MCLBYTES) {
		clsize = MCLBYTES;
	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		/* Avoid going past 9K jumbos. */
		if (frame_size > MJUM9BYTES)
			return (EINVAL);
		clsize = MJUM9BYTES;
	} else
		clsize = MJUMPAGESIZE;

	ifp->if_mtu = new_mtu;
	sc->vtnet_rx_new_clsize = clsize;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}
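
/*
 * Example cluster selection: with the 12-byte mergeable header, a
 * 1500-byte MTU yields a frame size that fits an MCLBYTES (2K) cluster,
 * while a 9000-byte MTU requires MJUMPAGESIZE clusters (or MJUM9BYTES
 * when mergeable buffers were not negotiated).
 */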
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifp->if_mtu != ifr->ifr_mtu) {
			VTNET_CORE_LOCK(sc);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			VTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VTNET_CORE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else {
					ifp->if_flags |= IFF_PROMISC;
					if ((ifp->if_flags ^ sc->vtnet_if_flags)
					    & IFF_ALLMULTI)
						error = ENOTSUP;
				}
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
			break;
		VTNET_CORE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vtnet_rx_filter_mac(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		VTNET_CORE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;

		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
		    IFCAP_VLAN_HWFILTER)) {
			/* These Rx features require us to renegotiate. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_RXCSUM_IPV6)
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
			if (mask & IFCAP_VLAN_HWFILTER)
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		} else
			reinit = 0;

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vtnet_init_locked(sc);
		}

		VTNET_CORE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}
static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = rxq->vtnrx_vq;
	error = ENOSPC;

	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
		error = vtnet_rxq_new_buf(rxq);
		if (error)
			break;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);
		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}
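
/*
 * Note the queue is kicked once after the initial bulk fill rather than
 * per buffer; the host begins consuming the receive buffers after this
 * single notify.
 */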
static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = rxq->vtnrx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
}
static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_clsize;

	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));

	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	/* Allocate the rest of the chain. */
	for (i = 1; i < nbufs; i++) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
		if (m == NULL)
			goto fail;

		m->m_len = clsize;
		m_tail->m_next = m;
		m_tail = m;
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}
/*
 * Slow path for when LRO without mergeable buffers is negotiated.
 */
static int
vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
    int len0)
{
	struct vtnet_softc *sc;
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	sc = rxq->vtnrx_sc;
	clsize = sc->vtnet_rx_clsize;

	m_prev = NULL;
	m_tail = NULL;
	nreplace = 0;

	m = m0;
	len = len0;

	/*
	 * Since these mbuf chains are so large, we avoid allocating an
	 * entire replacement chain if possible. When the received frame
	 * did not consume the entire chain, the unused mbufs are moved
	 * to the replacement chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received a frame
		 * larger than the chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		/* We always allocate the same cluster size. */
		KASSERT(m->m_len == clsize,
		    ("%s: mbuf size %d is not the cluster size %d",
		    __func__, m->m_len, clsize));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
	    sc->vtnet_rx_nmbufs));

	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move any unused mbufs from the received chain onto the end
	 * of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}
static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
	struct vtnet_softc *sc;
	struct mbuf *m_new;
	int error;

	sc = rxq->vtnrx_sc;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
	    ("%s: chained mbuf without LRO_NOMRG", __func__));

	if (m->m_next == NULL) {
		/* Fast-path for the common case of just one mbuf. */
		if (m->m_len < len)
			return (EINVAL);

		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
		if (m_new == NULL)
			return (ENOBUFS);

		error = vtnet_rxq_enqueue_buf(rxq, m_new);
		if (error) {
			/*
			 * The new mbuf is supposed to be an identical
			 * copy of the one just dequeued so this is an
			 * unexpected error.
			 */
			sc->vtnet_stats.rx_enq_replacement_failed++;
			m_freem(m_new);
		} else
			m->m_len = len;
	} else
		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);

	return (error);
}
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	struct vtnet_rx_header *rxhdr;
	uint8_t *mdata;
	int offset, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;
	mdata = mtod(m, uint8_t *);

	VTNET_RXQ_LOCK_ASSERT(rxq);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
	    ("%s: chained mbuf without LRO_NOMRG", __func__));
	KASSERT(m->m_len == sc->vtnet_rx_clsize,
	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
	    sc->vtnet_rx_clsize));

	sglist_reset(sg);
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
		rxhdr = (struct vtnet_rx_header *) mdata;
		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
		offset = sizeof(struct vtnet_rx_header);
	} else
		offset = 0;

	sglist_append(sg, mdata + offset, m->m_len - offset);
	if (m->m_next != NULL) {
		error = sglist_append_mbuf(sg, m->m_next);
		if (error)
			return (error);
	}

	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);

	return (error);
}
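
/*
 * Layout of the scatter/gather list built above: without mergeable
 * buffers, segment 0 covers the virtio_net_hdr inside the vtnet_rx_header
 * pad area and the frame data follows in later segments; with mergeable
 * buffers, the header simply occupies the first bytes of the data
 * cluster, so a single segment can cover both.
 */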
static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
	struct vtnet_softc *sc;
	struct mbuf *m;
	int error;

	sc = rxq->vtnrx_sc;

	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m);
	if (error)
		m_freem(m);

	return (error);
}
/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	sc = rxq->vtnrx_sc;

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
	 */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case offsetof(struct sctphdr, checksum):
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	return (0);
}
static int
vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
    uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int offset, proto;

	sc = rxq->vtnrx_sc;

	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip;
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
			return (1);
		ip = (struct ip *)(m->m_data + ip_start);
		proto = ip->ip_p;
		offset = ip_start + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len < ip_start +
		    sizeof(struct ip6_hdr)))
			return (1);
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
		if (__predict_false(offset < 0))
			return (1);
		break;
#endif
	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	switch (proto) {
	case IPPROTO_TCP:
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_UDP:
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_SCTP:
		if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	default:
		/*
		 * For the remaining protocols, FreeBSD does not support
		 * checksum offloading, so the checksum will be recomputed.
		 */
#if 0
		if_printf(sc->vtnet_ifp, "%s: cksum offload of unsupported "
		    "protocol eth_type=%#x proto=%d csum_start=%d "
		    "csum_offset=%d\n", __func__, eth_type, proto,
		    hdr->csum_start, hdr->csum_offset);
#endif
		break;
	}

	return (0);
}
/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 * a bit.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	uint16_t eth_type;
	int offset, error;

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		/* BMV: We should handle nested VLAN tags too. */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
	else
		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);

	return (error);
}
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
{
	struct mbuf *m;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
		if (m == NULL)
			break;
		vtnet_rxq_discard_buf(rxq, m);
	}
}
static void
vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be successful
	 * since it was just dequeued.
	 */
	error = vtnet_rxq_enqueue_buf(rxq, m);
	KASSERT(error == 0,
	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
}
static int
vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	sc = rxq->vtnrx_sc;
	vq = rxq->vtnrx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			goto fail;
		}

		if (vtnet_rxq_new_buf(rxq) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
static void
vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ether_header *eh;

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		eh = mtod(m, struct ether_header *);
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			vtnet_vlan_tag_remove(m);
			/*
			 * With the 802.1Q header removed, update the
			 * checksum starting location accordingly.
			 */
			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
		}
	}

	m->m_pkthdr.flowid = rxq->vtnrx_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

	/*
	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
	 * distinction that Linux does. Need to reevaluate if performing
	 * the offloading for the NEEDS_CSUM case is really appropriate.
	 */
	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
	    VIRTIO_NET_HDR_F_DATA_VALID)) {
		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
			rxq->vtnrx_stats.vrxs_csum++;
		else
			rxq->vtnrx_stats.vrxs_csum_failed++;
	}

	rxq->vtnrx_stats.vrxs_ipackets++;
	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;

	VTNET_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m);
	VTNET_RXQ_LOCK(rxq);
}
static int
vtnet_rxq_eof(struct vtnet_rxq *rxq)
{
	struct virtio_net_hdr lhdr, *hdr;
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, count;

	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;
	vq = rxq->vtnrx_vq;
	hdr = &lhdr;
	deq = 0;
	count = sc->vtnet_rx_process_limit;

	VTNET_RXQ_LOCK_ASSERT(rxq);

#ifdef DEV_NETMAP
	if (netmap_rx_irq(ifp, 0, &deq)) {
		return (FALSE);
	}
#endif /* DEV_NETMAP */

	while (count-- > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			rxq->vtnrx_stats.vrxs_ierrors++;
			vtnet_rxq_discard_buf(rxq, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad inserted between the header
			 * and the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
			rxq->vtnrx_stats.vrxs_iqdrops++;
			vtnet_rxq_discard_buf(rxq, m);
			if (nbufs > 1)
				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			/* Dequeue the rest of chain. */
			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
				continue;
		}

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the header is at the beginning of the
		 * mbuf data. We no longer need num_buffers, so always use a
		 * regular header.
		 *
		 * BMV: Is this memcpy() expensive? We know the mbuf data is
		 * still valid even after the m_adj().
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		vtnet_rxq_input(rxq, m, hdr);

		/* Must recheck after dropping the Rx lock. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq);

	return (count > 0 ? 0 : EAGAIN);
}
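
/*
 * vtnet_rxq_eof() returns EAGAIN when it stopped because the process
 * limit was exhausted rather than because the ring was empty; the
 * interrupt handlers below use the non-zero return to reschedule
 * further processing.
 */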
static void
vtnet_rx_vq_intr(void *xrxq)
{
	struct vtnet_softc *sc;
	struct vtnet_rxq *rxq;
	struct ifnet *ifp;
	int tries, more;

	rxq = xrxq;
	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;
	tries = 0;

	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_rxq_disable_intr(rxq);
		return;
	}

	VTNET_RXQ_LOCK(rxq);

again:
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		/*
		 * This is an occasional condition or race (when !more),
		 * so retry a few times before scheduling the taskqueue.
		 */
		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
			goto again;

		VTNET_RXQ_UNLOCK(rxq);
		rxq->vtnrx_stats.vrxs_rescheduled++;
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	} else
		VTNET_RXQ_UNLOCK(rxq);
}
static void
vtnet_rxq_tq_intr(void *xrxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_rxq *rxq;
	struct ifnet *ifp;
	int more;

	rxq = xrxq;
	sc = rxq->vtnrx_sc;
	ifp = sc->vtnet_ifp;

	VTNET_RXQ_LOCK(rxq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_RXQ_UNLOCK(rxq);
		return;
	}

	more = vtnet_rxq_eof(rxq);
	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
		if (!more)
			vtnet_rxq_disable_intr(rxq);
		rxq->vtnrx_stats.vrxs_rescheduled++;
		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	}

	VTNET_RXQ_UNLOCK(rxq);
}
static int
vtnet_txq_below_threshold(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;

	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
}
static int
vtnet_txq_notify(struct vtnet_txq *txq)
{
	struct virtqueue *vq;

	vq = txq->vtntx_vq;

	txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
	virtqueue_notify(vq);

	if (vtnet_txq_enable_intr(txq) == 0)
		return (0);

	/*
	 * Drain frames that were completed since last checked. If this
	 * causes the queue to go above the threshold, the caller should
	 * continue transmitting.
	 */
	if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
		virtqueue_disable_intr(vq);
		return (1);
	}

	return (0);
}
static void
vtnet_txq_free_mbufs(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = txq->vtntx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
}
/*
 * BMV: Much of this can go away once we finally have offsets in
 * the mbuf packet header. Bug andre@.
 */
static int
vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
    int *etype, int *proto, int *start)
{
	struct vtnet_softc *sc;
	struct ether_vlan_header *evh;
	int offset;

	sc = txq->vtntx_sc;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		sc->vtnet_stats.tx_csum_bad_ethtype++;
		return (EINVAL);
	}

	return (0);
}
static int
vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
    int offset, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct vtnet_softc *sc;
	struct tcphdr *tcp, tcphdr;

	sc = txq->vtntx_sc;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = offset + (tcp->th_off << 2);
	hdr->gso_size = m->m_pkthdr.tso_segsz;
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
	    VIRTIO_NET_HDR_GSO_TCPV6;

	if (tcp->th_flags & TH_CWR) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
		 * FreeBSD, ECN support is not on a per-interface basis,
		 * but globally via the net.inet.tcp.ecn.enable sysctl
		 * knob. The default is off.
		 */
		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(sc->vtnet_ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	txq->vtntx_stats.vtxs_tso++;

	return (0);
}
static struct mbuf *
vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int flags, etype, csum_start, proto, error;

	sc = txq->vtntx_sc;
	flags = m->m_pkthdr.csum_flags;

	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		txq->vtntx_stats.vtxs_csum++;
	}

	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf. */
			sc->vtnet_stats.tx_tso_not_tcp++;
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		    __func__, m, flags));

		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}
static int
vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct sglist *sg;
	struct mbuf *m;
	int error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	sg = txq->vtntx_sg;
	m = *m_head;

	sglist_reset(sg);
	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg->sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}
static int
vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	m = *m_head;
	M_ASSERTPKTHDR(m);

	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
	if (txhdr == NULL) {
		m_freem(m);
		*m_head = NULL;
		return (ENOMEM);
	}

	/*
	 * Always use the non-mergeable header, regardless if the feature
	 * was negotiated. For transmit, num_buffers is always zero. The
	 * vtnet_hdr_size is used to enqueue the correct header size.
	 */
	hdr = &txhdr->vth_uhdr.hdr;

	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
		m = vtnet_txq_offload(txq, m, hdr);
		if ((*m_head = m) == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
	if (error == 0)
		return (0);

fail:
	uma_zfree(vtnet_tx_header_zone, txhdr);

	return (error);
}
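
/*
 * Each transmit thus occupies one scatter/gather segment for the header
 * plus one or more for the mbuf data (built in vtnet_txq_enqueue_buf());
 * the readable segment count passed to virtqueue_enqueue() covers both.
 */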
#ifdef VTNET_LEGACY_TX

static void
vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int tries, enq;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	tries = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0)
		return;

	vtnet_txq_eof(txq);

again:
	enq = 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (virtqueue_full(vq))
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
			if (m0 != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}
}

static void
vtnet_start(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;

	sc = ifp->if_softc;
	txq = &sc->vtnet_txqs[0];

	VTNET_TXQ_LOCK(txq);
	vtnet_start_locked(txq, ifp);
	VTNET_TXQ_UNLOCK(txq);
}
#else /* !VTNET_LEGACY_TX */

static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct buf_ring *br;
	struct ifnet *ifp;
	int enq, tries, error;

	sc = txq->vtntx_sc;
	vq = txq->vtntx_vq;
	br = txq->vtntx_br;
	ifp = sc->vtnet_ifp;
	tries = 0;
	error = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vtnet_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	vtnet_txq_eof(txq);

again:
	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (virtqueue_full(vq)) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);

		enq++;
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
		if (tries++ < VTNET_NOTIFY_RETRIES)
			goto again;

		txq->vtntx_stats.vtxs_rescheduled++;
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
	}

	return (0);
}
static int
vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	int i, npairs, error;

	sc = ifp->if_softc;
	npairs = sc->vtnet_act_vq_pairs;

	/* Check if the flowid is set. */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % npairs;
	else
		i = curcpu % npairs;

	txq = &sc->vtnet_txqs[i];

	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
		error = vtnet_txq_mq_start_locked(txq, m);
		VTNET_TXQ_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->vtntx_br, m);
		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
	}

	return (error);
}
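
/*
 * Queue selection above keys off the mbuf flowid when the stack provided
 * one, falling back to the current CPU. If the chosen queue is busy, the
 * packet is deferred to its buf_ring and the deferred task drains it
 * later, so transmit never blocks on a contended queue lock.
 */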
static void
vtnet_txq_tq_deferred(void *xtxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;

	txq = xtxq;
	sc = txq->vtntx_sc;

	VTNET_TXQ_LOCK(txq);
	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
	VTNET_TXQ_UNLOCK(txq);
}

#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

#ifdef VTNET_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vtnet_start_locked(txq, ifp);
#else
	if (!drbr_empty(ifp, txq->vtntx_br))
		vtnet_txq_mq_start_locked(txq, NULL);
#endif
}
static void
vtnet_txq_tq_intr(void *xtxq, int pending)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct ifnet *ifp;

	txq = xtxq;
	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

	VTNET_TXQ_LOCK(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}

	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);

	VTNET_TXQ_UNLOCK(txq);
}
static int
vtnet_txq_eof(struct vtnet_txq *txq)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	struct mbuf *m;
	int deq;

	vq = txq->vtntx_vq;
	deq = 0;

	VTNET_TXQ_LOCK_ASSERT(txq);

#ifdef DEV_NETMAP
	if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
		virtqueue_disable_intr(vq); /* XXX luigi */
		return (0); /* XXX or 1 ? */
	}
#endif /* DEV_NETMAP */

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		m = txhdr->vth_mbuf;
		deq++;

		txq->vtntx_stats.vtxs_opackets++;
		txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
		if (m->m_flags & M_MCAST)
			txq->vtntx_stats.vtxs_omcasts++;

		m_freem(m);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	if (virtqueue_empty(vq))
		txq->vtntx_watchdog = 0;

	return (deq);
}
static void
vtnet_tx_vq_intr(void *xtxq)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct ifnet *ifp;

	txq = xtxq;
	sc = txq->vtntx_sc;
	ifp = sc->vtnet_ifp;

	if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
		/*
		 * Ignore this interrupt. Either this is a spurious interrupt
		 * or multiqueue without per-VQ MSIX so every queue needs to
		 * be polled (a brain dead configuration we could try harder
		 * to avoid).
		 */
		vtnet_txq_disable_intr(txq);
		return;
	}

	VTNET_TXQ_LOCK(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VTNET_TXQ_UNLOCK(txq);
		return;
	}

	vtnet_txq_eof(txq);
	vtnet_txq_start(txq);

	VTNET_TXQ_UNLOCK(txq);
}
static void
vtnet_tx_start_all(struct vtnet_softc *sc)
{
	struct vtnet_txq *txq;
	int i;

	VTNET_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		txq = &sc->vtnet_txqs[i];

		VTNET_TXQ_LOCK(txq);
		vtnet_txq_start(txq);
		VTNET_TXQ_UNLOCK(txq);
	}
}
#ifndef VTNET_LEGACY_TX
static void
vtnet_qflush(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct vtnet_txq *txq;
	struct mbuf *m;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		txq = &sc->vtnet_txqs[i];

		VTNET_TXQ_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
			m_freem(m);
		VTNET_TXQ_UNLOCK(txq);
	}

	if_qflush(ifp);
}
#endif
2571 vtnet_watchdog(struct vtnet_txq *txq)
2575 ifp = txq->vtntx_sc->vtnet_ifp;
2577 VTNET_TXQ_LOCK(txq);
2578 if (txq->vtntx_watchdog == 1) {
2580 * Only drain completed frames if the watchdog is about to
2581 * expire. If any frames were drained, there may be enough
2582 * free descriptors now available to transmit queued frames.
2583 * In that case, the timer will immediately be decremented
		 * below, but the timeout is generous enough that this should
		 * not happen.
		 */
		if (vtnet_txq_eof(txq) != 0)
2588 vtnet_txq_start(txq);
	}

	if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
		VTNET_TXQ_UNLOCK(txq);
		return (0);
	}
	VTNET_TXQ_UNLOCK(txq);

	if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
	return (1);
}
2602 vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
2603 struct vtnet_txq_stats *txacc)
2606 bzero(rxacc, sizeof(struct vtnet_rxq_stats));
2607 bzero(txacc, sizeof(struct vtnet_txq_stats));
2609 for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2610 struct vtnet_rxq_stats *rxst;
2611 struct vtnet_txq_stats *txst;
2613 rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
2614 rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
2615 rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
2616 rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
2617 rxacc->vrxs_csum += rxst->vrxs_csum;
2618 rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
2619 rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
2621 txst = &sc->vtnet_txqs[i].vtntx_stats;
2622 txacc->vtxs_opackets += txst->vtxs_opackets;
2623 txacc->vtxs_obytes += txst->vtxs_obytes;
2624 txacc->vtxs_csum += txst->vtxs_csum;
2625 txacc->vtxs_tso += txst->vtxs_tso;
2626 txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
static uint64_t
vtnet_get_counter(if_t ifp, ift_counter cnt)
2633 struct vtnet_softc *sc;
2634 struct vtnet_rxq_stats rxaccum;
2635 struct vtnet_txq_stats txaccum;
2637 sc = if_getsoftc(ifp);
2638 vtnet_accum_stats(sc, &rxaccum, &txaccum);
	switch (cnt) {
	case IFCOUNTER_IPACKETS:
2642 return (rxaccum.vrxs_ipackets);
2643 case IFCOUNTER_IQDROPS:
2644 return (rxaccum.vrxs_iqdrops);
2645 case IFCOUNTER_IERRORS:
2646 return (rxaccum.vrxs_ierrors);
2647 case IFCOUNTER_OPACKETS:
2648 return (txaccum.vtxs_opackets);
2649 #ifndef VTNET_LEGACY_TX
2650 case IFCOUNTER_OBYTES:
2651 return (txaccum.vtxs_obytes);
2652 case IFCOUNTER_OMCASTS:
2653 return (txaccum.vtxs_omcasts);
#endif
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
2661 vtnet_tick(void *xsc)
2663 struct vtnet_softc *sc;
	struct ifnet *ifp;
	int i, timedout;

	sc = xsc;
	ifp = sc->vtnet_ifp;
	timedout = 0;
2671 VTNET_CORE_LOCK_ASSERT(sc);
2673 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
2674 timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
2676 if (timedout != 0) {
2677 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc);
	} else
		callout_schedule(&sc->vtnet_tick_ch, hz);
}
2684 vtnet_start_taskqueues(struct vtnet_softc *sc)
	device_t dev;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, error;
2691 dev = sc->vtnet_dev;
2694 * Errors here are very difficult to recover from - we cannot
2695 * easily fail because, if this is during boot, we will hang
2696 * when freeing any successfully started taskqueues because
2697 * the scheduler isn't up yet.
2699 * Most drivers just ignore the return value - it only fails
	 * with ENOMEM so an error is not likely.
	 */
2702 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2703 rxq = &sc->vtnet_rxqs[i];
2704 error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2705 "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
		if (error) {
			device_printf(dev, "failed to start rx taskq %d\n",
			    rxq->vtnrx_id);
		}
2711 txq = &sc->vtnet_txqs[i];
2712 error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
2713 "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
		if (error) {
			device_printf(dev, "failed to start tx taskq %d\n",
			    txq->vtntx_id);
		}
	}
}
2722 vtnet_free_taskqueues(struct vtnet_softc *sc)
2724 struct vtnet_rxq *rxq;
2725 struct vtnet_txq *txq;
2728 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2729 rxq = &sc->vtnet_rxqs[i];
2730 if (rxq->vtnrx_tq != NULL) {
2731 taskqueue_free(rxq->vtnrx_tq);
			rxq->vtnrx_tq = NULL;
		}
2735 txq = &sc->vtnet_txqs[i];
2736 if (txq->vtntx_tq != NULL) {
2737 taskqueue_free(txq->vtntx_tq);
2738 txq->vtntx_tq = NULL;
2744 vtnet_drain_taskqueues(struct vtnet_softc *sc)
2746 struct vtnet_rxq *rxq;
2747 struct vtnet_txq *txq;
2750 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2751 rxq = &sc->vtnet_rxqs[i];
2752 if (rxq->vtnrx_tq != NULL)
2753 taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2755 txq = &sc->vtnet_txqs[i];
2756 if (txq->vtntx_tq != NULL) {
2757 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
2758 #ifndef VTNET_LEGACY_TX
			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
#endif
		}
	}
}
2766 vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
2768 struct vtnet_rxq *rxq;
2769 struct vtnet_txq *txq;
#ifdef DEV_NETMAP
	if (nm_native_on(NA(sc->vtnet_ifp)))
		return;
#endif /* DEV_NETMAP */
2777 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2778 rxq = &sc->vtnet_rxqs[i];
2779 vtnet_rxq_free_mbufs(rxq);
2781 txq = &sc->vtnet_txqs[i];
2782 vtnet_txq_free_mbufs(txq);
2787 vtnet_stop_rendezvous(struct vtnet_softc *sc)
2789 struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i;

	/*
	 * Lock and unlock the per-queue mutex so we know the stop
2795 * state is visible. Doing only the active queues should be
2796 * sufficient, but it does not cost much extra to do all the
	 * queues. Note we hold the core mutex here too.
	 */
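	/*
	 * This works because acquiring the mutex cannot succeed while an
	 * interrupt handler or taskqueue thread still holds it: once each
	 * queue lock has been taken and dropped here, any context that
	 * observed the pre-stop state has finished running.
	 */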
2799 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2800 rxq = &sc->vtnet_rxqs[i];
2801 VTNET_RXQ_LOCK(rxq);
2802 VTNET_RXQ_UNLOCK(rxq);
2804 txq = &sc->vtnet_txqs[i];
2805 VTNET_TXQ_LOCK(txq);
2806 VTNET_TXQ_UNLOCK(txq);
2811 vtnet_stop(struct vtnet_softc *sc)
2816 dev = sc->vtnet_dev;
2817 ifp = sc->vtnet_ifp;
2819 VTNET_CORE_LOCK_ASSERT(sc);
2821 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2822 sc->vtnet_link_active = 0;
2823 callout_stop(&sc->vtnet_tick_ch);
2825 /* Only advisory. */
2826 vtnet_disable_interrupts(sc);
	/*
	 * Stop the host adapter. This resets it to the pre-initialized
	 * state. It will not generate any interrupts until after it is
	 * reinitialized.
	 */
	virtio_stop(dev);
	vtnet_stop_rendezvous(sc);
2836 /* Free any mbufs left in the virtqueues. */
2837 vtnet_drain_rxtx_queues(sc);
2841 vtnet_virtio_reinit(struct vtnet_softc *sc)
	device_t dev;
	struct ifnet *ifp;
	uint64_t features;
	int mask, error;

	dev = sc->vtnet_dev;
2849 ifp = sc->vtnet_ifp;
2850 features = sc->vtnet_features;
	mask = 0;
#if defined(INET)
	mask |= IFCAP_RXCSUM;
#endif
#if defined(INET6)
	mask |= IFCAP_RXCSUM_IPV6;
#endif
	/*
	 * Re-negotiate with the host, removing any disabled receive
2862 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */
2866 if (ifp->if_capabilities & mask) {
		/*
		 * We require both IPv4 and IPv6 offloading to be enabled
		 * in order to negotiate it: VirtIO does not distinguish
		 * between the two.
		 */
		if ((ifp->if_capenable & mask) != mask)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}
2876 if (ifp->if_capabilities & IFCAP_LRO) {
2877 if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}
2881 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2882 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}
2886 error = virtio_reinit(dev, features);
	if (error)
		device_printf(dev, "virtio reinit error %d\n", error);

	return (error);
}
2894 vtnet_init_rx_filters(struct vtnet_softc *sc)
2898 ifp = sc->vtnet_ifp;
2900 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2901 /* Restore promiscuous and all-multicast modes. */
2902 vtnet_rx_filter(sc);
2903 /* Restore filtered MAC addresses. */
2904 vtnet_rx_filter_mac(sc);
2907 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2908 vtnet_rx_filter_vlan(sc);
2912 vtnet_init_rx_queues(struct vtnet_softc *sc)
2915 struct vtnet_rxq *rxq;
2916 int i, clsize, error;
2918 dev = sc->vtnet_dev;
2921 * Use the new cluster size if one has been set (via a MTU
2922 * change). Otherwise, use the standard 2K clusters.
2924 * BMV: It might make sense to use page sized clusters as
2925 * the default (depending on the features negotiated).
2927 if (sc->vtnet_rx_new_clsize != 0) {
2928 clsize = sc->vtnet_rx_new_clsize;
		sc->vtnet_rx_new_clsize = 0;
	} else
		clsize = MCLBYTES;

	sc->vtnet_rx_clsize = clsize;
2934 sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
2936 KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
2937 sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2938 ("%s: too many rx mbufs %d for %d segments", __func__,
2939 sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
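	/*
	 * Without mergeable receive buffers, a full-sized frame is backed by
	 * a chain of vtnet_rx_nmbufs clusters posted as a single descriptor
	 * chain, so the chain must fit in the vtnet_rx_nsegs scatter-gather
	 * segments sized for the receive virtqueue; the assertion above
	 * enforces that relationship.
	 */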
#ifdef DEV_NETMAP
	if (vtnet_netmap_init_rx_buffers(sc))
		return (0);
#endif /* DEV_NETMAP */
2946 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2947 rxq = &sc->vtnet_rxqs[i];
2949 /* Hold the lock to satisfy asserts. */
2950 VTNET_RXQ_LOCK(rxq);
2951 error = vtnet_rxq_populate(rxq);
2952 VTNET_RXQ_UNLOCK(rxq);
		if (error) {
			device_printf(dev,
			    "cannot allocate mbufs for Rx queue %d\n", i);
			return (error);
		}
	}

	return (0);
}
2965 vtnet_init_tx_queues(struct vtnet_softc *sc)
2967 struct vtnet_txq *txq;
2970 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2971 txq = &sc->vtnet_txqs[i];
		txq->vtntx_watchdog = 0;
	}

	return (0);
}
2979 vtnet_init_rxtx_queues(struct vtnet_softc *sc)
	int error;

	error = vtnet_init_rx_queues(sc);
	if (error)
		return (error);

	error = vtnet_init_tx_queues(sc);
	if (error)
		return (error);

	return (0);
}
2995 vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
	device_t dev;
	int npairs;

	dev = sc->vtnet_dev;
3002 if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
		sc->vtnet_act_vq_pairs = 1;
		return;
	}

	npairs = sc->vtnet_requested_vq_pairs;
3009 if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
		device_printf(dev,
		    "cannot set active queue pairs to %d\n", npairs);
		npairs = 1;
	}

	sc->vtnet_act_vq_pairs = npairs;
}
3019 vtnet_reinit(struct vtnet_softc *sc)
	struct ifnet *ifp;
	int error;

	ifp = sc->vtnet_ifp;
3026 /* Use the current MAC address. */
3027 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3028 vtnet_set_hwaddr(sc);
3030 vtnet_set_active_vq_pairs(sc);
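	/*
	 * Rebuild if_hwassist from if_capenable below so that any offloads
	 * the user has toggled (e.g. "ifconfig vtnet0 -txcsum") carry over
	 * across the reinitialization.
	 */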
3032 ifp->if_hwassist = 0;
3033 if (ifp->if_capenable & IFCAP_TXCSUM)
3034 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3035 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3036 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
3037 if (ifp->if_capenable & IFCAP_TSO4)
3038 ifp->if_hwassist |= CSUM_IP_TSO;
3039 if (ifp->if_capenable & IFCAP_TSO6)
3040 ifp->if_hwassist |= CSUM_IP6_TSO;
3042 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3043 vtnet_init_rx_filters(sc);
	error = vtnet_init_rxtx_queues(sc);
	if (error)
		return (error);

	vtnet_enable_interrupts(sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (0);
}
3056 vtnet_init_locked(struct vtnet_softc *sc)
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
3062 ifp = sc->vtnet_ifp;
3064 VTNET_CORE_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	vtnet_stop(sc);

	/* Reinitialize with the host. */
	if (vtnet_virtio_reinit(sc) != 0)
		goto fail;

	if (vtnet_reinit(sc) != 0)
		goto fail;
3078 virtio_reinit_complete(dev);
3080 vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);

	return;

fail:
	vtnet_stop(sc);
}
3090 vtnet_init(void *xsc)
3092 struct vtnet_softc *sc;
	sc = xsc;

#ifdef DEV_NETMAP
	if (!NA(sc->vtnet_ifp)) {
3098 D("try to attach again");
		vtnet_netmap_attach(sc);
	}
#endif /* DEV_NETMAP */
3103 VTNET_CORE_LOCK(sc);
3104 vtnet_init_locked(sc);
3105 VTNET_CORE_UNLOCK(sc);
3109 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3111 struct virtqueue *vq;
3113 vq = sc->vtnet_ctrl_vq;
	/*
	 * The control virtqueue is only polled and therefore it should
	 * already be empty.
	 */
	KASSERT(virtqueue_empty(vq),
3120 ("%s: ctrl vq %p not empty", __func__, vq));
3124 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
3125 struct sglist *sg, int readable, int writable)
3127 struct virtqueue *vq;
3129 vq = sc->vtnet_ctrl_vq;
3131 VTNET_CORE_LOCK_ASSERT(sc);
3132 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3133 ("%s: CTRL_VQ feature not negotiated", __func__));
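	/*
	 * Note the caller passes its command buffer as the cookie and reads
	 * the host-written ack byte out of it after this function returns;
	 * virtqueue_poll() spins until the host hands the descriptor chain
	 * back through the used ring.
	 */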
	if (!virtqueue_empty(vq))
		return;
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;
	/*
	 * Poll for the response, but the command is likely already
	 * done when we return from the notify.
	 */
3144 virtqueue_notify(vq);
3145 virtqueue_poll(vq, NULL);
3149 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3151 struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;
3157 hdr.class = VIRTIO_NET_CTRL_MAC;
3158 hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3159 ack = VIRTIO_NET_ERR;
3161 sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3164 error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3165 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3166 KASSERT(error == 0 && sg.sg_nseg == 3,
3167 ("%s: error %d adding set MAC msg to sglist", __func__, error));
3169 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}
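/*
 * All of the control virtqueue commands below share one wire format: a
 * read-only virtio_net_ctrl_hdr selecting the class and command, a read-only
 * command-specific payload, and a final writable byte the host sets to
 * VIRTIO_NET_OK or VIRTIO_NET_ERR. This is why each vtnet_exec_ctrl_cmd()
 * call passes "sg.sg_nseg - 1" readable segments and exactly one writable
 * segment.
 */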
3175 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr;
		uint8_t pad1;
		struct virtio_net_ctrl_mq mq;
		uint8_t pad2;
		uint8_t ack;
	} s __aligned(2);
	int error;
3188 s.hdr.class = VIRTIO_NET_CTRL_MQ;
3189 s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3190 s.mq.virtqueue_pairs = npairs;
3191 s.ack = VIRTIO_NET_ERR;
3193 sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3196 error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3197 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3198 KASSERT(error == 0 && sg.sg_nseg == 3,
3199 ("%s: error %d adding MQ message to sglist", __func__, error));
3201 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3203 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3207 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr;
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s __aligned(2);
	int error;
3220 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3221 ("%s: CTRL_RX feature not negotiated", __func__));
	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = !!on;
	s.ack = VIRTIO_NET_ERR;
3228 sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3231 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3232 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3233 KASSERT(error == 0 && sg.sg_nseg == 3,
3234 ("%s: error %d adding Rx message to sglist", __func__, error));
3236 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3238 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3242 vtnet_set_promisc(struct vtnet_softc *sc, int on)
3245 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3249 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3252 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3256 * The device defaults to promiscuous mode for backwards compatibility.
3257 * Turn it off at attach time if possible.
3260 vtnet_attach_disable_promisc(struct vtnet_softc *sc)
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;
3266 VTNET_CORE_LOCK(sc);
3267 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3268 ifp->if_flags |= IFF_PROMISC;
3269 } else if (vtnet_set_promisc(sc, 0) != 0) {
3270 ifp->if_flags |= IFF_PROMISC;
3271 device_printf(sc->vtnet_dev,
3272 "cannot disable default promiscuous mode\n");
	}
	VTNET_CORE_UNLOCK(sc);
}
3278 vtnet_rx_filter(struct vtnet_softc *sc)
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
3284 ifp = sc->vtnet_ifp;
3286 VTNET_CORE_LOCK_ASSERT(sc);
3288 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3289 device_printf(dev, "cannot %s promiscuous mode\n",
3290 ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3292 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3293 device_printf(dev, "cannot %s all-multicast mode\n",
3294 ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3298 vtnet_rx_filter_mac(struct vtnet_softc *sc)
3300 struct virtio_net_ctrl_hdr hdr __aligned(2);
3301 struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;
3310 ifp = sc->vtnet_ifp;
	filter = sc->vtnet_mac_filter;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;

	VTNET_CORE_LOCK_ASSERT(sc);
3318 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3319 ("%s: CTRL_RX feature not negotiated", __func__));
3321 /* Unicast MAC addresses: */
	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
			promisc = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	if_addr_runlock(ifp);
	if (promisc != 0) {
		filter->vmf_unicast.nentries = 0;
3342 if_printf(ifp, "more than %d MAC addresses assigned, "
3343 "falling back to promiscuous mode\n",
3344 VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;
3348 /* Multicast MAC addresses: */
3349 if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
			allmulti = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
3362 if_maddr_runlock(ifp);
3364 if (allmulti != 0) {
3365 filter->vmf_multicast.nentries = 0;
3366 if_printf(ifp, "more than %d multicast MAC addresses "
3367 "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;
	if (promisc != 0 && allmulti != 0)
		goto out;
3375 hdr.class = VIRTIO_NET_CTRL_MAC;
3376 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3377 ack = VIRTIO_NET_ERR;
3379 sglist_init(&sg, 4, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3382 error |= sglist_append(&sg, &filter->vmf_unicast,
3383 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3384 error |= sglist_append(&sg, &filter->vmf_multicast,
3385 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3386 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3387 KASSERT(error == 0 && sg.sg_nseg == 4,
3388 ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3390 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3392 if (ack != VIRTIO_NET_OK)
3393 if_printf(ifp, "error setting host MAC filter table\n");
out:
	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
3397 if_printf(ifp, "cannot enable promiscuous mode\n");
3398 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}
3403 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr;
		uint8_t pad1;
		uint16_t tag;
		uint8_t pad2;
		uint8_t ack;
	} s __aligned(2);
	int error;
3416 s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3417 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = tag;
	s.ack = VIRTIO_NET_ERR;
3421 sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3424 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3425 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3426 KASSERT(error == 0 && sg.sg_nseg == 3,
3427 ("%s: error %d adding VLAN message to sglist", __func__, error));
3429 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3431 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3435 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
	uint32_t w;
	uint16_t tag;
	int i, bit;

	VTNET_CORE_LOCK_ASSERT(sc);
3442 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3443 ("%s: VLAN_FILTER feature not negotiated", __func__));
3445 /* Enable the filter for each configured VLAN. */
3446 for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3447 w = sc->vtnet_vlan_filter[i];
		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;
3453 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
3454 device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}
}
3462 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;
3474 VTNET_CORE_LOCK(sc);
	if (add)
		sc->vtnet_vlan_filter[idx] |= (1 << bit);
	else
		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3481 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3482 ifp->if_drv_flags & IFF_DRV_RUNNING &&
3483 vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3484 device_printf(sc->vtnet_dev,
3485 "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	VTNET_CORE_UNLOCK(sc);
}
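/*
 * The shadow VLAN table kept in the softc is a 4096-bit bitmap stored as
 * 128 32-bit words: idx = tag >> 5 selects the word and bit = tag & 0x1f
 * selects the bit within it. For example, VLAN tag 100 lands in word 3,
 * bit 4 (3 * 32 + 4 == 100).
 */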
3493 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}
3503 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}
3513 vtnet_is_link_up(struct vtnet_softc *sc)
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
3520 ifp = sc->vtnet_ifp;
3522 if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3523 status = VIRTIO_NET_S_LINK_UP;
	else
		status = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, status));
3528 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3532 vtnet_update_link_status(struct vtnet_softc *sc)
	struct ifnet *ifp;
	int link;

	ifp = sc->vtnet_ifp;
3539 VTNET_CORE_LOCK_ASSERT(sc);
3540 link = vtnet_is_link_up(sc);
3542 /* Notify if the link status has changed. */
3543 if (link != 0 && sc->vtnet_link_active == 0) {
3544 sc->vtnet_link_active = 1;
3545 if_link_state_change(ifp, LINK_STATE_UP);
3546 } else if (link == 0 && sc->vtnet_link_active != 0) {
3547 sc->vtnet_link_active = 0;
3548 if_link_state_change(ifp, LINK_STATE_DOWN);
3553 vtnet_ifmedia_upd(struct ifnet *ifp)
3555 struct vtnet_softc *sc;
3556 struct ifmedia *ifm;
	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}
3568 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3570 struct vtnet_softc *sc;
	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
3575 ifmr->ifm_active = IFM_ETHER;
3577 VTNET_CORE_LOCK(sc);
3578 if (vtnet_is_link_up(sc) != 0) {
3579 ifmr->ifm_status |= IFM_ACTIVE;
3580 ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;

	VTNET_CORE_UNLOCK(sc);
}
3587 vtnet_set_hwaddr(struct vtnet_softc *sc)
3592 dev = sc->vtnet_dev;
3594 if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3595 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3596 device_printf(dev, "unable to set MAC address\n");
3597 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3598 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3599 virtio_write_dev_config_1(dev,
3600 offsetof(struct virtio_net_config, mac) + i,
3601 sc->vtnet_hwaddr[i]);
3607 vtnet_get_hwaddr(struct vtnet_softc *sc)
3612 dev = sc->vtnet_dev;
3614 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
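		/*
		 * 0xB2 (1011 0010b) has the locally-administered bit set and
		 * the multicast bit clear, so the generated address is always
		 * a valid unicast LAA.
		 */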
3622 sc->vtnet_hwaddr[0] = 0xB2;
3623 arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
		vtnet_set_hwaddr(sc);
		return;
	}
3628 for (i = 0; i < ETHER_ADDR_LEN; i++) {
3629 sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3630 offsetof(struct virtio_net_config, mac) + i);
3635 vtnet_vlan_tag_remove(struct mbuf *m)
3637 struct ether_vlan_header *evh;
3639 evh = mtod(m, struct ether_vlan_header *);
3640 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
3641 m->m_flags |= M_VLANTAG;
3643 /* Strip the 802.1Q header. */
3644 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
3645 ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
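/*
 * The bcopy() above shifts the destination and source MAC addresses forward
 * by ETHER_VLAN_ENCAP_LEN (4) bytes, overwriting the 802.1Q TPID/TCI words,
 * and m_adj() then trims the now-stale leading bytes. The result is an
 * untagged Ethernet frame with the VLAN ID preserved in m_pkthdr.ether_vtag.
 */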
3650 vtnet_set_rx_process_limit(struct vtnet_softc *sc)
	int limit;

	limit = vtnet_tunable_int(sc, "rx_process_limit",
3655 vtnet_rx_process_limit);
	if (limit < 0)
		limit = INT_MAX;
	sc->vtnet_rx_process_limit = limit;
}
3662 vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
	int size, thresh;

	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
	/*
	 * The Tx interrupt is disabled until the queue free count falls
3670 * below our threshold. Completed frames are drained from the Tx
3671 * virtqueue before transmitting new frames and in the watchdog
3672 * callout, so the frequency of Tx interrupts is greatly reduced,
	 * at the cost of not freeing mbufs as quickly as they otherwise
	 * would be.
	 *
	 * N.B. We assume all the Tx queues are the same size.
	 */
	thresh = size / 4;

	/*
	 * Without indirect descriptors, leave enough room for the most
	 * segments we handle.
	 */
3684 if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
3685 thresh < sc->vtnet_tx_nsegs)
3686 thresh = sc->vtnet_tx_nsegs;
3688 sc->vtnet_tx_intr_thresh = thresh;
3692 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3693 struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3695 struct sysctl_oid *node;
3696 struct sysctl_oid_list *list;
3697 struct vtnet_rxq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
3701 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3702 CTLFLAG_RD, NULL, "Receive Queue");
3703 list = SYSCTL_CHILDREN(node);
3705 stats = &rxq->vtnrx_stats;
3707 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3708 &stats->vrxs_ipackets, "Receive packets");
3709 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3710 &stats->vrxs_ibytes, "Receive bytes");
3711 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3712 &stats->vrxs_iqdrops, "Receive drops");
3713 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3714 &stats->vrxs_ierrors, "Receive errors");
3715 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3716 &stats->vrxs_csum, "Receive checksum offloaded");
3717 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3718 &stats->vrxs_csum_failed, "Receive checksum offload failed");
3719 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3720 &stats->vrxs_rescheduled,
3721 "Receive interrupt handler rescheduled");
3725 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
3726 struct sysctl_oid_list *child, struct vtnet_txq *txq)
3728 struct sysctl_oid *node;
3729 struct sysctl_oid_list *list;
3730 struct vtnet_txq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
3734 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3735 CTLFLAG_RD, NULL, "Transmit Queue");
3736 list = SYSCTL_CHILDREN(node);
3738 stats = &txq->vtntx_stats;
3740 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3741 &stats->vtxs_opackets, "Transmit packets");
3742 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3743 &stats->vtxs_obytes, "Transmit bytes");
3744 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3745 &stats->vtxs_omcasts, "Transmit multicasts");
3746 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3747 &stats->vtxs_csum, "Transmit checksum offloaded");
3748 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3749 &stats->vtxs_tso, "Transmit segmentation offloaded");
3750 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3751 &stats->vtxs_rescheduled,
3752 "Transmit interrupt handler rescheduled");
3756 vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
3759 struct sysctl_ctx_list *ctx;
3760 struct sysctl_oid *tree;
3761 struct sysctl_oid_list *child;
3764 dev = sc->vtnet_dev;
3765 ctx = device_get_sysctl_ctx(dev);
3766 tree = device_get_sysctl_tree(dev);
3767 child = SYSCTL_CHILDREN(tree);
3769 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3770 vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3771 vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3776 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
3777 struct sysctl_oid_list *child, struct vtnet_softc *sc)
3779 struct vtnet_statistics *stats;
3780 struct vtnet_rxq_stats rxaccum;
3781 struct vtnet_txq_stats txaccum;
3783 vtnet_accum_stats(sc, &rxaccum, &txaccum);
3785 stats = &sc->vtnet_stats;
3786 stats->rx_csum_offloaded = rxaccum.vrxs_csum;
3787 stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
3788 stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
3789 stats->tx_csum_offloaded = txaccum.vtxs_csum;
3790 stats->tx_tso_offloaded = txaccum.vtxs_tso;
3791 stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
3793 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
3794 CTLFLAG_RD, &stats->mbuf_alloc_failed,
3795 "Mbuf cluster allocation failures");
3797 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
3798 CTLFLAG_RD, &stats->rx_frame_too_large,
3799 "Received frame larger than the mbuf chain");
3800 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
3801 CTLFLAG_RD, &stats->rx_enq_replacement_failed,
3802 "Enqueuing the replacement receive mbuf failed");
3803 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
3804 CTLFLAG_RD, &stats->rx_mergeable_failed,
3805 "Mergeable buffers receive failures");
3806 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
3807 CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
3810 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
3811 CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
3812 "Received checksum offloaded buffer with incorrect IP protocol");
3813 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
3814 CTLFLAG_RD, &stats->rx_csum_bad_offset,
3815 "Received checksum offloaded buffer with incorrect offset");
3816 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
3817 CTLFLAG_RD, &stats->rx_csum_bad_proto,
3818 "Received checksum offloaded buffer with incorrect protocol");
3819 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
3820 CTLFLAG_RD, &stats->rx_csum_failed,
3821 "Received buffer checksum offload failed");
3822 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
3823 CTLFLAG_RD, &stats->rx_csum_offloaded,
3824 "Received buffer checksum offload succeeded");
3825 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
3826 CTLFLAG_RD, &stats->rx_task_rescheduled,
3827 "Times the receive interrupt task rescheduled itself");
3829 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
3830 CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
3833 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
3834 CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
3835 "Aborted transmit of TSO buffer with unknown Ethernet type");
3836 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3837 CTLFLAG_RD, &stats->tx_tso_not_tcp,
3838 "Aborted transmit of TSO buffer with non TCP protocol");
3839 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3840 CTLFLAG_RD, &stats->tx_defragged,
3841 "Transmit mbufs defragged");
3842 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
3843 CTLFLAG_RD, &stats->tx_defrag_failed,
3844 "Aborted transmit of buffer because defrag failed");
3845 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
3846 CTLFLAG_RD, &stats->tx_csum_offloaded,
3847 "Offloaded checksum of transmitted buffer");
3848 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
3849 CTLFLAG_RD, &stats->tx_tso_offloaded,
3850 "Segmentation offload of transmitted buffer");
3851 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
3852 CTLFLAG_RD, &stats->tx_task_rescheduled,
3853 "Times the transmit interrupt task rescheduled itself");
3857 vtnet_setup_sysctl(struct vtnet_softc *sc)
3860 struct sysctl_ctx_list *ctx;
3861 struct sysctl_oid *tree;
3862 struct sysctl_oid_list *child;
3864 dev = sc->vtnet_dev;
3865 ctx = device_get_sysctl_ctx(dev);
3866 tree = device_get_sysctl_tree(dev);
3867 child = SYSCTL_CHILDREN(tree);
3869 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3870 CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3871 "Maximum number of supported virtqueue pairs");
3872 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
3873 CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
3874 "Requested number of virtqueue pairs");
3875 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3876 CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3877 "Number of active virtqueue pairs");
3879 vtnet_setup_stat_sysctl(ctx, child, sc);
3883 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3886 return (virtqueue_enable_intr(rxq->vtnrx_vq));
3890 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
3893 virtqueue_disable_intr(rxq->vtnrx_vq);
3897 vtnet_txq_enable_intr(struct vtnet_txq *txq)
3899 struct virtqueue *vq;
	vq = txq->vtntx_vq;

	if (vtnet_txq_below_threshold(txq) != 0)
3904 return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
	/*
	 * The free count is above our threshold. Keep the Tx interrupt
	 * disabled until the queue is fuller.
	 */
	return (0);
}
3914 vtnet_txq_disable_intr(struct vtnet_txq *txq)
	virtqueue_disable_intr(txq->vtntx_vq);
}
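/*
 * Together with the threshold computed in vtnet_set_tx_intr_threshold(),
 * the enable path above implements a simple form of interrupt moderation:
 * while plenty of descriptors remain free, completions are reaped
 * opportunistically with the interrupt left disabled (or postponed), and
 * only a nearly full ring re-arms it.
 */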
3921 vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3925 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3926 vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
3930 vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
3934 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3935 vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
3939 vtnet_enable_interrupts(struct vtnet_softc *sc)
3942 vtnet_enable_rx_interrupts(sc);
3943 vtnet_enable_tx_interrupts(sc);
3947 vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
3951 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3952 vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3956 vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
3960 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3961 vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3965 vtnet_disable_interrupts(struct vtnet_softc *sc)
3968 vtnet_disable_rx_interrupts(sc);
3969 vtnet_disable_tx_interrupts(sc);
3973 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}
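/*
 * Per-device tunables override the driver-wide defaults through kernel
 * environment variables; for example, setting hw.vtnet.0.rx_process_limit
 * in loader.conf(5) affects only vtnet0. When the variable is unset,
 * TUNABLE_INT_FETCH() leaves the passed-in default unchanged.
 */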
3986 vtnet_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
3988 struct vtnet_softc *sc;
3990 sc = if_getsoftc(ifp);
3992 VTNET_CORE_LOCK(sc);
3993 *nrxr = sc->vtnet_max_vq_pairs;
3994 *ncl = NETDUMP_MAX_IN_FLIGHT;
3995 *clsize = sc->vtnet_rx_clsize;
3996 VTNET_CORE_UNLOCK(sc);
3999 * We need to allocate from this zone in the transmit path, so ensure
4000 * that we have at least one item per header available.
	 * XXX add a separate zone like we do for mbufs? Otherwise we may
	 * allocate more headers here than the transmit path ever needs.
	 */
	uma_zone_reserve(vtnet_tx_header_zone, NETDUMP_MAX_IN_FLIGHT * 2);
	uma_prealloc(vtnet_tx_header_zone, NETDUMP_MAX_IN_FLIGHT * 2);
}
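/*
 * uma_zone_reserve() sets aside items that only M_USE_RESERVE allocations
 * may consume, and uma_prealloc() backs the reservation with memory now,
 * while it is still safe to allocate; vtnet_netdump_transmit() below then
 * encapsulates frames with M_NOWAIT | M_USE_RESERVE so the headers are
 * guaranteed to be available during a dump.
 */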
4009 vtnet_netdump_event(struct ifnet *ifp __unused, enum netdump_ev event __unused)
4014 vtnet_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
4016 struct vtnet_softc *sc;
4017 struct vtnet_txq *txq;
4020 sc = if_getsoftc(ifp);
4021 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &sc->vtnet_txqs[0];
4026 error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
	if (error == 0)
		(void)vtnet_txq_notify(txq);
	return (error);
}
4033 vtnet_netdump_poll(struct ifnet *ifp, int count)
4035 struct vtnet_softc *sc;
4038 sc = if_getsoftc(ifp);
4039 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4044 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
}
4048 #endif /* NETDUMP */