2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 /* Driver for VirtIO network devices. */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/sockio.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/random.h>
42 #include <sys/sglist.h>
44 #include <sys/mutex.h>
45 #include <sys/taskqueue.h>
47 #include <machine/smp.h>
51 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_types.h>
56 #include <net/if_media.h>
57 #include <net/if_vlan_var.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 #include <netinet/ip6.h>
65 #include <netinet6/ip6_var.h>
66 #include <netinet/udp.h>
67 #include <netinet/tcp.h>
68 #include <netinet/sctp.h>
70 #include <machine/bus.h>
71 #include <machine/resource.h>
75 #include <dev/virtio/virtio.h>
76 #include <dev/virtio/virtqueue.h>
77 #include <dev/virtio/network/virtio_net.h>
78 #include <dev/virtio/network/if_vtnetvar.h>
80 #include "virtio_if.h"
83 #include "opt_inet6.h"
85 static int vtnet_modevent(module_t, int, void *);
87 static int vtnet_probe(device_t);
88 static int vtnet_attach(device_t);
89 static int vtnet_detach(device_t);
90 static int vtnet_suspend(device_t);
91 static int vtnet_resume(device_t);
92 static int vtnet_shutdown(device_t);
93 static int vtnet_attach_completed(device_t);
94 static int vtnet_config_change(device_t);
96 static void vtnet_negotiate_features(struct vtnet_softc *);
97 static void vtnet_setup_features(struct vtnet_softc *);
98 static int vtnet_init_rxq(struct vtnet_softc *, int);
99 static int vtnet_init_txq(struct vtnet_softc *, int);
100 static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
101 static void vtnet_free_rxtx_queues(struct vtnet_softc *);
102 static int vtnet_alloc_rx_filters(struct vtnet_softc *);
103 static void vtnet_free_rx_filters(struct vtnet_softc *);
104 static int vtnet_alloc_virtqueues(struct vtnet_softc *);
105 static int vtnet_setup_interface(struct vtnet_softc *);
106 static int vtnet_change_mtu(struct vtnet_softc *, int);
107 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
109 static int vtnet_rxq_populate(struct vtnet_rxq *);
110 static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
111 static struct mbuf *
112 vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
113 static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
114 struct mbuf *, int);
115 static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
116 static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
117 static int vtnet_rxq_new_buf(struct vtnet_rxq *);
118 static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
119 struct virtio_net_hdr *);
120 static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
121 static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
122 static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
123 static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
124 struct virtio_net_hdr *);
125 static int vtnet_rxq_eof(struct vtnet_rxq *);
126 static void vtnet_rx_vq_intr(void *);
127 static void vtnet_rxq_tq_intr(void *, int);
129 static int vtnet_txq_below_threshold(struct vtnet_txq *);
130 static int vtnet_txq_notify(struct vtnet_txq *);
131 static void vtnet_txq_free_mbufs(struct vtnet_txq *);
132 static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
133 int *, int *, int *);
134 static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
135 int, struct virtio_net_hdr *);
136 static struct mbuf *
137 vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
138 struct virtio_net_hdr *);
139 static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
140 struct vtnet_tx_header *);
141 static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
142 #ifdef VTNET_LEGACY_TX
143 static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
144 static void vtnet_start(struct ifnet *);
146 static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
147 static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
148 static void vtnet_txq_tq_deferred(void *, int);
150 static void vtnet_txq_start(struct vtnet_txq *);
151 static void vtnet_txq_tq_intr(void *, int);
152 static int vtnet_txq_eof(struct vtnet_txq *);
153 static void vtnet_tx_vq_intr(void *);
154 static void vtnet_tx_start_all(struct vtnet_softc *);
156 #ifndef VTNET_LEGACY_TX
157 static void vtnet_qflush(struct ifnet *);
160 static int vtnet_watchdog(struct vtnet_txq *);
161 static void vtnet_rxq_accum_stats(struct vtnet_rxq *,
162 struct vtnet_rxq_stats *);
163 static void vtnet_txq_accum_stats(struct vtnet_txq *,
164 struct vtnet_txq_stats *);
165 static void vtnet_accumulate_stats(struct vtnet_softc *);
166 static void vtnet_tick(void *);
168 static void vtnet_start_taskqueues(struct vtnet_softc *);
169 static void vtnet_free_taskqueues(struct vtnet_softc *);
170 static void vtnet_drain_taskqueues(struct vtnet_softc *);
172 static void vtnet_drain_rxtx_queues(struct vtnet_softc *);
173 static void vtnet_stop_rendezvous(struct vtnet_softc *);
174 static void vtnet_stop(struct vtnet_softc *);
175 static int vtnet_virtio_reinit(struct vtnet_softc *);
176 static void vtnet_init_rx_filters(struct vtnet_softc *);
177 static int vtnet_init_rx_queues(struct vtnet_softc *);
178 static int vtnet_init_tx_queues(struct vtnet_softc *);
179 static int vtnet_init_rxtx_queues(struct vtnet_softc *);
180 static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
181 static int vtnet_reinit(struct vtnet_softc *);
182 static void vtnet_init_locked(struct vtnet_softc *);
183 static void vtnet_init(void *);
185 static void vtnet_free_ctrl_vq(struct vtnet_softc *);
186 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
187 struct sglist *, int, int);
188 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
189 static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
190 static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
191 static int vtnet_set_promisc(struct vtnet_softc *, int);
192 static int vtnet_set_allmulti(struct vtnet_softc *, int);
193 static void vtnet_attach_disable_promisc(struct vtnet_softc *);
194 static void vtnet_rx_filter(struct vtnet_softc *);
195 static void vtnet_rx_filter_mac(struct vtnet_softc *);
196 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
197 static void vtnet_rx_filter_vlan(struct vtnet_softc *);
198 static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
199 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
200 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
202 static int vtnet_is_link_up(struct vtnet_softc *);
203 static void vtnet_update_link_status(struct vtnet_softc *);
204 static int vtnet_ifmedia_upd(struct ifnet *);
205 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
206 static void vtnet_get_hwaddr(struct vtnet_softc *);
207 static void vtnet_set_hwaddr(struct vtnet_softc *);
208 static void vtnet_vlan_tag_remove(struct mbuf *);
209 static void vtnet_set_rx_process_limit(struct vtnet_softc *);
210 static void vtnet_set_tx_intr_threshold(struct vtnet_softc *);
212 static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
213 struct sysctl_oid_list *, struct vtnet_rxq *);
214 static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
215 struct sysctl_oid_list *, struct vtnet_txq *);
216 static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
217 static void vtnet_setup_sysctl(struct vtnet_softc *);
219 static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
220 static void vtnet_rxq_disable_intr(struct vtnet_rxq *);
221 static int vtnet_txq_enable_intr(struct vtnet_txq *);
222 static void vtnet_txq_disable_intr(struct vtnet_txq *);
223 static void vtnet_enable_rx_interrupts(struct vtnet_softc *);
224 static void vtnet_enable_tx_interrupts(struct vtnet_softc *);
225 static void vtnet_enable_interrupts(struct vtnet_softc *);
226 static void vtnet_disable_rx_interrupts(struct vtnet_softc *);
227 static void vtnet_disable_tx_interrupts(struct vtnet_softc *);
228 static void vtnet_disable_interrupts(struct vtnet_softc *);
230 static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
233 static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VirtIO Net driver parameters");
234 static int vtnet_csum_disable = 0;
235 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
236 SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
237 &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
238 static int vtnet_tso_disable = 0;
239 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
240 SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
241 0, "Disables TCP Segmentation Offload");
242 static int vtnet_lro_disable = 0;
243 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
244 SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
245 0, "Disables TCP Large Receive Offload");
246 static int vtnet_mq_disable = 0;
247 TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
248 SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
249 0, "Disables Multi Queue support");
250 static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
251 TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
252 SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
253 &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
254 static int vtnet_rx_process_limit = 512;
255 TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
256 SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
257 &vtnet_rx_process_limit, 0,
258 "Limits the number RX segments processed in a single pass");
260 static uma_zone_t vtnet_tx_header_zone;
262 static struct virtio_feature_desc vtnet_feature_desc[] = {
263 { VIRTIO_NET_F_CSUM, "TxChecksum" },
264 { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
265 { VIRTIO_NET_F_MAC, "MacAddress" },
266 { VIRTIO_NET_F_GSO, "TxAllGSO" },
267 { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
268 { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
269 { VIRTIO_NET_F_GUEST_ECN, "RxECN" },
270 { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
271 { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
272 { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
273 { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
274 { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
275 { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
276 { VIRTIO_NET_F_STATUS, "Status" },
277 { VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
278 { VIRTIO_NET_F_CTRL_RX, "RxMode" },
279 { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
280 { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
281 { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
282 { VIRTIO_NET_F_MQ, "Multiqueue" },
283 { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },
288 static device_method_t vtnet_methods[] = {
289 /* Device methods. */
290 DEVMETHOD(device_probe, vtnet_probe),
291 DEVMETHOD(device_attach, vtnet_attach),
292 DEVMETHOD(device_detach, vtnet_detach),
293 DEVMETHOD(device_suspend, vtnet_suspend),
294 DEVMETHOD(device_resume, vtnet_resume),
295 DEVMETHOD(device_shutdown, vtnet_shutdown),
297 /* VirtIO methods. */
298 DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
299 DEVMETHOD(virtio_config_change, vtnet_config_change),
305 #include <dev/netmap/if_vtnet_netmap.h>
306 #endif /* DEV_NETMAP */
308 static driver_t vtnet_driver = {
311 sizeof(struct vtnet_softc)
313 static devclass_t vtnet_devclass;
315 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
317 MODULE_VERSION(vtnet, 1);
318 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
321 vtnet_modevent(module_t mod, int type, void *unused)
329 vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
330 sizeof(struct vtnet_tx_header),
331 NULL, NULL, NULL, NULL, 0, 0);
335 if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
336 error = EBUSY;
337 else if (type == MOD_UNLOAD) {
338 uma_zdestroy(vtnet_tx_header_zone);
339 vtnet_tx_header_zone = NULL;
353 vtnet_probe(device_t dev)
356 if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
359 device_set_desc(dev, "VirtIO Networking Adapter");
361 return (BUS_PROBE_DEFAULT);
365 vtnet_attach(device_t dev)
367 struct vtnet_softc *sc;
370 sc = device_get_softc(dev);
373 /* Register our feature descriptions. */
374 virtio_set_feature_desc(dev, vtnet_feature_desc);
376 VTNET_CORE_LOCK_INIT(sc);
377 callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
379 vtnet_setup_sysctl(sc);
380 vtnet_setup_features(sc);
382 error = vtnet_alloc_rx_filters(sc);
384 device_printf(dev, "cannot allocate Rx filters\n");
388 error = vtnet_alloc_rxtx_queues(sc);
390 device_printf(dev, "cannot allocate queues\n");
394 error = vtnet_alloc_virtqueues(sc);
396 device_printf(dev, "cannot allocate virtqueues\n");
400 error = vtnet_setup_interface(sc);
402 device_printf(dev, "cannot setup interface\n");
406 error = virtio_setup_intr(dev, INTR_TYPE_NET);
408 device_printf(dev, "cannot setup virtqueue interrupts\n");
409 /* BMV: This will crash if called during boot! */
410 ether_ifdetach(sc->vtnet_ifp);
415 vtnet_netmap_attach(sc);
416 #endif /* DEV_NETMAP */
418 vtnet_start_taskqueues(sc);
428 vtnet_detach(device_t dev)
430 struct vtnet_softc *sc;
433 sc = device_get_softc(dev);
436 if (device_is_attached(dev)) {
439 VTNET_CORE_UNLOCK(sc);
441 callout_drain(&sc->vtnet_tick_ch);
442 vtnet_drain_taskqueues(sc);
449 #endif /* DEV_NETMAP */
451 vtnet_free_taskqueues(sc);
453 if (sc->vtnet_vlan_attach != NULL) {
454 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
455 sc->vtnet_vlan_attach = NULL;
457 if (sc->vtnet_vlan_detach != NULL) {
458 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
459 sc->vtnet_vlan_detach = NULL;
462 ifmedia_removeall(&sc->vtnet_media);
466 sc->vtnet_ifp = NULL;
469 vtnet_free_rxtx_queues(sc);
470 vtnet_free_rx_filters(sc);
472 if (sc->vtnet_ctrl_vq != NULL)
473 vtnet_free_ctrl_vq(sc);
475 VTNET_CORE_LOCK_DESTROY(sc);
481 vtnet_suspend(device_t dev)
483 struct vtnet_softc *sc;
485 sc = device_get_softc(dev);
489 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
490 VTNET_CORE_UNLOCK(sc);
496 vtnet_resume(device_t dev)
498 struct vtnet_softc *sc;
501 sc = device_get_softc(dev);
505 if (ifp->if_flags & IFF_UP)
506 vtnet_init_locked(sc);
507 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
508 VTNET_CORE_UNLOCK(sc);
514 vtnet_shutdown(device_t dev)
518 * Suspend already does all of what we need to
519 * do here; we just never expect to be resumed.
521 return (vtnet_suspend(dev));
525 vtnet_attach_completed(device_t dev)
528 vtnet_attach_disable_promisc(device_get_softc(dev));
534 vtnet_config_change(device_t dev)
536 struct vtnet_softc *sc;
538 sc = device_get_softc(dev);
541 vtnet_update_link_status(sc);
542 if (sc->vtnet_link_active != 0)
543 vtnet_tx_start_all(sc);
544 VTNET_CORE_UNLOCK(sc);
550 vtnet_negotiate_features(struct vtnet_softc *sc)
553 uint64_t mask, features;
559 * TSO and LRO are only available when their corresponding checksum
560 * offload feature is also negotiated.
562 if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
563 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
564 mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
566 if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
567 mask |= VTNET_TSO_FEATURES;
568 if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
569 mask |= VTNET_LRO_FEATURES;
570 #ifndef VTNET_LEGACY_TX
571 if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
572 mask |= VIRTIO_NET_F_MQ;
573 #else
574 mask |= VIRTIO_NET_F_MQ;
575 #endif
577 features = VTNET_FEATURES & ~mask;
578 sc->vtnet_features = virtio_negotiate_features(dev, features);
580 if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
581 virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
583 * LRO without mergeable buffers requires special care. This
584 * is not ideal because every receive buffer must be large
585 * enough to hold the maximum TCP packet, the Ethernet header,
586 * and the VirtIO net header. This requires up to 34 descriptors with
587 * MCLBYTES clusters. If we do not have indirect descriptors,
588 * LRO is disabled since the virtqueue will not contain very
589 * many receive buffers.
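/*
 * A rough sketch of where 34 comes from (assuming MCLBYTES is 2048
 * bytes): a maximum-sized 64KB LRO packet spans 65536 / 2048 = 32
 * clusters, and the extra room needed in the first cluster for the
 * VirtIO net header and Ethernet framing pushes the chain to roughly
 * 34 segments.
 */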
591 if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
593 "LRO disabled due to both mergeable buffers and "
594 "indirect descriptors not negotiated\n");
596 features &= ~VTNET_LRO_FEATURES;
598 virtio_negotiate_features(dev, features);
600 sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
605 vtnet_setup_features(struct vtnet_softc *sc)
611 vtnet_negotiate_features(sc);
613 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
614 sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
615 if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
616 sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
618 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
619 /* This feature should always be negotiated. */
620 sc->vtnet_flags |= VTNET_FLAG_MAC;
623 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
624 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
625 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
627 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
629 if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
630 sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
631 else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
632 sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
634 sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
636 if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
637 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
638 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
639 sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
641 sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
643 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
644 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
646 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
647 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
648 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
649 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
650 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
651 sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
654 if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
655 sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
656 sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
657 offsetof(struct virtio_net_config, max_virtqueue_pairs));
659 sc->vtnet_max_vq_pairs = 1;
661 if (sc->vtnet_max_vq_pairs > 1) {
663 * Limit the maximum number of queue pairs to the lower of
664 * the number of CPUs and the configured maximum.
665 * The actual number of queues that get used may be less.
669 max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
670 if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
671 if (max > mp_ncpus)
672 max = mp_ncpus;
673 if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
674 max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
676 sc->vtnet_requested_vq_pairs = max;
677 sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
684 vtnet_init_rxq(struct vtnet_softc *sc, int id)
686 struct vtnet_rxq *rxq;
688 rxq = &sc->vtnet_rxqs[id];
690 snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
691 device_get_nameunit(sc->vtnet_dev), id);
692 mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
697 rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
698 if (rxq->vtnrx_sg == NULL)
701 TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
702 rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
703 taskqueue_thread_enqueue, &rxq->vtnrx_tq);
705 return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
709 vtnet_init_txq(struct vtnet_softc *sc, int id)
711 struct vtnet_txq *txq;
713 txq = &sc->vtnet_txqs[id];
715 snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
716 device_get_nameunit(sc->vtnet_dev), id);
717 mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
722 txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
723 if (txq->vtntx_sg == NULL)
726 #ifndef VTNET_LEGACY_TX
727 txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
728 M_NOWAIT, &txq->vtntx_mtx);
729 if (txq->vtntx_br == NULL)
732 TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
734 TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
735 txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
736 taskqueue_thread_enqueue, &txq->vtntx_tq);
737 if (txq->vtntx_tq == NULL)
744 vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
746 int i, npairs, error;
748 npairs = sc->vtnet_max_vq_pairs;
750 sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
751 M_NOWAIT | M_ZERO);
752 sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
753 M_NOWAIT | M_ZERO);
754 if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
755 return (ENOMEM);
757 for (i = 0; i < npairs; i++) {
758 error = vtnet_init_rxq(sc, i);
761 error = vtnet_init_txq(sc, i);
766 vtnet_setup_queue_sysctl(sc);
772 vtnet_destroy_rxq(struct vtnet_rxq *rxq)
775 rxq->vtnrx_sc = NULL;
778 if (rxq->vtnrx_sg != NULL) {
779 sglist_free(rxq->vtnrx_sg);
780 rxq->vtnrx_sg = NULL;
783 if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
784 mtx_destroy(&rxq->vtnrx_mtx);
788 vtnet_destroy_txq(struct vtnet_txq *txq)
791 txq->vtntx_sc = NULL;
794 if (txq->vtntx_sg != NULL) {
795 sglist_free(txq->vtntx_sg);
796 txq->vtntx_sg = NULL;
799 #ifndef VTNET_LEGACY_TX
800 if (txq->vtntx_br != NULL) {
801 buf_ring_free(txq->vtntx_br, M_DEVBUF);
802 txq->vtntx_br = NULL;
806 if (mtx_initialized(&txq->vtntx_mtx) != 0)
807 mtx_destroy(&txq->vtntx_mtx);
811 vtnet_free_rxtx_queues(struct vtnet_softc *sc)
815 if (sc->vtnet_rxqs != NULL) {
816 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
817 vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
818 free(sc->vtnet_rxqs, M_DEVBUF);
819 sc->vtnet_rxqs = NULL;
822 if (sc->vtnet_txqs != NULL) {
823 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
824 vtnet_destroy_txq(&sc->vtnet_txqs[i]);
825 free(sc->vtnet_txqs, M_DEVBUF);
826 sc->vtnet_txqs = NULL;
831 vtnet_alloc_rx_filters(struct vtnet_softc *sc)
834 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
835 sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
836 M_DEVBUF, M_NOWAIT | M_ZERO);
837 if (sc->vtnet_mac_filter == NULL)
841 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
842 sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
843 VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
844 if (sc->vtnet_vlan_filter == NULL)
852 vtnet_free_rx_filters(struct vtnet_softc *sc)
855 if (sc->vtnet_mac_filter != NULL) {
856 free(sc->vtnet_mac_filter, M_DEVBUF);
857 sc->vtnet_mac_filter = NULL;
860 if (sc->vtnet_vlan_filter != NULL) {
861 free(sc->vtnet_vlan_filter, M_DEVBUF);
862 sc->vtnet_vlan_filter = NULL;
867 vtnet_alloc_virtqueues(struct vtnet_softc *sc)
870 struct vq_alloc_info *info;
871 struct vtnet_rxq *rxq;
872 struct vtnet_txq *txq;
873 int i, idx, flags, nvqs, error;
878 nvqs = sc->vtnet_max_vq_pairs * 2;
879 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
882 info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
886 for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
887 rxq = &sc->vtnet_rxqs[i];
888 VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
889 vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
890 "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
892 txq = &sc->vtnet_txqs[i];
893 VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
894 vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
895 "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
898 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
899 VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
900 &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
904 * Enable interrupt binding if this is multiqueue. This only matters
905 * when per-vq MSIX is available.
907 if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
910 error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
917 vtnet_setup_interface(struct vtnet_softc *sc)
924 ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
926 device_printf(dev, "cannot allocate ifnet structure\n");
930 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
931 if_initbaudrate(ifp, IF_Gbps(10)); /* Approx. */
933 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
934 ifp->if_init = vtnet_init;
935 ifp->if_ioctl = vtnet_ioctl;
937 #ifndef VTNET_LEGACY_TX
938 ifp->if_transmit = vtnet_txq_mq_start;
939 ifp->if_qflush = vtnet_qflush;
941 struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
942 ifp->if_start = vtnet_start;
943 IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
944 ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
945 IFQ_SET_READY(&ifp->if_snd);
948 ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
949 vtnet_ifmedia_sts);
950 ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
951 ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
953 /* Read (or generate) the MAC address for the adapter. */
954 vtnet_get_hwaddr(sc);
956 ether_ifattach(ifp, sc->vtnet_hwaddr);
958 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
959 ifp->if_capabilities |= IFCAP_LINKSTATE;
961 /* Tell the upper layer(s) we support long frames. */
962 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
963 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
965 if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
966 ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
968 if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
969 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
970 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
972 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
973 ifp->if_capabilities |= IFCAP_TSO4;
974 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
975 ifp->if_capabilities |= IFCAP_TSO6;
976 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
977 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
980 if (ifp->if_capabilities & IFCAP_TSO)
981 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
984 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
985 ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
987 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
988 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
989 ifp->if_capabilities |= IFCAP_LRO;
992 if (ifp->if_capabilities & IFCAP_HWCSUM) {
994 * VirtIO does not support VLAN tagging, but we can fake
995 * it by inserting and removing the 802.1Q header during
996 * transmit and receive. We are then able to do checksum
997 * offloading of VLAN frames.
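/*
 * A sketch of that emulation, using helpers referenced elsewhere in
 * this file: on transmit, mbufs with M_VLANTAG set are rewritten with
 * ether_vlanencap() before being enqueued; on receive,
 * vtnet_vlan_tag_remove() strips the 802.1Q header and moves the tag
 * into m_pkthdr.ether_vtag.
 */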
999 ifp->if_capabilities |=
1000 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1003 ifp->if_capenable = ifp->if_capabilities;
1006 * Capabilities after here are not enabled by default.
1009 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
1010 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1012 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
1013 vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1014 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
1015 vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1018 vtnet_set_rx_process_limit(sc);
1019 vtnet_set_tx_intr_threshold(sc);
1025 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
1028 int frame_size, clsize;
1030 ifp = sc->vtnet_ifp;
1032 if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
1035 frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
1036 new_mtu;
1039 * Based on the new MTU (and hence frame size) determine which
1040 * cluster size is most appropriate for the receive queues.
1042 if (frame_size <= MCLBYTES) {
1044 } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1045 /* Avoid going past 9K jumbos. */
1046 if (frame_size > MJUM9BYTES)
1048 clsize = MJUM9BYTES;
1050 clsize = MJUMPAGESIZE;
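/*
 * For example (assuming MCLBYTES is 2KB and MJUMPAGESIZE is 4KB): a
 * 1500 byte MTU fits the default 2KB cluster, a 4000 byte MTU selects
 * a 4KB cluster when mergeable buffers were negotiated, and a 9000
 * byte MTU without mergeable buffers selects the 9KB cluster.
 */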
1052 ifp->if_mtu = new_mtu;
1053 sc->vtnet_rx_new_clsize = clsize;
1055 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1056 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1057 vtnet_init_locked(sc);
1064 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1066 struct vtnet_softc *sc;
1067 struct ifreq *ifr;
1068 int reinit, mask, error;
1070 sc = ifp->if_softc;
1071 ifr = (struct ifreq *) data;
1076 if (ifp->if_mtu != ifr->ifr_mtu) {
1077 VTNET_CORE_LOCK(sc);
1078 error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1079 VTNET_CORE_UNLOCK(sc);
1084 VTNET_CORE_LOCK(sc);
1085 if ((ifp->if_flags & IFF_UP) == 0) {
1086 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1088 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1089 if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1090 (IFF_PROMISC | IFF_ALLMULTI)) {
1091 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1092 vtnet_rx_filter(sc);
1094 ifp->if_flags |= IFF_PROMISC;
1095 if ((ifp->if_flags ^ sc->vtnet_if_flags)
1101 vtnet_init_locked(sc);
1104 sc->vtnet_if_flags = ifp->if_flags;
1105 VTNET_CORE_UNLOCK(sc);
1110 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1112 VTNET_CORE_LOCK(sc);
1113 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1114 vtnet_rx_filter_mac(sc);
1115 VTNET_CORE_UNLOCK(sc);
1120 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
1124 VTNET_CORE_LOCK(sc);
1125 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1127 if (mask & IFCAP_TXCSUM)
1128 ifp->if_capenable ^= IFCAP_TXCSUM;
1129 if (mask & IFCAP_TXCSUM_IPV6)
1130 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1131 if (mask & IFCAP_TSO4)
1132 ifp->if_capenable ^= IFCAP_TSO4;
1133 if (mask & IFCAP_TSO6)
1134 ifp->if_capenable ^= IFCAP_TSO6;
1136 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
1137 IFCAP_VLAN_HWFILTER)) {
1138 /* These Rx features require us to renegotiate. */
1141 if (mask & IFCAP_RXCSUM)
1142 ifp->if_capenable ^= IFCAP_RXCSUM;
1143 if (mask & IFCAP_RXCSUM_IPV6)
1144 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1145 if (mask & IFCAP_LRO)
1146 ifp->if_capenable ^= IFCAP_LRO;
1147 if (mask & IFCAP_VLAN_HWFILTER)
1148 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1152 if (mask & IFCAP_VLAN_HWTSO)
1153 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1154 if (mask & IFCAP_VLAN_HWTAGGING)
1155 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1157 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1158 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1159 vtnet_init_locked(sc);
1162 VTNET_CORE_UNLOCK(sc);
1163 VLAN_CAPABILITIES(ifp);
1168 error = ether_ioctl(ifp, cmd, data);
1172 VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
1178 vtnet_rxq_populate(struct vtnet_rxq *rxq)
1180 struct virtqueue *vq;
1186 for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
1187 error = vtnet_rxq_new_buf(rxq);
1193 virtqueue_notify(vq);
1195 * EMSGSIZE signifies the virtqueue did not have enough
1196 * entries available to hold the last mbuf. This is not
1199 if (error == EMSGSIZE)
1207 vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
1209 struct virtqueue *vq;
1216 while ((m = virtqueue_drain(vq, &last)) != NULL)
1217 m_freem(m);
1219 KASSERT(virtqueue_empty(vq),
1220 ("%s: mbufs remaining in rx queue %p", __func__, rxq));
1223 static struct mbuf *
1224 vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1226 struct mbuf *m_head, *m_tail, *m;
1229 clsize = sc->vtnet_rx_clsize;
1231 KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1232 ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
1234 m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
1238 m_head->m_len = clsize;
1241 /* Allocate the rest of the chain. */
1242 for (i = 1; i < nbufs; i++) {
1243 m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
1252 if (m_tailp != NULL)
1253 *m_tailp = m_tail;
1255 return (m_head);
1257 fail:
1258 sc->vtnet_stats.mbuf_alloc_failed++;
1265 * Slow path for when LRO without mergeable buffers is negotiated.
1268 vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1271 struct vtnet_softc *sc;
1272 struct mbuf *m, *m_prev;
1273 struct mbuf *m_new, *m_tail;
1274 int len, clsize, nreplace, error;
1277 clsize = sc->vtnet_rx_clsize;
1287 * Since these mbuf chains are so large, we avoid allocating an
1288 * entire replacement chain if possible. When the received frame
1289 * did not consume the entire chain, the unused mbufs are moved
1290 * to the replacement chain.
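/*
 * Illustration: if a 34-cluster chain received a 3000 byte frame,
 * only the first two 2KB clusters were consumed, so only those two
 * are replaced and the remaining unused mbufs are moved onto the new
 * chain.
 */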
1294 * Something is seriously wrong if we received a frame
1295 * larger than the chain. Drop it.
1298 sc->vtnet_stats.rx_frame_too_large++;
1302 /* We always allocate the same cluster size. */
1303 KASSERT(m->m_len == clsize,
1304 ("%s: mbuf size %d is not the cluster size %d",
1305 __func__, m->m_len, clsize));
1307 m->m_len = MIN(m->m_len, len);
1315 KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
1316 ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
1317 sc->vtnet_rx_nmbufs));
1319 m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1320 if (m_new == NULL) {
1321 m_prev->m_len = clsize;
1322 return (ENOBUFS);
1323 }
1326 * Move any unused mbufs from the received chain onto the end
1329 if (m_prev->m_next != NULL) {
1330 m_tail->m_next = m_prev->m_next;
1331 m_prev->m_next = NULL;
1334 error = vtnet_rxq_enqueue_buf(rxq, m_new);
1337 * BAD! We could not enqueue the replacement mbuf chain. We
1338 * must restore the m0 chain to the original state if it was
1339 * modified so we can subsequently discard it.
1341 * NOTE: The replacement is supposed to be an identical copy
1342 * of the one just dequeued so this is an unexpected error.
1344 sc->vtnet_stats.rx_enq_replacement_failed++;
1346 if (m_tail->m_next != NULL) {
1347 m_prev->m_next = m_tail->m_next;
1348 m_tail->m_next = NULL;
1351 m_prev->m_len = clsize;
1352 m_freem(m_new);
1353 }
1355 return (error);
1356 }
1359 vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
1361 struct vtnet_softc *sc;
1367 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1368 ("%s: chained mbuf without LRO_NOMRG", __func__));
1370 if (m->m_next == NULL) {
1371 /* Fast-path for the common case of just one mbuf. */
1375 m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1379 error = vtnet_rxq_enqueue_buf(rxq, m_new);
1382 * The new mbuf is supposed to be an identical
1383 * copy of the one just dequeued so this is an
1387 sc->vtnet_stats.rx_enq_replacement_failed++;
1391 error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
1397 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1399 struct vtnet_softc *sc;
1401 struct vtnet_rx_header *rxhdr;
1407 mdata = mtod(m, uint8_t *);
1409 VTNET_RXQ_LOCK_ASSERT(rxq);
1410 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1411 ("%s: chained mbuf without LRO_NOMRG", __func__));
1412 KASSERT(m->m_len == sc->vtnet_rx_clsize,
1413 ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
1414 sc->vtnet_rx_clsize));
1417 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1418 MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1419 rxhdr = (struct vtnet_rx_header *) mdata;
1420 sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1421 offset = sizeof(struct vtnet_rx_header);
1425 sglist_append(sg, mdata + offset, m->m_len - offset);
1426 if (m->m_next != NULL) {
1427 error = sglist_append_mbuf(sg, m->m_next);
1431 error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
1437 vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
1439 struct vtnet_softc *sc;
1445 m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
1449 error = vtnet_rxq_enqueue_buf(rxq, m);
1457 * Use the checksum offset in the VirtIO header to set the
1458 * correct CSUM_* flags.
1461 vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
1462 uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1464 struct vtnet_softc *sc;
1465 #if defined(INET) || defined(INET6)
1466 int offset = hdr->csum_start + hdr->csum_offset;
1471 /* Only do a basic sanity check on the offset. */
1473 switch (eth_type) {
1474 case ETHERTYPE_IP:
1475 if (__predict_false(offset < ip_start + sizeof(struct ip)))
1476 return (1);
1477 break;
1480 case ETHERTYPE_IPV6:
1481 if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
1482 return (1);
1483 break;
1485 default:
1486 sc->vtnet_stats.rx_csum_bad_ethtype++;
1487 return (1);
1488 }
1491 * Use the offset to determine the appropriate CSUM_* flags. This is
1492 * a bit dirty, but we can get by with it since the checksum offsets
1493 * happen to be different. We assume the host does not do IPv4
1494 * header checksum offloading.
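/*
 * Concretely (standard header layouts): offsetof(struct udphdr,
 * uh_sum) is 6, offsetof(struct sctphdr, checksum) is 8, and
 * offsetof(struct tcphdr, th_sum) is 16, so the cases below cannot
 * collide.
 */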
1496 switch (hdr->csum_offset) {
1497 case offsetof(struct udphdr, uh_sum):
1498 case offsetof(struct tcphdr, th_sum):
1499 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1500 m->m_pkthdr.csum_data = 0xFFFF;
1502 case offsetof(struct sctphdr, checksum):
1503 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
1506 sc->vtnet_stats.rx_csum_bad_offset++;
1514 vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
1515 uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1517 struct vtnet_softc *sc;
1523 switch (eth_type) {
1524 case ETHERTYPE_IP: {
1525 struct ip *ip;
1526 if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
1527 return (1);
1528 ip = (struct ip *)(m->m_data + ip_start);
1529 proto = ip->ip_p;
1530 offset = ip_start + (ip->ip_hl << 2);
1531 break;
1532 }
1535 case ETHERTYPE_IPV6:
1536 if (__predict_false(m->m_len < ip_start +
1537 sizeof(struct ip6_hdr)))
1538 return (1);
1539 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
1540 if (__predict_false(offset < 0))
1541 return (1);
1542 break;
1544 default:
1545 sc->vtnet_stats.rx_csum_bad_ethtype++;
1546 return (1);
1547 }
1549 switch (proto) {
1550 case IPPROTO_TCP:
1551 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
1552 return (1);
1553 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1554 m->m_pkthdr.csum_data = 0xFFFF;
1555 break;
1556 case IPPROTO_UDP:
1557 if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
1558 return (1);
1559 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1560 m->m_pkthdr.csum_data = 0xFFFF;
1561 break;
1562 case IPPROTO_SCTP:
1563 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
1564 return (1);
1565 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
1566 break;
1567 default:
1569 * For the remaining protocols, FreeBSD does not support
1570 * checksum offloading, so the checksum will be recomputed.
1573 if_printf(sc->vtnet_ifp, "cksum offload of unsupported "
1574 "protocol eth_type=%#x proto=%d csum_start=%d "
1575 "csum_offset=%d\n", __func__, eth_type, proto,
1576 hdr->csum_start, hdr->csum_offset);
1585 * Set the appropriate CSUM_* flags. Unfortunately, the information
1586 * provided is not directly useful to us. The VirtIO header gives the
1587 * offset of the checksum, which is all Linux needs, but this is not
1588 * how FreeBSD does things. We are forced to peek inside the packet
1591 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
1592 * could accept the offsets and let the stack figure it out.
1595 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1596 struct virtio_net_hdr *hdr)
1598 struct ether_header *eh;
1599 struct ether_vlan_header *evh;
1603 eh = mtod(m, struct ether_header *);
1604 eth_type = ntohs(eh->ether_type);
1605 if (eth_type == ETHERTYPE_VLAN) {
1606 /* BMV: We should handle nested VLAN tags too. */
1607 evh = mtod(m, struct ether_vlan_header *);
1608 eth_type = ntohs(evh->evl_proto);
1609 offset = sizeof(struct ether_vlan_header);
1611 offset = sizeof(struct ether_header);
1613 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1614 error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
1616 error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
1622 vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
1626 while (--nbufs > 0) {
1627 m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
1628 if (m == NULL)
1629 break;
1630 vtnet_rxq_discard_buf(rxq, m);
1635 vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1640 * Requeue the discarded mbuf. This should always be successful
1641 * since it was just dequeued.
1643 error = vtnet_rxq_enqueue_buf(rxq, m);
1645 ("%s: cannot requeue discarded mbuf %d", __func__, error));
1649 vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
1651 struct vtnet_softc *sc;
1653 struct virtqueue *vq;
1654 struct mbuf *m, *m_tail;
1659 ifp = sc->vtnet_ifp;
1662 while (--nbufs > 0) {
1663 m = virtqueue_dequeue(vq, &len);
1664 if (m == NULL) {
1665 rxq->vtnrx_stats.vrxs_ierrors++;
1669 if (vtnet_rxq_new_buf(rxq) != 0) {
1670 rxq->vtnrx_stats.vrxs_iqdrops++;
1671 vtnet_rxq_discard_buf(rxq, m);
1673 vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1681 m->m_flags &= ~M_PKTHDR;
1683 m_head->m_pkthdr.len += len;
1691 sc->vtnet_stats.rx_mergeable_failed++;
1698 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1699 struct virtio_net_hdr *hdr)
1701 struct vtnet_softc *sc;
1703 struct ether_header *eh;
1706 ifp = sc->vtnet_ifp;
1708 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1709 eh = mtod(m, struct ether_header *);
1710 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1711 vtnet_vlan_tag_remove(m);
1713 * With the 802.1Q header removed, update the
1714 * checksum starting location accordingly.
1716 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1717 hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
1721 m->m_pkthdr.flowid = rxq->vtnrx_id;
1722 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
1725 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
1726 * distinction that Linux does. Need to reevaluate if performing
1727 * offloading for the NEEDS_CSUM case is really appropriate.
1729 if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
1730 VIRTIO_NET_HDR_F_DATA_VALID)) {
1731 if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1732 rxq->vtnrx_stats.vrxs_csum++;
1734 rxq->vtnrx_stats.vrxs_csum_failed++;
1737 rxq->vtnrx_stats.vrxs_ipackets++;
1738 rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
1740 VTNET_RXQ_UNLOCK(rxq);
1741 (*ifp->if_input)(ifp, m);
1742 VTNET_RXQ_LOCK(rxq);
1746 vtnet_rxq_eof(struct vtnet_rxq *rxq)
1748 struct virtio_net_hdr lhdr, *hdr;
1749 struct vtnet_softc *sc;
1751 struct virtqueue *vq;
1752 struct mbuf *m;
1753 struct virtio_net_hdr_mrg_rxbuf *mhdr;
1754 int len, deq, nbufs, adjsz, count;
1758 ifp = sc->vtnet_ifp;
1761 count = sc->vtnet_rx_process_limit;
1763 VTNET_RXQ_LOCK_ASSERT(rxq);
1766 if (netmap_rx_irq(ifp, 0, &deq)) {
1769 #endif /* DEV_NETMAP */
1771 while (count-- > 0) {
1772 m = virtqueue_dequeue(vq, &len);
1773 if (m == NULL)
1774 break;
1775 deq++;
1777 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1778 rxq->vtnrx_stats.vrxs_ierrors++;
1779 vtnet_rxq_discard_buf(rxq, m);
1783 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1784 nbufs = 1;
1785 adjsz = sizeof(struct vtnet_rx_header);
1787 * Account for our pad inserted between the header
1788 * and the actual start of the frame.
1790 len += VTNET_RX_HEADER_PAD;
1791 } else {
1792 mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1793 nbufs = mhdr->num_buffers;
1794 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1795 }
1797 if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
1798 rxq->vtnrx_stats.vrxs_iqdrops++;
1799 vtnet_rxq_discard_buf(rxq, m);
1801 vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1805 m->m_pkthdr.len = len;
1806 m->m_pkthdr.rcvif = ifp;
1807 m->m_pkthdr.csum_flags = 0;
1810 /* Dequeue the rest of chain. */
1811 if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
1812 continue;
1816 * Save copy of header before we strip it. For both mergeable
1817 * and non-mergeable, the header is at the beginning of the
1818 * mbuf data. We no longer need num_buffers, so always use a
1819 * plain virtio_net_hdr.
1820 *
1821 * BMV: Is this memcpy() expensive? We know the mbuf data is
1822 * still valid even after the m_adj().
1823 hdr = &lhdr;
1824 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1825 m_adj(m, adjsz);
1827 vtnet_rxq_input(rxq, m, hdr);
1829 /* Must recheck after dropping the Rx lock. */
1830 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1831 break;
1832 }
1834 if (deq > 0)
1835 virtqueue_notify(vq);
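/*
 * A nonzero return tells the caller the process limit was reached
 * with work still pending; the interrupt and taskqueue handlers use
 * that to reschedule rather than re-enable interrupts.
 */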
1837 return (count > 0 ? 0 : EAGAIN);
1841 vtnet_rx_vq_intr(void *xrxq)
1843 struct vtnet_softc *sc;
1844 struct vtnet_rxq *rxq;
1850 ifp = sc->vtnet_ifp;
1853 if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
1855 * Ignore this interrupt. Either this is a spurious interrupt
1856 * or multiqueue without per-VQ MSIX so every queue needs to
1857 * be polled (a brain dead configuration we could try harder
1858 * to avoid).
1859 */
1860 vtnet_rxq_disable_intr(rxq);
1864 VTNET_RXQ_LOCK(rxq);
1866 again:
1867 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1868 VTNET_RXQ_UNLOCK(rxq);
1872 more = vtnet_rxq_eof(rxq);
1873 if (more || vtnet_rxq_enable_intr(rxq) != 0) {
1875 vtnet_rxq_disable_intr(rxq);
1877 * This is an occasional condition or race (when !more),
1878 * so retry a few times before scheduling the taskqueue.
1880 if (tries++ < VTNET_INTR_DISABLE_RETRIES)
1881 goto again;
1883 VTNET_RXQ_UNLOCK(rxq);
1884 rxq->vtnrx_stats.vrxs_rescheduled++;
1885 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1887 VTNET_RXQ_UNLOCK(rxq);
1891 vtnet_rxq_tq_intr(void *xrxq, int pending)
1893 struct vtnet_softc *sc;
1894 struct vtnet_rxq *rxq;
1900 ifp = sc->vtnet_ifp;
1902 VTNET_RXQ_LOCK(rxq);
1904 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1905 VTNET_RXQ_UNLOCK(rxq);
1909 more = vtnet_rxq_eof(rxq);
1910 if (more || vtnet_rxq_enable_intr(rxq) != 0) {
1912 vtnet_rxq_disable_intr(rxq);
1913 rxq->vtnrx_stats.vrxs_rescheduled++;
1914 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1917 VTNET_RXQ_UNLOCK(rxq);
1921 vtnet_txq_below_threshold(struct vtnet_txq *txq)
1923 struct vtnet_softc *sc;
1924 struct virtqueue *vq;
1929 return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
1933 vtnet_txq_notify(struct vtnet_txq *txq)
1935 struct virtqueue *vq;
1939 txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
1940 virtqueue_notify(vq);
1942 if (vtnet_txq_enable_intr(txq) == 0)
1943 return (0);
1946 * Drain frames that were completed since last checked. If this
1947 * causes the queue to go above the threshold, the caller should
1948 * continue transmitting.
1950 if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
1951 virtqueue_disable_intr(vq);
1952 return (1);
1953 }
1955 return (0);
1956 }
1959 vtnet_txq_free_mbufs(struct vtnet_txq *txq)
1961 struct virtqueue *vq;
1962 struct vtnet_tx_header *txhdr;
1968 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1969 m_freem(txhdr->vth_mbuf);
1970 uma_zfree(vtnet_tx_header_zone, txhdr);
1973 KASSERT(virtqueue_empty(vq),
1974 ("%s: mbufs remaining in tx queue %p", __func__, txq));
1978 * BMV: Much of this can go away once we finally have offsets in
1979 * the mbuf packet header. Bug andre@.
1982 vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
1983 int *etype, int *proto, int *start)
1985 struct vtnet_softc *sc;
1986 struct ether_vlan_header *evh;
1991 evh = mtod(m, struct ether_vlan_header *);
1992 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1993 /* BMV: We should handle nested VLAN tags too. */
1994 *etype = ntohs(evh->evl_proto);
1995 offset = sizeof(struct ether_vlan_header);
1997 *etype = ntohs(evh->evl_encap_proto);
1998 offset = sizeof(struct ether_header);
2002 switch (*etype) {
2003 case ETHERTYPE_IP: {
2004 struct ip *ip, iphdr;
2005 if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2006 m_copydata(m, offset, sizeof(struct ip),
2007 (caddr_t) &iphdr);
2008 ip = &iphdr;
2009 } else
2010 ip = (struct ip *)(m->m_data + offset);
2012 *start = offset + (ip->ip_hl << 2);
2017 case ETHERTYPE_IPV6:
2019 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2020 /* Assert the network stack sent us a valid packet. */
2021 KASSERT(*start > offset,
2022 ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2023 *start, offset, *proto));
2027 sc->vtnet_stats.tx_csum_bad_ethtype++;
2035 vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
2036 int offset, struct virtio_net_hdr *hdr)
2038 static struct timeval lastecn;
2039 static int curecn;
2040 struct vtnet_softc *sc;
2041 struct tcphdr *tcp, tcphdr;
2045 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
2046 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
2047 tcp = &tcphdr;
2048 } else
2049 tcp = (struct tcphdr *)(m->m_data + offset);
2051 hdr->hdr_len = offset + (tcp->th_off << 2);
2052 hdr->gso_size = m->m_pkthdr.tso_segsz;
2053 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
2054 VIRTIO_NET_HDR_GSO_TCPV6;
2056 if (tcp->th_flags & TH_CWR) {
2058 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
2059 * ECN support is not on a per-interface basis, but globally via
2060 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
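 * For example (an administrator knob, not driver code), ECN could be
 * enabled stack-wide with:
 *   sysctl net.inet.tcp.ecn.enable=1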
2062 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2063 if (ppsratecheck(&lastecn, &curecn, 1))
2064 if_printf(sc->vtnet_ifp,
2065 "TSO with ECN not negotiated with host\n");
2068 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2071 txq->vtntx_stats.vtxs_tso++;
2076 static struct mbuf *
2077 vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
2078 struct virtio_net_hdr *hdr)
2080 struct vtnet_softc *sc;
2081 int flags, etype, csum_start, proto, error;
2084 flags = m->m_pkthdr.csum_flags;
2086 error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
2090 if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
2091 (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
2093 * We could compare the IP protocol vs the CSUM_ flag too,
2094 * but that really should not be necessary.
2096 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2097 hdr->csum_start = csum_start;
2098 hdr->csum_offset = m->m_pkthdr.csum_data;
2099 txq->vtntx_stats.vtxs_csum++;
2102 if (flags & CSUM_TSO) {
2103 if (__predict_false(proto != IPPROTO_TCP)) {
2104 /* Likely failed to correctly parse the mbuf. */
2105 sc->vtnet_stats.tx_tso_not_tcp++;
2109 KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
2110 ("%s: mbuf %p TSO without checksum offload %#x",
2111 __func__, m, flags));
2113 error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
2126 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
2127 struct vtnet_tx_header *txhdr)
2129 struct vtnet_softc *sc;
2130 struct virtqueue *vq;
2141 error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2142 KASSERT(error == 0 && sg->sg_nseg == 1,
2143 ("%s: error %d adding header to sglist", __func__, error));
2145 error = sglist_append_mbuf(sg, m);
2147 m = m_defrag(m, M_NOWAIT);
2152 sc->vtnet_stats.tx_defragged++;
2154 error = sglist_append_mbuf(sg, m);
2159 txhdr->vth_mbuf = m;
2160 error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
2165 sc->vtnet_stats.tx_defrag_failed++;
2173 vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head)
2175 struct vtnet_tx_header *txhdr;
2176 struct virtio_net_hdr *hdr;
2183 txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
2184 if (txhdr == NULL) {
2191 * Always use the non-mergeable header, regardless of whether the
2192 * feature was negotiated. For transmit, num_buffers is always zero.
2193 * The vtnet_hdr_size is used to enqueue the correct header size.
2195 hdr = &txhdr->vth_uhdr.hdr;
2197 if (m->m_flags & M_VLANTAG) {
2198 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2199 if ((*m_head = m) == NULL) {
2203 m->m_flags &= ~M_VLANTAG;
2206 if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
2207 m = vtnet_txq_offload(txq, m, hdr);
2208 if ((*m_head = m) == NULL) {
2214 error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2219 uma_zfree(vtnet_tx_header_zone, txhdr);
2224 #ifdef VTNET_LEGACY_TX
2227 vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
2229 struct vtnet_softc *sc;
2230 struct virtqueue *vq;
2238 VTNET_TXQ_LOCK_ASSERT(txq);
2240 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2241 sc->vtnet_link_active == 0)
2249 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2250 if (virtqueue_full(vq))
2253 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2257 if (vtnet_txq_encap(txq, &m0) != 0) {
2259 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2264 ETHER_BPF_MTAP(ifp, m0);
2267 if (enq > 0 && vtnet_txq_notify(txq) != 0) {
2268 if (tries++ < VTNET_NOTIFY_RETRIES)
2271 txq->vtntx_stats.vtxs_rescheduled++;
2272 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2277 vtnet_start(struct ifnet *ifp)
2279 struct vtnet_softc *sc;
2280 struct vtnet_txq *txq;
2283 txq = &sc->vtnet_txqs[0];
2285 VTNET_TXQ_LOCK(txq);
2286 vtnet_start_locked(txq, ifp);
2287 VTNET_TXQ_UNLOCK(txq);
2290 #else /* !VTNET_LEGACY_TX */
2293 vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
2295 struct vtnet_softc *sc;
2296 struct virtqueue *vq;
2297 struct buf_ring *br;
2299 int enq, tries, error;
2304 ifp = sc->vtnet_ifp;
2308 VTNET_TXQ_LOCK_ASSERT(txq);
2310 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2311 sc->vtnet_link_active == 0) {
2313 error = drbr_enqueue(ifp, br, m);
2318 error = drbr_enqueue(ifp, br, m);
2328 while ((m = drbr_peek(ifp, br)) != NULL) {
2329 if (virtqueue_full(vq)) {
2330 drbr_putback(ifp, br, m);
2334 if (vtnet_txq_encap(txq, &m) != 0) {
2336 drbr_putback(ifp, br, m);
2338 drbr_advance(ifp, br);
2341 drbr_advance(ifp, br);
2344 ETHER_BPF_MTAP(ifp, m);
2347 if (enq > 0 && vtnet_txq_notify(txq) != 0) {
2348 if (tries++ < VTNET_NOTIFY_RETRIES)
2351 txq->vtntx_stats.vtxs_rescheduled++;
2352 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2359 vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
2361 struct vtnet_softc *sc;
2362 struct vtnet_txq *txq;
2363 int i, npairs, error;
2366 npairs = sc->vtnet_act_vq_pairs;
2368 /* check if flowid is set */
2369 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2370 i = m->m_pkthdr.flowid % npairs;
2371 else
2372 i = curcpu % npairs;
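/*
 * Mapping example: with 4 active queue pairs, a packet carrying
 * flowid 13 always selects txq 13 % 4 = 1, which keeps a given flow
 * on one queue and thus in order; packets without a flowid fall back
 * to the sending CPU's index.
 */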
2374 txq = &sc->vtnet_txqs[i];
2376 if (VTNET_TXQ_TRYLOCK(txq) != 0) {
2377 error = vtnet_txq_mq_start_locked(txq, m);
2378 VTNET_TXQ_UNLOCK(txq);
2380 error = drbr_enqueue(ifp, txq->vtntx_br, m);
2381 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
2388 vtnet_txq_tq_deferred(void *xtxq, int pending)
2390 struct vtnet_softc *sc;
2391 struct vtnet_txq *txq;
2396 VTNET_TXQ_LOCK(txq);
2397 if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
2398 vtnet_txq_mq_start_locked(txq, NULL);
2399 VTNET_TXQ_UNLOCK(txq);
2402 #endif /* VTNET_LEGACY_TX */
2405 vtnet_txq_start(struct vtnet_txq *txq)
2407 struct vtnet_softc *sc;
2411 ifp = sc->vtnet_ifp;
2413 #ifdef VTNET_LEGACY_TX
2414 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2415 vtnet_start_locked(txq, ifp);
2417 if (!drbr_empty(ifp, txq->vtntx_br))
2418 vtnet_txq_mq_start_locked(txq, NULL);
2423 vtnet_txq_tq_intr(void *xtxq, int pending)
2425 struct vtnet_softc *sc;
2426 struct vtnet_txq *txq;
2431 ifp = sc->vtnet_ifp;
2433 VTNET_TXQ_LOCK(txq);
2435 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2436 VTNET_TXQ_UNLOCK(txq);
2441 vtnet_txq_start(txq);
2443 VTNET_TXQ_UNLOCK(txq);
2447 vtnet_txq_eof(struct vtnet_txq *txq)
2449 struct virtqueue *vq;
2450 struct vtnet_tx_header *txhdr;
2456 VTNET_TXQ_LOCK_ASSERT(txq);
2459 if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) {
2460 virtqueue_disable_intr(vq); /* XXX luigi */
2461 return (0); /* XXX or 1? */
2463 #endif /* DEV_NETMAP */
2465 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2466 m = txhdr->vth_mbuf;
2469 txq->vtntx_stats.vtxs_opackets++;
2470 txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
2471 if (m->m_flags & M_MCAST)
2472 txq->vtntx_stats.vtxs_omcasts++;
2475 uma_zfree(vtnet_tx_header_zone, txhdr);
2478 if (virtqueue_empty(vq))
2479 txq->vtntx_watchdog = 0;
2485 vtnet_tx_vq_intr(void *xtxq)
2487 struct vtnet_softc *sc;
2488 struct vtnet_txq *txq;
2493 ifp = sc->vtnet_ifp;
2495 if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
2497 * Ignore this interrupt. Either this is a spurious interrupt
2498 * or multiqueue without per-VQ MSIX so every queue needs to
2499 * be polled (a brain dead configuration we could try harder
2500 * to avoid).
2501 */
2502 vtnet_txq_disable_intr(txq);
2506 VTNET_TXQ_LOCK(txq);
2508 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2509 VTNET_TXQ_UNLOCK(txq);
2514 vtnet_txq_start(txq);
2516 VTNET_TXQ_UNLOCK(txq);
2520 vtnet_tx_start_all(struct vtnet_softc *sc)
2522 struct vtnet_txq *txq;
2525 VTNET_CORE_LOCK_ASSERT(sc);
2527 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2528 txq = &sc->vtnet_txqs[i];
2530 VTNET_TXQ_LOCK(txq);
2531 vtnet_txq_start(txq);
2532 VTNET_TXQ_UNLOCK(txq);
2536 #ifndef VTNET_LEGACY_TX
2538 vtnet_qflush(struct ifnet *ifp)
2540 struct vtnet_softc *sc;
2541 struct vtnet_txq *txq;
2547 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2548 txq = &sc->vtnet_txqs[i];
2550 VTNET_TXQ_LOCK(txq);
2551 while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
2553 VTNET_TXQ_UNLOCK(txq);
2561 vtnet_watchdog(struct vtnet_txq *txq)
2565 ifp = txq->vtntx_sc->vtnet_ifp;
2567 VTNET_TXQ_LOCK(txq);
2568 if (txq->vtntx_watchdog == 1) {
2570 * Only drain completed frames if the watchdog is about to
2571 * expire. If any frames were drained, there may be enough
2572 * free descriptors now available to transmit queued frames.
2573 * In that case, the timer will immediately be decremented
2574 * below, but the timeout is generous enough that this should not
2575 * be a problem.
2576 */
2577 if (vtnet_txq_eof(txq) != 0)
2578 vtnet_txq_start(txq);
2581 if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
2582 VTNET_TXQ_UNLOCK(txq);
2585 VTNET_TXQ_UNLOCK(txq);
2587 if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
static void
vtnet_rxq_accum_stats(struct vtnet_rxq *rxq, struct vtnet_rxq_stats *accum)
{
	struct vtnet_rxq_stats *st;

	st = &rxq->vtnrx_stats;

	accum->vrxs_ipackets += st->vrxs_ipackets;
	accum->vrxs_ibytes += st->vrxs_ibytes;
	accum->vrxs_iqdrops += st->vrxs_iqdrops;
	accum->vrxs_ierrors += st->vrxs_ierrors;
	accum->vrxs_csum += st->vrxs_csum;
	accum->vrxs_csum_failed += st->vrxs_csum_failed;
	accum->vrxs_rescheduled += st->vrxs_rescheduled;
}

static void
vtnet_txq_accum_stats(struct vtnet_txq *txq, struct vtnet_txq_stats *accum)
{
	struct vtnet_txq_stats *st;

	st = &txq->vtntx_stats;

	accum->vtxs_opackets += st->vtxs_opackets;
	accum->vtxs_obytes += st->vtxs_obytes;
	accum->vtxs_omcasts += st->vtxs_omcasts;
	accum->vtxs_csum += st->vtxs_csum;
	accum->vtxs_tso += st->vtxs_tso;
	accum->vtxs_rescheduled += st->vtxs_rescheduled;
}

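/*
 * Fold the per-queue counters into the softc statistics and the ifnet
 * counters. This iterates over the maximum (not just the active) queue
 * pairs so counts from previously active queues are not lost.
 */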
static void
vtnet_accumulate_stats(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	struct vtnet_statistics *st;
	struct vtnet_rxq_stats rxaccum;
	struct vtnet_txq_stats txaccum;
	int i;

	ifp = sc->vtnet_ifp;
	st = &sc->vtnet_stats;
	bzero(&rxaccum, sizeof(struct vtnet_rxq_stats));
	bzero(&txaccum, sizeof(struct vtnet_txq_stats));

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		vtnet_rxq_accum_stats(&sc->vtnet_rxqs[i], &rxaccum);
		vtnet_txq_accum_stats(&sc->vtnet_txqs[i], &txaccum);
	}

	st->rx_csum_offloaded = rxaccum.vrxs_csum;
	st->rx_csum_failed = rxaccum.vrxs_csum_failed;
	st->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
	st->tx_csum_offloaded = txaccum.vtxs_csum;
	st->tx_tso_offloaded = txaccum.vtxs_tso;
	st->tx_task_rescheduled = txaccum.vtxs_rescheduled;

	/*
	 * With the exception of if_ierrors, these ifnet statistics are
	 * only updated in the driver, so just set them to our accumulated
	 * values. if_ierrors is updated in ether_input() for malformed
	 * frames that we should have already discarded.
	 */
	ifp->if_ipackets = rxaccum.vrxs_ipackets;
	ifp->if_iqdrops = rxaccum.vrxs_iqdrops;
	ifp->if_ierrors = rxaccum.vrxs_ierrors;
	ifp->if_opackets = txaccum.vtxs_opackets;
#ifndef VTNET_LEGACY_TX
	ifp->if_obytes = txaccum.vtxs_obytes;
	ifp->if_omcasts = txaccum.vtxs_omcasts;
#endif
}

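/*
 * Once-per-second callout: refresh the statistics and check each active
 * transmit queue's watchdog. On a timeout the interface is reset via
 * vtnet_init_locked(); otherwise the callout is rescheduled.
 */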
static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int i, timedout;

	sc = xsc;
	ifp = sc->vtnet_ifp;
	timedout = 0;

	VTNET_CORE_LOCK_ASSERT(sc);
	vtnet_accumulate_stats(sc);

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);

	if (timedout != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc);
	} else
		callout_schedule(&sc->vtnet_tick_ch, hz);
}

static void
vtnet_start_taskqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, error;

	dev = sc->vtnet_dev;

	/*
	 * Errors here are very difficult to recover from - we cannot
	 * easily fail because, if this is during boot, we will hang
	 * when freeing any successfully started taskqueues because
	 * the scheduler isn't up yet.
	 *
	 * Most drivers just ignore the return value - it only fails
	 * with ENOMEM so an error is not likely.
	 */
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
		if (error) {
			device_printf(dev, "failed to start rx taskq %d\n",
			    rxq->vtnrx_id);
		}

		txq = &sc->vtnet_txqs[i];
		error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
		    "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
		if (error) {
			device_printf(dev, "failed to start tx taskq %d\n",
			    txq->vtntx_id);
		}
	}
}

static void
vtnet_free_taskqueues(struct vtnet_softc *sc)
{
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		if (rxq->vtnrx_tq != NULL) {
			taskqueue_free(rxq->vtnrx_tq);
			rxq->vtnrx_tq = NULL;
		}

		txq = &sc->vtnet_txqs[i];
		if (txq->vtntx_tq != NULL) {
			taskqueue_free(txq->vtntx_tq);
			txq->vtntx_tq = NULL;
		}
	}
}

static void
vtnet_drain_taskqueues(struct vtnet_softc *sc)
{
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		if (rxq->vtnrx_tq != NULL)
			taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);

		txq = &sc->vtnet_txqs[i];
		if (txq->vtntx_tq != NULL) {
			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
#ifndef VTNET_LEGACY_TX
			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
#endif
		}
	}
}

static void
vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
{
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i;

#ifdef DEV_NETMAP
	if (nm_native_on(NA(sc->vtnet_ifp)))
		return;
#endif /* DEV_NETMAP */

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		vtnet_rxq_free_mbufs(rxq);

		txq = &sc->vtnet_txqs[i];
		vtnet_txq_free_mbufs(txq);
	}
}

static void
vtnet_stop_rendezvous(struct vtnet_softc *sc)
{
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i;

	/*
	 * Lock and unlock the per-queue mutex so we know the stop
	 * state is visible. Doing only the active queues should be
	 * sufficient, but it does not cost much extra to do all the
	 * queues. Note we hold the core mutex here too.
	 */
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		VTNET_RXQ_LOCK(rxq);
		VTNET_RXQ_UNLOCK(rxq);

		txq = &sc->vtnet_txqs[i];
		VTNET_TXQ_LOCK(txq);
		VTNET_TXQ_UNLOCK(txq);
	}
}

static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->vtnet_link_active = 0;
	callout_stop(&sc->vtnet_tick_ch);

	/* Only advisory. */
	vtnet_disable_interrupts(sc);

	/*
	 * Stop the host adapter. This resets it to the pre-initialized
	 * state. It will not generate any interrupts until after it is
	 * reinitialized.
	 */
	virtio_stop(dev);
	vtnet_stop_rendezvous(sc);

	/* Free any mbufs left in the virtqueues. */
	vtnet_drain_rxtx_queues(sc);
}

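/*
 * Renegotiate the feature set with the host after a stop/reset. Disabled
 * receive-side offloads (checksum, LRO, VLAN filtering) must be removed
 * from the negotiated feature bits; transmit-side offloads depend only
 * on our own if_capenable and if_hwassist settings.
 */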
static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint64_t features;
	int mask, error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	mask = 0;
#if defined(INET)
	mask |= IFCAP_RXCSUM;
#endif
#if defined(INET6)
	mask |= IFCAP_RXCSUM_IPV6;
#endif

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */

	if (ifp->if_capabilities & mask) {
		/*
		 * We require both IPv4 and IPv6 offloading to be enabled
		 * in order to negotiate it: VirtIO does not distinguish
		 * between the two.
		 */
		if ((ifp->if_capenable & mask) != mask)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	error = virtio_reinit(dev, features);
	if (error)
		device_printf(dev, "virtio reinit error %d\n", error);

	return (error);
}

static void
vtnet_init_rx_filters(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		/* Restore promiscuous and all-multicast modes. */
		vtnet_rx_filter(sc);
		/* Restore filtered MAC addresses. */
		vtnet_rx_filter_mac(sc);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		vtnet_rx_filter_vlan(sc);
}

static int
vtnet_init_rx_queues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_rxq *rxq;
	int i, clsize, error;

	dev = sc->vtnet_dev;

	/*
	 * Use the new cluster size if one has been set (via an MTU
	 * change). Otherwise, use the standard 2K clusters.
	 *
	 * BMV: It might make sense to use page sized clusters as
	 * the default (depending on the features negotiated).
	 */
	if (sc->vtnet_rx_new_clsize != 0) {
		clsize = sc->vtnet_rx_new_clsize;
		sc->vtnet_rx_new_clsize = 0;
	} else
		clsize = MCLBYTES;

	sc->vtnet_rx_clsize = clsize;
	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);

	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
	    ("%s: too many rx mbufs %d for %d segments", __func__,
	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));

#ifdef DEV_NETMAP
	if (vtnet_netmap_init_rx_buffers(sc))
		return (0);
#endif /* DEV_NETMAP */

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];

		/* Hold the lock to satisfy asserts. */
		VTNET_RXQ_LOCK(rxq);
		error = vtnet_rxq_populate(rxq);
		VTNET_RXQ_UNLOCK(rxq);

		if (error) {
			device_printf(dev,
			    "cannot allocate mbufs for Rx queue %d\n", i);
			return (error);
		}
	}

	return (0);
}

static int
vtnet_init_tx_queues(struct vtnet_softc *sc)
{
	struct vtnet_txq *txq;
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		txq = &sc->vtnet_txqs[i];
		txq->vtntx_watchdog = 0;
	}

	return (0);
}

static int
vtnet_init_rxtx_queues(struct vtnet_softc *sc)
{
	int error;

	error = vtnet_init_rx_queues(sc);
	if (error)
		return (error);

	error = vtnet_init_tx_queues(sc);
	if (error)
		return (error);

	return (0);
}

static void
vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
{
	device_t dev;
	int npairs;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
		sc->vtnet_act_vq_pairs = 1;
		return;
	}

	npairs = sc->vtnet_requested_vq_pairs;

	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
		device_printf(dev,
		    "cannot set active queue pairs to %d\n", npairs);
		npairs = 1;
	}

	sc->vtnet_act_vq_pairs = npairs;
}

static int
vtnet_reinit(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	int error;

	ifp = sc->vtnet_ifp;

	/* Use the current MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	vtnet_set_active_vq_pairs(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		vtnet_init_rx_filters(sc);

	error = vtnet_init_rxtx_queues(sc);
	if (error)
		return (error);

	vtnet_enable_interrupts(sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (0);
}

static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	vtnet_stop(sc);

	/* Reinitialize with the host. */
	if (vtnet_virtio_reinit(sc) != 0)
		goto fail;

	if (vtnet_reinit(sc) != 0)
		goto fail;

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);

	return;

fail:
	vtnet_stop(sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

#ifdef DEV_NETMAP
	if (!NA(sc->vtnet_ifp)) {
		D("try to attach again");
		vtnet_netmap_attach(sc);
	}
#endif /* DEV_NETMAP */

	VTNET_CORE_LOCK(sc);
	vtnet_init_locked(sc);
	VTNET_CORE_UNLOCK(sc);
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;

	vq = sc->vtnet_ctrl_vq;

	/*
	 * The control virtqueue is only polled and therefore it should
	 * already be empty.
	 */
	KASSERT(virtqueue_empty(vq),
	    ("%s: ctrl vq %p not empty", __func__, vq));
}

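/*
 * Execute one command on the control virtqueue: enqueue the sglist (the
 * readable segments first, then the writable ack), notify the host, and
 * busy-wait for completion. The core lock must be held; the control
 * virtqueue is only ever polled, never interrupt driven.
 */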
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;

	vq = sc->vtnet_ctrl_vq;

	VTNET_CORE_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("%s: CTRL_VQ feature not negotiated", __func__));

	if (!virtqueue_empty(vq))
		return;
	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	/*
	 * Poll for the response, but the command is likely already
	 * done when we return from the notify.
	 */
	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);
}

static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

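/*
 * Control commands all share one wire format: a virtio_net_ctrl_hdr with
 * the class and command, a command-specific payload, and a single ack
 * byte the host writes back (VIRTIO_NET_OK or VIRTIO_NET_ERR). The
 * anonymous struct below keeps all three pieces in a single stack
 * allocation that lives for the duration of the polled command.
 */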
static int
vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr;
		uint8_t pad1;
		struct virtio_net_ctrl_mq mq;
		uint8_t pad2;
		uint8_t ack;
	} s __aligned(2);
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_MQ;
	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	s.mq.virtqueue_pairs = npairs;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding MQ message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr;
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s __aligned(2);
	int error;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = !!on;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding Rx message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}

/*
 * The device defaults to promiscuous mode for backwards compatibility.
 * Turn it off at attach time if possible.
 */
static void
vtnet_attach_disable_promisc(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
		ifp->if_flags |= IFF_PROMISC;
	} else if (vtnet_set_promisc(sc, 0) != 0) {
		ifp->if_flags |= IFF_PROMISC;
		device_printf(sc->vtnet_dev,
		    "cannot disable default promiscuous mode\n");
	}
	VTNET_CORE_UNLOCK(sc);
}

static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}

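/*
 * Program the host's MAC filter table from the interface's unicast and
 * multicast address lists. If either list exceeds VTNET_MAX_MAC_ENTRIES,
 * fall back to promiscuous or all-multicast mode respectively rather
 * than filter incorrectly.
 */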
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	filter = sc->vtnet_mac_filter;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;

	VTNET_CORE_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	/* Unicast MAC addresses: */
	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
			promisc = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	if_addr_runlock(ifp);

	if (promisc != 0) {
		filter->vmf_unicast.nentries = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
			allmulti = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (allmulti != 0) {
		filter->vmf_multicast.nentries = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc != 0 && allmulti != 0)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}

static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr;
		uint8_t pad1;
		uint16_t tag;
		uint8_t pad2;
		uint8_t ack;
	} s __aligned(2);
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = tag;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding VLAN message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	uint32_t w;
	uint16_t tag;
	int i, bit;

	VTNET_CORE_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("%s: VLAN_FILTER feature not negotiated", __func__));

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
		w = sc->vtnet_vlan_filter[i];

		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}
}

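/*
 * Track VLAN IDs in a local bitmap (32 IDs per word) so the host table
 * can be reprogrammed after a reinit: for a 12-bit tag, idx selects the
 * word and bit selects the position within it.
 */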
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	VTNET_CORE_LOCK(sc);

	if (add)
		sc->vtnet_vlan_filter[idx] |= (1 << bit);
	else
		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
		device_printf(sc->vtnet_dev,
		    "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	VTNET_CORE_UNLOCK(sc);
}

static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
		status = VIRTIO_NET_S_LINK_UP;
	else
		status = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	int link;

	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);
	link = vtnet_is_link_up(sc);

	/* Notify if the link status has changed. */
	if (link != 0 && sc->vtnet_link_active == 0) {
		sc->vtnet_link_active = 1;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (link == 0 && sc->vtnet_link_active != 0) {
		sc->vtnet_link_active = 0;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	VTNET_CORE_LOCK(sc);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
	VTNET_CORE_UNLOCK(sc);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;
	int i;

	dev = sc->vtnet_dev;

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			virtio_write_dev_config_1(dev,
			    offsetof(struct virtio_net_config, mac) + i,
			    sc->vtnet_hwaddr[i]);
		}
	}
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;
	int i;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
		sc->vtnet_hwaddr[0] = 0xB2;
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
		vtnet_set_hwaddr(sc);
		return;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
		    offsetof(struct virtio_net_config, mac) + i);
	}
}

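/*
 * Move a received 802.1Q tag out of the frame and into the mbuf packet
 * header, where the VLAN layer expects to find it.
 */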
static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

static void
vtnet_set_rx_process_limit(struct vtnet_softc *sc)
{
	int limit;

	limit = vtnet_tunable_int(sc, "rx_process_limit",
	    vtnet_rx_process_limit);
	if (limit < 0)
		limit = INT_MAX;
	sc->vtnet_rx_process_limit = limit;
}

static void
vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
{
	device_t dev;
	int size, thresh;

	dev = sc->vtnet_dev;
	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);

	/*
	 * The Tx interrupt is disabled until the queue free count falls
	 * below our threshold. Completed frames are drained from the Tx
	 * virtqueue before transmitting new frames and in the watchdog
	 * callout, so the frequency of Tx interrupts is greatly reduced,
	 * at the cost of not freeing mbufs as quickly as they otherwise
	 * would be.
	 *
	 * N.B. We assume all the Tx queues are the same size.
	 */
	thresh = size / 4;

	/*
	 * Without indirect descriptors, leave enough room for the most
	 * segments we handle.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
	    thresh < sc->vtnet_tx_nsegs)
		thresh = sc->vtnet_tx_nsegs;

	sc->vtnet_tx_intr_thresh = thresh;
}

static void
vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	struct vtnet_rxq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD, NULL, "Receive Queue");
	list = SYSCTL_CHILDREN(node);

	stats = &rxq->vtnrx_stats;

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
	    &stats->vrxs_ipackets, "Receive packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
	    &stats->vrxs_ibytes, "Receive bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
	    &stats->vrxs_iqdrops, "Receive drops");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
	    &stats->vrxs_ierrors, "Receive errors");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vrxs_csum, "Receive checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
	    &stats->vrxs_rescheduled,
	    "Receive interrupt handler rescheduled");
}

static void
vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_txq *txq)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	struct vtnet_txq_stats *stats;
	char namebuf[16];

	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
	    CTLFLAG_RD, NULL, "Transmit Queue");
	list = SYSCTL_CHILDREN(node);

	stats = &txq->vtntx_stats;

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
	    &stats->vtxs_opackets, "Transmit packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
	    &stats->vtxs_obytes, "Transmit bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
	    &stats->vtxs_omcasts, "Transmit multicasts");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vtxs_csum, "Transmit checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
	    &stats->vtxs_tso, "Transmit segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
	    &stats->vtxs_rescheduled,
	    "Transmit interrupt handler rescheduled");
}

static void
vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	int i;

	dev = sc->vtnet_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
	}
}

static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_softc *sc)
{
	struct vtnet_statistics *stats;

	stats = &sc->vtnet_stats;

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
	    CTLFLAG_RD, &stats->rx_csum_bad_proto,
	    "Received checksum offloaded buffer with incorrect protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
	    "Aborted transmit of TSO buffer with non-TCP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD, &stats->tx_defragged,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD, &stats->tx_defrag_failed,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}

static void
vtnet_setup_sysctl(struct vtnet_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
	    "Maximum number of supported virtqueue pairs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
	    "Requested number of virtqueue pairs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
	    "Number of active virtqueue pairs");

	vtnet_setup_stat_sysctl(ctx, child, sc);
}

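/*
 * Thin wrappers around the virtqueue interrupt primitives. The Tx
 * variant keeps the interrupt disabled unless the free descriptor count
 * has dropped below the threshold computed in
 * vtnet_set_tx_intr_threshold().
 */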
static int
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
{

	return (virtqueue_enable_intr(rxq->vtnrx_vq));
}

static void
vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
{

	virtqueue_disable_intr(rxq->vtnrx_vq);
}

static int
vtnet_txq_enable_intr(struct vtnet_txq *txq)
{
	struct virtqueue *vq;

	vq = txq->vtntx_vq;

	if (vtnet_txq_below_threshold(txq) != 0)
		return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));

	/*
	 * The free count is above our threshold. Keep the Tx interrupt
	 * disabled until the queue is fuller.
	 */
	return (0);
}

static void
vtnet_txq_disable_intr(struct vtnet_txq *txq)
{

	virtqueue_disable_intr(txq->vtntx_vq);
}

static void
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
}

static void
vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
}

static void
vtnet_enable_interrupts(struct vtnet_softc *sc)
{

	vtnet_enable_rx_interrupts(sc);
	vtnet_enable_tx_interrupts(sc);
}

static void
vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
}

static void
vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
}

static void
vtnet_disable_interrupts(struct vtnet_softc *sc)
{

	vtnet_disable_rx_interrupts(sc);
	vtnet_disable_tx_interrupts(sc);
}

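/*
 * Fetch a per-device tunable, e.g. "hw.vtnet.0.rx_process_limit",
 * returning the supplied default when the tunable is not set.
 */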
static int
vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}