2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 /* Driver for VirtIO network devices. */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/sockio.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/random.h>
42 #include <sys/sglist.h>
44 #include <sys/mutex.h>
45 #include <sys/taskqueue.h>
47 #include <machine/smp.h>
51 #include <net/ethernet.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_types.h>
56 #include <net/if_media.h>
57 #include <net/if_vlan_var.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 #include <netinet/ip6.h>
65 #include <netinet6/ip6_var.h>
66 #include <netinet/udp.h>
67 #include <netinet/tcp.h>
68 #include <netinet/sctp.h>
70 #include <machine/bus.h>
71 #include <machine/resource.h>
75 #include <dev/virtio/virtio.h>
76 #include <dev/virtio/virtqueue.h>
77 #include <dev/virtio/network/virtio_net.h>
78 #include <dev/virtio/network/if_vtnetvar.h>
80 #include "virtio_if.h"
83 #include "opt_inet6.h"
85 static int vtnet_modevent(module_t, int, void *);
87 static int vtnet_probe(device_t);
88 static int vtnet_attach(device_t);
89 static int vtnet_detach(device_t);
90 static int vtnet_suspend(device_t);
91 static int vtnet_resume(device_t);
92 static int vtnet_shutdown(device_t);
93 static int vtnet_attach_completed(device_t);
94 static int vtnet_config_change(device_t);
96 static void vtnet_negotiate_features(struct vtnet_softc *);
97 static void vtnet_setup_features(struct vtnet_softc *);
98 static int vtnet_init_rxq(struct vtnet_softc *, int);
99 static int vtnet_init_txq(struct vtnet_softc *, int);
100 static int vtnet_alloc_rxtx_queues(struct vtnet_softc *);
101 static void vtnet_free_rxtx_queues(struct vtnet_softc *);
102 static int vtnet_alloc_rx_filters(struct vtnet_softc *);
103 static void vtnet_free_rx_filters(struct vtnet_softc *);
104 static int vtnet_alloc_virtqueues(struct vtnet_softc *);
105 static int vtnet_setup_interface(struct vtnet_softc *);
106 static int vtnet_change_mtu(struct vtnet_softc *, int);
107 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t);
109 static int vtnet_rxq_populate(struct vtnet_rxq *);
110 static void vtnet_rxq_free_mbufs(struct vtnet_rxq *);
112 static struct mbuf *vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
113 static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
115 static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
116 static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
117 static int vtnet_rxq_new_buf(struct vtnet_rxq *);
118 static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
119 struct virtio_net_hdr *);
120 static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
121 static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
122 static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
123 static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
124 struct virtio_net_hdr *);
125 static int vtnet_rxq_eof(struct vtnet_rxq *);
126 static void vtnet_rx_vq_intr(void *);
127 static void vtnet_rxq_tq_intr(void *, int);
129 static void vtnet_txq_free_mbufs(struct vtnet_txq *);
130 static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
131 int *, int *, int *);
132 static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
133 int, struct virtio_net_hdr *);
135 static struct mbuf *vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
136 struct virtio_net_hdr *);
137 static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
138 struct vtnet_tx_header *);
139 static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
140 #ifdef VTNET_LEGACY_TX
141 static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
142 static void vtnet_start(struct ifnet *);
144 static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
145 static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
146 static void vtnet_txq_tq_deferred(void *, int);
148 static void vtnet_txq_tq_intr(void *, int);
149 static void vtnet_txq_eof(struct vtnet_txq *);
150 static void vtnet_tx_vq_intr(void *);
151 static void vtnet_tx_start_all(struct vtnet_softc *);
153 #ifndef VTNET_LEGACY_TX
154 static void vtnet_qflush(struct ifnet *);
157 static int vtnet_watchdog(struct vtnet_txq *);
158 static void vtnet_rxq_accum_stats(struct vtnet_rxq *,
159 struct vtnet_rxq_stats *);
160 static void vtnet_txq_accum_stats(struct vtnet_txq *,
161 struct vtnet_txq_stats *);
162 static void vtnet_accumulate_stats(struct vtnet_softc *);
163 static void vtnet_tick(void *);
165 static void vtnet_start_taskqueues(struct vtnet_softc *);
166 static void vtnet_free_taskqueues(struct vtnet_softc *);
167 static void vtnet_drain_taskqueues(struct vtnet_softc *);
169 static void vtnet_drain_rxtx_queues(struct vtnet_softc *);
170 static void vtnet_stop_rendezvous(struct vtnet_softc *);
171 static void vtnet_stop(struct vtnet_softc *);
172 static int vtnet_virtio_reinit(struct vtnet_softc *);
173 static void vtnet_init_rx_filters(struct vtnet_softc *);
174 static int vtnet_init_rx_queues(struct vtnet_softc *);
175 static int vtnet_init_tx_queues(struct vtnet_softc *);
176 static int vtnet_init_rxtx_queues(struct vtnet_softc *);
177 static void vtnet_set_active_vq_pairs(struct vtnet_softc *);
178 static int vtnet_reinit(struct vtnet_softc *);
179 static void vtnet_init_locked(struct vtnet_softc *);
180 static void vtnet_init(void *);
182 static void vtnet_free_ctrl_vq(struct vtnet_softc *);
183 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
184 struct sglist *, int, int);
185 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
186 static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
187 static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
188 static int vtnet_set_promisc(struct vtnet_softc *, int);
189 static int vtnet_set_allmulti(struct vtnet_softc *, int);
190 static void vtnet_attach_disable_promisc(struct vtnet_softc *);
191 static void vtnet_rx_filter(struct vtnet_softc *);
192 static void vtnet_rx_filter_mac(struct vtnet_softc *);
193 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
194 static void vtnet_rx_filter_vlan(struct vtnet_softc *);
195 static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
196 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t);
197 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
199 static int vtnet_is_link_up(struct vtnet_softc *);
200 static void vtnet_update_link_status(struct vtnet_softc *);
201 static int vtnet_ifmedia_upd(struct ifnet *);
202 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
203 static void vtnet_get_hwaddr(struct vtnet_softc *);
204 static void vtnet_set_hwaddr(struct vtnet_softc *);
205 static void vtnet_vlan_tag_remove(struct mbuf *);
207 static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
208 struct sysctl_oid_list *, struct vtnet_rxq *);
209 static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
210 struct sysctl_oid_list *, struct vtnet_txq *);
211 static void vtnet_setup_queue_sysctl(struct vtnet_softc *);
212 static void vtnet_setup_sysctl(struct vtnet_softc *);
214 static int vtnet_rxq_enable_intr(struct vtnet_rxq *);
215 static void vtnet_rxq_disable_intr(struct vtnet_rxq *);
216 static int vtnet_txq_enable_intr(struct vtnet_txq *);
217 static void vtnet_txq_disable_intr(struct vtnet_txq *);
218 static void vtnet_enable_rx_interrupts(struct vtnet_softc *);
219 static void vtnet_enable_tx_interrupts(struct vtnet_softc *);
220 static void vtnet_enable_interrupts(struct vtnet_softc *);
221 static void vtnet_disable_rx_interrupts(struct vtnet_softc *);
222 static void vtnet_disable_tx_interrupts(struct vtnet_softc *);
223 static void vtnet_disable_interrupts(struct vtnet_softc *);
225 static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
228 static int vtnet_csum_disable = 0;
229 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
230 static int vtnet_tso_disable = 0;
231 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
232 static int vtnet_lro_disable = 0;
233 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
234 static int vtnet_mq_disable = 0;
235 TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
236 static int vtnet_mq_max_pairs = 0;
237 TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
238 static int vtnet_rx_process_limit = 512;
239 TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
242 * Reducing the number of transmit completed interrupts can improve
243 * performance. To do so, the define below keeps the Tx vq interrupt
244 * disabled and adds calls to vtnet_txeof() in the start and watchdog
245 * paths. The price to pay for this is that the m_free'ing of transmitted
246 * mbufs may be delayed until the watchdog fires.
248 * BMV: Reintroduce this later as a run-time option, if it makes
249 * sense after the EVENT_IDX feature is supported.
251 * #define VTNET_TX_INTR_MODERATION
254 static uma_zone_t vtnet_tx_header_zone;
256 static struct virtio_feature_desc vtnet_feature_desc[] = {
257 { VIRTIO_NET_F_CSUM, "TxChecksum" },
258 { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
259 { VIRTIO_NET_F_MAC, "MacAddress" },
260 { VIRTIO_NET_F_GSO, "TxAllGSO" },
261 { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
262 { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
263 { VIRTIO_NET_F_GUEST_ECN, "RxECN" },
264 { VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
265 { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
266 { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
267 { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
268 { VIRTIO_NET_F_HOST_UFO, "TxUFO" },
269 { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
270 { VIRTIO_NET_F_STATUS, "Status" },
271 { VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
272 { VIRTIO_NET_F_CTRL_RX, "RxMode" },
273 { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
274 { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
275 { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
276 { VIRTIO_NET_F_MQ, "Multiqueue" },
277 { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },
282 static device_method_t vtnet_methods[] = {
283 /* Device methods. */
284 DEVMETHOD(device_probe, vtnet_probe),
285 DEVMETHOD(device_attach, vtnet_attach),
286 DEVMETHOD(device_detach, vtnet_detach),
287 DEVMETHOD(device_suspend, vtnet_suspend),
288 DEVMETHOD(device_resume, vtnet_resume),
289 DEVMETHOD(device_shutdown, vtnet_shutdown),
291 /* VirtIO methods. */
292 DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
293 DEVMETHOD(virtio_config_change, vtnet_config_change),
298 static driver_t vtnet_driver = {
301 sizeof(struct vtnet_softc)
303 static devclass_t vtnet_devclass;
305 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
307 MODULE_VERSION(vtnet, 1);
308 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
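/*
 * Module event handler: create the UMA zone used for transmit headers at
 * load time and destroy it at unload, refusing the unload while any headers
 * are still outstanding (the uma_zone_get_cur() check below).
 */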
311 vtnet_modevent(module_t mod, int type, void *unused)
319 vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
320 sizeof(struct vtnet_tx_header),
321 NULL, NULL, NULL, NULL, 0, 0);
325 if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
327 else if (type == MOD_UNLOAD) {
328 uma_zdestroy(vtnet_tx_header_zone);
329 vtnet_tx_header_zone = NULL;
343 vtnet_probe(device_t dev)
346 if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
349 device_set_desc(dev, "VirtIO Networking Adapter");
351 return (BUS_PROBE_DEFAULT);
355 vtnet_attach(device_t dev)
357 struct vtnet_softc *sc;
360 sc = device_get_softc(dev);
363 /* Register our feature descriptions. */
364 virtio_set_feature_desc(dev, vtnet_feature_desc);
366 VTNET_CORE_LOCK_INIT(sc);
367 callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
369 vtnet_setup_sysctl(sc);
370 vtnet_setup_features(sc);
372 error = vtnet_alloc_rx_filters(sc);
374 device_printf(dev, "cannot allocate Rx filters\n");
378 error = vtnet_alloc_rxtx_queues(sc);
380 device_printf(dev, "cannot allocate queues\n");
384 error = vtnet_alloc_virtqueues(sc);
386 device_printf(dev, "cannot allocate virtqueues\n");
390 error = vtnet_setup_interface(sc);
392 device_printf(dev, "cannot setup interface\n");
396 error = virtio_setup_intr(dev, INTR_TYPE_NET);
398 device_printf(dev, "cannot setup virtqueue interrupts\n");
399 /* BMV: This will crash if it happens during boot! */
400 ether_ifdetach(sc->vtnet_ifp);
404 vtnet_start_taskqueues(sc);
414 vtnet_detach(device_t dev)
416 struct vtnet_softc *sc;
419 sc = device_get_softc(dev);
422 if (device_is_attached(dev)) {
425 VTNET_CORE_UNLOCK(sc);
427 callout_drain(&sc->vtnet_tick_ch);
428 vtnet_drain_taskqueues(sc);
433 vtnet_free_taskqueues(sc);
435 if (sc->vtnet_vlan_attach != NULL) {
436 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
437 sc->vtnet_vlan_attach = NULL;
439 if (sc->vtnet_vlan_detach != NULL) {
440 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
441 sc->vtnet_vlan_detach = NULL;
444 ifmedia_removeall(&sc->vtnet_media);
448 sc->vtnet_ifp = NULL;
451 vtnet_free_rxtx_queues(sc);
452 vtnet_free_rx_filters(sc);
454 if (sc->vtnet_ctrl_vq != NULL)
455 vtnet_free_ctrl_vq(sc);
457 VTNET_CORE_LOCK_DESTROY(sc);
463 vtnet_suspend(device_t dev)
465 struct vtnet_softc *sc;
467 sc = device_get_softc(dev);
471 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
472 VTNET_CORE_UNLOCK(sc);
478 vtnet_resume(device_t dev)
480 struct vtnet_softc *sc;
483 sc = device_get_softc(dev);
487 if (ifp->if_flags & IFF_UP)
488 vtnet_init_locked(sc);
489 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
490 VTNET_CORE_UNLOCK(sc);
496 vtnet_shutdown(device_t dev)
500 * Suspend already does all of what we need to
501 * do here; we just never expect to be resumed.
503 return (vtnet_suspend(dev));
507 vtnet_attach_completed(device_t dev)
510 vtnet_attach_disable_promisc(device_get_softc(dev));
516 vtnet_config_change(device_t dev)
518 struct vtnet_softc *sc;
520 sc = device_get_softc(dev);
523 vtnet_update_link_status(sc);
524 if (sc->vtnet_link_active != 0)
525 vtnet_tx_start_all(sc);
526 VTNET_CORE_UNLOCK(sc);
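/*
 * Negotiate the feature set with the host, masking off any features the
 * administrator disabled through the hw.vtnet.* tunables, and renegotiating
 * without LRO when neither mergeable buffers nor indirect descriptors are
 * available.
 */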
532 vtnet_negotiate_features(struct vtnet_softc *sc)
535 uint64_t mask, features;
541 * TSO and LRO are only available when their corresponding checksum
542 * offload feature is also negotiated.
544 if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
545 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
546 mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
548 if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
549 mask |= VTNET_TSO_FEATURES;
550 if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
551 mask |= VTNET_LRO_FEATURES;
552 if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
553 mask |= VIRTIO_NET_F_MQ;
554 #ifdef VTNET_LEGACY_TX
555 mask |= VIRTIO_NET_F_MQ;
558 features = VTNET_FEATURES & ~mask;
559 sc->vtnet_features = virtio_negotiate_features(dev, features);
561 if (virtio_with_feature(dev, VTNET_LRO_FEATURES) == 0)
563 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
567 * LRO without mergeable buffers requires special care. This is not
568 * ideal because every receive buffer must be large enough to hold
569 * the maximum TCP packet, the Ethernet header, and the VirtIO net header. This
570 * requires up to 34 descriptors with MCLBYTES clusters. If we do
571 * not have indirect descriptors, LRO is disabled since the virtqueue
572 * will not contain very many receive buffers.
574 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
576 "LRO disabled due to both mergeable buffers and indirect "
577 "descriptors not negotiated\n");
579 features &= ~VTNET_LRO_FEATURES;
580 sc->vtnet_features = virtio_negotiate_features(dev, features);
582 sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
586 vtnet_setup_features(struct vtnet_softc *sc)
593 vtnet_negotiate_features(sc);
595 if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
596 sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
598 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
599 /* This feature should always be negotiated. */
600 sc->vtnet_flags |= VTNET_FLAG_MAC;
603 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
604 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
605 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
607 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
609 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
610 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
612 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
613 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
614 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
615 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
616 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
617 sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
620 if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
621 sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
622 max_pairs = virtio_read_dev_config_2(dev,
623 offsetof(struct virtio_net_config, max_virtqueue_pairs));
624 if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
625 max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
632 * Limit the maximum number of queue pairs to the number of
633 * CPUs or the configured maximum. The actual number of
634 * queues that get used may be less.
636 max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
637 if (max > 0 && max_pairs > max)
639 if (max_pairs > mp_ncpus)
640 max_pairs = mp_ncpus;
641 if (max_pairs > VTNET_MAX_QUEUE_PAIRS)
642 max_pairs = VTNET_MAX_QUEUE_PAIRS;
644 sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
647 sc->vtnet_max_vq_pairs = max_pairs;
651 vtnet_init_rxq(struct vtnet_softc *sc, int id)
653 struct vtnet_rxq *rxq;
655 rxq = &sc->vtnet_rxqs[id];
657 snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
658 device_get_nameunit(sc->vtnet_dev), id);
659 mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
664 TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
665 rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
666 taskqueue_thread_enqueue, &rxq->vtnrx_tq);
668 return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
672 vtnet_init_txq(struct vtnet_softc *sc, int id)
674 struct vtnet_txq *txq;
676 txq = &sc->vtnet_txqs[id];
678 snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
679 device_get_nameunit(sc->vtnet_dev), id);
680 mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
685 #ifndef VTNET_LEGACY_TX
686 txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
687 M_NOWAIT, &txq->vtntx_mtx);
688 if (txq->vtntx_br == NULL)
691 TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
693 TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
694 txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
695 taskqueue_thread_enqueue, &txq->vtntx_tq);
696 if (txq->vtntx_tq == NULL)
703 vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
705 int i, npairs, error;
707 npairs = sc->vtnet_max_vq_pairs;
709 sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
711 sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
713 if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
716 for (i = 0; i < npairs; i++) {
717 error = vtnet_init_rxq(sc, i);
720 error = vtnet_init_txq(sc, i);
725 vtnet_setup_queue_sysctl(sc);
731 vtnet_destroy_rxq(struct vtnet_rxq *rxq)
734 rxq->vtnrx_sc = NULL;
737 if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
738 mtx_destroy(&rxq->vtnrx_mtx);
742 vtnet_destroy_txq(struct vtnet_txq *txq)
745 txq->vtntx_sc = NULL;
748 #ifndef VTNET_LEGACY_TX
749 if (txq->vtntx_br != NULL) {
750 buf_ring_free(txq->vtntx_br, M_DEVBUF);
751 txq->vtntx_br = NULL;
755 if (mtx_initialized(&txq->vtntx_mtx) != 0)
756 mtx_destroy(&txq->vtntx_mtx);
760 vtnet_free_rxtx_queues(struct vtnet_softc *sc)
764 if (sc->vtnet_rxqs != NULL) {
765 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
766 vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
767 free(sc->vtnet_rxqs, M_DEVBUF);
768 sc->vtnet_rxqs = NULL;
771 if (sc->vtnet_txqs != NULL) {
772 for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
773 vtnet_destroy_txq(&sc->vtnet_txqs[i]);
774 free(sc->vtnet_txqs, M_DEVBUF);
775 sc->vtnet_txqs = NULL;
780 vtnet_alloc_rx_filters(struct vtnet_softc *sc)
783 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
784 sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
785 M_DEVBUF, M_NOWAIT | M_ZERO);
786 if (sc->vtnet_mac_filter == NULL)
790 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
791 sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
792 VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
793 if (sc->vtnet_vlan_filter == NULL)
801 vtnet_free_rx_filters(struct vtnet_softc *sc)
804 if (sc->vtnet_mac_filter != NULL) {
805 free(sc->vtnet_mac_filter, M_DEVBUF);
806 sc->vtnet_mac_filter = NULL;
809 if (sc->vtnet_vlan_filter != NULL) {
810 free(sc->vtnet_vlan_filter, M_DEVBUF);
811 sc->vtnet_vlan_filter = NULL;
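/*
 * Size and allocate one Rx/Tx virtqueue pair per supported queue pair, plus
 * the control virtqueue when negotiated. The number of Rx segments depends
 * on whether mergeable buffers or LRO_NOMRG are in use.
 */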
816 vtnet_alloc_virtqueues(struct vtnet_softc *sc)
819 struct vq_alloc_info *info;
820 struct vtnet_rxq *rxq;
821 struct vtnet_txq *txq;
822 int i, idx, flags, nvqs, rxsegs, error;
828 * Indirect descriptors are not needed for the Rx virtqueue when
829 * mergeable buffers are negotiated. The header is placed inline
830 * with the data, not in a separate descriptor, and mbuf clusters
831 * are always physically contiguous.
833 if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
835 else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
836 rxsegs = VTNET_MAX_RX_SEGS;
838 rxsegs = VTNET_MIN_RX_SEGS;
840 nvqs = sc->vtnet_max_vq_pairs * 2;
841 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
844 info = malloc(sizeof(struct vq_alloc_info) * nvqs , M_TEMP, M_NOWAIT);
848 for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
849 rxq = &sc->vtnet_rxqs[i];
850 VQ_ALLOC_INFO_INIT(&info[idx], rxsegs,
851 vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
852 "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
854 txq = &sc->vtnet_txqs[i];
855 VQ_ALLOC_INFO_INIT(&info[idx+1], VTNET_MAX_TX_SEGS,
856 vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
857 "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
860 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
861 VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
862 &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
866 * Enable interrupt binding if this is multiqueue. This only matters
867 * when per-vq MSIX is available.
869 if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
872 error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
879 vtnet_setup_interface(struct vtnet_softc *sc)
887 ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
889 device_printf(dev, "cannot allocate ifnet structure\n");
893 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
894 if_initbaudrate(ifp, IF_Gbps(10)); /* Approx. */
896 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
897 ifp->if_init = vtnet_init;
898 ifp->if_ioctl = vtnet_ioctl;
900 #ifndef VTNET_LEGACY_TX
901 ifp->if_transmit = vtnet_txq_mq_start;
902 ifp->if_qflush = vtnet_qflush;
904 struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
905 ifp->if_start = vtnet_start;
906 IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
907 ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
908 IFQ_SET_READY(&ifp->if_snd);
911 ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
913 ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
914 ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
916 /* Read (or generate) the MAC address for the adapter. */
917 vtnet_get_hwaddr(sc);
919 ether_ifattach(ifp, sc->vtnet_hwaddr);
921 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
922 ifp->if_capabilities |= IFCAP_LINKSTATE;
924 /* Tell the upper layer(s) we support long frames. */
925 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
926 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
928 if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
929 ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
931 if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
932 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
933 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
935 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
936 ifp->if_capabilities |= IFCAP_TSO4;
937 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
938 ifp->if_capabilities |= IFCAP_TSO6;
939 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
940 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
943 if (ifp->if_capabilities & IFCAP_TSO)
944 ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
947 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM))
948 ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
950 if (ifp->if_capabilities & IFCAP_HWCSUM) {
952 * VirtIO does not support VLAN tagging, but we can fake
953 * it by inserting and removing the 802.1Q header during
954 * transmit and receive. We are then able to do checksum
955 * offloading of VLAN frames.
957 ifp->if_capabilities |=
958 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
961 ifp->if_capenable = ifp->if_capabilities;
964 * Capabilities after here are not enabled by default.
967 if (ifp->if_capabilities & IFCAP_RXCSUM) {
968 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
969 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
970 ifp->if_capabilities |= IFCAP_LRO;
973 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
974 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
976 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
977 vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
978 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
979 vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
982 limit = vtnet_tunable_int(sc, "rx_process_limit",
983 vtnet_rx_process_limit);
986 sc->vtnet_rx_process_limit = limit;
992 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
995 int frame_size, clsize;
999 if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
1002 frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
1006 * Based on the new MTU (and hence frame size) determine which
1007 * cluster size is most appropriate for the receive queues.
1009 if (frame_size <= MCLBYTES) {
1011 } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1012 /* Avoid going past 9K jumbos. */
1013 if (frame_size > MJUM9BYTES)
1015 clsize = MJUM9BYTES;
1017 clsize = MJUMPAGESIZE;
1019 ifp->if_mtu = new_mtu;
1020 sc->vtnet_rx_new_clsize = clsize;
1022 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1023 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1024 vtnet_init_locked(sc);
1031 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1033 struct vtnet_softc *sc;
1035 int reinit, mask, error;
1038 ifr = (struct ifreq *) data;
1043 if (ifp->if_mtu != ifr->ifr_mtu) {
1044 VTNET_CORE_LOCK(sc);
1045 error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1046 VTNET_CORE_UNLOCK(sc);
1051 VTNET_CORE_LOCK(sc);
1052 if ((ifp->if_flags & IFF_UP) == 0) {
1053 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1055 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1056 if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1057 (IFF_PROMISC | IFF_ALLMULTI)) {
1058 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1059 vtnet_rx_filter(sc);
1064 vtnet_init_locked(sc);
1067 sc->vtnet_if_flags = ifp->if_flags;
1068 VTNET_CORE_UNLOCK(sc);
1073 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1075 VTNET_CORE_LOCK(sc);
1076 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1077 vtnet_rx_filter_mac(sc);
1078 VTNET_CORE_UNLOCK(sc);
1083 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
1087 VTNET_CORE_LOCK(sc);
1088 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1090 if (mask & IFCAP_TXCSUM)
1091 ifp->if_capenable ^= IFCAP_TXCSUM;
1092 if (mask & IFCAP_TXCSUM_IPV6)
1093 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1094 if (mask & IFCAP_TSO4)
1095 ifp->if_capenable ^= IFCAP_TSO4;
1096 if (mask & IFCAP_TSO6)
1097 ifp->if_capenable ^= IFCAP_TSO6;
1099 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
1100 IFCAP_VLAN_HWFILTER)) {
1101 /* These Rx features require us to renegotiate. */
1104 if (mask & IFCAP_RXCSUM)
1105 ifp->if_capenable ^= IFCAP_RXCSUM;
1106 if (mask & IFCAP_RXCSUM_IPV6)
1107 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1108 if (mask & IFCAP_LRO)
1109 ifp->if_capenable ^= IFCAP_LRO;
1110 if (mask & IFCAP_VLAN_HWFILTER)
1111 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1115 if (mask & IFCAP_VLAN_HWTSO)
1116 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1117 if (mask & IFCAP_VLAN_HWTAGGING)
1118 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1120 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1121 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1122 vtnet_init_locked(sc);
1125 VTNET_CORE_UNLOCK(sc);
1126 VLAN_CAPABILITIES(ifp);
1131 error = ether_ioctl(ifp, cmd, data);
1135 VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
1141 vtnet_rxq_populate(struct vtnet_rxq *rxq)
1143 struct virtqueue *vq;
1149 for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
1150 error = vtnet_rxq_new_buf(rxq);
1156 virtqueue_notify(vq);
1158 * EMSGSIZE signifies the virtqueue did not have enough
1159 * entries available to hold the last mbuf. This is not
1162 if (error == EMSGSIZE)
1170 vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
1172 struct virtqueue *vq;
1179 while ((m = virtqueue_drain(vq, &last)) != NULL)
1182 KASSERT(virtqueue_empty(vq),
1183 ("%s: mbufs remaining in rx queue %p", __func__, rxq));
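/*
 * Allocate a receive mbuf (chain) of 'nbufs' clusters of vtnet_rx_clsize
 * bytes. Chains longer than one mbuf are only used when LRO without
 * mergeable buffers (LRO_NOMRG) was negotiated; the tail mbuf is optionally
 * returned through 'm_tailp'.
 */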
1186 static struct mbuf *
1187 vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1189 struct mbuf *m_head, *m_tail, *m;
1192 clsize = sc->vtnet_rx_clsize;
1194 KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1195 ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
1197 m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
1201 m_head->m_len = clsize;
1204 /* Allocate the rest of the chain. */
1205 for (i = 1; i < nbufs; i++) {
1206 m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
1215 if (m_tailp != NULL)
1221 sc->vtnet_stats.mbuf_alloc_failed++;
1228 * Slow path for when LRO without mergeable buffers is negotiated.
1231 vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1234 struct vtnet_softc *sc;
1235 struct mbuf *m, *m_prev;
1236 struct mbuf *m_new, *m_tail;
1237 int len, clsize, nreplace, error;
1240 clsize = sc->vtnet_rx_clsize;
1250 * Since these mbuf chains are so large, we avoid allocating an
1251 * entire replacement chain if possible. When the received frame
1252 * did not consume the entire chain, the unused mbufs are moved
1253 * to the replacement chain.
1257 * Something is seriously wrong if we received a frame
1258 * larger than the chain. Drop it.
1261 sc->vtnet_stats.rx_frame_too_large++;
1265 /* We always allocate the same cluster size. */
1266 KASSERT(m->m_len == clsize,
1267 ("%s: mbuf size %d is not the cluster size %d",
1268 __func__, m->m_len, clsize));
1270 m->m_len = MIN(m->m_len, len);
1278 KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
1279 ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
1280 sc->vtnet_rx_nmbufs));
1282 m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1283 if (m_new == NULL) {
1284 m_prev->m_len = clsize;
1289 * Move any unused mbufs from the received chain onto the end
1292 if (m_prev->m_next != NULL) {
1293 m_tail->m_next = m_prev->m_next;
1294 m_prev->m_next = NULL;
1297 error = vtnet_rxq_enqueue_buf(rxq, m_new);
1300 * BAD! We could not enqueue the replacement mbuf chain. We
1301 * must restore the m0 chain to the original state if it was
1302 * modified so we can subsequently discard it.
1304 * NOTE: The replacement is supposed to be an identical copy
1305 * to the one just dequeued so this is an unexpected error.
1307 sc->vtnet_stats.rx_enq_replacement_failed++;
1309 if (m_tail->m_next != NULL) {
1310 m_prev->m_next = m_tail->m_next;
1311 m_tail->m_next = NULL;
1314 m_prev->m_len = clsize;
1322 vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
1324 struct vtnet_softc *sc;
1330 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1331 ("%s: chained mbuf without LRO_NOMRG", __func__));
1333 if (m->m_next == NULL) {
1334 /* Fast-path for the common case of just one mbuf. */
1338 m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1342 error = vtnet_rxq_enqueue_buf(rxq, m_new);
1345 * The new mbuf is supposed to be an identical
1346 * copy of the one just dequeued so this is an
1350 sc->vtnet_stats.rx_enq_replacement_failed++;
1354 error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
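/*
 * Enqueue a receive mbuf on the Rx virtqueue. Without mergeable buffers the
 * virtio_net header lives in the vtnet_rx_header prepended to the cluster;
 * with mergeable buffers the header is simply the start of the frame data,
 * so the whole cluster is appended as-is.
 */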
1360 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1363 struct sglist_seg segs[VTNET_MAX_RX_SEGS];
1364 struct vtnet_softc *sc;
1365 struct vtnet_rx_header *rxhdr;
1370 mdata = mtod(m, uint8_t *);
1372 VTNET_RXQ_LOCK_ASSERT(rxq);
1373 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1374 ("%s: chained mbuf without LRO_NOMRG", __func__));
1375 KASSERT(m->m_len == sc->vtnet_rx_clsize,
1376 ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
1377 sc->vtnet_rx_clsize));
1379 sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
1380 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1381 MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1382 rxhdr = (struct vtnet_rx_header *) mdata;
1383 sglist_append(&sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1384 offset = sizeof(struct vtnet_rx_header);
1388 sglist_append(&sg, mdata + offset, m->m_len - offset);
1389 if (m->m_next != NULL) {
1390 error = sglist_append_mbuf(&sg, m->m_next);
1394 error = virtqueue_enqueue(rxq->vtnrx_vq, m, &sg, 0, sg.sg_nseg);
1400 vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
1402 struct vtnet_softc *sc;
1408 m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
1412 error = vtnet_rxq_enqueue_buf(rxq, m);
1420 * Use the checksum offset in the VirtIO header to set the
1421 * correct CSUM_* flags.
1424 vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
1425 uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1427 struct vtnet_softc *sc;
1428 #if defined(INET) || defined(INET6)
1429 int offset = hdr->csum_start + hdr->csum_offset;
1434 /* Only do a basic sanity check on the offset. */
1438 if (__predict_false(offset < ip_start + sizeof(struct ip)))
1443 case ETHERTYPE_IPV6:
1444 if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
1449 sc->vtnet_stats.rx_csum_bad_ethtype++;
1454 * Use the offset to determine the appropriate CSUM_* flags. This is
1455 * a bit dirty, but we can get by with it since the checksum offsets
1456 * happen to be different. We assume the host does not do IPv4
1457 * header checksum offloading.
1459 switch (hdr->csum_offset) {
1460 case offsetof(struct udphdr, uh_sum):
1461 case offsetof(struct tcphdr, th_sum):
1462 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1463 m->m_pkthdr.csum_data = 0xFFFF;
1465 case offsetof(struct sctphdr, checksum):
1466 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
1469 sc->vtnet_stats.rx_csum_bad_offset++;
1477 vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
1478 uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1480 struct vtnet_softc *sc;
1487 case ETHERTYPE_IP: {
1489 if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
1491 ip = (struct ip *)(m->m_data + ip_start);
1493 offset = ip_start + (ip->ip_hl << 2);
1498 case ETHERTYPE_IPV6:
1499 if (__predict_false(m->m_len < ip_start +
1500 sizeof(struct ip6_hdr)))
1502 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
1503 if (__predict_false(offset < 0))
1508 sc->vtnet_stats.rx_csum_bad_ethtype++;
1514 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
1516 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1517 m->m_pkthdr.csum_data = 0xFFFF;
1520 if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
1522 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1523 m->m_pkthdr.csum_data = 0xFFFF;
1526 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
1528 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
1532 * For the remaining protocols, FreeBSD does not support
1533 * checksum offloading, so the checksum will be recomputed.
1536 if_printf(sc->vtnet_ifp, "%s: cksum offload of unsupported "
1537 "protocol eth_type=%#x proto=%d csum_start=%d "
1538 "csum_offset=%d\n", __func__, eth_type, proto,
1539 hdr->csum_start, hdr->csum_offset);
1548 * Set the appropriate CSUM_* flags. Unfortunately, the information
1549 * provided is not directly useful to us. The VirtIO header gives the
1550 * offset of the checksum, which is all Linux needs, but this is not
1551 * how FreeBSD does things. We are forced to peek inside the packet
1554 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
1555 * could accept the offsets and let the stack figure it out.
1558 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1559 struct virtio_net_hdr *hdr)
1561 struct ether_header *eh;
1562 struct ether_vlan_header *evh;
1566 eh = mtod(m, struct ether_header *);
1567 eth_type = ntohs(eh->ether_type);
1568 if (eth_type == ETHERTYPE_VLAN) {
1569 /* BMV: We should handle nested VLAN tags too. */
1570 evh = mtod(m, struct ether_vlan_header *);
1571 eth_type = ntohs(evh->evl_proto);
1572 offset = sizeof(struct ether_vlan_header);
1574 offset = sizeof(struct ether_header);
1576 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1577 error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
1579 error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
1585 vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
1589 while (--nbufs > 0) {
1590 m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
1593 vtnet_rxq_discard_buf(rxq, m);
1598 vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1603 * Requeue the discarded mbuf. This should always be successful
1604 * since it was just dequeued.
1606 error = vtnet_rxq_enqueue_buf(rxq, m);
1608 ("%s: cannot requeue discarded mbuf %d", __func__, error));
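/*
 * Dequeue the remaining 'nbufs - 1' buffers of a mergeable receive and
 * append them to 'm_head'. On failure, the partially built chain and any
 * outstanding buffers are discarded and the error counters updated.
 */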
1612 vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
1614 struct vtnet_softc *sc;
1616 struct virtqueue *vq;
1617 struct mbuf *m, *m_tail;
1622 ifp = sc->vtnet_ifp;
1625 while (--nbufs > 0) {
1626 m = virtqueue_dequeue(vq, &len);
1628 rxq->vtnrx_stats.vrxs_ierrors++;
1632 if (vtnet_rxq_new_buf(rxq) != 0) {
1633 rxq->vtnrx_stats.vrxs_iqdrops++;
1634 vtnet_rxq_discard_buf(rxq, m);
1636 vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1644 m->m_flags &= ~M_PKTHDR;
1646 m_head->m_pkthdr.len += len;
1654 sc->vtnet_stats.rx_mergeable_failed++;
1661 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1662 struct virtio_net_hdr *hdr)
1664 struct vtnet_softc *sc;
1666 struct ether_header *eh;
1669 ifp = sc->vtnet_ifp;
1671 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1672 eh = mtod(m, struct ether_header *);
1673 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1674 vtnet_vlan_tag_remove(m);
1676 * With the 802.1Q header removed, update the
1677 * checksum starting location accordingly.
1679 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1680 hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
1684 m->m_pkthdr.flowid = rxq->vtnrx_id;
1685 m->m_flags |= M_FLOWID;
1688 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
1689 * distinction that Linux does. Need to reevaluate if performing
1690 * offloading for the NEEDS_CSUM case is really appropriate.
1692 if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
1693 VIRTIO_NET_HDR_F_DATA_VALID)) {
1694 if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1695 rxq->vtnrx_stats.vrxs_csum++;
1697 rxq->vtnrx_stats.vrxs_csum_failed++;
1700 rxq->vtnrx_stats.vrxs_ipackets++;
1701 rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
1703 /* VTNET_RXQ_UNLOCK(rxq); */
1704 (*ifp->if_input)(ifp, m);
1705 /* VTNET_RXQ_LOCK(rxq); */
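/*
 * Main receive loop: dequeue up to the Rx process limit of completed
 * buffers, replace each with a fresh buffer, strip the VirtIO header, and
 * pass the frame up via vtnet_rxq_input(). Returns EAGAIN when the limit
 * was reached with work still pending.
 */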
1709 vtnet_rxq_eof(struct vtnet_rxq *rxq)
1711 struct virtio_net_hdr lhdr, *hdr;
1712 struct vtnet_softc *sc;
1714 struct virtqueue *vq;
1716 struct virtio_net_hdr_mrg_rxbuf *mhdr;
1717 int len, deq, nbufs, adjsz, count;
1721 ifp = sc->vtnet_ifp;
1724 count = sc->vtnet_rx_process_limit;
1726 VTNET_RXQ_LOCK_ASSERT(rxq);
1728 while (count-- > 0) {
1729 m = virtqueue_dequeue(vq, &len);
1734 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1735 rxq->vtnrx_stats.vrxs_ierrors++;
1736 vtnet_rxq_discard_buf(rxq, m);
1740 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1742 adjsz = sizeof(struct vtnet_rx_header);
1744 * Account for our pad inserted between the header
1745 * and the actual start of the frame.
1747 len += VTNET_RX_HEADER_PAD;
1749 mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1750 nbufs = mhdr->num_buffers;
1751 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1754 if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
1755 rxq->vtnrx_stats.vrxs_iqdrops++;
1756 vtnet_rxq_discard_buf(rxq, m);
1758 vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1762 m->m_pkthdr.len = len;
1763 m->m_pkthdr.rcvif = ifp;
1764 m->m_pkthdr.csum_flags = 0;
1767 /* Dequeue the rest of chain. */
1768 if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
1773 * Save a copy of the header before we strip it. For both mergeable
1774 * and non-mergeable, the header is at the beginning of the
1775 * mbuf data. We no longer need num_buffers, so always use a
1778 * BMV: Is this memcpy() expensive? We know the mbuf data is
1779 * still valid even after the m_adj().
1781 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1784 vtnet_rxq_input(rxq, m, hdr);
1788 virtqueue_notify(vq);
1790 return (count > 0 ? 0 : EAGAIN);
1794 vtnet_rx_vq_intr(void *xrxq)
1796 struct vtnet_softc *sc;
1797 struct vtnet_rxq *rxq;
1803 ifp = sc->vtnet_ifp;
1806 if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
1808 * Ignore this interrupt. Either this is a spurious interrupt
1809 * or multiqueue without per-VQ MSIX so every queue needs to
1810 * be polled (a brain dead configuration we could try harder
1813 vtnet_rxq_disable_intr(rxq);
1818 VTNET_RXQ_LOCK(rxq);
1820 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1821 VTNET_RXQ_UNLOCK(rxq);
1825 more = vtnet_rxq_eof(rxq);
1826 if (more || vtnet_rxq_enable_intr(rxq) != 0) {
1828 vtnet_rxq_disable_intr(rxq);
1830 * This is an occasional condition or race (when !more),
1831 * so retry a few times before scheduling the taskqueue.
1833 rxq->vtnrx_stats.vrxs_rescheduled++;
1834 VTNET_RXQ_UNLOCK(rxq);
1835 if (tries++ < VTNET_INTR_DISABLE_RETRIES)
1837 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1839 VTNET_RXQ_UNLOCK(rxq);
1843 vtnet_rxq_tq_intr(void *xrxq, int pending)
1845 struct vtnet_softc *sc;
1846 struct vtnet_rxq *rxq;
1852 ifp = sc->vtnet_ifp;
1854 VTNET_RXQ_LOCK(rxq);
1856 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1857 VTNET_RXQ_UNLOCK(rxq);
1861 more = vtnet_rxq_eof(rxq);
1862 if (more || vtnet_rxq_enable_intr(rxq) != 0) {
1864 vtnet_rxq_disable_intr(rxq);
1865 rxq->vtnrx_stats.vrxs_rescheduled++;
1866 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1869 VTNET_RXQ_UNLOCK(rxq);
1873 vtnet_txq_free_mbufs(struct vtnet_txq *txq)
1875 struct virtqueue *vq;
1876 struct vtnet_tx_header *txhdr;
1882 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1883 m_freem(txhdr->vth_mbuf);
1884 uma_zfree(vtnet_tx_header_zone, txhdr);
1887 KASSERT(virtqueue_empty(vq),
1888 ("%s: mbufs remaining in tx queue %p", __func__, txq));
1892 * BMV: Much of this can go away once we finally have offsets in
1893 * the mbuf packet header. Bug andre@.
1896 vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
1897 int *etype, int *proto, int *start)
1899 struct vtnet_softc *sc;
1900 struct ether_vlan_header *evh;
1905 evh = mtod(m, struct ether_vlan_header *);
1906 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1907 /* BMV: We should handle nested VLAN tags too. */
1908 *etype = ntohs(evh->evl_proto);
1909 offset = sizeof(struct ether_vlan_header);
1911 *etype = ntohs(evh->evl_encap_proto);
1912 offset = sizeof(struct ether_header);
1917 case ETHERTYPE_IP: {
1918 struct ip *ip, iphdr;
1919 if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
1920 m_copydata(m, offset, sizeof(struct ip),
1924 ip = (struct ip *)(m->m_data + offset);
1926 *start = offset + (ip->ip_hl << 2);
1931 case ETHERTYPE_IPV6:
1933 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
1934 /* Assert the network stack sent us a valid packet. */
1935 KASSERT(*start > offset,
1936 ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
1937 *start, offset, *proto));
1941 sc->vtnet_stats.tx_csum_bad_ethtype++;
1949 vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
1950 int offset, struct virtio_net_hdr *hdr)
1952 static struct timeval lastecn;
1954 struct vtnet_softc *sc;
1955 struct tcphdr *tcp, tcphdr;
1959 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
1960 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
1963 tcp = (struct tcphdr *)(m->m_data + offset);
1965 hdr->hdr_len = offset + (tcp->th_off << 2);
1966 hdr->gso_size = m->m_pkthdr.tso_segsz;
1967 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
1968 VIRTIO_NET_HDR_GSO_TCPV6;
1970 if (tcp->th_flags & TH_CWR) {
1972 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
1973 * ECN support is not on a per-interface basis, but globally via
1974 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
1976 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
1977 if (ppsratecheck(&lastecn, &curecn, 1))
1978 if_printf(sc->vtnet_ifp,
1979 "TSO with ECN not negotiated with host\n");
1982 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1985 txq->vtntx_stats.vtxs_tso++;
1990 static struct mbuf *
1991 vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
1992 struct virtio_net_hdr *hdr)
1994 struct vtnet_softc *sc;
1995 int flags, etype, csum_start, proto, error;
1998 flags = m->m_pkthdr.csum_flags;
2000 error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
2004 if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
2005 (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
2007 * We could compare the IP protocol vs the CSUM_ flag too,
2008 * but that really should not be necessary.
2010 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2011 hdr->csum_start = csum_start;
2012 hdr->csum_offset = m->m_pkthdr.csum_data;
2013 txq->vtntx_stats.vtxs_csum++;
2016 if (flags & CSUM_TSO) {
2017 if (__predict_false(proto != IPPROTO_TCP)) {
2018 /* Likely failed to correctly parse the mbuf. */
2019 sc->vtnet_stats.tx_tso_not_tcp++;
2023 KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
2024 ("%s: mbuf %p TSO without checksum offload", __func__, m));
2026 error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
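/*
 * Append the VirtIO header and the mbuf chain to a scatter/gather list and
 * enqueue it on the Tx virtqueue. If the chain has too many segments, it is
 * first collapsed to fit within VTNET_MAX_TX_SEGS.
 */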
2039 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
2040 struct vtnet_tx_header *txhdr)
2043 struct sglist_seg segs[VTNET_MAX_TX_SEGS];
2044 struct vtnet_softc *sc;
2045 struct virtqueue *vq;
2047 int collapsed, error;
2054 sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
2055 error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2056 KASSERT(error == 0 && sg.sg_nseg == 1,
2057 ("%s: error %d adding header to sglist", __func__, error));
2060 error = sglist_append_mbuf(&sg, m);
2065 m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1);
2071 txq->vtntx_stats.vtxs_collapsed++;
2075 txhdr->vth_mbuf = m;
2076 error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
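/*
 * Encapsulate one outbound mbuf: allocate a transmit header from the UMA
 * zone, insert the 802.1Q tag in software if required, apply any checksum
 * or TSO offload, and hand the result to vtnet_txq_enqueue_buf().
 */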
2088 vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head)
2090 struct vtnet_softc *sc;
2091 struct vtnet_tx_header *txhdr;
2092 struct virtio_net_hdr *hdr;
2100 txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
2101 if (txhdr == NULL) {
2108 * Always use the non-mergeable header, regardless of whether the feature
2109 * was negotiated. For transmit, num_buffers is always zero. The
2110 * vtnet_hdr_size is used to enqueue the correct header size.
2112 hdr = &txhdr->vth_uhdr.hdr;
2114 if (m->m_flags & M_VLANTAG) {
2115 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2116 if ((*m_head = m) == NULL) {
2120 m->m_flags &= ~M_VLANTAG;
2123 if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
2124 m = vtnet_txq_offload(txq, m, hdr);
2125 if ((*m_head = m) == NULL) {
2131 error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2136 uma_zfree(vtnet_tx_header_zone, txhdr);
2141 #ifdef VTNET_LEGACY_TX
2144 vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
2146 struct vtnet_softc *sc;
2147 struct virtqueue *vq;
2155 VTNET_TXQ_LOCK_ASSERT(txq);
2157 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2158 sc->vtnet_link_active == 0)
2163 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2164 if (virtqueue_full(vq))
2167 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2171 if (vtnet_txq_encap(txq, &m0) != 0) {
2173 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2178 ETHER_BPF_MTAP(ifp, m0);
2182 virtqueue_notify(vq);
2183 txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
2188 vtnet_start(struct ifnet *ifp)
2190 struct vtnet_softc *sc;
2191 struct vtnet_txq *txq;
2194 txq = &sc->vtnet_txqs[0];
2196 VTNET_TXQ_LOCK(txq);
2197 vtnet_start_locked(txq, ifp);
2198 VTNET_TXQ_UNLOCK(txq);
2201 #else /* !VTNET_LEGACY_TX */
2204 vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
2206 struct vtnet_softc *sc;
2207 struct virtqueue *vq;
2208 struct buf_ring *br;
2215 ifp = sc->vtnet_ifp;
2219 VTNET_TXQ_LOCK_ASSERT(txq);
2221 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2222 sc->vtnet_link_active == 0) {
2224 error = drbr_enqueue(ifp, br, m);
2229 error = drbr_enqueue(ifp, br, m);
2236 while ((m = drbr_peek(ifp, br)) != NULL) {
2237 error = vtnet_txq_encap(txq, &m);
2240 drbr_putback(ifp, br, m);
2242 drbr_advance(ifp, br);
2245 drbr_advance(ifp, br);
2248 ETHER_BPF_MTAP(ifp, m);
2252 virtqueue_notify(vq);
2253 txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
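/*
 * Multiqueue transmit entry point: select a transmit queue from the mbuf's
 * flowid when available (otherwise from the current CPU), and fall back to
 * the queue's buf_ring plus the deferred task when the queue lock is
 * contended.
 */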
2260 vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
2262 struct vtnet_softc *sc;
2263 struct vtnet_txq *txq;
2264 int i, npairs, error;
2267 npairs = sc->vtnet_act_vq_pairs;
2269 if (m->m_flags & M_FLOWID)
2270 i = m->m_pkthdr.flowid % npairs;
2272 i = curcpu % npairs;
2274 txq = &sc->vtnet_txqs[i];
2276 if (VTNET_TXQ_TRYLOCK(txq) != 0) {
2277 error = vtnet_txq_mq_start_locked(txq, m);
2278 VTNET_TXQ_UNLOCK(txq);
2280 error = drbr_enqueue(ifp, txq->vtntx_br, m);
2281 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
2288 vtnet_txq_tq_deferred(void *xtxq, int pending)
2290 struct vtnet_softc *sc;
2291 struct vtnet_txq *txq;
2296 VTNET_TXQ_LOCK(txq);
2297 if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
2298 vtnet_txq_mq_start_locked(txq, NULL);
2299 VTNET_TXQ_UNLOCK(txq);
2302 #endif /* VTNET_LEGACY_TX */
2305 vtnet_txq_tq_intr(void *xtxq, int pending)
2307 struct vtnet_softc *sc;
2308 struct vtnet_txq *txq;
2313 ifp = sc->vtnet_ifp;
2315 VTNET_TXQ_LOCK(txq);
2317 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2318 VTNET_TXQ_UNLOCK(txq);
2324 #ifdef VTNET_LEGACY_TX
2325 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2326 vtnet_start_locked(txq, ifp);
2328 if (!drbr_empty(ifp, txq->vtntx_br))
2329 vtnet_txq_mq_start_locked(txq, NULL);
2332 if (vtnet_txq_enable_intr(txq) != 0) {
2333 vtnet_txq_disable_intr(txq);
2334 txq->vtntx_stats.vtxs_rescheduled++;
2335 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2338 VTNET_TXQ_UNLOCK(txq);
2342 vtnet_txq_eof(struct vtnet_txq *txq)
2344 struct virtqueue *vq;
2345 struct vtnet_tx_header *txhdr;
2349 VTNET_TXQ_LOCK_ASSERT(txq);
2351 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2352 m = txhdr->vth_mbuf;
2354 txq->vtntx_stats.vtxs_opackets++;
2355 txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
2356 if (m->m_flags & M_MCAST)
2357 txq->vtntx_stats.vtxs_omcasts++;
2360 uma_zfree(vtnet_tx_header_zone, txhdr);
2363 if (virtqueue_empty(vq))
2364 txq->vtntx_watchdog = 0;
2368 vtnet_tx_vq_intr(void *xtxq)
2370 struct vtnet_softc *sc;
2371 struct vtnet_txq *txq;
2377 ifp = sc->vtnet_ifp;
2380 if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
2382 * Ignore this interrupt. Either this is a spurious interrupt
2383 * or multiqueue without per-VQ MSIX so every queue needs to
2384 * be polled (a brain dead configuration we could try harder
2387 vtnet_txq_disable_intr(txq);
2392 VTNET_TXQ_LOCK(txq);
2394 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2395 VTNET_TXQ_UNLOCK(txq);
2401 #ifdef VTNET_LEGACY_TX
2402 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2403 vtnet_start_locked(txq, ifp);
2405 if (!drbr_empty(ifp, txq->vtntx_br))
2406 vtnet_txq_mq_start_locked(txq, NULL);
2409 if (vtnet_txq_enable_intr(txq) != 0) {
2410 vtnet_txq_disable_intr(txq);
2412 * This is an occasional race, so retry a few times
2413 * before scheduling the taskqueue.
2415 VTNET_TXQ_UNLOCK(txq);
2416 if (tries++ < VTNET_INTR_DISABLE_RETRIES)
2418 txq->vtntx_stats.vtxs_rescheduled++;
2419 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2421 VTNET_TXQ_UNLOCK(txq);
2425 vtnet_tx_start_all(struct vtnet_softc *sc)
2428 struct vtnet_txq *txq;
2431 ifp = sc->vtnet_ifp;
2432 VTNET_CORE_LOCK_ASSERT(sc);
2434 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2435 txq = &sc->vtnet_txqs[i];
2437 VTNET_TXQ_LOCK(txq);
2438 #ifdef VTNET_LEGACY_TX
2439 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2440 vtnet_start_locked(txq, ifp);
2442 if (!drbr_empty(ifp, txq->vtntx_br))
2443 vtnet_txq_mq_start_locked(txq, NULL);
2445 VTNET_TXQ_UNLOCK(txq);
2449 #ifndef VTNET_LEGACY_TX
2451 vtnet_qflush(struct ifnet *ifp)
2453 struct vtnet_softc *sc;
2454 struct vtnet_txq *txq;
2460 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2461 txq = &sc->vtnet_txqs[i];
2463 VTNET_TXQ_LOCK(txq);
2464 while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
2466 VTNET_TXQ_UNLOCK(txq);
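/*
 * Per-queue transmit watchdog, called from vtnet_tick(). Returns nonzero
 * once the queue's watchdog counter, armed on each transmit and cleared
 * when the virtqueue drains, counts down to zero.
 */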
2474 vtnet_watchdog(struct vtnet_txq *txq)
2476 struct vtnet_softc *sc;
2480 VTNET_TXQ_LOCK(txq);
2481 if (sc->vtnet_flags & VTNET_FLAG_EVENT_IDX)
2483 if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
2484 VTNET_TXQ_UNLOCK(txq);
2487 VTNET_TXQ_UNLOCK(txq);
2489 if_printf(sc->vtnet_ifp, "watchdog timeout on queue %d\n",
2495 vtnet_rxq_accum_stats(struct vtnet_rxq *rxq, struct vtnet_rxq_stats *accum)
2497 struct vtnet_rxq_stats *st;
2499 st = &rxq->vtnrx_stats;
2501 accum->vrxs_ipackets += st->vrxs_ipackets;
2502 accum->vrxs_ibytes += st->vrxs_ibytes;
2503 accum->vrxs_iqdrops += st->vrxs_iqdrops;
2504 accum->vrxs_csum += st->vrxs_csum;
2505 accum->vrxs_csum_failed += st->vrxs_csum_failed;
2506 accum->vrxs_rescheduled += st->vrxs_rescheduled;
2510 vtnet_txq_accum_stats(struct vtnet_txq *txq, struct vtnet_txq_stats *accum)
2512 struct vtnet_txq_stats *st;
2514 st = &txq->vtntx_stats;
2516 accum->vtxs_opackets += st->vtxs_opackets;
2517 accum->vtxs_obytes += st->vtxs_obytes;
2518 accum->vtxs_csum += st->vtxs_csum;
2519 accum->vtxs_tso += st->vtxs_tso;
2520 accum->vtxs_collapsed += st->vtxs_collapsed;
2521 accum->vtxs_rescheduled += st->vtxs_rescheduled;
2525 vtnet_accumulate_stats(struct vtnet_softc *sc)
2528 struct vtnet_statistics *st;
2529 struct vtnet_rxq_stats rxaccum;
2530 struct vtnet_txq_stats txaccum;
2533 ifp = sc->vtnet_ifp;
2534 st = &sc->vtnet_stats;
2535 bzero(&rxaccum, sizeof(struct vtnet_rxq_stats));
2536 bzero(&txaccum, sizeof(struct vtnet_txq_stats));
2538 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2539 vtnet_rxq_accum_stats(&sc->vtnet_rxqs[i], &rxaccum);
2540 vtnet_txq_accum_stats(&sc->vtnet_txqs[i], &txaccum);
2543 st->rx_csum_offloaded = rxaccum.vrxs_csum;
2544 st->rx_csum_failed = rxaccum.vrxs_csum_failed;
2545 st->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
2546 st->tx_csum_offloaded = txaccum.vtxs_csum;
2547 st->tx_tso_offloaded = txaccum.vtxs_tso;
2548 st->tx_task_rescheduled = txaccum.vtxs_rescheduled;
2551 * With the exception of if_ierrors, these ifnet statistics are
2552 * only updated in the driver, so just set them to our accumulated
2553 * values. if_ierrors is updated in ether_input() for malformed
2554 * frames that we should have already discarded.
2556 ifp->if_ipackets = rxaccum.vrxs_ipackets;
2557 ifp->if_iqdrops = rxaccum.vrxs_iqdrops;
2558 ifp->if_ierrors = rxaccum.vrxs_ierrors;
2559 ifp->if_opackets = txaccum.vtxs_opackets;
2560 #ifndef VTNET_LEGACY_TX
2561 ifp->if_obytes = txaccum.vtxs_obytes;
2562 ifp->if_omcasts = txaccum.vtxs_omcasts;
2567 vtnet_tick(void *xsc)
2569 struct vtnet_softc *sc;
2574 ifp = sc->vtnet_ifp;
2577 VTNET_CORE_LOCK_ASSERT(sc);
2578 vtnet_accumulate_stats(sc);
2580 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
2581 timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
2583 if (timedout != 0) {
2584 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2585 vtnet_init_locked(sc);
2587 callout_schedule(&sc->vtnet_tick_ch, hz);
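/*
 * Note, for illustration: callout_schedule() re-arms the tick callout for
 * hz ticks, i.e. roughly once per second, so the per-queue watchdog
 * counters above are effectively timeouts measured in seconds.  The callout
 * is first armed from vtnet_init_locked() with the same period.
 */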
2591 vtnet_start_taskqueues(struct vtnet_softc *sc)
2594 struct vtnet_rxq *rxq;
2595 struct vtnet_txq *txq;
2598 dev = sc->vtnet_dev;
2601 * Errors here are very difficult to recover from - we cannot
2602 * easily fail because, if this is during boot, we will hang
2603 * when freeing any successfully started taskqueues because
2604 * the scheduler isn't up yet.
2606 * Most drivers just ignore the return value - it only fails
2607 * with ENOMEM so an error is not likely.
2609 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2610 rxq = &sc->vtnet_rxqs[i];
2611 error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2612 "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
2614 device_printf(dev, "failed to start rx taskq %d\n",
2615 rxq->vtnrx_id);
2618 txq = &sc->vtnet_txqs[i];
2619 error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
2620 "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
2622 device_printf(dev, "failed to start tx taskq %d\n",
2623 txq->vtntx_id);
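/*
 * Sketch of the matching setup, shown only for context and based on the
 * usual FreeBSD taskqueue pattern (the vtnrx_name field is an assumption):
 * the taskqueues started above are created during queue initialization with
 * taskqueue_thread_enqueue, which is what allows taskqueue_start_threads()
 * to spawn the backing kthread here.
 */
#if 0
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
	if (rxq->vtnrx_tq == NULL)
		return (ENOMEM);
#endif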
2629 vtnet_free_taskqueues(struct vtnet_softc *sc)
2631 struct vtnet_rxq *rxq;
2632 struct vtnet_txq *txq;
2635 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2636 rxq = &sc->vtnet_rxqs[i];
2637 if (rxq->vtnrx_tq != NULL) {
2638 taskqueue_free(rxq->vtnrx_tq);
2639 rxq->vtnrx_tq = NULL;
2642 txq = &sc->vtnet_txqs[i];
2643 if (txq->vtntx_tq != NULL) {
2644 taskqueue_free(txq->vtntx_tq);
2645 txq->vtntx_tq = NULL;
2651 vtnet_drain_taskqueues(struct vtnet_softc *sc)
2653 struct vtnet_rxq *rxq;
2654 struct vtnet_txq *txq;
2657 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2658 rxq = &sc->vtnet_rxqs[i];
2659 if (rxq->vtnrx_tq != NULL)
2660 taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2662 txq = &sc->vtnet_txqs[i];
2663 if (txq->vtntx_tq != NULL) {
2664 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
2665 #ifndef VTNET_LEGACY_TX
2666 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
2673 vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
2675 struct vtnet_rxq *rxq;
2676 struct vtnet_txq *txq;
2679 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2680 rxq = &sc->vtnet_rxqs[i];
2681 vtnet_rxq_free_mbufs(rxq);
2683 txq = &sc->vtnet_txqs[i];
2684 vtnet_txq_free_mbufs(txq);
2689 vtnet_stop_rendezvous(struct vtnet_softc *sc)
2691 struct vtnet_rxq *rxq;
2692 struct vtnet_txq *txq;
2696 * Lock and unlock the per-queue mutex so we know the stop
2697 * state is visible. Doing only the active queues should be
2698 * sufficient, but it does not cost much extra to do all the
2699 * queues. Note we hold the core mutex here too.
2701 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2702 rxq = &sc->vtnet_rxqs[i];
2703 VTNET_RXQ_LOCK(rxq);
2704 VTNET_RXQ_UNLOCK(rxq);
2706 txq = &sc->vtnet_txqs[i];
2707 VTNET_TXQ_LOCK(txq);
2708 VTNET_TXQ_UNLOCK(txq);
2713 vtnet_stop(struct vtnet_softc *sc)
2718 dev = sc->vtnet_dev;
2719 ifp = sc->vtnet_ifp;
2721 VTNET_CORE_LOCK_ASSERT(sc);
2723 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2724 sc->vtnet_link_active = 0;
2725 callout_stop(&sc->vtnet_tick_ch);
2727 /* Only advisory. */
2728 vtnet_disable_interrupts(sc);
2731 * Stop the host adapter. This resets it to the pre-initialized
2732 * state. It will not generate any interrupts until after it is
2733 * reinitialized.
2736 vtnet_stop_rendezvous(sc);
2738 /* Free any mbufs left in the virtqueues. */
2739 vtnet_drain_rxtx_queues(sc);
2743 vtnet_virtio_reinit(struct vtnet_softc *sc)
2750 dev = sc->vtnet_dev;
2751 ifp = sc->vtnet_ifp;
2752 features = sc->vtnet_features;
2756 mask |= IFCAP_RXCSUM;
2759 mask |= IFCAP_RXCSUM_IPV6;
2763 * Re-negotiate with the host, removing any disabled receive
2764 * features. Transmit features are disabled only on our side
2765 * via if_capenable and if_hwassist.
2768 if (ifp->if_capabilities & mask) {
2770 * We require both IPv4 and IPv6 offloading to be enabled
2771 * in order to negotiate it: VirtIO does not distinguish
2774 if ((ifp->if_capenable & mask) != mask)
2775 features &= ~VIRTIO_NET_F_GUEST_CSUM;
2778 if (ifp->if_capabilities & IFCAP_LRO) {
2779 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2780 features &= ~VTNET_LRO_FEATURES;
2783 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2784 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2785 features &= ~VIRTIO_NET_F_CTRL_VLAN;
2788 error = virtio_reinit(dev, features);
2790 device_printf(dev, "virtio reinit error %d\n", error);
2796 vtnet_init_rx_filters(struct vtnet_softc *sc)
2800 ifp = sc->vtnet_ifp;
2802 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2803 /* Restore promiscuous and all-multicast modes. */
2804 vtnet_rx_filter(sc);
2805 /* Restore filtered MAC addresses. */
2806 vtnet_rx_filter_mac(sc);
2809 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2810 vtnet_rx_filter_vlan(sc);
2814 vtnet_init_rx_queues(struct vtnet_softc *sc)
2817 struct vtnet_rxq *rxq;
2818 int i, clsize, error;
2820 dev = sc->vtnet_dev;
2823 * Use the new cluster size if one has been set (via an MTU
2824 * change). Otherwise, use the standard 2K clusters.
2826 * BMV: It might make sense to use page sized clusters as
2827 * the default (depending on the features negotiated).
2829 if (sc->vtnet_rx_new_clsize != 0) {
2830 clsize = sc->vtnet_rx_new_clsize;
2831 sc->vtnet_rx_new_clsize = 0;
2835 sc->vtnet_rx_clsize = clsize;
2836 sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
2838 /* The first segment is reserved for the header. */
2839 KASSERT(sc->vtnet_rx_nmbufs < VTNET_MAX_RX_SEGS,
2840 ("%s: too many rx mbufs %d", __func__, sc->vtnet_rx_nmbufs));
2842 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2843 rxq = &sc->vtnet_rxqs[i];
2845 /* Hold the lock to satisfy asserts. */
2846 VTNET_RXQ_LOCK(rxq);
2847 error = vtnet_rxq_populate(rxq);
2848 VTNET_RXQ_UNLOCK(rxq);
2852 "cannot allocate mbufs for Rx queue %d\n", i);
2861 vtnet_init_tx_queues(struct vtnet_softc *sc)
2863 struct vtnet_txq *txq;
2866 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2867 txq = &sc->vtnet_txqs[i];
2868 txq->vtntx_watchdog = 0;
2875 vtnet_init_rxtx_queues(struct vtnet_softc *sc)
2879 error = vtnet_init_rx_queues(sc);
2883 error = vtnet_init_tx_queues(sc);
2891 vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
2896 dev = sc->vtnet_dev;
2898 if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
2899 MPASS(sc->vtnet_max_vq_pairs == 1);
2900 sc->vtnet_act_vq_pairs = 1;
2904 /* BMV: Just use the maximum configured for now. */
2905 npairs = sc->vtnet_max_vq_pairs;
2907 if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
2909 "cannot set active queue pairs to %d\n", npairs);
2913 sc->vtnet_act_vq_pairs = npairs;
2917 vtnet_reinit(struct vtnet_softc *sc)
2923 dev = sc->vtnet_dev;
2924 ifp = sc->vtnet_ifp;
2926 /* Use the current MAC address. */
2927 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2928 vtnet_set_hwaddr(sc);
2930 vtnet_set_active_vq_pairs(sc);
2932 ifp->if_hwassist = 0;
2933 if (ifp->if_capenable & IFCAP_TXCSUM)
2934 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2935 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
2936 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
2937 if (ifp->if_capenable & IFCAP_TSO4)
2938 ifp->if_hwassist |= CSUM_TSO;
2939 if (ifp->if_capenable & IFCAP_TSO6)
2940 ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */
2942 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
2943 vtnet_init_rx_filters(sc);
2945 error = vtnet_init_rxtx_queues(sc);
2949 vtnet_enable_interrupts(sc);
2950 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2956 vtnet_init_locked(struct vtnet_softc *sc)
2961 dev = sc->vtnet_dev;
2962 ifp = sc->vtnet_ifp;
2964 VTNET_CORE_LOCK_ASSERT(sc);
2966 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2971 /* Reinitialize with the host. */
2972 if (vtnet_virtio_reinit(sc) != 0)
2975 if (vtnet_reinit(sc) != 0)
2978 virtio_reinit_complete(dev);
2980 vtnet_update_link_status(sc);
2981 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2990 vtnet_init(void *xsc)
2992 struct vtnet_softc *sc;
2996 VTNET_CORE_LOCK(sc);
2997 vtnet_init_locked(sc);
2998 VTNET_CORE_UNLOCK(sc);
3002 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3004 struct virtqueue *vq;
3006 vq = sc->vtnet_ctrl_vq;
3009 * The control virtqueue is only polled and therefore it should
3010 * already be empty.
3012 KASSERT(virtqueue_empty(vq),
3013 ("%s: ctrl vq %p not empty", __func__, vq));
3017 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
3018 struct sglist *sg, int readable, int writable)
3020 struct virtqueue *vq;
3022 vq = sc->vtnet_ctrl_vq;
3024 VTNET_CORE_LOCK_ASSERT(sc);
3025 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3026 ("%s: CTRL_VQ feature not negotiated", __func__));
3028 if (!virtqueue_empty(vq))
3030 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3034 * Poll for the response, but the command is likely already
3035 * done when we return from the notify.
3037 virtqueue_notify(vq);
3038 virtqueue_poll(vq, NULL);
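/*
 * Worked example of the command layout used by the callers below, for
 * illustration: the header and any command-specific payload segments are
 * appended first and are readable by the device, while the final segment is
 * a single uint8_t ack that the device writes with VIRTIO_NET_OK or
 * VIRTIO_NET_ERR.  That is why every caller passes "sg.sg_nseg - 1"
 * readable segments and exactly one writable segment to
 * vtnet_exec_ctrl_cmd().
 */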
3042 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3044 struct virtio_net_ctrl_hdr hdr;
3045 struct sglist_seg segs[3];
3050 hdr.class = VIRTIO_NET_CTRL_MAC;
3051 hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3052 ack = VIRTIO_NET_ERR;
3054 sglist_init(&sg, 3, segs);
3056 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3057 error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3058 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3059 KASSERT(error == 0 && sg.sg_nseg == 3,
3060 ("%s: error %d adding set MAC msg to sglist", __func__, error));
3062 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3064 return (ack == VIRTIO_NET_OK ? 0 : EIO);
3068 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
3070 struct sglist_seg segs[3];
3073 struct virtio_net_ctrl_hdr hdr;
3075 struct virtio_net_ctrl_mq mq;
3081 s.hdr.class = VIRTIO_NET_CTRL_MQ;
3082 s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3083 s.mq.virtqueue_pairs = npairs;
3084 s.ack = VIRTIO_NET_ERR;
3086 sglist_init(&sg, 3, segs);
3088 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3089 error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3090 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3091 KASSERT(error == 0 && sg.sg_nseg == 3,
3092 ("%s: error %d adding MQ message to sglist", __func__, error));
3094 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3096 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3100 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3102 struct sglist_seg segs[3];
3105 struct virtio_net_ctrl_hdr hdr;
3113 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3114 ("%s: CTRL_RX feature not negotiated", __func__));
3116 s.hdr.class = VIRTIO_NET_CTRL_RX;
3119 s.ack = VIRTIO_NET_ERR;
3121 sglist_init(&sg, 3, segs);
3123 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3124 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3125 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3126 KASSERT(error == 0 && sg.sg_nseg == 3,
3127 ("%s: error %d adding Rx message to sglist", __func__, error));
3129 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3131 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3135 vtnet_set_promisc(struct vtnet_softc *sc, int on)
3138 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3142 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3145 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3149 * The device defaults to promiscuous mode for backwards compatibility.
3150 * Turn it off at attach time if possible.
3153 vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3157 ifp = sc->vtnet_ifp;
3159 VTNET_CORE_LOCK(sc);
3160 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3161 ifp->if_flags |= IFF_PROMISC;
3162 } else if (vtnet_set_promisc(sc, 0) != 0) {
3163 ifp->if_flags |= IFF_PROMISC;
3164 device_printf(sc->vtnet_dev,
3165 "cannot disable default promiscuous mode\n");
3167 VTNET_CORE_UNLOCK(sc);
3171 vtnet_rx_filter(struct vtnet_softc *sc)
3176 dev = sc->vtnet_dev;
3177 ifp = sc->vtnet_ifp;
3179 VTNET_CORE_LOCK_ASSERT(sc);
3181 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3182 device_printf(dev, "cannot %s promiscuous mode\n",
3183 ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3185 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3186 device_printf(dev, "cannot %s all-multicast mode\n",
3187 ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3191 vtnet_rx_filter_mac(struct vtnet_softc *sc)
3193 struct virtio_net_ctrl_hdr hdr;
3194 struct vtnet_mac_filter *filter;
3195 struct sglist_seg segs[4];
3199 struct ifmultiaddr *ifma;
3200 int ucnt, mcnt, promisc, allmulti, error;
3203 ifp = sc->vtnet_ifp;
3204 filter = sc->vtnet_mac_filter;
3210 VTNET_CORE_LOCK_ASSERT(sc);
3211 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3212 ("%s: CTRL_RX feature not negotiated", __func__));
3214 /* Unicast MAC addresses: */
3216 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
3217 if (ifa->ifa_addr->sa_family != AF_LINK)
3219 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
3220 sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
3222 else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
3227 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
3228 &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
3231 if_addr_runlock(ifp);
3234 filter->vmf_unicast.nentries = 0;
3235 if_printf(ifp, "more than %d MAC addresses assigned, "
3236 "falling back to promiscuous mode\n",
3237 VTNET_MAX_MAC_ENTRIES);
3239 filter->vmf_unicast.nentries = ucnt;
3241 /* Multicast MAC addresses: */
3242 if_maddr_rlock(ifp);
3243 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3244 if (ifma->ifma_addr->sa_family != AF_LINK)
3246 else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
3251 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3252 &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
3255 if_maddr_runlock(ifp);
3257 if (allmulti != 0) {
3258 filter->vmf_multicast.nentries = 0;
3259 if_printf(ifp, "more than %d multicast MAC addresses "
3260 "assigned, falling back to all-multicast mode\n",
3261 VTNET_MAX_MAC_ENTRIES);
3263 filter->vmf_multicast.nentries = mcnt;
3265 if (promisc != 0 && allmulti != 0)
3268 hdr.class = VIRTIO_NET_CTRL_MAC;
3269 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3270 ack = VIRTIO_NET_ERR;
3272 sglist_init(&sg, 4, segs);
3274 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3275 error |= sglist_append(&sg, &filter->vmf_unicast,
3276 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3277 error |= sglist_append(&sg, &filter->vmf_multicast,
3278 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3279 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3280 KASSERT(error == 0 && sg.sg_nseg == 4,
3281 ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3283 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3285 if (ack != VIRTIO_NET_OK)
3286 if_printf(ifp, "error setting host MAC filter table\n");
3289 if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
3290 if_printf(ifp, "cannot enable promiscuous mode\n");
3291 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
3292 if_printf(ifp, "cannot enable all-multicast mode\n");
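/*
 * Sketch of the assumed table wire format for VIRTIO_NET_CTRL_MAC_TABLE_SET,
 * matching the "sizeof(uint32_t) + nentries * ETHER_ADDR_LEN" lengths
 * appended above: a 32-bit entry count followed by that many 6-byte MAC
 * addresses, once for the unicast table and once for the multicast table.
 */
#if 0
struct vtnet_mac_table_sketch {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;
#endif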
3296 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3298 struct sglist_seg segs[3];
3301 struct virtio_net_ctrl_hdr hdr;
3309 s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3310 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3312 s.ack = VIRTIO_NET_ERR;
3314 sglist_init(&sg, 3, segs);
3316 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3317 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3318 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3319 KASSERT(error == 0 && sg.sg_nseg == 3,
3320 ("%s: error %d adding VLAN message to sglist", __func__, error));
3322 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3324 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3328 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3334 VTNET_CORE_LOCK_ASSERT(sc);
3335 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3336 ("%s: VLAN_FILTER feature not negotiated", __func__));
3338 /* Enable the filter for each configured VLAN. */
3339 for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3340 w = sc->vtnet_vlan_filter[i];
3342 while ((bit = ffs(w) - 1) != -1) {
3344 tag = sizeof(w) * CHAR_BIT * i + bit;
3346 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
3347 device_printf(sc->vtnet_dev,
3348 "cannot enable VLAN %d filter\n", tag);
3355 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3360 ifp = sc->vtnet_ifp;
3361 idx = (tag >> 5) & 0x7F;
3364 if (tag == 0 || tag > 4095)
3367 VTNET_CORE_LOCK(sc);
3370 sc->vtnet_vlan_filter[idx] |= (1 << bit);
3372 sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3374 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3375 vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3376 device_printf(sc->vtnet_dev,
3377 "cannot %s VLAN %d %s the host filter table\n",
3378 add ? "add" : "remove", tag, add ? "to" : "from");
3381 VTNET_CORE_UNLOCK(sc);
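/*
 * Worked example of the bitmap indexing above, for illustration: the VLAN
 * filter is an array of 32-bit words, so tag 100 lands in word
 * (100 >> 5) & 0x7F = 3, and with the usual companion "bit = tag & 0x1F"
 * it occupies bit 100 & 0x1F = 4, i.e. bit 4 of vtnet_vlan_filter[3].
 */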
3385 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3388 if (ifp->if_softc != arg)
3391 vtnet_update_vlan_filter(arg, 1, tag);
3395 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3398 if (ifp->if_softc != arg)
3401 vtnet_update_vlan_filter(arg, 0, tag);
3405 vtnet_is_link_up(struct vtnet_softc *sc)
3411 dev = sc->vtnet_dev;
3412 ifp = sc->vtnet_ifp;
3414 if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3415 status = VIRTIO_NET_S_LINK_UP;
3417 status = virtio_read_dev_config_2(dev,
3418 offsetof(struct virtio_net_config, status));
3420 return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3424 vtnet_update_link_status(struct vtnet_softc *sc)
3429 ifp = sc->vtnet_ifp;
3431 VTNET_CORE_LOCK_ASSERT(sc);
3432 link = vtnet_is_link_up(sc);
3434 /* Notify if the link status has changed. */
3435 if (link != 0 && sc->vtnet_link_active == 0) {
3436 sc->vtnet_link_active = 1;
3437 if_link_state_change(ifp, LINK_STATE_UP);
3438 } else if (link == 0 && sc->vtnet_link_active != 0) {
3439 sc->vtnet_link_active = 0;
3440 if_link_state_change(ifp, LINK_STATE_DOWN);
3445 vtnet_ifmedia_upd(struct ifnet *ifp)
3447 struct vtnet_softc *sc;
3448 struct ifmedia *ifm;
3451 ifm = &sc->vtnet_media;
3453 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3460 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3462 struct vtnet_softc *sc;
3466 ifmr->ifm_status = IFM_AVALID;
3467 ifmr->ifm_active = IFM_ETHER;
3469 VTNET_CORE_LOCK(sc);
3470 if (vtnet_is_link_up(sc) != 0) {
3471 ifmr->ifm_status |= IFM_ACTIVE;
3472 ifmr->ifm_active |= VTNET_MEDIATYPE;
3474 ifmr->ifm_active |= IFM_NONE;
3475 VTNET_CORE_UNLOCK(sc);
3479 vtnet_set_hwaddr(struct vtnet_softc *sc)
3483 dev = sc->vtnet_dev;
3485 if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3486 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3487 device_printf(dev, "unable to set MAC address\n");
3488 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3489 virtio_write_device_config(dev,
3490 offsetof(struct virtio_net_config, mac),
3491 sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3496 vtnet_get_hwaddr(struct vtnet_softc *sc)
3500 dev = sc->vtnet_dev;
3502 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3504 * Generate a random locally administered unicast address.
3506 * It would be nice to generate the same MAC address across
3507 * reboots, but it seems all the hosts currently available
3508 * support the MAC feature, so this isn't too important.
3510 sc->vtnet_hwaddr[0] = 0xB2;
3511 arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3512 vtnet_set_hwaddr(sc);
3516 virtio_read_device_config(dev, offsetof(struct virtio_net_config, mac),
3517 sc->vtnet_hwaddr, ETHER_ADDR_LEN);
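/*
 * Note on the generated address above: 0xB2 (1011 0010b) keeps the
 * individual/group bit (0x01) clear and sets the locally administered bit
 * (0x02), so the random address is a valid locally administered unicast
 * MAC.  A sketch of the general recipe for an arbitrary random first byte,
 * shown only as an illustration:
 */
#if 0
	arc4rand(hwaddr, ETHER_ADDR_LEN, 0);
	hwaddr[0] &= ~0x01;	/* clear the multicast/group bit */
	hwaddr[0] |= 0x02;	/* set the locally administered bit */
#endif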
3521 vtnet_vlan_tag_remove(struct mbuf *m)
3523 struct ether_vlan_header *evh;
3525 evh = mtod(m, struct ether_vlan_header *);
3526 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
3527 m->m_flags |= M_VLANTAG;
3529 /* Strip the 802.1Q header. */
3530 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
3531 ETHER_HDR_LEN - ETHER_TYPE_LEN);
3532 m_adj(m, ETHER_VLAN_ENCAP_LEN);
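/*
 * Layout sketch of the strip above, for illustration: the twelve address
 * bytes (ETHER_HDR_LEN - ETHER_TYPE_LEN) are copied forward over the
 * four-byte 802.1Q tag, then m_adj() trims the now-unused bytes from the
 * front of the mbuf:
 *
 *   before: [dst 6][src 6][0x8100 2][tag 2][type 2][payload ...]
 *   after:  [dst 6][src 6][type 2][payload ...]
 */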
3536 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3537 struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3539 struct sysctl_oid *node;
3540 struct sysctl_oid_list *list;
3541 struct vtnet_rxq_stats *stats;
3544 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
3545 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3546 CTLFLAG_RD, NULL, "Receive Queue");
3547 list = SYSCTL_CHILDREN(node);
3549 stats = &rxq->vtnrx_stats;
3551 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3552 &stats->vrxs_ipackets, "Receive packets");
3553 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3554 &stats->vrxs_ibytes, "Receive bytes");
3555 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3556 &stats->vrxs_iqdrops, "Receive drops");
3557 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3558 &stats->vrxs_ierrors, "Receive errors");
3559 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3560 &stats->vrxs_csum, "Receive checksum offloaded");
3561 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3562 &stats->vrxs_csum_failed, "Receive checksum offload failed");
3563 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3564 &stats->vrxs_rescheduled,
3565 "Receive interrupt handler rescheduled");
3569 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
3570 struct sysctl_oid_list *child, struct vtnet_txq *txq)
3572 struct sysctl_oid *node;
3573 struct sysctl_oid_list *list;
3574 struct vtnet_txq_stats *stats;
3577 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
3578 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3579 CTLFLAG_RD, NULL, "Transmit Queue");
3580 list = SYSCTL_CHILDREN(node);
3582 stats = &txq->vtntx_stats;
3584 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3585 &stats->vtxs_opackets, "Transmit packets");
3586 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3587 &stats->vtxs_obytes, "Transmit bytes");
3588 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3589 &stats->vtxs_omcasts, "Transmit multicasts");
3590 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3591 &stats->vtxs_csum, "Transmit checksum offloaded");
3592 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3593 &stats->vtxs_tso, "Transmit segmentation offloaded");
3594 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "collapsed", CTLFLAG_RD,
3595 &stats->vtxs_collapsed, "Transmit mbufs collapsed");
3596 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3597 &stats->vtxs_rescheduled,
3598 "Transmit interrupt handler rescheduled");
3602 vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
3605 struct sysctl_ctx_list *ctx;
3606 struct sysctl_oid *tree;
3607 struct sysctl_oid_list *child;
3610 dev = sc->vtnet_dev;
3611 ctx = device_get_sysctl_ctx(dev);
3612 tree = device_get_sysctl_tree(dev);
3613 child = SYSCTL_CHILDREN(tree);
3615 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3616 vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3617 vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3622 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
3623 struct sysctl_oid_list *child, struct vtnet_softc *sc)
3625 struct vtnet_statistics *stats;
3627 stats = &sc->vtnet_stats;
3629 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
3630 CTLFLAG_RD, &stats->mbuf_alloc_failed,
3631 "Mbuf cluster allocation failures");
3633 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
3634 CTLFLAG_RD, &stats->rx_frame_too_large,
3635 "Received frame larger than the mbuf chain");
3636 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
3637 CTLFLAG_RD, &stats->rx_enq_replacement_failed,
3638 "Enqueuing the replacement receive mbuf failed");
3639 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
3640 CTLFLAG_RD, &stats->rx_mergeable_failed,
3641 "Mergeable buffers receive failures");
3642 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
3643 CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
3644 "Received checksum offloaded buffer with unsupported "
3646 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
3647 CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
3648 "Received checksum offloaded buffer with incorrect IP protocol");
3649 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
3650 CTLFLAG_RD, &stats->rx_csum_bad_offset,
3651 "Received checksum offloaded buffer with incorrect offset");
3652 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
3653 CTLFLAG_RD, &stats->rx_csum_bad_proto,
3654 "Received checksum offloaded buffer with incorrect protocol");
3655 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
3656 CTLFLAG_RD, &stats->rx_csum_failed,
3657 "Received buffer checksum offload failed");
3658 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
3659 CTLFLAG_RD, &stats->rx_csum_offloaded,
3660 "Received buffer checksum offload succeeded");
3661 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
3662 CTLFLAG_RD, &stats->rx_task_rescheduled,
3663 "Times the receive interrupt task rescheduled itself");
3665 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
3666 CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
3667 "Aborted transmit of checksum offloaded buffer with unknown "
3669 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
3670 CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
3671 "Aborted transmit of TSO buffer with unknown Ethernet type");
3672 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3673 CTLFLAG_RD, &stats->tx_tso_not_tcp,
3674 "Aborted transmit of TSO buffer with non TCP protocol");
3675 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
3676 CTLFLAG_RD, &stats->tx_csum_offloaded,
3677 "Offloaded checksum of transmitted buffer");
3678 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
3679 CTLFLAG_RD, &stats->tx_tso_offloaded,
3680 "Segmentation offload of transmitted buffer");
3681 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
3682 CTLFLAG_RD, &stats->tx_task_rescheduled,
3683 "Times the transmit interrupt task rescheduled itself");
3687 vtnet_setup_sysctl(struct vtnet_softc *sc)
3690 struct sysctl_ctx_list *ctx;
3691 struct sysctl_oid *tree;
3692 struct sysctl_oid_list *child;
3694 dev = sc->vtnet_dev;
3695 ctx = device_get_sysctl_ctx(dev);
3696 tree = device_get_sysctl_tree(dev);
3697 child = SYSCTL_CHILDREN(tree);
3699 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3700 CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3701 "Maximum number of supported virtqueue pairs");
3702 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3703 CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3704 "Number of active virtqueue pairs");
3706 vtnet_setup_stat_sysctl(ctx, child, sc);
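/*
 * Usage note; the exact node names are assumptions for illustration: the
 * nodes registered above appear under the device's sysctl tree, so the
 * statistics can be read with, e.g.,
 *
 *   sysctl dev.vtnet.0.rxq0.ipackets
 *   sysctl dev.vtnet.0.tx_task_rescheduled
 */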
3710 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3713 return (virtqueue_enable_intr(rxq->vtnrx_vq));
3717 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
3720 virtqueue_disable_intr(rxq->vtnrx_vq);
3724 vtnet_txq_enable_intr(struct vtnet_txq *txq)
3727 return (virtqueue_postpone_intr(txq->vtntx_vq, VQ_POSTPONE_LONG));
3731 vtnet_txq_disable_intr(struct vtnet_txq *txq)
3734 virtqueue_disable_intr(txq->vtntx_vq);
3738 vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3742 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3743 vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
3747 vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
3751 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3752 vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
3756 vtnet_enable_interrupts(struct vtnet_softc *sc)
3759 vtnet_enable_rx_interrupts(sc);
3760 vtnet_enable_tx_interrupts(sc);
3764 vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
3768 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3769 vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3773 vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
3777 for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3778 vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3782 vtnet_disable_interrupts(struct vtnet_softc *sc)
3785 vtnet_disable_rx_interrupts(sc);
3786 vtnet_disable_tx_interrupts(sc);
3790 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
3794 snprintf(path, sizeof(path),
3795 "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
3796 TUNABLE_INT_FETCH(path, &def);
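/*
 * Usage note; the knob name is an assumption for illustration: this fetches
 * a per-device override of a global tunable, so a loader.conf line such as
 *
 *   hw.vtnet.0.csum_disable="1"
 *
 * would apply only to vtnet0, while other units keep the global
 * hw.vtnet.csum_disable default.
 */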