/*-
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
 */
/* Driver for VMware vmxnet3 virtual ethernet devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/in_cksum.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "if_vmxreg.h"
#include "if_vmxvar.h"

#include "opt_inet.h"
#include "opt_inet6.h"
#ifdef VMXNET3_FAILPOINTS
#include <sys/fail.h>
static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
    "vmxnet3 fail points");
#define VMXNET3_FP	_debug_fail_point_vmxnet3
#endif
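
/*
 * Illustrative note (assumes the stock fail(9) sysctl interface): when
 * compiled with VMXNET3_FAILPOINTS, the points defined below can be driven
 * at runtime, e.g.:
 *
 *	sysctl debug.fail_point.vmxnet3.newbuf='2%return'
 */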

static int	vmxnet3_probe(device_t);
static int	vmxnet3_attach(device_t);
static int	vmxnet3_detach(device_t);
static int	vmxnet3_shutdown(device_t);

static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
static void	vmxnet3_free_resources(struct vmxnet3_softc *);
static int	vmxnet3_check_version(struct vmxnet3_softc *);
static void	vmxnet3_initial_config(struct vmxnet3_softc *);
static void	vmxnet3_check_multiqueue(struct vmxnet3_softc *);

static int	vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
		    struct vmxnet3_interrupt *);
static int	vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
static int	vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static int	vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_interrupts(struct vmxnet3_softc *);

static void	vmxnet3_free_interrupt(struct vmxnet3_softc *,
		    struct vmxnet3_interrupt *);
static void	vmxnet3_free_interrupts(struct vmxnet3_softc *);

#ifndef VMXNET3_LEGACY_TX
static int	vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_start_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_free_taskqueue(struct vmxnet3_softc *);
#endif

static int	vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int	vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int	vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void	vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void	vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void	vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);

static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_init_hwassist(struct vmxnet3_softc *);
static void	vmxnet3_reinit_interface(struct vmxnet3_softc *);
static void	vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
static void	vmxnet3_free_data(struct vmxnet3_softc *);
static int	vmxnet3_setup_interface(struct vmxnet3_softc *);

static void	vmxnet3_evintr(struct vmxnet3_softc *);
static void	vmxnet3_txq_eof(struct vmxnet3_txqueue *);
static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
static void	vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
		    struct vmxnet3_rxring *, int);
static void	vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
static void	vmxnet3_legacy_intr(void *);
static void	vmxnet3_txq_intr(void *);
static void	vmxnet3_rxq_intr(void *);
static void	vmxnet3_event_intr(void *);

static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void	vmxnet3_stop(struct vmxnet3_softc *);

static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int	vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int	vmxnet3_enable_device(struct vmxnet3_softc *);
static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int	vmxnet3_reinit(struct vmxnet3_softc *);
static void	vmxnet3_init_locked(struct vmxnet3_softc *);
static void	vmxnet3_init(void *);

static int	vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *,
		    int *, int *, int *);
static int	vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
		    bus_dmamap_t, bus_dma_segment_t [], int *);
static void	vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int	vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
#ifdef VMXNET3_LEGACY_TX
static void	vmxnet3_start_locked(struct ifnet *);
static void	vmxnet3_start(struct ifnet *);
#else
static int	vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
		    struct mbuf *);
static int	vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vmxnet3_txq_tq_deferred(void *, int);
#endif
static void	vmxnet3_txq_start(struct vmxnet3_txqueue *);
static void	vmxnet3_tx_start_all(struct vmxnet3_softc *);

static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
		    uint16_t);
static void	vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
static void	vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vmxnet3_get_counter(struct ifnet *, ift_counter);

#ifndef VMXNET3_LEGACY_TX
static void	vmxnet3_qflush(struct ifnet *);
#endif

static int	vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void	vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
static void	vmxnet3_tick(void *);
static void	vmxnet3_link_status(struct vmxnet3_softc *);
static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
static int	vmxnet3_media_change(struct ifnet *);
static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);

static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);

static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
		    uint32_t);
static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
		    uint32_t);
static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);

static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);

static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
		    bus_size_t, struct vmxnet3_dma_alloc *);
static void	vmxnet3_dma_free(struct vmxnet3_softc *,
		    struct vmxnet3_dma_alloc *);
static int	vmxnet3_tunable_int(struct vmxnet3_softc *,
		    const char *, int);

typedef enum {
	VMXNET3_BARRIER_RD,
	VMXNET3_BARRIER_WR,
	VMXNET3_BARRIER_RDWR,
} vmxnet3_barrier_t;

static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);

static int vmxnet3_mq_disable = 0;
TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);

static device_method_t vmxnet3_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vmxnet3_probe),
	DEVMETHOD(device_attach,	vmxnet3_attach),
	DEVMETHOD(device_detach,	vmxnet3_detach),
	DEVMETHOD(device_shutdown,	vmxnet3_shutdown),

	DEVMETHOD_END
};

static driver_t vmxnet3_driver = {
	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
};

static devclass_t vmxnet3_devclass;
DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);

MODULE_DEPEND(vmx, pci, 1, 1, 1);
MODULE_DEPEND(vmx, ether, 1, 1, 1);

#define VMXNET3_VMWARE_VENDOR_ID	0x15AD
#define VMXNET3_VMWARE_DEVICE_ID	0x07B0
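
/*
 * All vmxnet3 adapters expose this single vendor/device pair, which is
 * what appears in pciconf(8) output for the device.
 */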

static int
vmxnet3_probe(device_t dev)
{

	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
vmxnet3_attach(device_t dev)
{
	struct vmxnet3_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vmx_dev = dev;

	pci_enable_busmaster(dev);

	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);

	vmxnet3_initial_config(sc);

	error = vmxnet3_alloc_resources(sc);
	if (error)
		goto fail;

	error = vmxnet3_check_version(sc);
	if (error)
		goto fail;

	error = vmxnet3_alloc_rxtx_queues(sc);
	if (error)
		goto fail;

#ifndef VMXNET3_LEGACY_TX
	error = vmxnet3_alloc_taskqueue(sc);
	if (error)
		goto fail;
#endif

	error = vmxnet3_alloc_interrupts(sc);
	if (error)
		goto fail;

	vmxnet3_check_multiqueue(sc);

	error = vmxnet3_alloc_data(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interface(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interrupts(sc);
	if (error) {
		ether_ifdetach(sc->vmx_ifp);
		device_printf(dev, "could not set up interrupt\n");
		goto fail;
	}

	vmxnet3_setup_sysctl(sc);
#ifndef VMXNET3_LEGACY_TX
	vmxnet3_start_taskqueue(sc);
#endif

fail:
	if (error)
		vmxnet3_detach(dev);

	return (error);
}

static int
vmxnet3_detach(device_t dev)
{
	struct vmxnet3_softc *sc;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		VMXNET3_CORE_LOCK(sc);
		vmxnet3_stop(sc);
		VMXNET3_CORE_UNLOCK(sc);

		callout_drain(&sc->vmx_tick);
#ifndef VMXNET3_LEGACY_TX
		vmxnet3_drain_taskqueue(sc);
#endif

		ether_ifdetach(sc->vmx_ifp);
	}

	if (sc->vmx_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
		sc->vmx_vlan_attach = NULL;
	}
	if (sc->vmx_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vmx_vlan_detach);
		sc->vmx_vlan_detach = NULL;
	}

#ifndef VMXNET3_LEGACY_TX
	vmxnet3_free_taskqueue(sc);
#endif
	vmxnet3_free_interrupts(sc);

	if (sc->vmx_ifp != NULL) {
		if_free(sc->vmx_ifp);
		sc->vmx_ifp = NULL;
	}

	ifmedia_removeall(&sc->vmx_media);

	vmxnet3_free_data(sc);
	vmxnet3_free_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	VMXNET3_CORE_LOCK_DESTROY(sc);

	return (0);
}

static int
vmxnet3_shutdown(device_t dev)
{

	return (0);
}

static int
vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	rid = PCIR_BAR(0);
	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res0 == NULL) {
		device_printf(dev,
		    "could not map BAR0 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);

	rid = PCIR_BAR(1);
	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res1 == NULL) {
		device_printf(dev,
		    "could not map BAR1 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);

	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
		rid = PCIR_BAR(2);
		sc->vmx_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}

	if (sc->vmx_msix_res == NULL)
		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;

	return (0);
}

static void
vmxnet3_free_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	if (sc->vmx_res0 != NULL) {
		rid = PCIR_BAR(0);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
		sc->vmx_res0 = NULL;
	}

	if (sc->vmx_res1 != NULL) {
		rid = PCIR_BAR(1);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
		sc->vmx_res1 = NULL;
	}

	if (sc->vmx_msix_res != NULL) {
		rid = PCIR_BAR(2);
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    sc->vmx_msix_res);
		sc->vmx_msix_res = NULL;
	}
}

static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t version;

	dev = sc->vmx_dev;

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported hardware version %#x\n",
		    version);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported UPT version %#x\n", version);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);

	return (0);
}
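
/*
 * Round down to the nearest power of 2: fls() returns the position of the
 * most significant set bit, so e.g. trunc_powerof2(6) == 4 and
 * trunc_powerof2(8) == 8.
 */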
static int
trunc_powerof2(int val)
{

	return (1U << (fls(val) - 1));
}

static void
vmxnet3_initial_config(struct vmxnet3_softc *sc)
{
	int nqueue, ndesc;

	nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
	if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_TX_QUEUES;
	if (nqueue > mp_ncpus)
		nqueue = mp_ncpus;
	sc->vmx_max_ntxqueues = trunc_powerof2(nqueue);

	nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
	if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_RX_QUEUES;
	if (nqueue > mp_ncpus)
		nqueue = mp_ncpus;
	sc->vmx_max_nrxqueues = trunc_powerof2(nqueue);

	if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
		ndesc = VMXNET3_DEF_TX_NDESC;
	if (ndesc & VMXNET3_MASK_TX_NDESC)
		ndesc &= ~VMXNET3_MASK_TX_NDESC;
	sc->vmx_ntxdescs = ndesc;

	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
		ndesc = VMXNET3_DEF_RX_NDESC;
	if (ndesc & VMXNET3_MASK_RX_NDESC)
		ndesc &= ~VMXNET3_MASK_RX_NDESC;
	sc->vmx_nrxdescs = ndesc;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
}

static void
vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
{

	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
		goto out;

	/* BMV: Just use the maximum configured for now. */
	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;

	if (sc->vmx_nrxqueues > 1)
		sc->vmx_flags |= VMXNET3_FLAG_RSS;

	return;

out:
	sc->vmx_ntxqueues = 1;
	sc->vmx_nrxqueues = 1;
}

static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsix, cnt, required;

	dev = sc->vmx_dev;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
		return (1);

	/* Allocate an additional vector for the events interrupt. */
	required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;

	nmsix = pci_msix_count(dev);
	if (nmsix < required)
		return (1);

	cnt = required;
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = required;
		return (0);
	} else
		pci_release_msi(dev);

	/* BMV TODO Fallback to sharing MSIX vectors if possible. */

	return (1);
}

static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsi, cnt, required;

	dev = sc->vmx_dev;
	required = 1;

	nmsi = pci_msi_count(dev);
	if (nmsi < required)
		return (1);

	cnt = required;
	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = 1;
		return (0);
	} else
		pci_release_msi(dev);

	return (1);
}

static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{

	sc->vmx_nintrs = 1;
	return (0);
}

static int
vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
    struct vmxnet3_interrupt *intr)
{
	struct resource *irq;

	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
	if (irq == NULL)
		return (ENXIO);

	intr->vmxi_irq = irq;
	intr->vmxi_rid = rid;

	return (0);
}

static int
vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
{
	int i, rid, flags, error;

	rid = 0;
	flags = RF_ACTIVE;

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
		flags |= RF_SHAREABLE;
	else
		rid = 1;

	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
		error = vmxnet3_alloc_interrupt(sc, rid, flags,
		    &sc->vmx_intrs[i]);
		if (error)
			return (error);
	}

	return (0);
}

static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_interrupt *intr;
	int i, type, error;

	dev = sc->vmx_dev;
	intr = &sc->vmx_intrs[0];
	type = INTR_TYPE_NET | INTR_MPSAFE;

	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
		txq = &sc->vmx_txq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
		    vmxnet3_txq_intr, txq, &intr->vmxi_handler);
		if (error)
			return (error);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		    "tx%d", i);
		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
		rxq = &sc->vmx_rxq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
		if (error)
			return (error);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		    "rx%d", i);
		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
	}

	error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
	    vmxnet3_event_intr, sc, &intr->vmxi_handler);
	if (error)
		return (error);
	bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;

	return (0);
}

static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
	struct vmxnet3_interrupt *intr;
	int i, error;

	intr = &sc->vmx_intrs[0];
	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
	    &intr->vmxi_handler);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		sc->vmx_txq[i].vxtxq_intr_idx = 0;
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (error);
}

static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;
		txs->intr_idx = txq->vxtxq_intr_idx;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;
		rxs->intr_idx = rxq->vxrxq_intr_idx;
	}
}

static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_intr_resources(sc);
	if (error)
		return (error);

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
		error = vmxnet3_setup_msix_interrupts(sc);
		break;
	case VMXNET3_IT_MSI:
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		break;
	default:
		panic("%s: invalid interrupt type %d", __func__,
		    sc->vmx_intr_type);
	}

	if (error == 0)
		vmxnet3_set_interrupt_idx(sc);

	return (error);
}

static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t config;
	int error;

	dev = sc->vmx_dev;

	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}

static void
vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
    struct vmxnet3_interrupt *intr)
{
	device_t dev;

	dev = sc->vmx_dev;

	if (intr->vmxi_handler != NULL) {
		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
		intr->vmxi_handler = NULL;
	}

	if (intr->vmxi_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
		    intr->vmxi_irq);
		intr->vmxi_irq = NULL;
		intr->vmxi_rid = -1;
	}
}

static void
vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
{
	int i;

	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);

	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
		pci_release_msi(sc->vmx_dev);
}

#ifndef VMXNET3_LEGACY_TX
static int
vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
{
	device_t dev;

	dev = sc->vmx_dev;

	sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vmx_tq);
	if (sc->vmx_tq == NULL)
		return (ENOMEM);

	return (0);
}

static void
vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nthreads, error;

	dev = sc->vmx_dev;

	/*
	 * The taskqueue is typically not frequently used, so a dedicated
	 * thread for each queue is unnecessary.
	 */
	nthreads = MAX(1, sc->vmx_ntxqueues / 2);

	/*
	 * Most drivers just ignore the return value; it only fails with
	 * ENOMEM, so an error is not likely. It is hard for us to recover
	 * from an error here.
	 */
	error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
	    "%s taskq", device_get_nameunit(dev));
	if (error)
		device_printf(dev, "failed to start taskqueue: %d", error);
}

static void
vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	int i;

	if (sc->vmx_tq != NULL) {
		for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
			txq = &sc->vmx_txq[i];
			taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
		}
	}
}

static void
vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
{

	if (sc->vmx_tq != NULL) {
		taskqueue_free(sc->vmx_tq);
		sc->vmx_tq = NULL;
	}
}
#endif

static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	int i;

	rxq = &sc->vmx_rxq[q];

	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);

	rxq->vxrxq_sc = sc;
	rxq->vxrxq_id = q;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_rid = i;
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (rxr->vxrxr_rxbuf == NULL)
			return (ENOMEM);

		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
	}

	return (0);
}

static int
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;

	txq = &sc->vmx_txq[q];
	txr = &txq->vxtxq_cmd_ring;

	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);

	txq->vxtxq_sc = sc;
	txq->vxtxq_id = q;

	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
	txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->vxtxr_txbuf == NULL)
		return (ENOMEM);

	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;

#ifndef VMXNET3_LEGACY_TX
	TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);

	txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vxtxq_mtx);
	if (txq->vxtxq_br == NULL)
		return (ENOMEM);
#endif

	return (0);
}

static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i, error;

	/*
	 * Only attempt to create multiple queues if MSIX is available.
	 * MSIX is disabled by default because it's apparently broken for
	 * devices passed through by at least ESXi 5.1. The
	 * hw.pci.honor_msi_blacklist tunable must be set to zero for MSIX.
	 * This check prevents us from allocating queue structures that we
	 * will not use.
	 */
	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
	    sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
	    sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
		error = vmxnet3_init_rxq(sc, i);
		if (error)
			return (error);
	}

	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
		error = vmxnet3_init_txq(sc, i);
		if (error)
			return (error);
	}

	return (0);
}

static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	int i;

	rxq->vxrxq_sc = NULL;
	rxq->vxrxq_id = -1;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		if (rxr->vxrxr_rxbuf != NULL) {
			free(rxr->vxrxr_rxbuf, M_DEVBUF);
			rxr->vxrxr_rxbuf = NULL;
		}
	}

	if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
		mtx_destroy(&rxq->vxrxq_mtx);
}

static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;

	txr = &txq->vxtxq_cmd_ring;

	txq->vxtxq_sc = NULL;
	txq->vxtxq_id = -1;

#ifndef VMXNET3_LEGACY_TX
	if (txq->vxtxq_br != NULL) {
		buf_ring_free(txq->vxtxq_br, M_DEVBUF);
		txq->vxtxq_br = NULL;
	}
#endif

	if (txr->vxtxr_txbuf != NULL) {
		free(txr->vxtxr_txbuf, M_DEVBUF);
		txr->vxtxr_txbuf = NULL;
	}

	if (mtx_initialized(&txq->vxtxq_mtx) != 0)
		mtx_destroy(&txq->vxtxq_mtx);
}

static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i;

	if (sc->vmx_rxq != NULL) {
		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
		free(sc->vmx_rxq, M_DEVBUF);
		sc->vmx_rxq = NULL;
	}

	if (sc->vmx_txq != NULL) {
		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
		free(sc->vmx_txq, M_DEVBUF);
		sc->vmx_txq = NULL;
	}
}

static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint8_t *kva;
	size_t size;
	int i, error;

	dev = sc->vmx_dev;

	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
	if (error) {
		device_printf(dev, "cannot alloc shared memory\n");
		return (error);
	}
	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;

	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
	if (error) {
		device_printf(dev, "cannot alloc queue shared memory\n");
		return (error);
	}
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
	kva = sc->vmx_qs;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		size = sizeof(struct vmxnet3_rss_shared);
		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
		if (error) {
			device_printf(dev, "cannot alloc rss shared memory\n");
			return (error);
		}
		sc->vmx_rss =
		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
	}

	return (0);
}

static void
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rss != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
		sc->vmx_rss = NULL;
	}

	if (sc->vmx_qs != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
		sc->vmx_qs = NULL;
	}

	if (sc->vmx_ds != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
		sc->vmx_ds = NULL;
	}
}

static int
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	size_t descsz, compsz;
	int i, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);

		error = bus_dma_tag_create(bus_get_dma_tag(dev),
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    VMXNET3_TX_MAXSIZE,		/* maxsize */
		    VMXNET3_TX_MAXSEGS,		/* nsegments */
		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &txr->vxtxr_txtag);
		if (error) {
			device_printf(dev,
			    "unable to create Tx buffer tag for queue %d\n", q);
			return (error);
		}

		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx descriptors for "
			    "queue %d error %d\n", q, error);
			return (error);
		}
		txr->vxtxr_txd =
		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		txc->vxcr_u.txcd =
		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
			if (error) {
				device_printf(dev, "unable to create Tx buf "
				    "dmamap for queue %d idx %d\n", q, i);
				return (error);
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txbuf *txb;
	int i, q;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			txb = &txr->vxtxr_txbuf[i];
			if (txb->vtxb_dmamap != NULL) {
				bus_dmamap_destroy(txr->vxtxr_txtag,
				    txb->vtxb_dmamap);
				txb->vtxb_dmamap = NULL;
			}
		}

		if (txc->vxcr_u.txcd != NULL) {
			vmxnet3_dma_free(sc, &txc->vxcr_dma);
			txc->vxcr_u.txcd = NULL;
		}

		if (txr->vxtxr_txd != NULL) {
			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
			txr->vxtxr_txd = NULL;
		}

		if (txr->vxtxr_txtag != NULL) {
			bus_dma_tag_destroy(txr->vxtxr_txtag);
			txr->vxtxr_txtag = NULL;
		}
	}
}

static int
vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	size_t descsz, compsz;
	int i, j, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;
		compsz = 0;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			descsz = rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxdesc);
			compsz += rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxcompdesc);

			error = bus_dma_tag_create(bus_get_dma_tag(dev),
			    1, 0,		/* alignment, boundary */
			    BUS_SPACE_MAXADDR,	/* lowaddr */
			    BUS_SPACE_MAXADDR,	/* highaddr */
			    NULL, NULL,		/* filter, filterarg */
			    MJUMPAGESIZE,	/* maxsize */
			    1,			/* nsegments */
			    MJUMPAGESIZE,	/* maxsegsize */
			    0,			/* flags */
			    NULL, NULL,		/* lockfunc, lockarg */
			    &rxr->vxrxr_rxtag);
			if (error) {
				device_printf(dev,
				    "unable to create Rx buffer tag for "
				    "queue %d\n", q);
				return (error);
			}

			error = vmxnet3_dma_malloc(sc, descsz, 512,
			    &rxr->vxrxr_dma);
			if (error) {
				device_printf(dev, "cannot allocate Rx "
				    "descriptors for queue %d/%d error %d\n",
				    q, i, error);
				return (error);
			}
			rxr->vxrxr_rxd =
			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
		}

		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Rx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		rxc->vxcr_u.rxcd =
		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
			    &rxr->vxrxr_spare_dmap);
			if (error) {
				device_printf(dev, "unable to create spare "
				    "dmamap for queue %d/%d error %d\n",
				    q, i, error);
				return (error);
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
				if (error) {
					device_printf(dev, "unable to create "
					    "dmamap for queue %d/%d slot %d "
					    "error %d\n", q, i, j, error);
					return (error);
				}
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxbuf *rxb;
	int i, j, q;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_spare_dmap != NULL) {
				bus_dmamap_destroy(rxr->vxrxr_rxtag,
				    rxr->vxrxr_spare_dmap);
				rxr->vxrxr_spare_dmap = NULL;
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				rxb = &rxr->vxrxr_rxbuf[j];
				if (rxb->vrxb_dmamap != NULL) {
					bus_dmamap_destroy(rxr->vxrxr_rxtag,
					    rxb->vrxb_dmamap);
					rxb->vrxb_dmamap = NULL;
				}
			}
		}

		if (rxc->vxcr_u.rxcd != NULL) {
			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
			rxc->vxcr_u.rxcd = NULL;
		}

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_rxd != NULL) {
				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
				rxr->vxrxr_rxd = NULL;
			}

			if (rxr->vxrxr_rxtag != NULL) {
				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
				rxr->vxrxr_rxtag = NULL;
			}
		}
	}
}

static int
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_txq_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_rxq_data(sc);
	if (error)
		return (error);

	return (0);
}

static void
vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rxq != NULL)
		vmxnet3_free_rxq_data(sc);

	if (sc->vmx_txq != NULL)
		vmxnet3_free_txq_data(sc);
}

static int
vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
	    32, &sc->vmx_mcast_dma);
	if (error)
		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
	else
		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;

	return (error);
}

static void
vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
{

	if (sc->vmx_mcast != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
		sc->vmx_mcast = NULL;
	}
}

static void
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	ds = sc->vmx_ds;

	/*
	 * Initialize fields of the shared data that remain the same across
	 * reinits. Note the shared data is zeroed when allocated.
	 */

	ds->magic = VMXNET3_REV1_MAGIC;

	/* DriverInfo */
	ds->version = VMXNET3_DRIVER_VERSION;
	ds->guest = VMXNET3_GOS_FREEBSD |
#ifdef __LP64__
	    VMXNET3_GOS_64BIT;
#else
	    VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;

	/* Misc. conf */
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
	ds->nrxsg_max = sc->vmx_max_rxsegs;

	/* RSS conf */
	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->rss.version = 1;
		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
		ds->rss.len = sc->vmx_rss_dma.dma_size;
	}

	/* Interrupt control. */
	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
	ds->nintr = sc->vmx_nintrs;
	ds->evintr = sc->vmx_event_intr_idx;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
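
	/* Adaptive moderation lets the device throttle each vector's rate. */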
	for (i = 0; i < sc->vmx_nintrs; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;

	/* Receive filter. */
	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;

	/* Tx queues */
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;

		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
		txs->driver_data = vtophys(txq);
		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
	}

	/* Rx queues */
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;

		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
		rxs->driver_data = vtophys(rxq);
		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
	}
}

static void
vmxnet3_init_hwassist(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp = sc->vmx_ifp;
	uint64_t hwassist;

	hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		hwassist |= VMXNET3_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		hwassist |= CSUM_IP6_TSO;
	ifp->if_hwassist = hwassist;
}

static void
vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
{

	/* Use the current MAC address. */
	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
	vmxnet3_set_lladdr(sc);

	vmxnet3_init_hwassist(sc);
}

static void
vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
{
	/*
	 * Use the same key as the Linux driver until FreeBSD can do
	 * RSS (presumably Toeplitz) in software.
	 */
	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
		0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
		0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
		0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
		0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
		0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
	};
	struct vmxnet3_rss_shared *rss;
	int i;

	rss = sc->vmx_rss;

	rss->hash_type =
	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
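
	/*
	 * Spread the hash space evenly across the Rx queues, e.g. with four
	 * Rx queues the indirection table is filled with 0,1,2,3,0,1,2,3,...
	 */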
	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
		rss->ind_table[i] = i % sc->vmx_nrxqueues;
}

static void
vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_driver_shared *ds;

	ifp = sc->vmx_ifp;
	ds = sc->vmx_ds;

	ds->mtu = ifp->if_mtu;
	ds->ntxqueue = sc->vmx_ntxqueues;
	ds->nrxqueue = sc->vmx_nrxqueues;

	ds->upt_features = 0;
	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
		ds->upt_features |= UPT1_F_CSUM;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ds->upt_features |= UPT1_F_VLAN;
	if (ifp->if_capenable & IFCAP_LRO)
		ds->upt_features |= UPT1_F_LRO;

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->upt_features |= UPT1_F_RSS;
		vmxnet3_reinit_rss_shared_data(sc);
	}

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
}

static int
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_shared_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_queue_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_mcast_table(sc);
	if (error)
		return (error);

	vmxnet3_init_shared_data(sc);

	return (0);
}

static void
vmxnet3_free_data(struct vmxnet3_softc *sc)
{

	vmxnet3_free_mcast_table(sc);
	vmxnet3_free_queue_data(sc);
	vmxnet3_free_shared_data(sc);
}

static int
vmxnet3_setup_interface(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vmx_dev;

	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#if __FreeBSD_version < 1000025
	ifp->if_baudrate = 1000000000;
#elif __FreeBSD_version < 1100011
	if_initbaudrate(ifp, IF_Gbps(10));
#else
	ifp->if_baudrate = IF_Gbps(10);
#endif
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vmxnet3_init;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_get_counter = vmxnet3_get_counter;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
	ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;

#ifdef VMXNET3_LEGACY_TX
	ifp->if_start = vmxnet3_start;
	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
	IFQ_SET_READY(&ifp->if_snd);
#else
	ifp->if_transmit = vmxnet3_txq_mq_start;
	ifp->if_qflush = vmxnet3_qflush;
#endif

	vmxnet3_get_lladdr(sc);
	ether_ifattach(ifp, sc->vmx_lladdr);

	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* These capabilities are not enabled by default. */
	ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;

	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);

	return (0);
}

static void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	uint32_t event;
	int reset;

	dev = sc->vmx_dev;
	ifp = sc->vmx_ifp;
	reset = 0;

	VMXNET3_CORE_LOCK(sc);

	/* Clear events. */
	event = sc->vmx_ds->event;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);

	if (event & VMXNET3_EVENT_LINK) {
		vmxnet3_link_status(sc);
		if (sc->vmx_link_active != 0)
			vmxnet3_tx_start_all(sc);
	}

	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		reset = 1;
		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
		ts = sc->vmx_txq[0].vxtxq_ts;
		if (ts->stopped != 0)
			device_printf(dev, "Tx queue error %#x\n", ts->error);
		rs = sc->vmx_rxq[0].vxrxq_rs;
		if (rs->stopped != 0)
			device_printf(dev, "Rx queue error %#x\n", rs->error);
		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
	}

	if (event & VMXNET3_EVENT_DIC)
		device_printf(dev, "device implementation change event\n");
	if (event & VMXNET3_EVENT_DEBUG)
		device_printf(dev, "debug event\n");

	if (reset != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vmxnet3_init_locked(sc);
	}

	VMXNET3_CORE_UNLOCK(sc);
}

static void
vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txcompdesc *txcd;
	struct vmxnet3_txbuf *txb;
	struct mbuf *m;
	u_int sop;

	sc = txq->vxtxq_sc;
	txr = &txq->vxtxq_cmd_ring;
	txc = &txq->vxtxq_comp_ring;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	for (;;) {
		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		if (txcd->gen != txc->vxcr_gen)
			break;
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++txc->vxcr_next == txc->vxcr_ndesc) {
			txc->vxcr_next = 0;
			txc->vxcr_gen ^= 1;
		}

		sop = txr->vxtxr_next;
		txb = &txr->vxtxr_txbuf[sop];

		if ((m = txb->vtxb_m) != NULL) {
			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);

			txq->vxtxq_stats.vmtxs_opackets++;
			txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				txq->vxtxq_stats.vmtxs_omcasts++;

			m_freem(m);
			txb->vtxb_m = NULL;
		}

		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
	}

	if (txr->vxtxr_head == txr->vxtxr_next)
		txq->vxtxq_watchdog = 0;
}

static int
vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxbuf *rxb;
	bus_dma_tag_t tag;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[1];
	int idx, clsize, btype, flags, nsegs, error;

	ifp = sc->vmx_ifp;
	tag = rxr->vxrxr_rxtag;
	dmap = rxr->vxrxr_spare_dmap;
	idx = rxr->vxrxr_fill;
	rxd = &rxr->vxrxr_rxd[idx];
	rxb = &rxr->vxrxr_rxbuf[idx];

#ifdef VMXNET3_FAILPOINTS
	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
	if (rxr->vxrxr_rid != 0)
		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
#endif

	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
		flags = M_PKTHDR;
		clsize = MCLBYTES;
		btype = VMXNET3_BTYPE_HEAD;
	} else {
#if __FreeBSD_version < 902001
		/*
		 * These mbufs will never be used for the start of a frame.
		 * Roughly prior to branching releng/9.2, the load_mbuf_sg()
		 * required the mbuf to always be a packet header. Avoid
		 * unnecessary mbuf initialization in newer versions where
		 * that is not the case.
		 */
		flags = M_PKTHDR;
#else
		flags = 0;
#endif
		clsize = MJUMPAGESIZE;
		btype = VMXNET3_BTYPE_BODY;
	}

	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
	if (m == NULL) {
		sc->vmx_stats.vmst_mgetcl_failed++;
		return (ENOBUFS);
	}

	if (btype == VMXNET3_BTYPE_HEAD) {
		m->m_len = m->m_pkthdr.len = clsize;
		m_adj(m, ETHER_ALIGN);
	} else
		m->m_len = clsize;

	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		sc->vmx_stats.vmst_mbuf_load_failed++;
		return (error);
	}
	KASSERT(nsegs == 1,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
#if __FreeBSD_version < 902001
	if (btype == VMXNET3_BTYPE_BODY)
		m->m_flags &= ~M_PKTHDR;
#endif

	if (rxb->vrxb_m != NULL) {
		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
	}

	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
	rxb->vrxb_dmamap = dmap;
	rxb->vrxb_m = m;

	rxd->addr = segs[0].ds_addr;
	rxd->len = segs[0].ds_len;
	rxd->btype = btype;
	rxd->gen = rxr->vxrxr_gen;

	vmxnet3_rxr_increment_fill(rxr);
	return (0);
}

static void
vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxring *rxr, int idx)
{
	struct vmxnet3_rxdesc *rxd;

	rxd = &rxr->vxrxr_rxd[idx];
	rxd->gen = rxr->vxrxr_gen;
	vmxnet3_rxr_increment_fill(rxr);
}

static void
vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxcompdesc *rxcd;
	int idx, eof;

	sc = rxq->vxrxq_sc;
	rxc = &rxq->vxrxq_comp_ring;

	do {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen)
			break;		/* Not expected. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		eof = rxcd->eop;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
	} while (!eof);
}

static void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{

	if (rxcd->ipv4) {
		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (rxcd->ipcsum_ok)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
	}

	if (!rxcd->fragment) {
		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
		}
	}
}

static void
vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;

	if (rxcd->error) {
		rxq->vxrxq_stats.vmrxs_ierrors++;
		m_freem(m);
		return;
	}

#if __FreeBSD_version >= 1100000
	switch (rxcd->rss_type) {
	case VMXNET3_RCD_RSS_TYPE_IPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_IPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
		break;
	default: /* VMXNET3_RCD_RSS_TYPE_NONE */
		m->m_pkthdr.flowid = rxq->vxrxq_id;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
		break;
	}
#else
	m->m_pkthdr.flowid = rxq->vxrxq_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
#endif

	if (!rxcd->no_csum)
		vmxnet3_rx_csum(rxcd, m);
	if (rxcd->vlan) {
		m->m_flags |= M_VLANTAG;
		m->m_pkthdr.ether_vtag = rxcd->vtag;
	}

	rxq->vxrxq_stats.vmrxs_ipackets++;
	rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;

	VMXNET3_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m);
	VMXNET3_RXQ_LOCK(rxq);
}

static void
vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct mbuf *m, *m_head, *m_tail;
	int idx, length;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;
	rxc = &rxq->vxrxq_comp_ring;

	VMXNET3_RXQ_LOCK_ASSERT(rxq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	m_head = rxq->vxrxq_mhead;
	rxq->vxrxq_mhead = NULL;
	m_tail = rxq->vxrxq_mtail;
	rxq->vxrxq_mtail = NULL;
	MPASS(m_head == NULL || m_tail != NULL);

	for (;;) {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen) {
			rxq->vxrxq_mhead = m_head;
			rxq->vxrxq_mtail = m_tail;
			break;
		}
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		length = rxcd->len;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		rxd = &rxr->vxrxr_rxd[idx];

		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
		    __func__, rxcd->qid, idx));

		/*
		 * The host may skip descriptors. We detect this when this
		 * descriptor does not match the previous fill index. Catch
		 * up with the host now.
		 */
		if (__predict_false(rxr->vxrxr_fill != idx)) {
			while (rxr->vxrxr_fill != idx) {
				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
				    rxr->vxrxr_gen;
				vmxnet3_rxr_increment_fill(rxr);
			}
		}

		if (rxcd->sop) {
			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
			    ("%s: start of frame w/o head buffer", __func__));
			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
			    ("%s: start of frame not in ring 0", __func__));
			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
			    ("%s: start of frame at unexpected index %d (%d)",
			    __func__, idx, sc->vmx_rx_max_chain));
			KASSERT(m_head == NULL,
			    ("%s: duplicate start of frame?", __func__));

			if (length == 0) {
				/* Just ignore this descriptor. */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				goto nextp;
			}

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				goto nextp;
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = length;
			m->m_pkthdr.csum_flags = 0;
			m_head = m_tail = m;

		} else {
			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
			    ("%s: non start of frame w/o body buffer", __func__));

			if (m_head == NULL && m_tail == NULL) {
				/*
				 * This is a continuation of a packet that we
				 * started to drop, but could not drop entirely
				 * because this segment was still owned by the
				 * host. So, drop the remainder now.
				 */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				goto nextp;
			}

			KASSERT(m_head != NULL,
			    ("%s: frame not started?", __func__));

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				m_freem(m_head);
				m_head = m_tail = NULL;
				goto nextp;
			}

			m->m_len = length;
			m_head->m_pkthdr.len += length;
			m_tail->m_next = m;
			m_tail = m;
		}

		if (rxcd->eop) {
			vmxnet3_rxq_input(rxq, rxcd, m_head);
			m_head = m_tail = NULL;

			/* Must recheck after dropping the Rx lock. */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				break;
		}

nextp:
		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
			int qid = rxcd->qid;
			bus_size_t r;

			idx = (idx + 1) % rxr->vxrxr_ndesc;
			if (qid >= sc->vmx_nrxqueues) {
				qid -= sc->vmx_nrxqueues;
				r = VMXNET3_BAR0_RXH2(qid);
			} else
				r = VMXNET3_BAR0_RXH1(qid);
			vmxnet3_write_bar0(sc, r, idx);
		}
	}
}

static void
vmxnet3_legacy_intr(void *xsc)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;

	sc = xsc;
	rxq = &sc->vmx_rxq[0];
	txq = &sc->vmx_txq[0];

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
			return;
	}
	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(sc);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	vmxnet3_txq_start(txq);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_all_intrs(sc);
}

static void
vmxnet3_txq_intr(void *xtxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	txq = xtxq;
	sc = txq->vxtxq_sc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	vmxnet3_txq_start(txq);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
}

static void
vmxnet3_rxq_intr(void *xrxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;

	rxq = xrxq;
	sc = rxq->vxrxq_sc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
}

static void
vmxnet3_event_intr(void *xsc)
{
	struct vmxnet3_softc *sc;

	sc = xsc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
}

static void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct vmxnet3_txbuf *txb;
	int i;

	txr = &txq->vxtxq_cmd_ring;

	for (i = 0; i < txr->vxtxr_ndesc; i++) {
		txb = &txr->vxtxr_txbuf[i];

		if (txb->vtxb_m == NULL)
			continue;

		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
		m_freem(txb->vtxb_m);
		txb->vtxb_m = NULL;
	}
}

static void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_rxbuf *rxb;
	int i, j;

	if (rxq->vxrxq_mhead != NULL) {
		m_freem(rxq->vxrxq_mhead);
		rxq->vxrxq_mhead = NULL;
		rxq->vxrxq_mtail = NULL;
	}

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
			rxb = &rxr->vxrxr_rxbuf[j];

			if (rxb->vrxb_m == NULL)
				continue;

			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
			m_freem(rxb->vrxb_m);
			rxb->vrxb_m = NULL;
		}
	}
}
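
/*
 * Take and release each queue lock once; after this, no interrupt handler
 * can still be running against a queue that is being stopped.
 */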
static void
vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;
	int i;

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		VMXNET3_RXQ_LOCK(rxq);
		VMXNET3_RXQ_UNLOCK(rxq);
	}

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		VMXNET3_TXQ_LOCK(txq);
		VMXNET3_TXQ_UNLOCK(txq);
	}
}

static void
vmxnet3_stop(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	int q;

	ifp = sc->vmx_ifp;

	VMXNET3_CORE_LOCK_ASSERT(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->vmx_link_active = 0;
	callout_stop(&sc->vmx_tick);

	/* Disable interrupts. */
	vmxnet3_disable_all_intrs(sc);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);

	vmxnet3_stop_rendezvous(sc);

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
	for (q = 0; q < sc->vmx_nrxqueues; q++)
		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);

	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
}

static void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;

	txr = &txq->vxtxq_cmd_ring;
	txr->vxtxr_head = 0;
	txr->vxtxr_next = 0;
	txr->vxtxr_gen = VMXNET3_INIT_GEN;
	bzero(txr->vxtxr_txd,
	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));

	txc = &txq->vxtxq_comp_ring;
	txc->vxcr_next = 0;
	txc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(txc->vxcr_u.txcd,
	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
}

static int
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int i, populate, idx, frame_size, error;

	ifp = sc->vmx_ifp;
	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
	    ifp->if_mtu;

	/*
	 * If the MTU causes us to exceed what a regular sized cluster can
	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
	 *
	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
	 * our life easier. We do not support changing the ring size after
	 * the attach.
	 */
	if (frame_size <= MCLBYTES)
		sc->vmx_rx_max_chain = 1;
	else
		sc->vmx_rx_max_chain = 2;
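
	/*
	 * E.g. a 9000-byte MTU makes frame_size exceed MCLBYTES, so each
	 * frame in ring 0 spans a head buffer plus one MJUMPAGESIZE body
	 * buffer (a chain of 2).
	 */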

	/*
	 * Only populate ring 1 if the configuration will take advantage
	 * of it. That is either when LRO is enabled or the frame size
	 * exceeds what ring 0 can contain.
	 */
	if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
	    frame_size <= MCLBYTES + MJUMPAGESIZE)
		populate = 1;
	else
		populate = VMXNET3_RXRINGS_PERQ;

	for (i = 0; i < populate; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
		bzero(rxr->vxrxr_rxd,
		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));

		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
			error = vmxnet3_newbuf(sc, rxr);
			if (error)
				return (error);
		}
	}

	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen = 0;
		bzero(rxr->vxrxr_rxd,
		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
	}

	rxc = &rxq->vxrxq_comp_ring;
	rxc->vxcr_next = 0;
	rxc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(rxc->vxcr_u.rxcd,
	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));

	return (0);
}

static int
vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
{
	device_t dev;
	int q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txinit(sc, &sc->vmx_txq[q]);

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
		if (error) {
			device_printf(dev, "cannot populate Rx queue %d\n", q);
			return (error);
		}
	}

	return (0);
}
2555 vmxnet3_enable_device(struct vmxnet3_softc *sc)
2557 int q;
2559 if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2560 device_printf(sc->vmx_dev, "device enable command failed!\n");
2561 return (1);
2562 }
2564 /* Reset the Rx queue heads. */
2565 for (q = 0; q < sc->vmx_nrxqueues; q++) {
2566 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2567 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2568 }
2570 return (0);
2574 vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2576 struct ifnet *ifp;
2578 ifp = sc->vmx_ifp;
2580 vmxnet3_set_rxfilter(sc);
2582 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2583 bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
2584 sizeof(sc->vmx_ds->vlan_filter));
2585 else
2586 bzero(sc->vmx_ds->vlan_filter,
2587 sizeof(sc->vmx_ds->vlan_filter));
2588 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2592 vmxnet3_reinit(struct vmxnet3_softc *sc)
2595 vmxnet3_reinit_interface(sc);
2596 vmxnet3_reinit_shared_data(sc);
2598 if (vmxnet3_reinit_queues(sc) != 0)
2599 return (ENXIO);
2601 if (vmxnet3_enable_device(sc) != 0)
2602 return (ENXIO);
2604 vmxnet3_reinit_rxfilters(sc);
2606 return (0);
2610 vmxnet3_init_locked(struct vmxnet3_softc *sc)
2612 struct ifnet *ifp;
2614 ifp = sc->vmx_ifp;
2616 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2617 return;
2619 vmxnet3_stop(sc);
2621 if (vmxnet3_reinit(sc) != 0) {
2622 vmxnet3_stop(sc);
2623 return;
2624 }
2626 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2627 vmxnet3_link_status(sc);
2629 vmxnet3_enable_all_intrs(sc);
2630 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2634 vmxnet3_init(void *xsc)
2636 struct vmxnet3_softc *sc;
2638 sc = xsc;
2640 VMXNET3_CORE_LOCK(sc);
2641 vmxnet3_init_locked(sc);
2642 VMXNET3_CORE_UNLOCK(sc);
2645 /*
2646 * BMV: Much of this can go away once we finally have offsets in
2647 * the mbuf packet header. Bug andre@.
2648 */
2650 vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2651 int *etype, int *proto, int *start)
2653 struct ether_vlan_header *evh;
2654 int offset;
2655 #if defined(INET)
2656 struct ip *ip = NULL;
2657 struct ip iphdr;
2658 #endif
2659 #if defined(INET6)
2660 struct ip6_hdr *ip6 = NULL;
2661 struct ip6_hdr ip6hdr;
2662 #endif
2664 evh = mtod(m, struct ether_vlan_header *);
2665 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2666 /* BMV: We should handle nested VLAN tags too. */
2667 *etype = ntohs(evh->evl_proto);
2668 offset = sizeof(struct ether_vlan_header);
2669 } else {
2670 *etype = ntohs(evh->evl_encap_proto);
2671 offset = sizeof(struct ether_header);
2672 }
2674 switch (*etype) {
2675 #if defined(INET)
2676 case ETHERTYPE_IP:
2677 if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2678 m_copydata(m, offset, sizeof(struct ip),
2679 (caddr_t) &iphdr);
2680 ip = &iphdr;
2681 } else
2682 ip = mtodo(m, offset);
2683 *proto = ip->ip_p;
2684 *start = offset + (ip->ip_hl << 2);
2685 break;
2686 #endif
2687 #if defined(INET6)
2688 case ETHERTYPE_IPV6:
2689 if (__predict_false(m->m_len <
2690 offset + sizeof(struct ip6_hdr))) {
2691 m_copydata(m, offset, sizeof(struct ip6_hdr),
2692 (caddr_t) &ip6hdr);
2693 ip6 = &ip6hdr;
2694 } else
2695 ip6 = mtodo(m, offset);
2696 *proto = -1;
2697 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2698 /* Assert the network stack sent us a valid packet. */
2699 KASSERT(*start > offset,
2700 ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2701 *start, offset, *proto));
2702 break;
2703 #endif
2704 default:
2705 return (EINVAL);
2706 }
2708 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2709 struct tcphdr *tcp, tcphdr;
2710 uint16_t sum;
2712 if (__predict_false(*proto != IPPROTO_TCP)) {
2713 /* Likely failed to correctly parse the mbuf. */
2714 return (EINVAL);
2715 }
2717 txq->vxtxq_stats.vmtxs_tso++;
2719 switch (*etype) {
2720 #if defined(INET)
2721 case ETHERTYPE_IP:
2722 sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2723 htons(IPPROTO_TCP));
2724 break;
2725 #endif
2726 #if defined(INET6)
2727 case ETHERTYPE_IPV6:
2728 sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
2729 break;
2730 #endif
2731 default:
2732 sum = 0;
2733 break;
2734 }
2736 if (m->m_len < *start + sizeof(struct tcphdr)) {
2737 m_copyback(m, *start + offsetof(struct tcphdr, th_sum),
2738 sizeof(uint16_t), (caddr_t) &sum);
2739 m_copydata(m, *start, sizeof(struct tcphdr),
2740 (caddr_t) &tcphdr);
2741 tcp = &tcphdr;
2742 } else {
2743 tcp = mtodo(m, *start);
2744 tcp->th_sum = sum;
2745 }
2747 /*
2748 * For TSO, the size of the protocol header is also
2749 * included in the descriptor header size.
2750 */
2751 *start += (tcp->th_off << 2);
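/*
 * For a TSO frame with plain Ethernet, IPv4, and TCP headers and no
 * options, *start works out to 14 + 20 + 20 = 54, the offset of the
 * TCP payload.
 */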
2752 } else
2753 txq->vxtxq_stats.vmtxs_csum++;
2755 return (0);
2759 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
2760 bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
2762 struct vmxnet3_txring *txr;
2763 bus_dma_tag_t tag;
2764 struct mbuf *m;
2765 int error;
2767 txr = &txq->vxtxq_cmd_ring;
2768 m = *m0;
2769 tag = txr->vxtxr_txtag;
2771 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2772 if (error == 0 || error != EFBIG)
2773 return (error);
2775 m = m_defrag(m, M_NOWAIT);
2776 if (m != NULL) {
2777 *m0 = m;
2778 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2779 } else
2780 error = ENOBUFS;
2782 if (error) {
2783 m_freem(*m0);
2784 *m0 = NULL;
2785 txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
2786 } else
2787 txq->vxtxq_sc->vmx_stats.vmst_defragged++;
2789 return (error);
2793 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
2795 struct vmxnet3_txring *txr;
2797 txr = &txq->vxtxq_cmd_ring;
2798 bus_dmamap_unload(txr->vxtxr_txtag, dmap);
2802 vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
2804 struct vmxnet3_softc *sc;
2805 struct vmxnet3_txring *txr;
2806 struct vmxnet3_txdesc *txd, *sop;
2807 struct mbuf *m;
2808 bus_dmamap_t dmap;
2809 bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
2810 int i, gen, nsegs, etype, proto, start, error;
2812 sc = txq->vxtxq_sc;
2815 txr = &txq->vxtxq_cmd_ring;
2816 dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
2818 error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
2819 if (error)
2820 return (error);
2822 m = *m0;
2823 M_ASSERTPKTHDR(m);
2824 KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
2825 ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
2827 if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
2828 txq->vxtxq_stats.vmtxs_full++;
2829 vmxnet3_txq_unload_mbuf(txq, dmap);
2830 return (ENOSPC);
2831 } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
2832 error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
2833 if (error) {
2834 txq->vxtxq_stats.vmtxs_offload_failed++;
2835 vmxnet3_txq_unload_mbuf(txq, dmap);
2836 m_freem(m);
2837 *m0 = NULL;
2838 return (error);
2839 }
2840 }
2842 txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
2843 sop = &txr->vxtxr_txd[txr->vxtxr_head];
2844 gen = txr->vxtxr_gen ^ 1; /* Not yet owned by the device. */
2846 for (i = 0; i < nsegs; i++) {
2847 txd = &txr->vxtxr_txd[txr->vxtxr_head];
2849 txd->addr = segs[i].ds_addr;
2850 txd->len = segs[i].ds_len;
2851 txd->gen = gen;
2852 txd->dtype = 0;
2853 txd->offload_mode = VMXNET3_OM_NONE;
2854 txd->offload_pos = 0;
2855 txd->hlen = 0;
2856 txd->eop = 0;
2857 txd->compreq = 0;
2858 txd->vtag_mode = 0;
2859 txd->vtag = 0;
2861 if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
2862 txr->vxtxr_head = 0;
2863 txr->vxtxr_gen ^= 1;
2864 }
2865 gen = txr->vxtxr_gen;
2866 }
2867 txd->eop = 1;
2868 txd->compreq = 1;
2870 if (m->m_flags & M_VLANTAG) {
2871 sop->vtag_mode = 1;
2872 sop->vtag = m->m_pkthdr.ether_vtag;
2873 }
2875 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2876 sop->offload_mode = VMXNET3_OM_TSO;
2877 sop->hlen = start;
2878 sop->offload_pos = m->m_pkthdr.tso_segsz;
2879 } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
2880 VMXNET3_CSUM_OFFLOAD_IPV6)) {
2881 sop->offload_mode = VMXNET3_OM_CSUM;
2882 sop->hlen = start;
2883 sop->offload_pos = start + m->m_pkthdr.csum_data;
2884 }
2886 /* Finally, change the ownership. */
2887 vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
2888 sop->gen ^= 1;
2890 txq->vxtxq_ts->npending += nsegs;
2891 if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
2892 txq->vxtxq_ts->npending = 0;
2893 vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
2894 txr->vxtxr_head);
2895 }
2897 return (0);
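/*
 * Ownership passes to the device through the generation bits: every
 * descriptor is written with the inverted generation and the SOP
 * descriptor is flipped last, after the write barrier, so the device
 * never sees a half-built packet. The TXH doorbell is also batched;
 * with an intr_threshold of, say, 64, a stream of 2-segment packets
 * rings the register only about once per 32 packets.
 */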
2900 #ifdef VMXNET3_LEGACY_TX
2903 vmxnet3_start_locked(struct ifnet *ifp)
2905 struct vmxnet3_softc *sc;
2906 struct vmxnet3_txqueue *txq;
2907 struct vmxnet3_txring *txr;
2908 struct mbuf *m_head;
2909 int tx, avail;
2911 sc = ifp->if_softc;
2912 txq = &sc->vmx_txq[0];
2913 txr = &txq->vxtxq_cmd_ring;
2914 tx = 0;
2916 VMXNET3_TXQ_LOCK_ASSERT(txq);
2918 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2919 sc->vmx_link_active == 0)
2920 return;
2922 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2923 if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
2924 break;
2926 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2927 if (m_head == NULL)
2928 break;
2930 /* Assume the worst case if this mbuf is the head of a chain. */
2931 if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2932 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2933 break;
2934 }
2936 if (vmxnet3_txq_encap(txq, &m_head) != 0) {
2937 if (m_head != NULL)
2938 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2939 break;
2940 }
2942 tx++;
2943 ETHER_BPF_MTAP(ifp, m_head);
2944 }
2946 if (tx > 0)
2947 txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
2951 vmxnet3_start(struct ifnet *ifp)
2953 struct vmxnet3_softc *sc;
2954 struct vmxnet3_txqueue *txq;
2956 sc = ifp->if_softc;
2957 txq = &sc->vmx_txq[0];
2959 VMXNET3_TXQ_LOCK(txq);
2960 vmxnet3_start_locked(ifp);
2961 VMXNET3_TXQ_UNLOCK(txq);
2964 #else /* !VMXNET3_LEGACY_TX */
2967 vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
2969 struct vmxnet3_softc *sc;
2970 struct vmxnet3_txring *txr;
2971 struct buf_ring *br;
2972 struct ifnet *ifp;
2973 int tx, avail, error;
2975 sc = txq->vxtxq_sc;
2976 br = txq->vxtxq_br;
2977 ifp = sc->vmx_ifp;
2978 txr = &txq->vxtxq_cmd_ring;
2979 tx = 0;
2980 error = 0;
2982 VMXNET3_TXQ_LOCK_ASSERT(txq);
2984 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2985 sc->vmx_link_active == 0) {
2986 if (m != NULL)
2987 error = drbr_enqueue(ifp, br, m);
2988 return (error);
2989 }
2991 if (m != NULL) {
2992 error = drbr_enqueue(ifp, br, m);
2993 if (error)
2994 return (error);
2995 }
2997 while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
2998 m = drbr_peek(ifp, br);
2999 if (m == NULL)
3000 break;
3002 /* Assume the worst case if this mbuf is the head of a chain. */
3003 if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
3004 drbr_putback(ifp, br, m);
3005 break;
3006 }
3008 if (vmxnet3_txq_encap(txq, &m) != 0) {
3009 if (m != NULL)
3010 drbr_putback(ifp, br, m);
3011 else
3012 drbr_advance(ifp, br);
3013 break;
3014 }
3015 drbr_advance(ifp, br);
3017 tx++;
3018 ETHER_BPF_MTAP(ifp, m);
3019 }
3021 if (tx > 0)
3022 txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
3024 return (0);
3028 vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
3030 struct vmxnet3_softc *sc;
3031 struct vmxnet3_txqueue *txq;
3032 int i, ntxq, error;
3034 sc = ifp->if_softc;
3035 ntxq = sc->vmx_ntxqueues;
3037 /* Check if the flowid is set. */
3038 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
3039 i = m->m_pkthdr.flowid % ntxq;
3040 else
3041 i = curcpu % ntxq;
3043 txq = &sc->vmx_txq[i];
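/*
 * For instance, with four Tx queues a packet hashed to flowid 42 is
 * steered to queue 42 % 4 = 2, while unhashed packets fall back to
 * the current CPU so a given sender tends to stay on one queue.
 */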
3045 if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
3046 error = vmxnet3_txq_mq_start_locked(txq, m);
3047 VMXNET3_TXQ_UNLOCK(txq);
3048 } else {
3049 error = drbr_enqueue(ifp, txq->vxtxq_br, m);
3050 taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
3051 }
3053 return (error);
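/*
 * Using a trylock keeps transmitters from blocking each other: if the
 * queue is busy, the mbuf is parked on the buf_ring and the deferred
 * task drains it under the lock later.
 */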
3057 vmxnet3_txq_tq_deferred(void *xtxq, int pending)
3059 struct vmxnet3_softc *sc;
3060 struct vmxnet3_txqueue *txq;
3062 txq = xtxq;
3063 sc = txq->vxtxq_sc;
3065 VMXNET3_TXQ_LOCK(txq);
3066 if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
3067 vmxnet3_txq_mq_start_locked(txq, NULL);
3068 VMXNET3_TXQ_UNLOCK(txq);
3071 #endif /* VMXNET3_LEGACY_TX */
3074 vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
3076 struct vmxnet3_softc *sc;
3077 struct ifnet *ifp;
3079 sc = txq->vxtxq_sc;
3080 ifp = sc->vmx_ifp;
3082 #ifdef VMXNET3_LEGACY_TX
3083 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3084 vmxnet3_start_locked(ifp);
3085 #else
3086 if (!drbr_empty(ifp, txq->vxtxq_br))
3087 vmxnet3_txq_mq_start_locked(txq, NULL);
3088 #endif
3092 vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
3094 struct vmxnet3_txqueue *txq;
3095 int i;
3097 VMXNET3_CORE_LOCK_ASSERT(sc);
3099 for (i = 0; i < sc->vmx_ntxqueues; i++) {
3100 txq = &sc->vmx_txq[i];
3102 VMXNET3_TXQ_LOCK(txq);
3103 vmxnet3_txq_start(txq);
3104 VMXNET3_TXQ_UNLOCK(txq);
3109 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
3111 struct ifnet *ifp;
3112 int idx, bit;
3114 ifp = sc->vmx_ifp;
3115 idx = (tag >> 5) & 0x7F;
3116 bit = tag & 0x1F;
3118 if (tag == 0 || tag > 4095)
3119 return;
3121 VMXNET3_CORE_LOCK(sc);
3123 /* Update our private VLAN bitvector. */
3124 if (add)
3125 sc->vmx_vlan_filter[idx] |= (1 << bit);
3126 else
3127 sc->vmx_vlan_filter[idx] &= ~(1 << bit);
3129 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3130 if (add)
3131 sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
3132 else
3133 sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
3134 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
3135 }
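/*
 * The 4096 possible VLAN IDs are tracked in a 128-word bitvector:
 * for example, VLAN tag 100 lives in word (100 >> 5) & 0x7F = 3 at
 * bit 100 & 0x1F = 4.
 */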
3137 VMXNET3_CORE_UNLOCK(sc);
3141 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3144 if (ifp->if_softc == arg)
3145 vmxnet3_update_vlan_filter(arg, 1, tag);
3149 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3152 if (ifp->if_softc == arg)
3153 vmxnet3_update_vlan_filter(arg, 0, tag);
3157 vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
3159 struct ifnet *ifp;
3160 struct vmxnet3_driver_shared *ds;
3161 struct ifmultiaddr *ifma;
3162 u_int mode;
3164 ifp = sc->vmx_ifp;
3165 ds = sc->vmx_ds;
3167 mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
3168 if (ifp->if_flags & IFF_PROMISC)
3169 mode |= VMXNET3_RXMODE_PROMISC;
3170 if (ifp->if_flags & IFF_ALLMULTI)
3171 mode |= VMXNET3_RXMODE_ALLMULTI;
3173 int cnt = 0, overflow = 0;
3175 if_maddr_rlock(ifp);
3176 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3177 if (ifma->ifma_addr->sa_family != AF_LINK)
3178 continue;
3179 else if (cnt == VMXNET3_MULTICAST_MAX) {
3180 overflow = 1;
3181 break;
3182 }
3184 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3185 &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3186 cnt++;
3187 }
3188 if_maddr_runlock(ifp);
3190 if (overflow != 0) {
3191 cnt = 0;
3192 mode |= VMXNET3_RXMODE_ALLMULTI;
3193 } else if (cnt > 0)
3194 mode |= VMXNET3_RXMODE_MCAST;
3195 ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
3197 ds->rxmode = mode;
3200 vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
3201 vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
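/*
 * As an example, joining three multicast groups programs an 18 byte
 * table (3 * ETHER_ADDR_LEN) with mode UCAST | BCAST | MCAST; once
 * the count exceeds VMXNET3_MULTICAST_MAX, the table is dropped in
 * favor of ALLMULTI.
 */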
3205 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
3207 struct ifnet *ifp;
3209 ifp = sc->vmx_ifp;
3211 if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
3212 return (EINVAL);
3214 ifp->if_mtu = mtu;
3216 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3217 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3218 vmxnet3_init_locked(sc);
3219 }
3221 return (0);
3225 vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3227 struct vmxnet3_softc *sc;
3228 struct ifreq *ifr;
3229 int reinit, mask, error;
3231 sc = ifp->if_softc;
3232 ifr = (struct ifreq *) data;
3233 error = 0;
3235 switch (cmd) {
3236 case SIOCSIFMTU:
3237 if (ifp->if_mtu != ifr->ifr_mtu) {
3238 VMXNET3_CORE_LOCK(sc);
3239 error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
3240 VMXNET3_CORE_UNLOCK(sc);
3241 }
3242 break;
3244 case SIOCSIFFLAGS:
3245 VMXNET3_CORE_LOCK(sc);
3246 if (ifp->if_flags & IFF_UP) {
3247 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3248 if ((ifp->if_flags ^ sc->vmx_if_flags) &
3249 (IFF_PROMISC | IFF_ALLMULTI)) {
3250 vmxnet3_set_rxfilter(sc);
3251 }
3252 } else
3253 vmxnet3_init_locked(sc);
3254 } else {
3255 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3256 vmxnet3_stop(sc);
3257 }
3258 sc->vmx_if_flags = ifp->if_flags;
3259 VMXNET3_CORE_UNLOCK(sc);
3260 break;
3262 case SIOCADDMULTI:
3263 case SIOCDELMULTI:
3264 VMXNET3_CORE_LOCK(sc);
3265 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3266 vmxnet3_set_rxfilter(sc);
3267 VMXNET3_CORE_UNLOCK(sc);
3268 break;
3270 case SIOCSIFMEDIA:
3271 case SIOCGIFMEDIA:
3272 error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
3273 break;
3275 case SIOCSIFCAP:
3276 VMXNET3_CORE_LOCK(sc);
3277 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3279 if (mask & IFCAP_TXCSUM)
3280 ifp->if_capenable ^= IFCAP_TXCSUM;
3281 if (mask & IFCAP_TXCSUM_IPV6)
3282 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
3283 if (mask & IFCAP_TSO4)
3284 ifp->if_capenable ^= IFCAP_TSO4;
3285 if (mask & IFCAP_TSO6)
3286 ifp->if_capenable ^= IFCAP_TSO6;
3288 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
3289 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
3290 /* Changing these features requires us to reinit. */
3291 reinit = 1;
3293 if (mask & IFCAP_RXCSUM)
3294 ifp->if_capenable ^= IFCAP_RXCSUM;
3295 if (mask & IFCAP_RXCSUM_IPV6)
3296 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
3297 if (mask & IFCAP_LRO)
3298 ifp->if_capenable ^= IFCAP_LRO;
3299 if (mask & IFCAP_VLAN_HWTAGGING)
3300 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3301 if (mask & IFCAP_VLAN_HWFILTER)
3302 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3303 } else
3304 reinit = 0;
3306 if (mask & IFCAP_VLAN_HWTSO)
3307 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3309 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3310 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3311 vmxnet3_init_locked(sc);
3312 } else
3313 vmxnet3_init_hwassist(sc);
3316 VMXNET3_CORE_UNLOCK(sc);
3317 VLAN_CAPABILITIES(ifp);
3318 break;
3320 default:
3321 error = ether_ioctl(ifp, cmd, data);
3322 break;
3323 }
3325 VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
3327 return (error);
3330 #ifndef VMXNET3_LEGACY_TX
3332 vmxnet3_qflush(struct ifnet *ifp)
3334 struct vmxnet3_softc *sc;
3335 struct vmxnet3_txqueue *txq;
3336 struct mbuf *m;
3337 int i;
3339 sc = ifp->if_softc;
3341 for (i = 0; i < sc->vmx_ntxqueues; i++) {
3342 txq = &sc->vmx_txq[i];
3344 VMXNET3_TXQ_LOCK(txq);
3345 while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
3346 m_freem(m);
3347 VMXNET3_TXQ_UNLOCK(txq);
3348 }
3350 if_qflush(ifp);
3355 vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
3357 struct vmxnet3_softc *sc;
3359 sc = txq->vxtxq_sc;
3361 VMXNET3_TXQ_LOCK(txq);
3362 if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
3363 VMXNET3_TXQ_UNLOCK(txq);
3364 return (0);
3365 }
3366 VMXNET3_TXQ_UNLOCK(txq);
3368 if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
3369 txq->vxtxq_id);
3371 return (1);
3374 vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
3377 vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
3381 vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt)
3383 struct vmxnet3_softc *sc;
3384 uint64_t rv = 0;
3386 sc = if_getsoftc(ifp);
3389 /*
3390 * With the exception of if_ierrors, these ifnet statistics are
3391 * only updated in the driver, so just set them to our accumulated
3392 * values. if_ierrors is updated in ether_input() for malformed
3393 * frames that we should have already discarded.
3394 */
3395 switch (cnt) {
3396 case IFCOUNTER_IPACKETS:
3397 for (int i = 0; i < sc->vmx_nrxqueues; i++)
3398 rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets;
3399 return (rv);
3400 case IFCOUNTER_IQDROPS:
3401 for (int i = 0; i < sc->vmx_nrxqueues; i++)
3402 rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops;
3403 return (rv);
3404 case IFCOUNTER_IERRORS:
3405 for (int i = 0; i < sc->vmx_nrxqueues; i++)
3406 rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors;
3407 return (rv);
3408 case IFCOUNTER_OPACKETS:
3409 for (int i = 0; i < sc->vmx_ntxqueues; i++)
3410 rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets;
3411 return (rv);
3412 #ifndef VMXNET3_LEGACY_TX
3413 case IFCOUNTER_OBYTES:
3414 for (int i = 0; i < sc->vmx_ntxqueues; i++)
3415 rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes;
3416 return (rv);
3417 case IFCOUNTER_OMCASTS:
3418 for (int i = 0; i < sc->vmx_ntxqueues; i++)
3419 rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts;
3420 return (rv);
3421 #endif
3422 default:
3423 return (if_get_counter_default(ifp, cnt));
3428 vmxnet3_tick(void *xsc)
3430 struct vmxnet3_softc *sc;
3431 struct ifnet *ifp;
3432 int i, timedout;
3434 sc = xsc;
3435 ifp = sc->vmx_ifp;
3436 timedout = 0;
3438 VMXNET3_CORE_LOCK_ASSERT(sc);
3440 vmxnet3_refresh_host_stats(sc);
3442 for (i = 0; i < sc->vmx_ntxqueues; i++)
3443 timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
3445 if (timedout != 0) {
3446 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3447 vmxnet3_init_locked(sc);
3448 } else
3449 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
3453 vmxnet3_link_is_up(struct vmxnet3_softc *sc)
3455 uint32_t status;
3457 /* Also update the link speed while here. */
3458 status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3459 sc->vmx_link_speed = status >> 16;
3460 return !!(status & 0x1);
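/*
 * The GET_LINK result packs everything into one word: bit 0 is the
 * link-up flag and the upper 16 bits carry the speed in Mbps, so a
 * status of 0x27100001 decodes to link up at 10000 Mbps.
 */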
3464 vmxnet3_link_status(struct vmxnet3_softc *sc)
3466 struct ifnet *ifp;
3467 int link;
3469 ifp = sc->vmx_ifp;
3470 link = vmxnet3_link_is_up(sc);
3472 if (link != 0 && sc->vmx_link_active == 0) {
3473 sc->vmx_link_active = 1;
3474 if_link_state_change(ifp, LINK_STATE_UP);
3475 } else if (link == 0 && sc->vmx_link_active != 0) {
3476 sc->vmx_link_active = 0;
3477 if_link_state_change(ifp, LINK_STATE_DOWN);
3482 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3484 struct vmxnet3_softc *sc;
3486 sc = ifp->if_softc;
3488 ifmr->ifm_status = IFM_AVALID;
3489 ifmr->ifm_active = IFM_ETHER;
3491 VMXNET3_CORE_LOCK(sc);
3492 if (vmxnet3_link_is_up(sc) != 0) {
3493 ifmr->ifm_status |= IFM_ACTIVE;
3494 ifmr->ifm_active |= IFM_AUTO;
3495 } else
3496 ifmr->ifm_active |= IFM_NONE;
3497 VMXNET3_CORE_UNLOCK(sc);
3501 vmxnet3_media_change(struct ifnet *ifp)
3504 /* Ignore. */
3505 return (0);
3509 vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3511 uint32_t ml, mh;
3513 ml = sc->vmx_lladdr[0];
3514 ml |= sc->vmx_lladdr[1] << 8;
3515 ml |= sc->vmx_lladdr[2] << 16;
3516 ml |= sc->vmx_lladdr[3] << 24;
3517 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3519 mh = sc->vmx_lladdr[4];
3520 mh |= sc->vmx_lladdr[5] << 8;
3521 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
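/*
 * The address is split across two little-endian registers; for
 * 00:0c:29:aa:bb:cc, MACL receives 0xaa290c00 (bytes 0-3) and MACH
 * receives 0x0000ccbb (bytes 4-5).
 */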
3525 vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3527 uint32_t ml, mh;
3529 ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3530 mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3532 sc->vmx_lladdr[0] = ml;
3533 sc->vmx_lladdr[1] = ml >> 8;
3534 sc->vmx_lladdr[2] = ml >> 16;
3535 sc->vmx_lladdr[3] = ml >> 24;
3536 sc->vmx_lladdr[4] = mh;
3537 sc->vmx_lladdr[5] = mh >> 8;
3541 vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
3542 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3544 struct sysctl_oid *node, *txsnode;
3545 struct sysctl_oid_list *list, *txslist;
3546 struct vmxnet3_txq_stats *stats;
3547 struct UPT1_TxStats *txstats;
3548 char namebuf[16];
3550 stats = &txq->vxtxq_stats;
3551 txstats = &txq->vxtxq_ts->stats;
3553 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
3554 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3555 NULL, "Transmit Queue");
3556 txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
3558 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3559 &stats->vmtxs_opackets, "Transmit packets");
3560 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3561 &stats->vmtxs_obytes, "Transmit bytes");
3562 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3563 &stats->vmtxs_omcasts, "Transmit multicasts");
3564 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3565 &stats->vmtxs_csum, "Transmit checksum offloaded");
3566 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3567 &stats->vmtxs_tso, "Transmit TCP segmentation offloaded");
3568 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
3569 &stats->vmtxs_full, "Transmit ring full");
3570 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
3571 &stats->vmtxs_offload_failed, "Transmit checksum offload failed");
3573 /*
3574 * Add statistics reported by the host. These are updated once
3575 * every second.
3576 */
3577 txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3578 NULL, "Host Statistics");
3579 txslist = SYSCTL_CHILDREN(txsnode);
3580 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
3581 &txstats->TSO_packets, "TSO packets");
3582 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
3583 &txstats->TSO_bytes, "TSO bytes");
3584 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3585 &txstats->ucast_packets, "Unicast packets");
3586 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3587 &txstats->ucast_bytes, "Unicast bytes");
3588 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3589 &txstats->mcast_packets, "Multicast packets");
3590 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3591 &txstats->mcast_bytes, "Multicast bytes");
3592 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
3593 &txstats->error, "Errors");
3594 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
3595 &txstats->discard, "Discards");
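/*
 * The resulting counters appear under the device's sysctl tree,
 * e.g. dev.vmx.0.txq0.opackets for a driver counter and
 * dev.vmx.0.txq0.hstats.tso_packets for a host-reported one.
 */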
3599 vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
3600 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3602 struct sysctl_oid *node, *rxsnode;
3603 struct sysctl_oid_list *list, *rxslist;
3604 struct vmxnet3_rxq_stats *stats;
3605 struct UPT1_RxStats *rxstats;
3606 char namebuf[16];
3608 stats = &rxq->vxrxq_stats;
3609 rxstats = &rxq->vxrxq_rs->stats;
3611 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
3612 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3613 NULL, "Receive Queue");
3614 rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
3616 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3617 &stats->vmrxs_ipackets, "Receive packets");
3618 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3619 &stats->vmrxs_ibytes, "Receive bytes");
3620 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3621 &stats->vmrxs_iqdrops, "Receive drops");
3622 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3623 &stats->vmrxs_ierrors, "Receive errors");
3625 /*
3626 * Add statistics reported by the host. These are updated once
3627 * every second.
3628 */
3629 rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3630 NULL, "Host Statistics");
3631 rxslist = SYSCTL_CHILDREN(rxsnode);
3632 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
3633 &rxstats->LRO_packets, "LRO packets");
3634 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
3635 &rxstats->LRO_bytes, "LRO bytes");
3636 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3637 &rxstats->ucast_packets, "Unicast packets");
3638 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3639 &rxstats->ucast_bytes, "Unicast bytes");
3640 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3641 &rxstats->mcast_packets, "Multicast packets");
3642 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3643 &rxstats->mcast_bytes, "Multicast bytes");
3644 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
3645 &rxstats->bcast_packets, "Broadcast packets");
3646 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
3647 &rxstats->bcast_bytes, "Broadcast bytes");
3648 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
3649 &rxstats->nobuffer, "No buffer");
3650 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
3651 &rxstats->error, "Errors");
3655 vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
3656 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3658 struct sysctl_oid *node;
3659 struct sysctl_oid_list *list;
3660 int i;
3662 for (i = 0; i < sc->vmx_ntxqueues; i++) {
3663 struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
3665 node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
3666 "debug", CTLFLAG_RD, NULL, "");
3667 list = SYSCTL_CHILDREN(node);
3669 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
3670 &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
3671 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
3672 &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
3673 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
3674 &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
3675 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
3676 &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
3677 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3678 &txq->vxtxq_comp_ring.vxcr_next, 0, "");
3679 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3680 &txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
3681 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3682 &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
3685 for (i = 0; i < sc->vmx_nrxqueues; i++) {
3686 struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
3688 node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
3689 "debug", CTLFLAG_RD, NULL, "");
3690 list = SYSCTL_CHILDREN(node);
3692 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
3693 &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
3694 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
3695 &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
3696 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
3697 &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
3698 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
3699 &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
3700 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
3701 &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
3702 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
3703 &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
3704 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3705 &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
3706 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3707 &rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
3708 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3709 &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
3714 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
3715 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3717 int i;
3719 for (i = 0; i < sc->vmx_ntxqueues; i++)
3720 vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
3721 for (i = 0; i < sc->vmx_nrxqueues; i++)
3722 vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
3724 vmxnet3_setup_debug_sysctl(sc, ctx, child);
3728 vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
3730 device_t dev;
3731 struct vmxnet3_statistics *stats;
3732 struct sysctl_ctx_list *ctx;
3733 struct sysctl_oid *tree;
3734 struct sysctl_oid_list *child;
3736 dev = sc->vmx_dev;
3737 ctx = device_get_sysctl_ctx(dev);
3738 tree = device_get_sysctl_tree(dev);
3739 child = SYSCTL_CHILDREN(tree);
3741 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
3742 &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
3743 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
3744 &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
3745 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
3746 &sc->vmx_ntxqueues, 0, "Number of Tx queues");
3747 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
3748 &sc->vmx_nrxqueues, 0, "Number of Rx queues");
3750 stats = &sc->vmx_stats;
3751 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
3752 &stats->vmst_defragged, 0, "Tx mbuf chains defragged");
3753 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
3754 &stats->vmst_defrag_failed, 0,
3755 "Tx mbuf dropped because defrag failed");
3756 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
3757 &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
3758 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
3759 &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
3761 vmxnet3_setup_queue_sysctl(sc, ctx, child);
3765 vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3768 bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
3772 vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
3775 return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
3779 vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3782 bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
3786 vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3789 vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
3793 vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3796 vmxnet3_write_cmd(sc, cmd);
3797 bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
3798 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3799 return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
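/*
 * Commands follow a write-then-read protocol on the same BAR1
 * register: writing, e.g., VMXNET3_CMD_GET_LINK latches the request
 * and the subsequent read returns the result; the bus-space barrier
 * keeps the two accesses ordered.
 */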
3803 vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
3806 vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
3810 vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
3813 vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
3817 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3819 int i;
3821 sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3822 for (i = 0; i < sc->vmx_nintrs; i++)
3823 vmxnet3_enable_intr(sc, i);
3827 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3829 int i;
3831 sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3832 for (i = 0; i < sc->vmx_nintrs; i++)
3833 vmxnet3_disable_intr(sc, i);
3837 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3839 bus_addr_t *baddr = arg;
3841 if (error == 0)
3842 *baddr = segs->ds_addr;
3846 vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3847 struct vmxnet3_dma_alloc *dma)
3849 device_t dev;
3850 int error;
3852 dev = sc->vmx_dev;
3853 bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3855 error = bus_dma_tag_create(bus_get_dma_tag(dev),
3856 align, 0, /* alignment, bounds */
3857 BUS_SPACE_MAXADDR, /* lowaddr */
3858 BUS_SPACE_MAXADDR, /* highaddr */
3859 NULL, NULL, /* filter, filterarg */
3860 size, /* maxsize */
3861 1, /* nsegments */
3862 size, /* maxsegsize */
3863 BUS_DMA_ALLOCNOW, /* flags */
3864 NULL, /* lockfunc */
3865 NULL, /* lockfuncarg */
3866 &dma->dma_tag);
3867 if (error) {
3868 device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
3869 goto fail;
3870 }
3872 error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
3873 BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
3874 if (error) {
3875 device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
3876 goto fail;
3877 }
3879 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3880 size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
3881 if (error) {
3882 device_printf(dev, "bus_dmamap_load failed: %d\n", error);
3883 goto fail;
3884 }
3886 dma->dma_size = size;
3887 return (0);
3889 fail:
3890 vmxnet3_dma_free(sc, dma);
3892 return (error);
3896 vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3899 if (dma->dma_tag != NULL) {
3900 if (dma->dma_paddr != 0) {
3901 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3902 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3903 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3904 }
3906 if (dma->dma_vaddr != NULL) {
3907 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
3908 dma->dma_map);
3909 }
3911 bus_dma_tag_destroy(dma->dma_tag);
3912 }
3913 bzero(dma, sizeof(struct vmxnet3_dma_alloc));
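/*
 * A typical use, sketched: allocate a page-aligned region the device
 * and driver share, then hand the bus address to the device.
 *
 *	struct vmxnet3_dma_alloc dma;
 *
 *	if (vmxnet3_dma_malloc(sc, sizeof(struct vmxnet3_driver_shared),
 *	    PAGE_SIZE, &dma) == 0) {
 *		... program dma.dma_paddr into the device ...
 *		vmxnet3_dma_free(sc, &dma);
 *	}
 */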
3917 vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
3919 char path[64];
3921 snprintf(path, sizeof(path),
3922 "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
3923 TUNABLE_INT_FETCH(path, &def);
3925 return (def);
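/*
 * The knob is looked up per unit in the kernel environment, so a
 * loader.conf entry such as hw.vmx.0.txnqueue="4" (assuming a knob
 * named "txnqueue") overrides the compiled-in default for unit 0.
 */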
3928 /*
3929 * Since this is a purely paravirtualized device, we do not have
3930 * to worry about DMA coherency. But at times, we must make sure
3931 * both the compiler and CPU do not reorder memory operations.
3932 */
3934 vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
3937 switch (type) {
3938 case VMXNET3_BARRIER_RD:
3939 rmb();
3940 break;
3941 case VMXNET3_BARRIER_WR:
3942 wmb();
3943 break;
3944 case VMXNET3_BARRIER_RDWR:
3945 mb();
3946 break;
3947 default:
3948 panic("%s: bad barrier type %d", __func__, type);
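/*
 * VMXNET3_BARRIER_WR is the variant the transmit path relies on: it
 * orders the descriptor writes ahead of the generation-bit flip that
 * hands ownership to the device.
 */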