/*-
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
 */

/* Driver for VMware vmxnet3 virtual ethernet devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/in_cksum.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "if_vmxreg.h"
#include "if_vmxvar.h"

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef VMXNET3_FAILPOINTS
static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
    "vmxnet3 fail points");
#define VMXNET3_FP	_debug_fail_point_vmxnet3
#endif

static int	vmxnet3_probe(device_t);
static int	vmxnet3_attach(device_t);
static int	vmxnet3_detach(device_t);
static int	vmxnet3_shutdown(device_t);

static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
static void	vmxnet3_free_resources(struct vmxnet3_softc *);
static int	vmxnet3_check_version(struct vmxnet3_softc *);
static void	vmxnet3_initial_config(struct vmxnet3_softc *);
static void	vmxnet3_check_multiqueue(struct vmxnet3_softc *);

static int	vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
		    struct vmxnet3_interrupt *);
static int	vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
static int	vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static int	vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
static void	vmxnet3_free_interrupt(struct vmxnet3_softc *,
		    struct vmxnet3_interrupt *);
static void	vmxnet3_free_interrupts(struct vmxnet3_softc *);

#ifndef VMXNET3_LEGACY_TX
static int	vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_start_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_free_taskqueue(struct vmxnet3_softc *);
#endif

static int	vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int	vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int	vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void	vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void	vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void	vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);

static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_reinit_interface(struct vmxnet3_softc *);
static void	vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
static void	vmxnet3_free_data(struct vmxnet3_softc *);
static int	vmxnet3_setup_interface(struct vmxnet3_softc *);

static void	vmxnet3_evintr(struct vmxnet3_softc *);
static void	vmxnet3_txq_eof(struct vmxnet3_txqueue *);
static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
static void	vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
		    struct vmxnet3_rxring *, int);
static void	vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
static void	vmxnet3_legacy_intr(void *);
static void	vmxnet3_txq_intr(void *);
static void	vmxnet3_rxq_intr(void *);
static void	vmxnet3_event_intr(void *);

static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void	vmxnet3_stop(struct vmxnet3_softc *);

static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int	vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int	vmxnet3_enable_device(struct vmxnet3_softc *);
static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int	vmxnet3_reinit(struct vmxnet3_softc *);
static void	vmxnet3_init_locked(struct vmxnet3_softc *);
static void	vmxnet3_init(void *);

static int	vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *,
		    int *, int *, int *);
static int	vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
		    bus_dmamap_t, bus_dma_segment_t [], int *);
static void	vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int	vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
#ifdef VMXNET3_LEGACY_TX
static void	vmxnet3_start_locked(struct ifnet *);
static void	vmxnet3_start(struct ifnet *);
#else
static int	vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
		    struct mbuf *);
static int	vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vmxnet3_txq_tq_deferred(void *, int);
#endif
static void	vmxnet3_txq_start(struct vmxnet3_txqueue *);
static void	vmxnet3_tx_start_all(struct vmxnet3_softc *);

static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
		    uint16_t);
static void	vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
static void	vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
#ifndef VMXNET3_LEGACY_TX
static void	vmxnet3_qflush(struct ifnet *);
#endif

static int	vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void	vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
static void	vmxnet3_txq_accum_stats(struct vmxnet3_txqueue *,
		    struct vmxnet3_txq_stats *);
static void	vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue *,
		    struct vmxnet3_rxq_stats *);
static void	vmxnet3_tick(void *);
static void	vmxnet3_link_status(struct vmxnet3_softc *);
static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
static int	vmxnet3_media_change(struct ifnet *);
static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);

static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);

static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
		    uint32_t);
static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
		    uint32_t);
static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);

static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);

static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
		    bus_size_t, struct vmxnet3_dma_alloc *);
static void	vmxnet3_dma_free(struct vmxnet3_softc *,
		    struct vmxnet3_dma_alloc *);
static int	vmxnet3_tunable_int(struct vmxnet3_softc *,
		    const char *, int);

typedef enum vmxnet3_barrier {
	VMXNET3_BARRIER_RD,
	VMXNET3_BARRIER_WR,
	VMXNET3_BARRIER_RDWR,
} vmxnet3_barrier_t;

static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);

static int vmxnet3_mq_disable = 0;
TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);

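/*
 * Illustrative /boot/loader.conf settings for the tunables declared above;
 * the values here are examples only, not recommendations:
 *
 *	hw.vmx.mq_disable="0"
 *	hw.vmx.txnqueue="4"
 *	hw.vmx.rxnqueue="4"
 *	hw.vmx.txndesc="512"
 *	hw.vmx.rxndesc="256"
 *
 * Per-unit overrides take the form hw.vmx.<unit>.<knob> as fetched by
 * vmxnet3_tunable_int().  Note that multiqueue operation requires MSI-X,
 * which on devices passed through by some ESXi versions additionally
 * requires hw.pci.honor_msi_blacklist="0"; see the comment in
 * vmxnet3_alloc_rxtx_queues() below.
 */
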
static device_method_t vmxnet3_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vmxnet3_probe),
	DEVMETHOD(device_attach,	vmxnet3_attach),
	DEVMETHOD(device_detach,	vmxnet3_detach),
	DEVMETHOD(device_shutdown,	vmxnet3_shutdown),

	DEVMETHOD_END
};

static driver_t vmxnet3_driver = {
	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
};

static devclass_t vmxnet3_devclass;
DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);

MODULE_DEPEND(vmx, pci, 1, 1, 1);
MODULE_DEPEND(vmx, ether, 1, 1, 1);

#define VMXNET3_VMWARE_VENDOR_ID	0x15AD
#define VMXNET3_VMWARE_DEVICE_ID	0x07B0

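/*
 * For reference, a matching device shows up in `pciconf -lv` roughly as
 * (illustrative output, not from the source):
 *
 *	vmx0@pci0:3:0:0: class=0x020000 ... vendor=0x15ad device=0x07b0
 *
 * which are the IDs vmxnet3_probe() keys on below.
 */
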
static int
vmxnet3_probe(device_t dev)
{

	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
vmxnet3_attach(device_t dev)
{
	struct vmxnet3_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vmx_dev = dev;

	pci_enable_busmaster(dev);

	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);

	vmxnet3_initial_config(sc);

	error = vmxnet3_alloc_resources(sc);
	if (error)
		goto fail;

	error = vmxnet3_check_version(sc);
	if (error)
		goto fail;

	error = vmxnet3_alloc_rxtx_queues(sc);
	if (error)
		goto fail;

#ifndef VMXNET3_LEGACY_TX
	error = vmxnet3_alloc_taskqueue(sc);
	if (error)
		goto fail;
#endif

	error = vmxnet3_alloc_interrupts(sc);
	if (error)
		goto fail;

	vmxnet3_check_multiqueue(sc);

	error = vmxnet3_alloc_data(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interface(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interrupts(sc);
	if (error) {
		ether_ifdetach(sc->vmx_ifp);
		device_printf(dev, "could not set up interrupt\n");
		goto fail;
	}

	vmxnet3_setup_sysctl(sc);
#ifndef VMXNET3_LEGACY_TX
	vmxnet3_start_taskqueue(sc);
#endif

fail:
	if (error)
		vmxnet3_detach(dev);

	return (error);
}

static int
vmxnet3_detach(device_t dev)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vmx_ifp;

	if (device_is_attached(dev)) {
		VMXNET3_CORE_LOCK(sc);
		vmxnet3_stop(sc);
		VMXNET3_CORE_UNLOCK(sc);

		callout_drain(&sc->vmx_tick);
#ifndef VMXNET3_LEGACY_TX
		vmxnet3_drain_taskqueue(sc);
#endif

		ether_ifdetach(ifp);
	}

	if (sc->vmx_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
		sc->vmx_vlan_attach = NULL;
	}
	if (sc->vmx_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vmx_vlan_detach);
		sc->vmx_vlan_detach = NULL;
	}

#ifndef VMXNET3_LEGACY_TX
	vmxnet3_free_taskqueue(sc);
#endif
	vmxnet3_free_interrupts(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vmx_ifp = NULL;
	}

	ifmedia_removeall(&sc->vmx_media);

	vmxnet3_free_data(sc);
	vmxnet3_free_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	VMXNET3_CORE_LOCK_DESTROY(sc);

	return (0);
}

static int
vmxnet3_shutdown(device_t dev)
{

	return (0);
}

static int
vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	rid = PCIR_BAR(0);
	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res0 == NULL) {
		device_printf(dev,
		    "could not map BAR0 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);

	rid = PCIR_BAR(1);
	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res1 == NULL) {
		device_printf(dev,
		    "could not map BAR1 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);

	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
		rid = PCIR_BAR(2);
		sc->vmx_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}

	if (sc->vmx_msix_res == NULL)
		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;

	return (0);
}

static void
vmxnet3_free_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	if (sc->vmx_res0 != NULL) {
		rid = PCIR_BAR(0);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
		sc->vmx_res0 = NULL;
	}

	if (sc->vmx_res1 != NULL) {
		rid = PCIR_BAR(1);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
		sc->vmx_res1 = NULL;
	}

	if (sc->vmx_msix_res != NULL) {
		rid = PCIR_BAR(2);
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    sc->vmx_msix_res);
		sc->vmx_msix_res = NULL;
	}
}

static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t version;

	dev = sc->vmx_dev;

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported hardware version %#x\n",
		    version);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported UPT version %#x\n", version);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);

	return (0);
}

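/*
 * Note on the handshake above (inferred from the code, not an external
 * spec): VRRS and UVRS read back as bitmasks of the device/UPT revisions
 * the host supports, one bit per revision, with bit 0 meaning revision 1.
 * The driver then writes back the single revision it selected, so checking
 * bit 0 and writing 1 picks and commits to revision 1.
 */
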
static void
vmxnet3_initial_config(struct vmxnet3_softc *sc)
{
	int nqueue, ndesc;

	nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
	if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_TX_QUEUES;
	if (nqueue > mp_ncpus)
		nqueue = mp_ncpus;
	sc->vmx_max_ntxqueues = nqueue;

	nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
	if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_RX_QUEUES;
	if (nqueue > mp_ncpus)
		nqueue = mp_ncpus;
	sc->vmx_max_nrxqueues = nqueue;

	if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
		ndesc = VMXNET3_DEF_TX_NDESC;
	if (ndesc & VMXNET3_MASK_TX_NDESC)
		ndesc &= ~VMXNET3_MASK_TX_NDESC;
	sc->vmx_ntxdescs = ndesc;

	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
		ndesc = VMXNET3_DEF_RX_NDESC;
	if (ndesc & VMXNET3_MASK_RX_NDESC)
		ndesc &= ~VMXNET3_MASK_RX_NDESC;
	sc->vmx_nrxdescs = ndesc;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
}

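/*
 * Worked example of the clamping above (the mask values live in
 * if_vmxvar.h; treat the exact numbers as illustrative): with a
 * multiple-of-32 Tx ring requirement, VMXNET3_MASK_TX_NDESC would be
 * 0x1f, so a tunable request of txd=1000 is rounded down to 992, and
 * txnqueue=16 on an 8-CPU guest is clamped to 8 by mp_ncpus.
 */
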
static void
vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
{

	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
		goto out;

	/* BMV: Just use the maximum configured for now. */
	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;

	if (sc->vmx_nrxqueues > 1)
		sc->vmx_flags |= VMXNET3_FLAG_RSS;

	return;

out:
	sc->vmx_ntxqueues = 1;
	sc->vmx_nrxqueues = 1;
}

static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsix, cnt, required;

	dev = sc->vmx_dev;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
		return (1);

	/* Allocate an additional vector for the events interrupt. */
	required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;

	nmsix = pci_msix_count(dev);
	if (nmsix < required)
		return (1);

	cnt = required;
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = required;
		return (0);
	} else
		pci_release_msi(dev);

	/* BMV TODO Fallback to sharing MSIX vectors if possible. */

	return (1);
}

static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsi, cnt, required;

	dev = sc->vmx_dev;
	required = 1;

	nmsi = pci_msi_count(dev);
	if (nmsi < required)
		return (1);

	cnt = required;
	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = 1;
		return (0);
	} else
		pci_release_msi(dev);

	return (1);
}

static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{

	sc->vmx_nintrs = 1;
	return (0);
}

static int
vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
    struct vmxnet3_interrupt *intr)
{
	struct resource *irq;

	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
	if (irq == NULL)
		return (ENXIO);

	intr->vmxi_irq = irq;
	intr->vmxi_rid = rid;

	return (0);
}

static int
vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
{
	int i, rid, flags, error;

	rid = 0;
	flags = RF_ACTIVE;

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
		flags |= RF_SHAREABLE;
	else
		rid = 1;

	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
		error = vmxnet3_alloc_interrupt(sc, rid, flags,
		    &sc->vmx_intrs[i]);
		if (error)
			return (error);
	}

	return (0);
}

static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_interrupt *intr;
	enum intr_type type;
	int i, error;

	dev = sc->vmx_dev;
	intr = &sc->vmx_intrs[0];
	type = INTR_TYPE_NET | INTR_MPSAFE;

	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
		txq = &sc->vmx_txq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
		    vmxnet3_txq_intr, txq, &intr->vmxi_handler);
		if (error)
			return (error);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		    "tq%d", i);
		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
		rxq = &sc->vmx_rxq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
		if (error)
			return (error);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		    "rq%d", i);
		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
	}

	error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
	    vmxnet3_event_intr, sc, &intr->vmxi_handler);
	if (error)
		return (error);
	bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;

	return (0);
}

static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
	struct vmxnet3_interrupt *intr;
	int i, error;

	intr = &sc->vmx_intrs[0];
	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
	    &intr->vmxi_handler);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		sc->vmx_txq[i].vxtxq_intr_idx = 0;
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (error);
}

static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;
		txs->intr_idx = txq->vxtxq_intr_idx;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;
		rxs->intr_idx = rxq->vxrxq_intr_idx;
	}
}

static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_intr_resources(sc);
	if (error)
		return (error);

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
		error = vmxnet3_setup_msix_interrupts(sc);
		break;
	case VMXNET3_IT_MSI:
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		break;
	default:
		panic("%s: invalid interrupt type %d", __func__,
		    sc->vmx_intr_type);
	}

	if (error == 0)
		vmxnet3_set_interrupt_idx(sc);

	return (error);
}

static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t config;
	int error;

	dev = sc->vmx_dev;
	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}

static void
vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
    struct vmxnet3_interrupt *intr)
{
	device_t dev;

	dev = sc->vmx_dev;

	if (intr->vmxi_handler != NULL) {
		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
		intr->vmxi_handler = NULL;
	}

	if (intr->vmxi_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
		    intr->vmxi_irq);
		intr->vmxi_irq = NULL;
		intr->vmxi_rid = -1;
	}
}

static void
vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
{
	int i;

	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);

	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
		pci_release_msi(sc->vmx_dev);
}

#ifndef VMXNET3_LEGACY_TX
static int
vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
{
	device_t dev;

	dev = sc->vmx_dev;

	sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vmx_tq);
	if (sc->vmx_tq == NULL)
		return (ENOMEM);

	return (0);
}

static void
vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nthreads, error;

	dev = sc->vmx_dev;

	/*
	 * The taskqueue is typically not frequently used, so a dedicated
	 * thread for each queue is unnecessary.
	 */
	nthreads = MAX(1, sc->vmx_ntxqueues / 2);

	/*
	 * Most drivers just ignore the return value - it only fails
	 * with ENOMEM so an error is not likely. It is hard for us
	 * to recover from an error here.
	 */
	error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
	    "%s taskq", device_get_nameunit(dev));
	if (error)
		device_printf(dev, "failed to start taskqueue: %d\n", error);
}

static void
vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	int i;

	if (sc->vmx_tq != NULL) {
		for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
			txq = &sc->vmx_txq[i];
			taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
		}
	}
}

static void
vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
{

	if (sc->vmx_tq != NULL) {
		taskqueue_free(sc->vmx_tq);
		sc->vmx_tq = NULL;
	}
}
#endif /* !VMXNET3_LEGACY_TX */

static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	int i;

	rxq = &sc->vmx_rxq[q];

	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);

	rxq->vxrxq_sc = sc;
	rxq->vxrxq_id = q;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_rid = i;
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (rxr->vxrxr_rxbuf == NULL)
			return (ENOMEM);

		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
	}

	return (0);
}

static int
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;

	txq = &sc->vmx_txq[q];
	txr = &txq->vxtxq_cmd_ring;

	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);

	txq->vxtxq_sc = sc;
	txq->vxtxq_id = q;

	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
	txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->vxtxr_txbuf == NULL)
		return (ENOMEM);

	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;

#ifndef VMXNET3_LEGACY_TX
	TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);

	txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vxtxq_mtx);
	if (txq->vxtxq_br == NULL)
		return (ENOMEM);
#endif

	return (0);
}

static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i, error;

	/*
	 * Only attempt to create multiple queues if MSIX is available. MSIX is
	 * disabled by default because it's apparently broken for devices passed
	 * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable
	 * must be set to zero for MSIX. This check prevents us from allocating
	 * queue structures that we will not use.
	 */
	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
	    sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
	    sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
		error = vmxnet3_init_rxq(sc, i);
		if (error)
			return (error);
	}

	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
		error = vmxnet3_init_txq(sc, i);
		if (error)
			return (error);
	}

	return (0);
}

static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	int i;

	rxq->vxrxq_sc = NULL;
	rxq->vxrxq_id = -1;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		if (rxr->vxrxr_rxbuf != NULL) {
			free(rxr->vxrxr_rxbuf, M_DEVBUF);
			rxr->vxrxr_rxbuf = NULL;
		}
	}

	if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
		mtx_destroy(&rxq->vxrxq_mtx);
}

static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;

	txr = &txq->vxtxq_cmd_ring;

	txq->vxtxq_sc = NULL;
	txq->vxtxq_id = -1;

#ifndef VMXNET3_LEGACY_TX
	if (txq->vxtxq_br != NULL) {
		buf_ring_free(txq->vxtxq_br, M_DEVBUF);
		txq->vxtxq_br = NULL;
	}
#endif

	if (txr->vxtxr_txbuf != NULL) {
		free(txr->vxtxr_txbuf, M_DEVBUF);
		txr->vxtxr_txbuf = NULL;
	}

	if (mtx_initialized(&txq->vxtxq_mtx) != 0)
		mtx_destroy(&txq->vxtxq_mtx);
}

static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i;

	if (sc->vmx_rxq != NULL) {
		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
		free(sc->vmx_rxq, M_DEVBUF);
		sc->vmx_rxq = NULL;
	}

	if (sc->vmx_txq != NULL) {
		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
		free(sc->vmx_txq, M_DEVBUF);
		sc->vmx_txq = NULL;
	}
}

static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint8_t *kva;
	size_t size;
	int i, error;

	dev = sc->vmx_dev;

	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
	if (error) {
		device_printf(dev, "cannot alloc shared memory\n");
		return (error);
	}
	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;

	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
	if (error) {
		device_printf(dev, "cannot alloc queue shared memory\n");
		return (error);
	}
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
	kva = sc->vmx_qs;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		size = sizeof(struct vmxnet3_rss_shared);
		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
		if (error) {
			device_printf(dev, "cannot alloc rss shared memory\n");
			return (error);
		}
		sc->vmx_rss =
		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
	}

	return (0);
}

static void
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rss != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
		sc->vmx_rss = NULL;
	}

	if (sc->vmx_qs != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
		sc->vmx_qs = NULL;
	}

	if (sc->vmx_ds != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
		sc->vmx_ds = NULL;
	}
}

static int
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	size_t descsz, compsz;
	int i, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);

		error = bus_dma_tag_create(bus_get_dma_tag(dev),
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    VMXNET3_TX_MAXSIZE,		/* maxsize */
		    VMXNET3_TX_MAXSEGS,		/* nsegments */
		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &txr->vxtxr_txtag);
		if (error) {
			device_printf(dev,
			    "unable to create Tx buffer tag for queue %d\n", q);
			return (error);
		}

		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx descriptors for "
			    "queue %d error %d\n", q, error);
			return (error);
		}
		txr->vxtxr_txd =
		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		txc->vxcr_u.txcd =
		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
			if (error) {
				device_printf(dev, "unable to create Tx buf "
				    "dmamap for queue %d idx %d\n", q, i);
				return (error);
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txbuf *txb;
	int i, q;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			txb = &txr->vxtxr_txbuf[i];
			if (txb->vtxb_dmamap != NULL) {
				bus_dmamap_destroy(txr->vxtxr_txtag,
				    txb->vtxb_dmamap);
				txb->vtxb_dmamap = NULL;
			}
		}

		if (txc->vxcr_u.txcd != NULL) {
			vmxnet3_dma_free(sc, &txc->vxcr_dma);
			txc->vxcr_u.txcd = NULL;
		}

		if (txr->vxtxr_txd != NULL) {
			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
			txr->vxtxr_txd = NULL;
		}

		if (txr->vxtxr_txtag != NULL) {
			bus_dma_tag_destroy(txr->vxtxr_txtag);
			txr->vxtxr_txtag = NULL;
		}
	}
}

static int
vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	size_t descsz, compsz;
	int i, j, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;
		compsz = 0;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			descsz = rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxdesc);
			compsz += rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxcompdesc);

			error = bus_dma_tag_create(bus_get_dma_tag(dev),
			    1, 0,		/* alignment, boundary */
			    BUS_SPACE_MAXADDR,	/* lowaddr */
			    BUS_SPACE_MAXADDR,	/* highaddr */
			    NULL, NULL,		/* filter, filterarg */
			    MJUMPAGESIZE,	/* maxsize */
			    1,			/* nsegments */
			    MJUMPAGESIZE,	/* maxsegsize */
			    0,			/* flags */
			    NULL, NULL,		/* lockfunc, lockarg */
			    &rxr->vxrxr_rxtag);
			if (error) {
				device_printf(dev,
				    "unable to create Rx buffer tag for "
				    "queue %d\n", q);
				return (error);
			}

			error = vmxnet3_dma_malloc(sc, descsz, 512,
			    &rxr->vxrxr_dma);
			if (error) {
				device_printf(dev, "cannot allocate Rx "
				    "descriptors for queue %d/%d error %d\n",
				    q, i, error);
				return (error);
			}
			rxr->vxrxr_rxd =
			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
		}

		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Rx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		rxc->vxcr_u.rxcd =
		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
			    &rxr->vxrxr_spare_dmap);
			if (error) {
				device_printf(dev, "unable to create spare "
				    "dmamap for queue %d/%d error %d\n",
				    q, i, error);
				return (error);
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
				if (error) {
					device_printf(dev, "unable to create "
					    "dmamap for queue %d/%d slot %d "
					    "error %d\n", q, i, j, error);
					return (error);
				}
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxbuf *rxb;
	int i, j, q;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_spare_dmap != NULL) {
				bus_dmamap_destroy(rxr->vxrxr_rxtag,
				    rxr->vxrxr_spare_dmap);
				rxr->vxrxr_spare_dmap = NULL;
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				rxb = &rxr->vxrxr_rxbuf[j];
				if (rxb->vrxb_dmamap != NULL) {
					bus_dmamap_destroy(rxr->vxrxr_rxtag,
					    rxb->vrxb_dmamap);
					rxb->vrxb_dmamap = NULL;
				}
			}
		}

		if (rxc->vxcr_u.rxcd != NULL) {
			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
			rxc->vxcr_u.rxcd = NULL;
		}

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_rxd != NULL) {
				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
				rxr->vxrxr_rxd = NULL;
			}

			if (rxr->vxrxr_rxtag != NULL) {
				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
				rxr->vxrxr_rxtag = NULL;
			}
		}
	}
}

static int
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_txq_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_rxq_data(sc);
	if (error)
		return (error);

	return (0);
}

static void
vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rxq != NULL)
		vmxnet3_free_rxq_data(sc);

	if (sc->vmx_txq != NULL)
		vmxnet3_free_txq_data(sc);
}

static int
vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
	    32, &sc->vmx_mcast_dma);
	if (error)
		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
	else
		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;

	return (error);
}

static void
vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
{

	if (sc->vmx_mcast != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
		sc->vmx_mcast = NULL;
	}
}

static void
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	ds = sc->vmx_ds;

	/*
	 * Initialize fields of the shared data that remain the same across
	 * reinits. Note the shared data is zeroed when allocated.
	 */

	ds->magic = VMXNET3_REV1_MAGIC;

	/* DriverInfo */
	ds->version = VMXNET3_DRIVER_VERSION;
	ds->guest = VMXNET3_GOS_FREEBSD |
#ifdef __LP64__
	    VMXNET3_GOS_64BIT;
#else
	    VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;

	/* Misc. conf */
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
	ds->nrxsg_max = sc->vmx_max_rxsegs;

	/* RSS conf */
	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->rss.version = 1;
		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
		ds->rss.len = sc->vmx_rss_dma.dma_size;
	}

	/* Interrupt control. */
	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
	ds->nintr = sc->vmx_nintrs;
	ds->evintr = sc->vmx_event_intr_idx;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;

	for (i = 0; i < sc->vmx_nintrs; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;

	/* Receive filter. */
	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;

	/* Tx queues */
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;

		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
		txs->driver_data = vtophys(txq);
		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
	}

	/* Rx queues */
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;

		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
		rxs->driver_data = vtophys(rxq);
		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
	}
}

static void
vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	/* Use the current MAC address. */
	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
	vmxnet3_set_lladdr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
}

static void
vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
{
	/*
	 * Use the same key as the Linux driver until FreeBSD can do
	 * RSS (presumably Toeplitz) in software.
	 */
	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
		0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
		0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
		0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
		0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
		0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
	};

	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_rss_shared *rss;
	int i;

	ds = sc->vmx_ds;
	rss = sc->vmx_rss;

	rss->hash_type =
	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);

	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
		rss->ind_table[i] = i % sc->vmx_nrxqueues;
}

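/*
 * Indirection-table example: the low bits of a packet's Toeplitz hash
 * index this table, and the selected entry names the Rx queue.  With,
 * say, sc->vmx_nrxqueues == 4, the loop above fills the table with the
 * repeating pattern 0, 1, 2, 3, 0, 1, 2, 3, ... so flows are spread
 * round-robin across the queues.
 */
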
static void
vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_driver_shared *ds;

	ifp = sc->vmx_ifp;
	ds = sc->vmx_ds;

	ds->mtu = ifp->if_mtu;
	ds->ntxqueue = sc->vmx_ntxqueues;
	ds->nrxqueue = sc->vmx_nrxqueues;

	ds->upt_features = 0;
	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
		ds->upt_features |= UPT1_F_CSUM;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ds->upt_features |= UPT1_F_VLAN;
	if (ifp->if_capenable & IFCAP_LRO)
		ds->upt_features |= UPT1_F_LRO;

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->upt_features |= UPT1_F_RSS;
		vmxnet3_reinit_rss_shared_data(sc);
	}

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
}

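/*
 * The shared-memory physical address is handed to the device as two
 * 32-bit halves: e.g. (illustrative value) a dma_paddr of 0x123456780
 * is written as DSL = 0x23456780 and DSH = 0x1.
 */
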
static int
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_shared_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_queue_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_mcast_table(sc);
	if (error)
		return (error);

	vmxnet3_init_shared_data(sc);

	return (0);
}

static void
vmxnet3_free_data(struct vmxnet3_softc *sc)
{

	vmxnet3_free_mcast_table(sc);
	vmxnet3_free_queue_data(sc);
	vmxnet3_free_shared_data(sc);
}

static int
vmxnet3_setup_interface(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vmx_dev;

	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#if __FreeBSD_version < 1000025
	ifp->if_baudrate = 1000000000;
#elif __FreeBSD_version < 1100011
	if_initbaudrate(ifp, IF_Gbps(10));
#else
	ifp->if_baudrate = IF_Gbps(10);
#endif
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vmxnet3_init;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
	ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;

#ifdef VMXNET3_LEGACY_TX
	ifp->if_start = vmxnet3_start;
	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
	IFQ_SET_READY(&ifp->if_snd);
#else
	ifp->if_transmit = vmxnet3_txq_mq_start;
	ifp->if_qflush = vmxnet3_qflush;
#endif

	vmxnet3_get_lladdr(sc);
	ether_ifattach(ifp, sc->vmx_lladdr);

	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* These capabilities are not enabled by default. */
	ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;

	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);

	return (0);
}

static void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	uint32_t event;
	int reset;

	dev = sc->vmx_dev;
	ifp = sc->vmx_ifp;
	reset = 0;

	VMXNET3_CORE_LOCK(sc);

	/* Clear events. */
	event = sc->vmx_ds->event;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);

	if (event & VMXNET3_EVENT_LINK) {
		vmxnet3_link_status(sc);
		if (sc->vmx_link_active != 0)
			vmxnet3_tx_start_all(sc);
	}

	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		reset = 1;
		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
		ts = sc->vmx_txq[0].vxtxq_ts;
		if (ts->stopped != 0)
			device_printf(dev, "Tx queue error %#x\n", ts->error);
		rs = sc->vmx_rxq[0].vxrxq_rs;
		if (rs->stopped != 0)
			device_printf(dev, "Rx queue error %#x\n", rs->error);
		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
	}

	if (event & VMXNET3_EVENT_DIC)
		device_printf(dev, "device implementation change event\n");
	if (event & VMXNET3_EVENT_DEBUG)
		device_printf(dev, "debug event\n");

	if (reset != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vmxnet3_init_locked(sc);
	}

	VMXNET3_CORE_UNLOCK(sc);
}

static void
vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txcompdesc *txcd;
	struct vmxnet3_txbuf *txb;
	struct mbuf *m;
	u_int sop;

	sc = txq->vxtxq_sc;
	txr = &txq->vxtxq_cmd_ring;
	txc = &txq->vxtxq_comp_ring;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	for (;;) {
		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		if (txcd->gen != txc->vxcr_gen)
			break;
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++txc->vxcr_next == txc->vxcr_ndesc) {
			txc->vxcr_next = 0;
			txc->vxcr_gen ^= 1;
		}

		sop = txr->vxtxr_next;
		txb = &txr->vxtxr_txbuf[sop];

		if ((m = txb->vtxb_m) != NULL) {
			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);

			txq->vxtxq_stats.vmtxs_opackets++;
			txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				txq->vxtxq_stats.vmtxs_omcasts++;

			m_freem(m);
			txb->vtxb_m = NULL;
		}

		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
	}

	if (txr->vxtxr_head == txr->vxtxr_next)
		txq->vxtxq_watchdog = 0;
}

static int
vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
{
	struct mbuf *m;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxbuf *rxb;
	bus_dma_tag_t tag;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[1];
	int idx, clsize, btype, flags, nsegs, error;

	tag = rxr->vxrxr_rxtag;
	dmap = rxr->vxrxr_spare_dmap;
	idx = rxr->vxrxr_fill;
	rxd = &rxr->vxrxr_rxd[idx];
	rxb = &rxr->vxrxr_rxbuf[idx];

#ifdef VMXNET3_FAILPOINTS
	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
	if (rxr->vxrxr_rid != 0)
		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
#endif

	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
		flags = M_PKTHDR;
		clsize = MCLBYTES;
		btype = VMXNET3_BTYPE_HEAD;
	} else {
#if __FreeBSD_version < 902001
		/*
		 * These mbufs will never be used for the start of a frame.
		 * Roughly prior to branching releng/9.2, the load_mbuf_sg()
		 * required the mbuf to always be a packet header. Avoid
		 * unnecessary mbuf initialization in newer versions where
		 * that is not the case.
		 */
		flags = M_PKTHDR;
#else
		flags = 0;
#endif
		clsize = MJUMPAGESIZE;
		btype = VMXNET3_BTYPE_BODY;
	}

	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
	if (m == NULL) {
		sc->vmx_stats.vmst_mgetcl_failed++;
		return (ENOBUFS);
	}

	if (btype == VMXNET3_BTYPE_HEAD) {
		m->m_len = m->m_pkthdr.len = clsize;
		m_adj(m, ETHER_ALIGN);
	} else
		m->m_len = clsize;

	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		sc->vmx_stats.vmst_mbuf_load_failed++;
		return (error);
	}
	KASSERT(nsegs == 1,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
#if __FreeBSD_version < 902001
	if (btype == VMXNET3_BTYPE_BODY)
		m->m_flags &= ~M_PKTHDR;
#endif

	if (rxb->vrxb_m != NULL) {
		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
	}

	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
	rxb->vrxb_dmamap = dmap;
	rxb->vrxb_m = m;

	rxd->addr = segs[0].ds_addr;
	rxd->len = segs[0].ds_len;
	rxd->btype = btype;
	rxd->gen = rxr->vxrxr_gen;

	vmxnet3_rxr_increment_fill(rxr);
	return (0);
}

static void
vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxring *rxr, int idx)
{
	struct vmxnet3_rxdesc *rxd;

	rxd = &rxr->vxrxr_rxd[idx];
	rxd->gen = rxr->vxrxr_gen;
	vmxnet3_rxr_increment_fill(rxr);
}

static void
vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxcompdesc *rxcd;
	int idx, eof;

	sc = rxq->vxrxq_sc;
	rxc = &rxq->vxrxq_comp_ring;

	do {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen)
			break;		/* Not expected. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		eof = rxcd->eop;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
	} while (!eof);
}

static void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{

	if (rxcd->ipv4) {
		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (rxcd->ipcsum_ok)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
	}

	if (!rxcd->fragment) {
		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
		}
	}
}

static void
vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;

	if (rxcd->error) {
		rxq->vxrxq_stats.vmrxs_ierrors++;
		m_freem(m);
		return;
	}

#if __FreeBSD_version >= 1100000
	switch (rxcd->rss_type) {
	case VMXNET3_RCD_RSS_TYPE_IPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_IPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
		break;
	default: /* VMXNET3_RCD_RSS_TYPE_NONE */
		m->m_pkthdr.flowid = rxq->vxrxq_id;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
		break;
	}
#else
	m->m_pkthdr.flowid = rxq->vxrxq_id;
	m->m_flags |= M_FLOWID;
#endif

	if (!rxcd->no_csum)
		vmxnet3_rx_csum(rxcd, m);
	if (rxcd->vlan) {
		m->m_flags |= M_VLANTAG;
		m->m_pkthdr.ether_vtag = rxcd->vtag;
	}

	rxq->vxrxq_stats.vmrxs_ipackets++;
	rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;

	VMXNET3_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m);
	VMXNET3_RXQ_LOCK(rxq);
}

static void
vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct mbuf *m, *m_head, *m_tail;
	int idx, length;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;
	rxc = &rxq->vxrxq_comp_ring;

	VMXNET3_RXQ_LOCK_ASSERT(rxq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	m_head = rxq->vxrxq_mhead;
	rxq->vxrxq_mhead = NULL;
	m_tail = rxq->vxrxq_mtail;
	rxq->vxrxq_mtail = NULL;
	MPASS(m_head == NULL || m_tail != NULL);

	for (;;) {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen) {
			rxq->vxrxq_mhead = m_head;
			rxq->vxrxq_mtail = m_tail;
			break;
		}
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		length = rxcd->len;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		rxd = &rxr->vxrxr_rxd[idx];

		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
		    __func__, rxcd->qid, idx));

		/*
		 * The host may skip descriptors. We detect this when this
		 * descriptor does not match the previous fill index. Catch
		 * up with the host now.
		 */
		if (__predict_false(rxr->vxrxr_fill != idx)) {
			while (rxr->vxrxr_fill != idx) {
				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
				    rxr->vxrxr_gen;
				vmxnet3_rxr_increment_fill(rxr);
			}
		}

		if (rxcd->sop) {
			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
			    ("%s: start of frame w/o head buffer", __func__));
			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
			    ("%s: start of frame not in ring 0", __func__));
			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
			    ("%s: start of frame at unexpected index %d (%d)",
			     __func__, idx, sc->vmx_rx_max_chain));
			KASSERT(m_head == NULL,
			    ("%s: duplicate start of frame?", __func__));

			if (length == 0) {
				/* Just ignore this descriptor. */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				goto nextp;
			}

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				goto nextp;
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = length;
			m->m_pkthdr.csum_flags = 0;
			m_head = m_tail = m;

		} else {
			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
			    ("%s: non start of frame w/o body buffer",
			     __func__));
			KASSERT(m_head != NULL,
			    ("%s: frame not started?", __func__));

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				m_freem(m_head);
				m_head = m_tail = NULL;
				goto nextp;
			}

			m->m_len = length;
			m_head->m_pkthdr.len += length;
			m_tail->m_next = m;
			m_tail = m;
		}

		if (rxcd->eop) {
			vmxnet3_rxq_input(rxq, rxcd, m_head);
			m_head = m_tail = NULL;

			/* Must recheck after dropping the Rx lock. */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				break;
		}

nextp:
		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
			int qid = rxcd->qid;
			bus_size_t r;

			idx = (idx + 1) % rxr->vxrxr_ndesc;
			if (qid >= sc->vmx_nrxqueues) {
				qid -= sc->vmx_nrxqueues;
				r = VMXNET3_BAR0_RXH2(qid);
			} else
				r = VMXNET3_BAR0_RXH1(qid);
			vmxnet3_write_bar0(sc, r, idx);
		}
	}
}

static void
vmxnet3_legacy_intr(void *xsc)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;

	sc = xsc;
	rxq = &sc->vmx_rxq[0];
	txq = &sc->vmx_txq[0];

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
			return;
	}
	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(sc);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	vmxnet3_txq_start(txq);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_all_intrs(sc);
}

static void
vmxnet3_txq_intr(void *xtxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	txq = xtxq;
	sc = txq->vxtxq_sc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	vmxnet3_txq_start(txq);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
}

static void
vmxnet3_rxq_intr(void *xrxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;

	rxq = xrxq;
	sc = rxq->vxrxq_sc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
}

static void
vmxnet3_event_intr(void *xsc)
{
	struct vmxnet3_softc *sc;

	sc = xsc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
}

static void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct vmxnet3_txbuf *txb;
	int i;

	txr = &txq->vxtxq_cmd_ring;

	for (i = 0; i < txr->vxtxr_ndesc; i++) {
		txb = &txr->vxtxr_txbuf[i];

		if (txb->vtxb_m == NULL)
			continue;

		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
		m_freem(txb->vtxb_m);
		txb->vtxb_m = NULL;
	}
}

static void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_rxbuf *rxb;
	int i, j;

	if (rxq->vxrxq_mhead != NULL) {
		m_freem(rxq->vxrxq_mhead);
		rxq->vxrxq_mhead = NULL;
		rxq->vxrxq_mtail = NULL;
	}

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
			rxb = &rxr->vxrxr_rxbuf[j];

			if (rxb->vrxb_m == NULL)
				continue;

			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
			m_freem(rxb->vrxb_m);
			rxb->vrxb_m = NULL;
		}
	}
}

static void
vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;
	int i;

	/*
	 * Briefly acquire and release each queue lock so any Rx/Tx
	 * processing still running on another CPU drains out.
	 */
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		VMXNET3_RXQ_LOCK(rxq);
		VMXNET3_RXQ_UNLOCK(rxq);
	}

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		VMXNET3_TXQ_LOCK(txq);
		VMXNET3_TXQ_UNLOCK(txq);
	}
}

static void
vmxnet3_stop(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	int q;

	ifp = sc->vmx_ifp;
	VMXNET3_CORE_LOCK_ASSERT(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->vmx_link_active = 0;
	callout_stop(&sc->vmx_tick);

	/* Disable interrupts. */
	vmxnet3_disable_all_intrs(sc);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);

	vmxnet3_stop_rendezvous(sc);

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
	for (q = 0; q < sc->vmx_nrxqueues; q++)
		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);

	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
}

static void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;

	txr = &txq->vxtxq_cmd_ring;
	txr->vxtxr_head = 0;
	txr->vxtxr_next = 0;
	txr->vxtxr_gen = VMXNET3_INIT_GEN;
	bzero(txr->vxtxr_txd,
	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));

	txc = &txq->vxtxq_comp_ring;
	txc->vxcr_next = 0;
	txc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(txc->vxcr_u.txcd,
	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
}

static int
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int i, populate, idx, frame_size, error;

	ifp = sc->vmx_ifp;
	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
	    ifp->if_mtu;

	/*
	 * If the MTU causes us to exceed what a regular sized cluster can
	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
	 *
	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
	 * our life easier. We do not support changing the ring size after
	 * the attach.
	 */
	if (frame_size <= MCLBYTES)
		sc->vmx_rx_max_chain = 1;
	else
		sc->vmx_rx_max_chain = 2;

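	/*
	 * Worked example: with the default 1500-byte MTU, frame_size is
	 * 2 (ETHER_ALIGN) + 18 (ether_vlan_header) + 1500 = 1520, which
	 * fits in one MCLBYTES (2048) cluster, so chains are one buffer
	 * long.  A 9000-byte MTU gives 9020 > MCLBYTES, so a head cluster
	 * plus one MJUMPAGESIZE body cluster is used (a chain of two).
	 */
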
	/*
	 * Only populate ring 1 if the configuration will take advantage
	 * of it. That is either when LRO is enabled or the frame size
	 * exceeds what ring 0 can contain.
	 */
	if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
	    frame_size <= MCLBYTES + MJUMPAGESIZE)
		populate = 1;
	else
		populate = VMXNET3_RXRINGS_PERQ;

	for (i = 0; i < populate; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
		bzero(rxr->vxrxr_rxd,
		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));

		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
			error = vmxnet3_newbuf(sc, rxr);
			if (error)
				return (error);
		}
	}

	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_fill = 0;
		rxr->vxrxr_gen = 0;
		bzero(rxr->vxrxr_rxd,
		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
	}

	rxc = &rxq->vxrxq_comp_ring;
	rxc->vxcr_next = 0;
	rxc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(rxc->vxcr_u.rxcd,
	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));

	return (0);
}

static int
vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
{
	device_t dev;
	int q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txinit(sc, &sc->vmx_txq[q]);

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
		if (error) {
			device_printf(dev, "cannot populate Rx queue %d\n", q);
			return (error);
		}
	}

	return (0);
}

static int
vmxnet3_enable_device(struct vmxnet3_softc *sc)
{
	int q;

	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
		device_printf(sc->vmx_dev, "device enable command failed!\n");
		return (1);
	}

	/* Reset the Rx queue heads. */
	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
	}

	return (0);
}

static void
vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	vmxnet3_set_rxfilter(sc);

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
		    sizeof(sc->vmx_ds->vlan_filter));
	else
		bzero(sc->vmx_ds->vlan_filter,
		    sizeof(sc->vmx_ds->vlan_filter));
	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
}


static int
vmxnet3_reinit(struct vmxnet3_softc *sc)
{

	vmxnet3_reinit_interface(sc);
	vmxnet3_reinit_shared_data(sc);

	if (vmxnet3_reinit_queues(sc) != 0)
		return (ENXIO);

	if (vmxnet3_enable_device(sc) != 0)
		return (ENXIO);

	vmxnet3_reinit_rxfilters(sc);

	return (0);
}

static void
vmxnet3_init_locked(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	vmxnet3_stop(sc);

	if (vmxnet3_reinit(sc) != 0) {
		vmxnet3_stop(sc);
		return;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	vmxnet3_link_status(sc);

	vmxnet3_enable_all_intrs(sc);
	callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}

static void
vmxnet3_init(void *xsc)
{
	struct vmxnet3_softc *sc;

	sc = xsc;

	VMXNET3_CORE_LOCK(sc);
	vmxnet3_init_locked(sc);
	VMXNET3_CORE_UNLOCK(sc);
}

/*
 * BMV: Much of this can go away once we finally have offsets in
 * the mbuf packet header. Bug andre@.
 */
static int
vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
    int *etype, int *proto, int *start)
{
	struct ether_vlan_header *evh;
	int offset;
#if defined(INET)
	struct ip *ip = NULL;
	struct ip iphdr;
#endif
#if defined(INET6)
	struct ip6_hdr *ip6 = NULL;
	struct ip6_hdr ip6hdr;
#endif

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = mtodo(m, offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len <
		    offset + sizeof(struct ip6_hdr))) {
			m_copydata(m, offset, sizeof(struct ip6_hdr),
			    (caddr_t) &ip6hdr);
			ip6 = &ip6hdr;
		} else
			ip6 = mtodo(m, offset);
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		return (EINVAL);
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		struct tcphdr *tcp, tcphdr;
		uint16_t sum;

		if (__predict_false(*proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf. */
			return (EINVAL);
		}

		txq->vxtxq_stats.vmtxs_tso++;

		switch (*etype) {
#if defined(INET)
		case ETHERTYPE_IP:
			sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
			    htons(IPPROTO_TCP));
			break;
#endif
#if defined(INET6)
		case ETHERTYPE_IPV6:
			sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
			break;
#endif
		default:
			sum = 0;
			break;
		}

		if (m->m_len < *start + sizeof(struct tcphdr)) {
			m_copyback(m, *start + offsetof(struct tcphdr, th_sum),
			    sizeof(uint16_t), (caddr_t) &sum);
			m_copydata(m, *start, sizeof(struct tcphdr),
			    (caddr_t) &tcphdr);
			tcp = &tcphdr;
		} else {
			tcp = mtodo(m, *start);
			tcp->th_sum = sum;
		}

		/*
		 * For TSO, the size of the protocol header is also
		 * included in the descriptor header size.
		 */
		*start += (tcp->th_off << 2);
	} else
		txq->vxtxq_stats.vmtxs_csum++;

	return (0);
}
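
/*
 * Note that the TSO path above follows the usual segmentation offload
 * convention: the TCP checksum field is preseeded with the pseudo-header
 * checksum (without the length), and *start grows to cover the full
 * protocol header that the device replicates in every segment.
 */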

static int
vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
    bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
{
	struct vmxnet3_txring *txr;
	struct mbuf *m;
	bus_dma_tag_t tag;
	int error;

	txr = &txq->vxtxq_cmd_ring;
	m = *m0;
	tag = txr->vxtxr_txtag;

	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
	if (error == 0 || error != EFBIG)
		return (error);

	/* The chain had too many segments; defragment and retry once. */
	m = m_defrag(m, M_NOWAIT);
	if (m != NULL) {
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
	} else
		error = ENOBUFS;

	if (error) {
		m_freem(*m0);
		*m0 = NULL;
		txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
	} else
		txq->vxtxq_sc->vmx_stats.vmst_defragged++;

	return (error);
}
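
/*
 * bus_dmamap_load_mbuf_sg() fails with EFBIG when the chain has more
 * segments than the tag allows (VMXNET3_TX_MAXSEGS); the single
 * m_defrag() pass above is the only retry before the frame is dropped
 * and counted against vmst_defrag_failed.
 */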

static void
vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
{
	struct vmxnet3_txring *txr;

	txr = &txq->vxtxq_cmd_ring;
	bus_dmamap_unload(txr->vxtxr_txtag, dmap);
}

static int
vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct vmxnet3_txdesc *txd, *sop;
	struct mbuf *m;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
	int i, gen, nsegs, etype, proto, start, error;

	sc = txq->vxtxq_sc;
	start = 0;
	txd = NULL;
	txr = &txq->vxtxq_cmd_ring;
	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;

	error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
	if (error)
		return (error);

	m = *m0;
	M_ASSERTPKTHDR(m);
	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));

	if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
		txq->vxtxq_stats.vmtxs_full++;
		vmxnet3_txq_unload_mbuf(txq, dmap);
		return (ENOSPC);
	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
		error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
		if (error) {
			txq->vxtxq_stats.vmtxs_offload_failed++;
			vmxnet3_txq_unload_mbuf(txq, dmap);
			m_freem(m);
			*m0 = NULL;
			return (error);
		}
	}

	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
	sop = &txr->vxtxr_txd[txr->vxtxr_head];
	gen = txr->vxtxr_gen ^ 1;	/* Owned by cpu (yet) */

	for (i = 0; i < nsegs; i++) {
		txd = &txr->vxtxr_txd[txr->vxtxr_head];

		txd->addr = segs[i].ds_addr;
		txd->len = segs[i].ds_len;
		txd->gen = gen;
		txd->dtype = 0;
		txd->offload_mode = VMXNET3_OM_NONE;
		txd->offload_pos = 0;
		txd->hlen = 0;
		txd->eop = 0;
		txd->compreq = 0;
		txd->vtag_mode = 0;
		txd->vtag = 0;

		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
			txr->vxtxr_head = 0;
			txr->vxtxr_gen ^= 1;
		}
		gen = txr->vxtxr_gen;
	}
	txd->eop = 1;
	txd->compreq = 1;

	if (m->m_flags & M_VLANTAG) {
		sop->vtag_mode = 1;
		sop->vtag = m->m_pkthdr.ether_vtag;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		sop->offload_mode = VMXNET3_OM_TSO;
		sop->hlen = start;
		sop->offload_pos = m->m_pkthdr.tso_segsz;
	} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
		sop->offload_mode = VMXNET3_OM_CSUM;
		sop->hlen = start;
		sop->offload_pos = start + m->m_pkthdr.csum_data;
	}

	/* Finally, change the ownership. */
	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
	sop->gen ^= 1;

	txq->vxtxq_ts->npending += nsegs;
	if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
		txq->vxtxq_ts->npending = 0;
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
		    txr->vxtxr_head);
	}

	return (0);
}
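
/*
 * The doorbell write above is batched: descriptors are handed over by
 * flipping their generation bits, but BAR0_TXH is only touched once
 * npending crosses the intr_threshold kept in the queue's shared area,
 * which saves MMIO round trips on bursts of small packets.
 */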

#ifdef VMXNET3_LEGACY_TX

static void
vmxnet3_start_locked(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct mbuf *m_head;
	int tx, avail;

	sc = ifp->if_softc;
	txq = &sc->vmx_txq[0];
	txr = &txq->vxtxq_cmd_ring;
	tx = 0;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vmx_link_active == 0)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
			break;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Assume worse case if this mbuf is the head of a chain. */
		if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		tx++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (tx > 0)
		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
}

static void
vmxnet3_start(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	sc = ifp->if_softc;
	txq = &sc->vmx_txq[0];

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_start_locked(ifp);
	VMXNET3_TXQ_UNLOCK(txq);
}

#else /* !VMXNET3_LEGACY_TX */

static int
vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct buf_ring *br;
	struct ifnet *ifp;
	int tx, avail, error;

	sc = txq->vxtxq_sc;
	br = txq->vxtxq_br;
	ifp = sc->vmx_ifp;
	txr = &txq->vxtxq_cmd_ring;
	tx = 0;
	error = 0;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->vmx_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
		m = drbr_peek(ifp, br);
		if (m == NULL)
			break;

		/* Assume worse case if this mbuf is the head of a chain. */
		if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vmxnet3_txq_encap(txq, &m) != 0) {
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);

		tx++;
		ETHER_BPF_MTAP(ifp, m);
	}

	if (tx > 0)
		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;

	return (0);
}

static int
vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	int i, ntxq, error;

	sc = ifp->if_softc;
	ntxq = sc->vmx_ntxqueues;

	if (m->m_flags & M_FLOWID)
		i = m->m_pkthdr.flowid % ntxq;
	else
		i = curcpu % ntxq;

	txq = &sc->vmx_txq[i];

	if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
		error = vmxnet3_txq_mq_start_locked(txq, m);
		VMXNET3_TXQ_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->vxtxq_br, m);
		taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
	}

	return (error);
}
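
/*
 * Example: with 4 Tx queues, an mbuf carrying flowid 13 maps to queue
 * 13 % 4 = 1, so a given flow always lands on the same ring; mbufs
 * without a flowid fall back to a CPU-based pick.
 */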

static void
vmxnet3_txq_tq_deferred(void *xtxq, int pending)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	txq = xtxq;
	sc = txq->vxtxq_sc;

	VMXNET3_TXQ_LOCK(txq);
	if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
		vmxnet3_txq_mq_start_locked(txq, NULL);
	VMXNET3_TXQ_UNLOCK(txq);
}

#endif /* VMXNET3_LEGACY_TX */

static void
vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = txq->vxtxq_sc;
	ifp = sc->vmx_ifp;

#ifdef VMXNET3_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vmxnet3_start_locked(ifp);
#else
	if (!drbr_empty(ifp, txq->vxtxq_br))
		vmxnet3_txq_mq_start_locked(txq, NULL);
#endif
}

static void
vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	int i;

	VMXNET3_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];

		VMXNET3_TXQ_LOCK(txq);
		vmxnet3_txq_start(txq);
		VMXNET3_TXQ_UNLOCK(txq);
	}
}

static void
vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vmx_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	VMXNET3_CORE_LOCK(sc);

	/* Update our private VLAN bitvector. */
	if (add)
		sc->vmx_vlan_filter[idx] |= (1 << bit);
	else
		sc->vmx_vlan_filter[idx] &= ~(1 << bit);

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		if (add)
			sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
		else
			sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
		vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
	}

	VMXNET3_CORE_UNLOCK(sc);
}
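
/*
 * The VLAN filter is a 4096-bit vector stored as 32-bit words: tag 100,
 * for example, lands in word 100 >> 5 = 3 at bit 100 & 0x1f = 4.
 */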

static void
vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc == arg)
		vmxnet3_update_vlan_filter(arg, 1, tag);
}

static void
vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc == arg)
		vmxnet3_update_vlan_filter(arg, 0, tag);
}

static void
vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_driver_shared *ds;
	struct ifmultiaddr *ifma;
	u_int mode;

	ifp = sc->vmx_ifp;
	ds = sc->vmx_ds;

	mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
	if (ifp->if_flags & IFF_PROMISC)
		mode |= VMXNET3_RXMODE_PROMISC;
	if (ifp->if_flags & IFF_ALLMULTI)
		mode |= VMXNET3_RXMODE_ALLMULTI;
	else {
		int cnt = 0, overflow = 0;

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			else if (cnt == VMXNET3_MULTICAST_MAX) {
				overflow = 1;
				break;
			}

			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
			cnt++;
		}
		if_maddr_runlock(ifp);

		if (overflow != 0) {
			cnt = 0;
			mode |= VMXNET3_RXMODE_ALLMULTI;
		} else if (cnt > 0)
			mode |= VMXNET3_RXMODE_MCAST;
		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
	}

	ds->rxmode = mode;

	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
}

static int
vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
		return (EINVAL);

	ifp->if_mtu = mtu;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vmxnet3_init_locked(sc);
	}

	return (0);
}

static int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vmxnet3_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifp->if_mtu != ifr->ifr_mtu) {
			VMXNET3_CORE_LOCK(sc);
			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
			VMXNET3_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VMXNET3_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ sc->vmx_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					vmxnet3_set_rxfilter(sc);
				}
			} else
				vmxnet3_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vmxnet3_stop(sc);
		}
		sc->vmx_if_flags = ifp->if_flags;
		VMXNET3_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VMXNET3_CORE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vmxnet3_set_rxfilter(sc);
		VMXNET3_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
		break;

	case SIOCSIFCAP:
		VMXNET3_CORE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;

		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
			/* Changing these features requires us to reinit. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_RXCSUM_IPV6)
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
			if (mask & IFCAP_VLAN_HWTAGGING)
				ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (mask & IFCAP_VLAN_HWFILTER)
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		} else
			reinit = 0;

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vmxnet3_init_locked(sc);
		}

		VMXNET3_CORE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}

#ifndef VMXNET3_LEGACY_TX
static void
vmxnet3_qflush(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct mbuf *m;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];

		VMXNET3_TXQ_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
			m_freem(m);
		VMXNET3_TXQ_UNLOCK(txq);
	}

	if_qflush(ifp);
}
#endif

static int
vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;

	sc = txq->vxtxq_sc;

	VMXNET3_TXQ_LOCK(txq);
	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
		VMXNET3_TXQ_UNLOCK(txq);
		return (0);
	}
	VMXNET3_TXQ_UNLOCK(txq);

	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
	    txq->vxtxq_id);
	return (1);
}
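
/*
 * vxtxq_watchdog counts ticks, not packets: the transmit paths re-arm
 * it to VMXNET3_WATCHDOG_TIMEOUT whenever they queue a frame, and it is
 * decremented once per second from vmxnet3_tick(); hitting zero here is
 * reported to the caller as a timeout.
 */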

static void
vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
{

	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
}

static void
vmxnet3_txq_accum_stats(struct vmxnet3_txqueue *txq,
    struct vmxnet3_txq_stats *accum)
{
	struct vmxnet3_txq_stats *st;

	st = &txq->vxtxq_stats;

	accum->vmtxs_opackets += st->vmtxs_opackets;
	accum->vmtxs_obytes += st->vmtxs_obytes;
	accum->vmtxs_omcasts += st->vmtxs_omcasts;
	accum->vmtxs_csum += st->vmtxs_csum;
	accum->vmtxs_tso += st->vmtxs_tso;
	accum->vmtxs_full += st->vmtxs_full;
	accum->vmtxs_offload_failed += st->vmtxs_offload_failed;
}

static void
vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxq_stats *accum)
{
	struct vmxnet3_rxq_stats *st;

	st = &rxq->vxrxq_stats;

	accum->vmrxs_ipackets += st->vmrxs_ipackets;
	accum->vmrxs_ibytes += st->vmrxs_ibytes;
	accum->vmrxs_iqdrops += st->vmrxs_iqdrops;
	accum->vmrxs_ierrors += st->vmrxs_ierrors;
}

static void
vmxnet3_accumulate_stats(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_statistics *st;
	struct vmxnet3_txq_stats txaccum;
	struct vmxnet3_rxq_stats rxaccum;
	int i;

	ifp = sc->vmx_ifp;
	st = &sc->vmx_stats;

	bzero(&txaccum, sizeof(struct vmxnet3_txq_stats));
	bzero(&rxaccum, sizeof(struct vmxnet3_rxq_stats));

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		vmxnet3_txq_accum_stats(&sc->vmx_txq[i], &txaccum);
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		vmxnet3_rxq_accum_stats(&sc->vmx_rxq[i], &rxaccum);

	/*
	 * With the exception of if_ierrors, these ifnet statistics are
	 * only updated in the driver, so just set them to our accumulated
	 * values. if_ierrors is updated in ether_input() for malformed
	 * frames that we should have already discarded.
	 */
	ifp->if_ipackets = rxaccum.vmrxs_ipackets;
	ifp->if_iqdrops = rxaccum.vmrxs_iqdrops;
	ifp->if_ierrors = rxaccum.vmrxs_ierrors;
	ifp->if_opackets = txaccum.vmtxs_opackets;
#ifndef VMXNET3_LEGACY_TX
	ifp->if_obytes = txaccum.vmtxs_obytes;
	ifp->if_omcasts = txaccum.vmtxs_omcasts;
#endif
}

static void
vmxnet3_tick(void *xsc)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	int i, timedout;

	sc = xsc;
	ifp = sc->vmx_ifp;
	timedout = 0;

	VMXNET3_CORE_LOCK_ASSERT(sc);

	vmxnet3_accumulate_stats(sc);
	vmxnet3_refresh_host_stats(sc);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);

	if (timedout != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vmxnet3_init_locked(sc);
	} else
		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}

static int
vmxnet3_link_is_up(struct vmxnet3_softc *sc)
{
	uint32_t status;

	/* Also update the link speed while here. */
	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
	sc->vmx_link_speed = status >> 16;
	return !!(status & 0x1);
}

static void
vmxnet3_link_status(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	int link;

	ifp = sc->vmx_ifp;
	link = vmxnet3_link_is_up(sc);

	if (link != 0 && sc->vmx_link_active == 0) {
		sc->vmx_link_active = 1;
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (link == 0 && sc->vmx_link_active != 0) {
		sc->vmx_link_active = 0;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

static void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vmxnet3_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
	ifmr->ifm_status = IFM_AVALID;

	VMXNET3_CORE_LOCK(sc);
	if (vmxnet3_link_is_up(sc) != 0)
		ifmr->ifm_status |= IFM_ACTIVE;
	else
		ifmr->ifm_status |= IFM_NONE;
	VMXNET3_CORE_UNLOCK(sc);
}

static int
vmxnet3_media_change(struct ifnet *ifp)
{

	/* Ignore. */
	return (0);
}

static void
vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
{
	uint32_t ml, mh;

	ml = sc->vmx_lladdr[0];
	ml |= sc->vmx_lladdr[1] << 8;
	ml |= sc->vmx_lladdr[2] << 16;
	ml |= sc->vmx_lladdr[3] << 24;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);

	mh = sc->vmx_lladdr[4];
	mh |= sc->vmx_lladdr[5] << 8;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
}
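
/*
 * The MAC address is split across two little-endian registers. For an
 * illustrative address of 00:0c:29:aa:bb:cc, MACL holds 0xaa290c00 and
 * MACH holds 0x0000ccbb.
 */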

static void
vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
{
	uint32_t ml, mh;

	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);

	sc->vmx_lladdr[0] = ml;
	sc->vmx_lladdr[1] = ml >> 8;
	sc->vmx_lladdr[2] = ml >> 16;
	sc->vmx_lladdr[3] = ml >> 24;
	sc->vmx_lladdr[4] = mh;
	sc->vmx_lladdr[5] = mh >> 8;
}

static void
vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node, *txsnode;
	struct sysctl_oid_list *list, *txslist;
	struct vmxnet3_txq_stats *stats;
	struct UPT1_TxStats *txstats;
	char namebuf[16];

	stats = &txq->vxtxq_stats;
	txstats = &txq->vxtxq_ts->stats;

	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
	    NULL, "Transmit Queue");
	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
	    &stats->vmtxs_opackets, "Transmit packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
	    &stats->vmtxs_obytes, "Transmit bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
	    &stats->vmtxs_omcasts, "Transmit multicasts");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vmtxs_csum, "Transmit checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
	    &stats->vmtxs_tso, "Transmit TCP segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
	    &stats->vmtxs_full, "Transmit ring full");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
	    &stats->vmtxs_offload_failed, "Transmit checksum offload failed");

	/*
	 * Add statistics reported by the host. These are updated once
	 * every second.
	 */
	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
	    NULL, "Host Statistics");
	txslist = SYSCTL_CHILDREN(txsnode);
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &txstats->TSO_packets, "TSO packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
	    &txstats->TSO_bytes, "TSO bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
	    &txstats->ucast_packets, "Unicast packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
	    &txstats->ucast_bytes, "Unicast bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
	    &txstats->mcast_packets, "Multicast packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
	    &txstats->mcast_bytes, "Multicast bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
	    &txstats->error, "Errors");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
	    &txstats->discard, "Discards");
}

static void
vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node, *rxsnode;
	struct sysctl_oid_list *list, *rxslist;
	struct vmxnet3_rxq_stats *stats;
	struct UPT1_RxStats *rxstats;
	char namebuf[16];

	stats = &rxq->vxrxq_stats;
	rxstats = &rxq->vxrxq_rs->stats;

	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
	    NULL, "Receive Queue");
	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
	    &stats->vmrxs_ipackets, "Receive packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
	    &stats->vmrxs_ibytes, "Receive bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
	    &stats->vmrxs_iqdrops, "Receive drops");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
	    &stats->vmrxs_ierrors, "Receive errors");

	/*
	 * Add statistics reported by the host. These are updated once
	 * every second.
	 */
	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
	    NULL, "Host Statistics");
	rxslist = SYSCTL_CHILDREN(rxsnode);
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
	    &rxstats->LRO_packets, "LRO packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
	    &rxstats->LRO_bytes, "LRO bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
	    &rxstats->ucast_packets, "Unicast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
	    &rxstats->ucast_bytes, "Unicast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
	    &rxstats->mcast_packets, "Multicast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
	    &rxstats->mcast_bytes, "Multicast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
	    &rxstats->bcast_packets, "Broadcast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
	    &rxstats->bcast_bytes, "Broadcast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
	    &rxstats->nobuffer, "No buffer");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
	    &rxstats->error, "Errors");
}

static void
vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	int i;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];

		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
		    "debug", CTLFLAG_RD, NULL, "");
		list = SYSCTL_CHILDREN(node);

		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];

		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
		    "debug", CTLFLAG_RD, NULL, "");
		list = SYSCTL_CHILDREN(node);

		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
	}
}

static void
vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	int i;

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);

	vmxnet3_setup_debug_sysctl(sc, ctx, child);
}

static void
vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vmx_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
	    &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
	    &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");

	stats = &sc->vmx_stats;
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
	    &stats->vmst_defragged, 0, "Tx mbuf chains defragged");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
	    &stats->vmst_defrag_failed, 0,
	    "Tx mbuf dropped because defrag failed");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");

	vmxnet3_setup_queue_sysctl(sc, ctx, child);
}

static void
vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
}

static uint32_t
vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
{

	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
}

static void
vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
}

static void
vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
}

static uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_cmd(sc, cmd);
	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}
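
/*
 * BAR1_CMD doubles as command and result register: the command is
 * written, the bus barrier keeps the two accesses ordered, and the
 * result is read back from the same offset.
 */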

static void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

static void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_enable_intr(sc, i);
}

static void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_disable_intr(sc, i);
}

static void
vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr = arg;

	if (error == 0)
		*baddr = segs->ds_addr;
}

static int
vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
    struct vmxnet3_dma_alloc *dma)
{
	device_t dev;
	int error;

	dev = sc->vmx_dev;
	bzero(dma, sizeof(struct vmxnet3_dma_alloc));

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    align, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    size,		/* maxsize */
	    1,			/* nsegments */
	    size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &dma->dma_tag);
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
		goto fail;
	}

	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
	if (error) {
		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
		goto fail;
	}

	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
		goto fail;
	}

	dma->dma_size = size;

fail:
	if (error)
		vmxnet3_dma_free(sc, dma);

	return (error);
}
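
/*
 * Sketch of a typical call (sizes and alignment illustrative only):
 *
 *	if (vmxnet3_dma_malloc(sc, size, 512, &dma) == 0) {
 *		... use dma.dma_vaddr (CPU) and dma.dma_paddr (device) ...
 *		vmxnet3_dma_free(sc, &dma);
 *	}
 */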

static void
vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
{

	if (dma->dma_tag != NULL) {
		if (dma->dma_map != NULL) {
			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		}

		if (dma->dma_vaddr != NULL) {
			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
			    dma->dma_map);
		}

		bus_dma_tag_destroy(dma->dma_tag);
	}
	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
}

static int
vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}
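
/*
 * Example: for unit 0 and a knob named "txndesc" (name illustrative),
 * a loader.conf line such as
 *
 *	hw.vmx.0.txndesc="1024"
 *
 * overrides the compiled-in default passed in through 'def'.
 */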

/*
 * Since this is a purely paravirtualized device, we do not have
 * to worry about DMA coherency. But at times, we must make sure
 * both the compiler and CPU do not reorder memory operations.
 */
static void
vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
{

	switch (type) {
	case VMXNET3_BARRIER_RD:
		rmb();
		break;
	case VMXNET3_BARRIER_WR:
		wmb();
		break;
	case VMXNET3_BARRIER_RDWR:
		mb();
		break;
	default:
		panic("%s: bad barrier type %d", __func__, type);
	}
}