2 * Copyright (c) 2013 Tsubai Masanari
3 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
20 /* Driver for VMware vmxnet3 virtual ethernet devices. */
22 #include <sys/cdefs.h>
23 __FBSDID("$FreeBSD$");
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/kernel.h>
28 #include <sys/endian.h>
29 #include <sys/sockio.h>
31 #include <sys/malloc.h>
32 #include <sys/module.h>
33 #include <sys/socket.h>
34 #include <sys/sysctl.h>
38 #include <net/ethernet.h>
40 #include <net/if_arp.h>
41 #include <net/if_dl.h>
42 #include <net/if_types.h>
43 #include <net/if_media.h>
44 #include <net/if_vlan_var.h>
48 #include <netinet/in_systm.h>
49 #include <netinet/in.h>
50 #include <netinet/ip.h>
51 #include <netinet/ip6.h>
52 #include <netinet6/ip6_var.h>
53 #include <netinet/udp.h>
54 #include <netinet/tcp.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include "if_vmxreg.h"
65 #include "if_vmxvar.h"
68 #include "opt_inet6.h"
70 /* Always enable for now - useful for queue hangs. */
71 #define VMXNET3_DEBUG_SYSCTL
73 #ifdef VMXNET3_FAILPOINTS
75 static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
76 "vmxnet3 fail points");
77 #define VMXNET3_FP _debug_fail_point_vmxnet3
80 static int vmxnet3_probe(device_t);
81 static int vmxnet3_attach(device_t);
82 static int vmxnet3_detach(device_t);
83 static int vmxnet3_shutdown(device_t);
85 static int vmxnet3_alloc_resources(struct vmxnet3_softc *);
86 static void vmxnet3_free_resources(struct vmxnet3_softc *);
87 static int vmxnet3_check_version(struct vmxnet3_softc *);
88 static void vmxnet3_initial_config(struct vmxnet3_softc *);
90 static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
91 static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
92 static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
93 static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
94 struct vmxnet3_interrupt *);
95 static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
96 static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
97 static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
98 static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
99 static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
101 static void vmxnet3_free_interrupt(struct vmxnet3_softc *,
102 struct vmxnet3_interrupt *);
103 static void vmxnet3_free_interrupts(struct vmxnet3_softc *);
105 static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
106 static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
107 static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
108 static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
109 static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
110 static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
112 static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
113 static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
114 static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
115 static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
116 static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
117 static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
118 static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
119 static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
120 static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
121 static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
122 static void vmxnet3_reinit_interface(struct vmxnet3_softc *);
123 static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
124 static int vmxnet3_alloc_data(struct vmxnet3_softc *);
125 static void vmxnet3_free_data(struct vmxnet3_softc *);
126 static int vmxnet3_setup_interface(struct vmxnet3_softc *);
128 static void vmxnet3_evintr(struct vmxnet3_softc *);
129 static void vmxnet3_txq_eof(struct vmxnet3_txqueue *);
130 static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
131 static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
132 static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
133 struct vmxnet3_rxring *, int);
134 static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
135 static void vmxnet3_legacy_intr(void *);
136 static void vmxnet3_txq_intr(void *);
137 static void vmxnet3_rxq_intr(void *);
138 static void vmxnet3_event_intr(void *);
140 static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
141 static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
142 static void vmxnet3_stop(struct vmxnet3_softc *);
144 static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
145 static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
146 static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
147 static int vmxnet3_enable_device(struct vmxnet3_softc *);
148 static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
149 static int vmxnet3_reinit(struct vmxnet3_softc *);
150 static void vmxnet3_init_locked(struct vmxnet3_softc *);
151 static void vmxnet3_init(void *);
153 static int vmxnet3_txq_offload_ctx(struct mbuf *, int *, int *, int *);
154 static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
155 bus_dmamap_t, bus_dma_segment_t [], int *);
156 static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
157 static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
158 static void vmxnet3_start_locked(struct ifnet *);
159 static void vmxnet3_start(struct ifnet *);
161 static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
163 static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
164 static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
165 static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
166 static int vmxnet3_change_mtu(struct vmxnet3_softc *, int);
167 static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
169 static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
170 static void vmxnet3_tick(void *);
171 static void vmxnet3_link_status(struct vmxnet3_softc *);
172 static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
173 static int vmxnet3_media_change(struct ifnet *);
174 static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
175 static void vmxnet3_get_lladdr(struct vmxnet3_softc *);
177 static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
178 struct sysctl_ctx_list *, struct sysctl_oid_list *);
179 static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
180 struct sysctl_ctx_list *, struct sysctl_oid_list *);
181 static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
182 struct sysctl_ctx_list *, struct sysctl_oid_list *);
183 static void vmxnet3_setup_sysctl(struct vmxnet3_softc *);
185 static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
187 static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
188 static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
190 static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
191 static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
193 static void vmxnet3_enable_intr(struct vmxnet3_softc *, int);
194 static void vmxnet3_disable_intr(struct vmxnet3_softc *, int);
195 static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
196 static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
198 static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
199 bus_size_t, struct vmxnet3_dma_alloc *);
200 static void vmxnet3_dma_free(struct vmxnet3_softc *,
201 struct vmxnet3_dma_alloc *);
202 static int vmxnet3_tunable_int(struct vmxnet3_softc *,
208 VMXNET3_BARRIER_RDWR,
211 static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
214 static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
215 TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
216 static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
217 TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
219 static device_method_t vmxnet3_methods[] = {
220 /* Device interface. */
221 DEVMETHOD(device_probe, vmxnet3_probe),
222 DEVMETHOD(device_attach, vmxnet3_attach),
223 DEVMETHOD(device_detach, vmxnet3_detach),
224 DEVMETHOD(device_shutdown, vmxnet3_shutdown),
229 static driver_t vmxnet3_driver = {
230 "vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
233 static devclass_t vmxnet3_devclass;
234 DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
236 MODULE_DEPEND(vmx, pci, 1, 1, 1);
237 MODULE_DEPEND(vmx, ether, 1, 1, 1);
239 #define VMXNET3_VMWARE_VENDOR_ID 0x15AD
240 #define VMXNET3_VMWARE_DEVICE_ID 0x07B0
243 vmxnet3_probe(device_t dev)
246 if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
247 pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
248 device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
249 return (BUS_PROBE_DEFAULT);
256 vmxnet3_attach(device_t dev)
258 struct vmxnet3_softc *sc;
261 sc = device_get_softc(dev);
264 pci_enable_busmaster(dev);
266 VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
267 callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
269 vmxnet3_initial_config(sc);
271 error = vmxnet3_alloc_resources(sc);
275 error = vmxnet3_check_version(sc);
279 error = vmxnet3_alloc_rxtx_queues(sc);
283 error = vmxnet3_alloc_interrupts(sc);
287 error = vmxnet3_alloc_data(sc);
291 error = vmxnet3_setup_interface(sc);
295 error = vmxnet3_setup_interrupts(sc);
297 ether_ifdetach(sc->vmx_ifp);
298 device_printf(dev, "could not set up interrupt\n");
302 vmxnet3_setup_sysctl(sc);
303 vmxnet3_link_status(sc);
313 vmxnet3_detach(device_t dev)
315 struct vmxnet3_softc *sc;
318 sc = device_get_softc(dev);
321 if (device_is_attached(dev)) {
323 VMXNET3_CORE_LOCK(sc);
325 VMXNET3_CORE_UNLOCK(sc);
326 callout_drain(&sc->vmx_tick);
329 if (sc->vmx_vlan_attach != NULL) {
330 EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
331 sc->vmx_vlan_attach = NULL;
333 if (sc->vmx_vlan_detach != NULL) {
334 EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
335 sc->vmx_vlan_detach = NULL;
338 vmxnet3_free_interrupts(sc);
345 ifmedia_removeall(&sc->vmx_media);
347 vmxnet3_free_data(sc);
348 vmxnet3_free_resources(sc);
349 vmxnet3_free_rxtx_queues(sc);
351 VMXNET3_CORE_LOCK_DESTROY(sc);
357 vmxnet3_shutdown(device_t dev)
364 vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
372 sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
374 if (sc->vmx_res0 == NULL) {
376 "could not map BAR0 memory\n");
380 sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
381 sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
384 sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
386 if (sc->vmx_res1 == NULL) {
388 "could not map BAR1 memory\n");
392 sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
393 sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
395 if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
397 sc->vmx_msix_res = bus_alloc_resource_any(dev,
398 SYS_RES_MEMORY, &rid, RF_ACTIVE);
401 if (sc->vmx_msix_res == NULL)
402 sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
408 vmxnet3_free_resources(struct vmxnet3_softc *sc)
415 if (sc->vmx_res0 != NULL) {
417 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
421 if (sc->vmx_res1 != NULL) {
423 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
427 if (sc->vmx_msix_res != NULL) {
429 bus_release_resource(dev, SYS_RES_MEMORY, rid,
431 sc->vmx_msix_res = NULL;
436 vmxnet3_check_version(struct vmxnet3_softc *sc)
443 version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
444 if ((version & 0x01) == 0) {
445 device_printf(dev, "unsupported hardware version %#x\n",
449 vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
451 version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
452 if ((version & 0x01) == 0) {
453 device_printf(dev, "unsupported UPT version %#x\n", version);
456 vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
462 vmxnet3_initial_config(struct vmxnet3_softc *sc)
467 * BMV Much of the work is already done, but this driver does
468 * not support multiqueue yet.
470 sc->vmx_ntxqueues = VMXNET3_TX_QUEUES;
471 sc->vmx_nrxqueues = VMXNET3_RX_QUEUES;
473 ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
474 if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
475 ndesc = VMXNET3_DEF_TX_NDESC;
476 if (ndesc & VMXNET3_MASK_TX_NDESC)
477 ndesc &= ~VMXNET3_MASK_TX_NDESC;
478 sc->vmx_ntxdescs = ndesc;
480 ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
481 if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
482 ndesc = VMXNET3_DEF_RX_NDESC;
483 if (ndesc & VMXNET3_MASK_RX_NDESC)
484 ndesc &= ~VMXNET3_MASK_RX_NDESC;
485 sc->vmx_nrxdescs = ndesc;
486 sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
490 vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
493 int nmsix, cnt, required;
497 if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
500 /* Allocate an additional vector for the events interrupt. */
501 required = sc->vmx_nrxqueues + sc->vmx_ntxqueues + 1;
503 nmsix = pci_msix_count(dev);
504 if (nmsix < required)
508 if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
509 sc->vmx_nintrs = required;
512 pci_release_msi(dev);
518 vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
521 int nmsi, cnt, required;
526 nmsi = pci_msi_count(dev);
531 if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
535 pci_release_msi(dev);
541 vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
549 vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
550 struct vmxnet3_interrupt *intr)
552 struct resource *irq;
554 irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
558 intr->vmxi_irq = irq;
559 intr->vmxi_rid = rid;
565 vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
567 int i, rid, flags, error;
572 if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
573 flags |= RF_SHAREABLE;
577 for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
578 error = vmxnet3_alloc_interrupt(sc, rid, flags,
588 * NOTE: We only support the simple case of each Rx and Tx queue on its
589 * own MSIX vector. This is good enough until we support mulitqueue.
592 vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
595 struct vmxnet3_txqueue *txq;
596 struct vmxnet3_rxqueue *rxq;
597 struct vmxnet3_interrupt *intr;
602 intr = &sc->vmx_intrs[0];
603 type = INTR_TYPE_NET | INTR_MPSAFE;
605 for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
606 txq = &sc->vmx_txq[i];
607 error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
608 vmxnet3_txq_intr, txq, &intr->vmxi_handler);
611 txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
614 for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
615 rxq = &sc->vmx_rxq[i];
616 error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
617 vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
620 rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
623 error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
624 vmxnet3_event_intr, sc, &intr->vmxi_handler);
627 sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
633 vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
635 struct vmxnet3_interrupt *intr;
638 intr = &sc->vmx_intrs[0];
639 error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
640 INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
641 &intr->vmxi_handler);
643 for (i = 0; i < sc->vmx_ntxqueues; i++)
644 sc->vmx_txq[i].vxtxq_intr_idx = 0;
645 for (i = 0; i < sc->vmx_nrxqueues; i++)
646 sc->vmx_rxq[i].vxrxq_intr_idx = 0;
647 sc->vmx_event_intr_idx = 0;
653 * XXX BMV Should probably reorganize the attach and just do
654 * this in vmxnet3_init_shared_data().
657 vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
659 struct vmxnet3_txqueue *txq;
660 struct vmxnet3_txq_shared *txs;
661 struct vmxnet3_rxqueue *rxq;
662 struct vmxnet3_rxq_shared *rxs;
665 sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
667 for (i = 0; i < sc->vmx_ntxqueues; i++) {
668 txq = &sc->vmx_txq[i];
670 txs->intr_idx = txq->vxtxq_intr_idx;
673 for (i = 0; i < sc->vmx_nrxqueues; i++) {
674 rxq = &sc->vmx_rxq[i];
676 rxs->intr_idx = rxq->vxrxq_intr_idx;
681 vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
685 error = vmxnet3_alloc_intr_resources(sc);
689 switch (sc->vmx_intr_type) {
690 case VMXNET3_IT_MSIX:
691 error = vmxnet3_setup_msix_interrupts(sc);
694 case VMXNET3_IT_LEGACY:
695 error = vmxnet3_setup_legacy_interrupt(sc);
698 panic("%s: invalid interrupt type %d", __func__,
703 vmxnet3_set_interrupt_idx(sc);
709 vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
716 config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
718 sc->vmx_intr_type = config & 0x03;
719 sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
721 switch (sc->vmx_intr_type) {
722 case VMXNET3_IT_AUTO:
723 sc->vmx_intr_type = VMXNET3_IT_MSIX;
725 case VMXNET3_IT_MSIX:
726 error = vmxnet3_alloc_msix_interrupts(sc);
729 sc->vmx_intr_type = VMXNET3_IT_MSI;
732 error = vmxnet3_alloc_msi_interrupts(sc);
735 sc->vmx_intr_type = VMXNET3_IT_LEGACY;
737 case VMXNET3_IT_LEGACY:
738 error = vmxnet3_alloc_legacy_interrupts(sc);
743 sc->vmx_intr_type = -1;
744 device_printf(dev, "cannot allocate any interrupt resources\n");
752 vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
753 struct vmxnet3_interrupt *intr)
759 if (intr->vmxi_handler != NULL) {
760 bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
761 intr->vmxi_handler = NULL;
764 if (intr->vmxi_irq != NULL) {
765 bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
767 intr->vmxi_irq = NULL;
773 vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
777 for (i = 0; i < sc->vmx_nintrs; i++)
778 vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
780 if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
781 sc->vmx_intr_type == VMXNET3_IT_MSIX)
782 pci_release_msi(sc->vmx_dev);
786 vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
788 struct vmxnet3_rxqueue *rxq;
789 struct vmxnet3_rxring *rxr;
792 rxq = &sc->vmx_rxq[q];
794 snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
795 device_get_nameunit(sc->vmx_dev), q);
796 mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);
801 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
802 rxr = &rxq->vxrxq_cmd_ring[i];
804 rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
805 rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
806 sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
807 if (rxr->vxrxr_rxbuf == NULL)
810 rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
817 vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
819 struct vmxnet3_txqueue *txq;
820 struct vmxnet3_txring *txr;
822 txq = &sc->vmx_txq[q];
823 txr = &txq->vxtxq_cmd_ring;
825 snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
826 device_get_nameunit(sc->vmx_dev), q);
827 mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);
832 txr->vxtxr_ndesc = sc->vmx_ntxdescs;
833 txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
834 sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
835 if (txr->vxtxr_txbuf == NULL)
838 txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
844 vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
848 sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
849 sc->vmx_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
850 sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
851 sc->vmx_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
852 if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
855 for (i = 0; i < sc->vmx_nrxqueues; i++) {
856 error = vmxnet3_init_rxq(sc, i);
861 for (i = 0; i < sc->vmx_ntxqueues; i++) {
862 error = vmxnet3_init_txq(sc, i);
871 vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
873 struct vmxnet3_rxring *rxr;
876 rxq->vxrxq_sc = NULL;
879 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
880 rxr = &rxq->vxrxq_cmd_ring[i];
882 if (rxr->vxrxr_rxbuf != NULL) {
883 free(rxr->vxrxr_rxbuf, M_DEVBUF);
884 rxr->vxrxr_rxbuf = NULL;
888 if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
889 mtx_destroy(&rxq->vxrxq_mtx);
893 vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
895 struct vmxnet3_txring *txr;
897 txr = &txq->vxtxq_cmd_ring;
899 txq->vxtxq_sc = NULL;
902 if (txr->vxtxr_txbuf != NULL) {
903 free(txr->vxtxr_txbuf, M_DEVBUF);
904 txr->vxtxr_txbuf = NULL;
907 if (mtx_initialized(&txq->vxtxq_mtx) != 0)
908 mtx_destroy(&txq->vxtxq_mtx);
912 vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
916 if (sc->vmx_rxq != NULL) {
917 for (i = 0; i < sc->vmx_nrxqueues; i++)
918 vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
919 free(sc->vmx_rxq, M_DEVBUF);
923 if (sc->vmx_txq != NULL) {
924 for (i = 0; i < sc->vmx_ntxqueues; i++)
925 vmxnet3_destroy_txq(&sc->vmx_txq[i]);
926 free(sc->vmx_txq, M_DEVBUF);
932 vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
941 size = sizeof(struct vmxnet3_driver_shared);
942 error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
944 device_printf(dev, "cannot alloc shared memory\n");
947 sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
949 size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
950 sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
951 error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
953 device_printf(dev, "cannot alloc queue shared memory\n");
956 sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
959 for (i = 0; i < sc->vmx_ntxqueues; i++) {
960 sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
961 kva += sizeof(struct vmxnet3_txq_shared);
963 for (i = 0; i < sc->vmx_nrxqueues; i++) {
964 sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
965 kva += sizeof(struct vmxnet3_rxq_shared);
972 vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
975 if (sc->vmx_qs != NULL) {
976 vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
980 if (sc->vmx_ds != NULL) {
981 vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
987 vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
990 struct vmxnet3_txqueue *txq;
991 struct vmxnet3_txring *txr;
992 struct vmxnet3_comp_ring *txc;
993 size_t descsz, compsz;
998 for (q = 0; q < sc->vmx_ntxqueues; q++) {
999 txq = &sc->vmx_txq[q];
1000 txr = &txq->vxtxq_cmd_ring;
1001 txc = &txq->vxtxq_comp_ring;
1003 descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
1004 compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
1006 error = bus_dma_tag_create(bus_get_dma_tag(dev),
1007 1, 0, /* alignment, boundary */
1008 BUS_SPACE_MAXADDR, /* lowaddr */
1009 BUS_SPACE_MAXADDR, /* highaddr */
1010 NULL, NULL, /* filter, filterarg */
1011 VMXNET3_TSO_MAXSIZE, /* maxsize */
1012 VMXNET3_TX_MAXSEGS, /* nsegments */
1013 VMXNET3_TX_MAXSEGSIZE, /* maxsegsize */
1015 NULL, NULL, /* lockfunc, lockarg */
1019 "unable to create Tx buffer tag for queue %d\n", q);
1023 error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
1025 device_printf(dev, "cannot alloc Tx descriptors for "
1026 "queue %d error %d\n", q, error);
1030 (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
1032 error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
1034 device_printf(dev, "cannot alloc Tx comp descriptors "
1035 "for queue %d error %d\n", q, error);
1039 (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
1041 for (i = 0; i < txr->vxtxr_ndesc; i++) {
1042 error = bus_dmamap_create(txr->vxtxr_txtag, 0,
1043 &txr->vxtxr_txbuf[i].vtxb_dmamap);
1045 device_printf(dev, "unable to create Tx buf "
1046 "dmamap for queue %d idx %d\n", q, i);
1056 vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1059 struct vmxnet3_txqueue *txq;
1060 struct vmxnet3_txring *txr;
1061 struct vmxnet3_comp_ring *txc;
1062 struct vmxnet3_txbuf *txb;
1067 for (q = 0; q < sc->vmx_ntxqueues; q++) {
1068 txq = &sc->vmx_txq[q];
1069 txr = &txq->vxtxq_cmd_ring;
1070 txc = &txq->vxtxq_comp_ring;
1072 for (i = 0; i < txr->vxtxr_ndesc; i++) {
1073 txb = &txr->vxtxr_txbuf[i];
1074 if (txb->vtxb_dmamap != NULL) {
1075 bus_dmamap_destroy(txr->vxtxr_txtag,
1077 txb->vtxb_dmamap = NULL;
1081 if (txc->vxcr_u.txcd != NULL) {
1082 vmxnet3_dma_free(sc, &txc->vxcr_dma);
1083 txc->vxcr_u.txcd = NULL;
1086 if (txr->vxtxr_txd != NULL) {
1087 vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1088 txr->vxtxr_txd = NULL;
1091 if (txr->vxtxr_txtag != NULL) {
1092 bus_dma_tag_destroy(txr->vxtxr_txtag);
1093 txr->vxtxr_txtag = NULL;
1099 vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1102 struct vmxnet3_rxqueue *rxq;
1103 struct vmxnet3_rxring *rxr;
1104 struct vmxnet3_comp_ring *rxc;
1110 for (q = 0; q < sc->vmx_nrxqueues; q++) {
1111 rxq = &sc->vmx_rxq[q];
1112 rxc = &rxq->vxrxq_comp_ring;
1115 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1116 rxr = &rxq->vxrxq_cmd_ring[i];
1118 descsz = rxr->vxrxr_ndesc *
1119 sizeof(struct vmxnet3_rxdesc);
1120 compsz += rxr->vxrxr_ndesc *
1121 sizeof(struct vmxnet3_rxcompdesc);
1123 error = bus_dma_tag_create(bus_get_dma_tag(dev),
1124 1, 0, /* alignment, boundary */
1125 BUS_SPACE_MAXADDR, /* lowaddr */
1126 BUS_SPACE_MAXADDR, /* highaddr */
1127 NULL, NULL, /* filter, filterarg */
1128 MJUMPAGESIZE, /* maxsize */
1130 MJUMPAGESIZE, /* maxsegsize */
1132 NULL, NULL, /* lockfunc, lockarg */
1136 "unable to create Rx buffer tag for "
1141 error = vmxnet3_dma_malloc(sc, descsz, 512,
1144 device_printf(dev, "cannot allocate Rx "
1145 "descriptors for queue %d/%d error %d\n",
1150 (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1153 error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1155 device_printf(dev, "cannot alloc Rx comp descriptors "
1156 "for queue %d error %d\n", q, error);
1160 (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1162 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1163 rxr = &rxq->vxrxq_cmd_ring[i];
1165 error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1166 &rxr->vxrxr_spare_dmap);
1168 device_printf(dev, "unable to create spare "
1169 "dmamap for queue %d/%d error %d\n",
1174 for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1175 error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1176 &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1178 device_printf(dev, "unable to create "
1179 "dmamap for queue %d/%d slot %d "
1192 vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1195 struct vmxnet3_rxqueue *rxq;
1196 struct vmxnet3_rxring *rxr;
1197 struct vmxnet3_comp_ring *rxc;
1198 struct vmxnet3_rxbuf *rxb;
1203 for (q = 0; q < sc->vmx_nrxqueues; q++) {
1204 rxq = &sc->vmx_rxq[q];
1205 rxc = &rxq->vxrxq_comp_ring;
1207 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1208 rxr = &rxq->vxrxq_cmd_ring[i];
1210 if (rxr->vxrxr_spare_dmap != NULL) {
1211 bus_dmamap_destroy(rxr->vxrxr_rxtag,
1212 rxr->vxrxr_spare_dmap);
1213 rxr->vxrxr_spare_dmap = NULL;
1216 for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1217 rxb = &rxr->vxrxr_rxbuf[j];
1218 if (rxb->vrxb_dmamap != NULL) {
1219 bus_dmamap_destroy(rxr->vxrxr_rxtag,
1221 rxb->vrxb_dmamap = NULL;
1226 if (rxc->vxcr_u.rxcd != NULL) {
1227 vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1228 rxc->vxcr_u.rxcd = NULL;
1231 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1232 rxr = &rxq->vxrxq_cmd_ring[i];
1234 if (rxr->vxrxr_rxd != NULL) {
1235 vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1236 rxr->vxrxr_rxd = NULL;
1239 if (rxr->vxrxr_rxtag != NULL) {
1240 bus_dma_tag_destroy(rxr->vxrxr_rxtag);
1241 rxr->vxrxr_rxtag = NULL;
1248 vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
1252 error = vmxnet3_alloc_txq_data(sc);
1256 error = vmxnet3_alloc_rxq_data(sc);
1264 vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1267 if (sc->vmx_rxq != NULL)
1268 vmxnet3_free_rxq_data(sc);
1270 if (sc->vmx_txq != NULL)
1271 vmxnet3_free_txq_data(sc);
1275 vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1279 error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1280 32, &sc->vmx_mcast_dma);
1282 device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1284 sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1290 vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1293 if (sc->vmx_mcast != NULL) {
1294 vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1295 sc->vmx_mcast = NULL;
1300 vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1302 struct vmxnet3_driver_shared *ds;
1303 struct vmxnet3_txqueue *txq;
1304 struct vmxnet3_txq_shared *txs;
1305 struct vmxnet3_rxqueue *rxq;
1306 struct vmxnet3_rxq_shared *rxs;
1312 * Initialize fields of the shared data that remains the same across
1313 * reinits. Note the shared data is zero'd when allocated.
1316 ds->magic = VMXNET3_REV1_MAGIC;
1319 ds->version = VMXNET3_DRIVER_VERSION;
1320 ds->guest = VMXNET3_GOS_FREEBSD |
1326 ds->vmxnet3_revision = 1;
1327 ds->upt_version = 1;
1330 ds->driver_data = vtophys(sc);
1331 ds->driver_data_len = sizeof(struct vmxnet3_softc);
1332 ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1333 ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1334 ds->nrxsg_max = sc->vmx_max_rxsegs;
1336 /* Interrupt control. */
1337 ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1338 ds->nintr = sc->vmx_nintrs;
1339 ds->evintr = sc->vmx_event_intr_idx;
1340 ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1342 for (i = 0; i < sc->vmx_nintrs; i++)
1343 ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1345 /* Receive filter. */
1346 ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1347 ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1350 for (i = 0; i < sc->vmx_ntxqueues; i++) {
1351 txq = &sc->vmx_txq[i];
1352 txs = txq->vxtxq_ts;
1354 txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1355 txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1356 txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1357 txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1358 txs->driver_data = vtophys(txq);
1359 txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1363 for (i = 0; i < sc->vmx_nrxqueues; i++) {
1364 rxq = &sc->vmx_rxq[i];
1365 rxs = rxq->vxrxq_rs;
1367 rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1368 rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1369 rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1370 rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1371 rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1372 rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1373 rxs->driver_data = vtophys(rxq);
1374 rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1379 vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
1385 /* Use the current MAC address. */
1386 bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
1387 vmxnet3_set_lladdr(sc);
1389 ifp->if_hwassist = 0;
1390 if (ifp->if_capenable & IFCAP_TXCSUM)
1391 ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD;
1392 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1393 ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
1394 if (ifp->if_capenable & IFCAP_TSO4)
1395 ifp->if_hwassist |= CSUM_TSO;
1396 if (ifp->if_capenable & IFCAP_TSO6)
1397 ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */
/*
 * Rebuild the driver-shared area consumed by the host: UPT feature bits
 * derived from the enabled capabilities, the MTU, and the queue counts;
 * then hand the shared area's physical address to the device via BAR1.
 */
1401 vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1404 struct vmxnet3_driver_shared *ds;
1409 ds->upt_features = 0;
1410 if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
1411 ds->upt_features |= UPT1_F_CSUM;
1412 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1413 ds->upt_features |= UPT1_F_VLAN;
1414 if (ifp->if_capenable & IFCAP_LRO)
1415 ds->upt_features |= UPT1_F_LRO;
1417 ds->mtu = ifp->if_mtu;
1418 ds->ntxqueue = sc->vmx_ntxqueues;
1419 ds->nrxqueue = sc->vmx_nrxqueues;
/* 64-bit physical address is split across the low/high BAR1 registers. */
1421 vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1422 vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1423 (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
/*
 * Allocate all driver data structures in order: shared area, per-queue
 * data, multicast table; then initialize the shared data contents.
 * Error-path unwinding is handled between the elided checks.
 */
1427 vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1431 error = vmxnet3_alloc_shared_data(sc);
1435 error = vmxnet3_alloc_queue_data(sc);
1439 error = vmxnet3_alloc_mcast_table(sc);
1443 vmxnet3_init_shared_data(sc);
/* Release driver data structures in reverse order of allocation. */
1449 vmxnet3_free_data(struct vmxnet3_softc *sc)
1452 vmxnet3_free_mcast_table(sc);
1453 vmxnet3_free_queue_data(sc);
1454 vmxnet3_free_shared_data(sc);
/*
 * Create and attach the network interface: allocate the ifnet, set the
 * driver entry points and queue limits, attach as Ethernet, advertise
 * capabilities, register VLAN config event handlers, and set up the
 * (fixed, auto-only) ifmedia.
 */
1458 vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1465 ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
1467 device_printf(dev, "cannot allocate ifnet structure\n");
1471 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
/* Older kernels lack if_initbaudrate(); assign the field directly. */
1472 #if __FreeBSD_version < 1000025
1473 ifp->if_baudrate = 1000000000;
1475 if_initbaudrate(ifp, IF_Gbps(10));
1478 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1479 ifp->if_init = vmxnet3_init;
1480 ifp->if_ioctl = vmxnet3_ioctl;
1481 ifp->if_start = vmxnet3_start;
1482 ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
1483 IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
1484 IFQ_SET_READY(&ifp->if_snd);
1486 vmxnet3_get_lladdr(sc);
1487 ether_ifattach(ifp, sc->vmx_lladdr);
1489 ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
1490 ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
1491 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1492 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
1494 ifp->if_capenable = ifp->if_capabilities;
1496 /* These capabilities are not enabled by default. */
1497 ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
/* Track VLAN add/remove so the device VLAN filter can be kept in sync. */
1499 sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
1500 vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1501 sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
1502 vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1504 ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
1505 vmxnet3_media_status);
1506 ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1507 ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
/*
 * Process device events: acknowledge the pending event bits, handle
 * link changes, report Tx/Rx queue errors (which trigger a reinit),
 * and log informational events. Runs under the core lock.
 */
1513 vmxnet3_evintr(struct vmxnet3_softc *sc)
1517 struct vmxnet3_txq_shared *ts;
1518 struct vmxnet3_rxq_shared *rs;
1526 VMXNET3_CORE_LOCK(sc);
/* Reading then writing the event word acknowledges it to the host. */
1529 event = sc->vmx_ds->event;
1530 vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
1532 if (event & VMXNET3_EVENT_LINK)
1533 vmxnet3_link_status(sc);
1535 if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
/* Refresh queue status so the stopped/error fields below are current.
 * NOTE(review): only queue 0 status is reported here. */
1537 vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
1538 ts = sc->vmx_txq[0].vxtxq_ts;
1539 if (ts->stopped != 0)
1540 device_printf(dev, "Tx queue error %#x\n", ts->error);
1541 rs = sc->vmx_rxq[0].vxrxq_rs;
1542 if (rs->stopped != 0)
1543 device_printf(dev, "Rx queue error %#x\n", rs->error);
1544 device_printf(dev, "Rx/Tx queue error event ... resetting\n");
1547 if (event & VMXNET3_EVENT_DIC)
1548 device_printf(dev, "device implementation change event\n");
1549 if (event & VMXNET3_EVENT_DEBUG)
1550 device_printf(dev, "debug event\n");
/* Queue-error recovery: mark not-running and reinitialize. */
1553 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1554 vmxnet3_init_locked(sc);
1557 VMXNET3_CORE_UNLOCK(sc);
/*
 * Reap the Tx completion ring: for each completion descriptor owned by
 * the driver (generation bit matches), unload and free the mbuf chain
 * that started at the corresponding command-ring index, then advance
 * the ring state. Clears the watchdog once the ring drains.
 */
1561 vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
1563 struct vmxnet3_softc *sc;
1565 struct vmxnet3_txring *txr;
1566 struct vmxnet3_comp_ring *txc;
1567 struct vmxnet3_txcompdesc *txcd;
1568 struct vmxnet3_txbuf *txb;
1573 txr = &txq->vxtxq_cmd_ring;
1574 txc = &txq->vxtxq_comp_ring;
1576 VMXNET3_TXQ_LOCK_ASSERT(txq);
1579 txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
/* Generation mismatch: descriptor still owned by the device. */
1580 if (txcd->gen != txc->vxcr_gen)
/* Read barrier before consuming the descriptor contents. */
1582 vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1584 if (++txc->vxcr_next == txc->vxcr_ndesc) {
1589 sop = txr->vxtxr_next;
1590 txb = &txr->vxtxr_txbuf[sop];
1592 if (txb->vtxb_m != NULL) {
1593 bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
1594 BUS_DMASYNC_POSTWRITE);
1595 bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
1597 m_freem(txb->vtxb_m);
/* One completion covers a whole packet; skip to just past its EOP. */
1603 txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
1606 if (txr->vxtxr_head == txr->vxtxr_next)
1607 txq->vxtxq_watchdog = 0;
/*
 * Allocate and DMA-map a fresh receive mbuf cluster for the ring's
 * current fill slot. Ring 0 slots at multiples of vmx_rx_max_chain
 * become HEAD buffers (MCLBYTES-class, ETHER_ALIGNed); everything else
 * is a BODY buffer backed by an MJUMPAGESIZE cluster. On success the
 * old buffer (if any) is unloaded, the spare dmamap is swapped in, and
 * the Rx descriptor is republished with the ring's generation bit.
 * Returns 0 or an errno (ENOBUFS on mbuf/cluster exhaustion).
 */
1611 vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
1615 struct vmxnet3_rxdesc *rxd;
1616 struct vmxnet3_rxbuf *rxb;
1619 bus_dma_segment_t segs[1];
1620 int idx, clsize, btype, flags, nsegs, error;
1623 tag = rxr->vxrxr_rxtag;
1624 dmap = rxr->vxrxr_spare_dmap;
1625 idx = rxr->vxrxr_fill;
1626 rxd = &rxr->vxrxr_rxd[idx];
1627 rxb = &rxr->vxrxr_rxbuf[idx];
/* Optional fault injection for testing allocation-failure paths. */
1629 #ifdef VMXNET3_FAILPOINTS
1630 KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
1631 if (rxr->vxrxr_rid != 0)
1632 KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
1635 if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
1638 btype = VMXNET3_BTYPE_HEAD;
1640 #if __FreeBSD_version < 902001
1642 * These mbufs will never be used for the start of a frame.
1643 * Roughly prior to branching releng/9.2, the load_mbuf_sg()
1644 * required the mbuf to always be a packet header. Avoid
1645 * unnecessary mbuf initialization in newer versions where
1646 * that is not the case.
1652 clsize = MJUMPAGESIZE;
1653 btype = VMXNET3_BTYPE_BODY;
1656 m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
1658 sc->vmx_stats.vmst_mgetcl_failed++;
1662 if (btype == VMXNET3_BTYPE_HEAD) {
1663 m->m_len = m->m_pkthdr.len = clsize;
/* Align the IP header for head buffers. */
1664 m_adj(m, ETHER_ALIGN);
1668 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
1672 sc->vmx_stats.vmst_mbuf_load_failed++;
1676 ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
/* Undo the M_PKTHDR required by old load_mbuf_sg() (see above). */
1677 #if __FreeBSD_version < 902001
1678 if (btype == VMXNET3_BTYPE_BODY)
1679 m->m_flags &= ~M_PKTHDR;
1682 if (rxb->vrxb_m != NULL) {
1683 bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
1684 bus_dmamap_unload(tag, rxb->vrxb_dmamap);
/* The slot's old map becomes the new spare. */
1687 rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
1688 rxb->vrxb_dmamap = dmap;
1691 rxd->addr = segs[0].ds_addr;
1692 rxd->len = segs[0].ds_len;
/* Setting gen hands the descriptor to the device. */
1694 rxd->gen = rxr->vxrxr_gen;
1696 vmxnet3_rxr_increment_fill(rxr);
/*
 * Give the Rx descriptor at idx back to the device unchanged (reusing
 * its existing buffer) and advance the ring fill index.
 */
1701 vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
1702 struct vmxnet3_rxring *rxr, int idx)
1704 struct vmxnet3_rxdesc *rxd;
1706 rxd = &rxr->vxrxr_rxd[idx];
1707 rxd->gen = rxr->vxrxr_gen;
1708 vmxnet3_rxr_increment_fill(rxr);
/*
 * Drop the remainder of a partially received frame: walk completion
 * descriptors that are already owned by the driver and recycle each
 * referenced command-ring descriptor via vmxnet3_rxq_eof_discard().
 */
1712 vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
1714 struct vmxnet3_softc *sc;
1715 struct vmxnet3_rxring *rxr;
1716 struct vmxnet3_comp_ring *rxc;
1717 struct vmxnet3_rxcompdesc *rxcd;
1721 rxc = &rxq->vxrxq_comp_ring;
1724 rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
1725 if (rxcd->gen != rxc->vxcr_gen)
1726 break; /* Not expected. */
1727 vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1729 if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
1734 idx = rxcd->rxd_idx;
/* qid encodes which of the two command rings the descriptor is in. */
1736 if (rxcd->qid < sc->vmx_nrxqueues)
1737 rxr = &rxq->vxrxq_cmd_ring[0];
1739 rxr = &rxq->vxrxq_cmd_ring[1];
1740 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
/*
 * Translate the device's Rx checksum results into mbuf csum_flags:
 * mark the IP header checked/valid, and for non-fragmented TCP/UDP
 * with a good checksum, mark the payload data valid with a pseudo
 * checksum of 0xFFFF.
 */
1745 vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
1749 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1750 if (rxcd->ipcsum_ok)
1751 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1754 if (!rxcd->fragment) {
1755 if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
1756 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
1758 m->m_pkthdr.csum_data = 0xFFFF;
/*
 * Finish an assembled Rx frame and pass it up the stack: apply checksum
 * results, attach any hardware VLAN tag, then call if_input with the
 * queue lock dropped (the stack may re-enter the driver).
 */
1764 vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
1765 struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
1767 struct vmxnet3_softc *sc;
1780 vmxnet3_rx_csum(rxcd, m);
1782 m->m_flags |= M_VLANTAG;
1783 m->m_pkthdr.ether_vtag = rxcd->vtag;
1787 VMXNET3_RXQ_UNLOCK(rxq);
1788 (*ifp->if_input)(ifp, m);
1789 VMXNET3_RXQ_LOCK(rxq);
/*
 * Main Rx processing loop: consume completion descriptors owned by the
 * driver, chain head/body buffers into a frame (m_head/m_tail), refill
 * each consumed slot via vmxnet3_newbuf(), and hand completed frames to
 * vmxnet3_rxq_input(). On refill failure the frame (and any remaining
 * chain) is discarded so the rings stay consistent. Finally, if the
 * host asked for it, publish the updated ring head via BAR0.
 */
1793 vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
1795 struct vmxnet3_softc *sc;
1797 struct vmxnet3_rxring *rxr;
1798 struct vmxnet3_comp_ring *rxc;
1799 struct vmxnet3_rxdesc *rxd;
1800 struct vmxnet3_rxcompdesc *rxcd;
1801 struct mbuf *m, *m_head, *m_tail;
1806 rxc = &rxq->vxrxq_comp_ring;
1807 m_head = m_tail = NULL;
1809 VMXNET3_RXQ_LOCK_ASSERT(rxq);
1811 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1815 rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
1816 if (rxcd->gen != rxc->vxcr_gen)
1818 vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1820 if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
1825 idx = rxcd->rxd_idx;
/* qid selects command ring 0 or 1 for this queue. */
1827 if (rxcd->qid < sc->vmx_nrxqueues)
1828 rxr = &rxq->vxrxq_cmd_ring[0];
1830 rxr = &rxq->vxrxq_cmd_ring[1];
1831 rxd = &rxr->vxrxr_rxd[idx];
1833 m = rxr->vxrxr_rxbuf[idx].vrxb_m;
1834 KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
1835 __func__, rxcd->qid, idx));
1838 * The host may skip descriptors. We detect this when this
1839 * descriptor does not match the previous fill index. Catch
1840 * up with the host now.
1842 if (__predict_false(rxr->vxrxr_fill != idx)) {
1843 while (rxr->vxrxr_fill != idx) {
1844 rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
1846 vmxnet3_rxr_increment_fill(rxr);
/* Start-of-frame: must be a HEAD buffer in ring 0 at a chain start. */
1851 KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
1852 ("%s: start of frame w/o head buffer", __func__));
1853 KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
1854 ("%s: start of frame not in ring 0", __func__));
1855 KASSERT((idx % sc->vmx_rx_max_chain) == 0,
1856 ("%s: start of frame at unexcepted index %d (%d)",
1857 __func__, idx, sc->vmx_rx_max_chain));
1858 KASSERT(m_head == NULL,
1859 ("%s: duplicate start of frame?", __func__));
1862 /* Just ignore this descriptor. */
1863 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
/* Refill failed: drop this frame and discard the rest of its chain. */
1867 if (vmxnet3_newbuf(sc, rxr) != 0) {
1869 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
1871 vmxnet3_rxq_discard_chain(rxq);
1875 m->m_pkthdr.rcvif = ifp;
1876 m->m_pkthdr.len = m->m_len = length;
1877 m->m_pkthdr.csum_flags = 0;
1878 m_head = m_tail = m;
/* Continuation: must be a BODY buffer appended to an open frame. */
1881 KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
1882 ("%s: non start of frame w/o body buffer", __func__));
1883 KASSERT(m_head != NULL,
1884 ("%s: frame not started?", __func__));
1886 if (vmxnet3_newbuf(sc, rxr) != 0) {
1888 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
1890 vmxnet3_rxq_discard_chain(rxq);
1892 m_head = m_tail = NULL;
1897 m_head->m_pkthdr.len += length;
1903 vmxnet3_rxq_input(rxq, rxcd, m_head);
1904 m_head = m_tail = NULL;
1906 /* Must recheck after dropping the Rx lock. */
1907 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Some hosts want the ring head written back after processing. */
1912 if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
1913 int qid = rxcd->qid;
1916 idx = (idx + 1) % rxr->vxrxr_ndesc;
1917 if (qid >= sc->vmx_nrxqueues) {
1918 qid -= sc->vmx_nrxqueues;
1919 r = VMXNET3_BAR0_RXH2(qid);
1921 r = VMXNET3_BAR0_RXH1(qid);
1922 vmxnet3_write_bar0(sc, r, idx);
/*
 * Shared legacy/MSI interrupt handler: checks (and for true legacy
 * interrupts, verifies via BAR1) that the device raised the interrupt,
 * masks if the device uses active mask mode, services queue 0 Rx and
 * Tx plus pending events, restarts transmission if the send queue is
 * non-empty, and re-enables interrupts.
 */
1928 vmxnet3_legacy_intr(void *xsc)
1930 struct vmxnet3_softc *sc;
1931 struct vmxnet3_rxqueue *rxq;
1932 struct vmxnet3_txqueue *txq;
1936 rxq = &sc->vmx_rxq[0];
1937 txq = &sc->vmx_txq[0];
/* With a shared legacy line, bail if this device did not interrupt. */
1940 if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
1941 if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
1944 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1945 vmxnet3_disable_all_intrs(sc);
1947 if (sc->vmx_ds->event != 0)
1950 VMXNET3_RXQ_LOCK(rxq);
1951 vmxnet3_rxq_eof(rxq);
1952 VMXNET3_RXQ_UNLOCK(rxq);
1954 VMXNET3_TXQ_LOCK(txq);
1955 vmxnet3_txq_eof(txq);
1956 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1957 vmxnet3_start_locked(ifp);
1958 VMXNET3_TXQ_UNLOCK(txq);
1960 vmxnet3_enable_all_intrs(sc);
/*
 * Per-Tx-queue MSI-X handler: mask this queue's vector if needed, reap
 * completions, restart transmission if packets are queued, re-enable.
 */
1964 vmxnet3_txq_intr(void *xtxq)
1966 struct vmxnet3_softc *sc;
1967 struct vmxnet3_txqueue *txq;
1974 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1975 vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
1977 VMXNET3_TXQ_LOCK(txq);
1978 vmxnet3_txq_eof(txq);
1979 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1980 vmxnet3_start_locked(ifp);
1981 VMXNET3_TXQ_UNLOCK(txq);
1983 vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
/*
 * Per-Rx-queue MSI-X handler: mask this queue's vector if needed,
 * process received frames, then re-enable the vector.
 */
1987 vmxnet3_rxq_intr(void *xrxq)
1989 struct vmxnet3_softc *sc;
1990 struct vmxnet3_rxqueue *rxq;
1995 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1996 vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
1998 VMXNET3_RXQ_LOCK(rxq);
1999 vmxnet3_rxq_eof(rxq);
2000 VMXNET3_RXQ_UNLOCK(rxq);
2002 vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
/*
 * MSI-X event-vector handler: mask if needed, process pending device
 * events (the call between these lines is elided), re-enable.
 */
2006 vmxnet3_event_intr(void *xsc)
2008 struct vmxnet3_softc *sc;
2012 if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2013 vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2015 if (sc->vmx_ds->event != 0)
2018 vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
/*
 * Teardown helper for a Tx queue: unload and free every mbuf still
 * held in the command ring's buffer array.
 */
2022 vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2024 struct vmxnet3_txring *txr;
2025 struct vmxnet3_txbuf *txb;
2028 txr = &txq->vxtxq_cmd_ring;
2030 for (i = 0; i < txr->vxtxr_ndesc; i++) {
2031 txb = &txr->vxtxr_txbuf[i];
2033 if (txb->vtxb_m == NULL)
2036 bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
2037 BUS_DMASYNC_POSTWRITE);
2038 bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
2039 m_freem(txb->vtxb_m);
/*
 * Teardown helper for an Rx queue: unload and free every mbuf still
 * held in both command rings' buffer arrays.
 */
2045 vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2047 struct vmxnet3_rxring *rxr;
2048 struct vmxnet3_rxbuf *rxb;
2051 for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2052 rxr = &rxq->vxrxq_cmd_ring[i];
2054 for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2055 rxb = &rxr->vxrxr_rxbuf[j];
2057 if (rxb->vrxb_m == NULL)
2059 bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
2060 BUS_DMASYNC_POSTREAD);
2061 bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
2062 m_freem(rxb->vrxb_m);
/*
 * Rendezvous with all queue interrupt handlers: acquiring and releasing
 * each queue lock guarantees any handler that was running has finished
 * before teardown proceeds.
 */
2069 vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2071 struct vmxnet3_rxqueue *rxq;
2072 struct vmxnet3_txqueue *txq;
2075 for (i = 0; i < sc->vmx_nrxqueues; i++) {
2076 rxq = &sc->vmx_rxq[i];
2077 VMXNET3_RXQ_LOCK(rxq);
2078 VMXNET3_RXQ_UNLOCK(rxq);
2081 for (i = 0; i < sc->vmx_ntxqueues; i++) {
2082 txq = &sc->vmx_txq[i];
2083 VMXNET3_TXQ_LOCK(txq);
2084 VMXNET3_TXQ_UNLOCK(txq);
/*
 * Stop the interface (core lock held): clear the running flag and link
 * state, stop the tick callout, disable interrupts and the device,
 * rendezvous with in-flight handlers, free all queued mbufs, and
 * finally reset the device.
 */
2089 vmxnet3_stop(struct vmxnet3_softc *sc)
2095 VMXNET3_CORE_LOCK_ASSERT(sc);
2097 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2098 sc->vmx_link_active = 0;
2099 callout_stop(&sc->vmx_tick);
2101 /* Disable interrupts. */
2102 vmxnet3_disable_all_intrs(sc);
2103 vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2105 vmxnet3_stop_rendezvous(sc);
2107 for (q = 0; q < sc->vmx_ntxqueues; q++)
2108 vmxnet3_txstop(sc, &sc->vmx_txq[q]);
2109 for (q = 0; q < sc->vmx_nrxqueues; q++)
2110 vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);
2112 vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
/*
 * Reset a Tx queue's ring state for (re)initialization: zero the
 * command and completion descriptor arrays and restore the indices
 * and generation bits to their initial values.
 */
2116 vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2118 struct vmxnet3_txring *txr;
2119 struct vmxnet3_comp_ring *txc;
2121 txr = &txq->vxtxq_cmd_ring;
2122 txr->vxtxr_head = 0;
2123 txr->vxtxr_next = 0;
2124 txr->vxtxr_gen = VMXNET3_INIT_GEN;
2125 bzero(txr->vxtxr_txd,
2126 txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2128 txc = &txq->vxtxq_comp_ring;
2130 txc->vxcr_gen = VMXNET3_INIT_GEN;
2131 bzero(txc->vxcr_u.txcd,
2132 txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
/*
 * Reset and populate an Rx queue's rings for (re)initialization.
 * Chooses vmx_rx_max_chain based on whether the frame size fits a
 * single regular cluster, decides whether ring 1 needs populating
 * (LRO enabled or oversized frames), fills the populated rings with
 * fresh buffers, zeroes the unpopulated rings, and resets the
 * completion ring. Returns 0 or the errno from vmxnet3_newbuf().
 */
2136 vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2139 struct vmxnet3_rxring *rxr;
2140 struct vmxnet3_comp_ring *rxc;
2141 int i, populate, idx, frame_size, error;
2144 frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
2148 * If the MTU causes us to exceed what a regular sized cluster can
2149 * handle, we allocate a second MJUMPAGESIZE cluster after it in
2150 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
2152 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
2153 * our life easier. We do not support changing the ring size after
2156 if (frame_size <= MCLBYTES)
2157 sc->vmx_rx_max_chain = 1;
2159 sc->vmx_rx_max_chain = 2;
2162 * Only populate ring 1 if the configuration will take advantage
2163 * of it. That is either when LRO is enabled or the frame size
2164 * exceeds what ring 0 can contain.
2166 if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
2167 frame_size <= MCLBYTES + MJUMPAGESIZE)
2170 populate = VMXNET3_RXRINGS_PERQ;
2172 for (i = 0; i < populate; i++) {
2173 rxr = &rxq->vxrxq_cmd_ring[i];
2174 rxr->vxrxr_fill = 0;
2175 rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2176 bzero(rxr->vxrxr_rxd,
2177 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2179 for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2180 error = vmxnet3_newbuf(sc, rxr);
/* Unpopulated rings are still zeroed so the device sees clean state. */
2186 for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2187 rxr = &rxq->vxrxq_cmd_ring[i];
2188 rxr->vxrxr_fill = 0;
2190 bzero(rxr->vxrxr_rxd,
2191 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2194 rxc = &rxq->vxrxq_comp_ring;
2196 rxc->vxcr_gen = VMXNET3_INIT_GEN;
2197 bzero(rxc->vxcr_u.rxcd,
2198 rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
/*
 * Reinitialize every Tx and Rx queue; Rx initialization can fail
 * (buffer allocation), in which case the failure is logged.
 */
2204 vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2211 for (q = 0; q < sc->vmx_ntxqueues; q++)
2212 vmxnet3_txinit(sc, &sc->vmx_txq[q]);
2214 for (q = 0; q < sc->vmx_nrxqueues; q++) {
2215 error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
2217 device_printf(dev, "cannot populate Rx queue %d\n", q);
/*
 * Issue the ENABLE command to the device (non-zero result is failure)
 * and reset both Rx ring head registers for every queue.
 */
2226 vmxnet3_enable_device(struct vmxnet3_softc *sc)
2230 if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2231 device_printf(sc->vmx_dev, "device enable command failed!\n");
2235 /* Reset the Rx queue heads. */
2236 for (q = 0; q < sc->vmx_nrxqueues; q++) {
2237 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2238 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
/*
 * Reprogram receive filters after (re)initialization: restore the Rx
 * mode/multicast filter, and either copy the cached VLAN bitvector into
 * the shared area (hardware VLAN filtering enabled) or clear it, then
 * tell the device to reload its VLAN filter.
 */
2245 vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2251 vmxnet3_set_rxfilter(sc);
2253 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2254 bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
2255 sizeof(sc->vmx_ds->vlan_filter));
2257 bzero(sc->vmx_ds->vlan_filter,
2258 sizeof(sc->vmx_ds->vlan_filter));
2259 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
/*
 * Full reinitialization sequence: interface state, shared data, queue
 * rings, device enable, then Rx filters. Stops early on queue or
 * enable failure.
 */
2263 vmxnet3_reinit(struct vmxnet3_softc *sc)
2266 vmxnet3_reinit_interface(sc);
2267 vmxnet3_reinit_shared_data(sc);
2269 if (vmxnet3_reinit_queues(sc) != 0)
2272 if (vmxnet3_enable_device(sc) != 0)
2275 vmxnet3_reinit_rxfilters(sc);
/*
 * Bring the interface up with the core lock held: no-op if already
 * running; on successful reinit, set IFF_DRV_RUNNING, refresh link
 * state, enable interrupts, and start the periodic tick.
 */
2281 vmxnet3_init_locked(struct vmxnet3_softc *sc)
2287 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2292 if (vmxnet3_reinit(sc) != 0) {
2297 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2298 vmxnet3_link_status(sc);
2300 vmxnet3_enable_all_intrs(sc);
2301 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
/* if_init entry point: take the core lock and run the locked init. */
2305 vmxnet3_init(void *xsc)
2307 struct vmxnet3_softc *sc;
2311 VMXNET3_CORE_LOCK(sc);
2312 vmxnet3_init_locked(sc);
2313 VMXNET3_CORE_UNLOCK(sc);
2317 * BMV: Much of this can go away once we finally have offsets in
2318 * the mbuf packet header. Bug andre@.
/*
 * Parse a Tx mbuf's headers for checksum/TSO offload: determine the
 * ethertype (skipping a VLAN header if present), the L4 protocol, and
 * the byte offset where L4 data starts (*start). For TSO, the TCP
 * header length is folded into *start as well. Returns 0 on success.
 */
2321 vmxnet3_txq_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
2323 struct ether_vlan_header *evh;
2326 evh = mtod(m, struct ether_vlan_header *);
2327 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2328 /* BMV: We should handle nested VLAN tags too. */
2329 *etype = ntohs(evh->evl_proto);
2330 offset = sizeof(struct ether_vlan_header);
2332 *etype = ntohs(evh->evl_encap_proto);
2333 offset = sizeof(struct ether_header);
2338 case ETHERTYPE_IP: {
2339 struct ip *ip, iphdr;
/* Copy out the IP header if it is not contiguous in the first mbuf. */
2340 if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2341 m_copydata(m, offset, sizeof(struct ip),
2345 ip = (struct ip *)(m->m_data + offset);
2347 *start = offset + (ip->ip_hl << 2);
2352 case ETHERTYPE_IPV6:
2354 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2355 /* Assert the network stack sent us a valid packet. */
2356 KASSERT(*start > offset,
2357 ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2358 *start, offset, *proto));
2365 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2366 struct tcphdr *tcp, tcphdr;
2368 if (__predict_false(*proto != IPPROTO_TCP)) {
2369 /* Likely failed to correctly parse the mbuf. */
2373 if (m->m_len < *start + sizeof(struct tcphdr)) {
2374 m_copydata(m, offset, sizeof(struct tcphdr),
2378 tcp = (struct tcphdr *)(m->m_data + *start);
2381 * For TSO, the size of the protocol header is also
2382 * included in the descriptor header size.
2384 *start += (tcp->th_off << 2);
/*
 * DMA-map a Tx mbuf chain into segs/nsegs. If the chain has too many
 * segments (EFBIG), collapse it to at most VMXNET3_TX_MAXSEGS mbufs
 * and retry once, counting the collapse in driver stats. Returns 0 or
 * an errno; *m0 may be replaced by the collapsed chain.
 */
2391 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
2392 bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
2394 struct vmxnet3_txring *txr;
2399 txr = &txq->vxtxq_cmd_ring;
2401 tag = txr->vxtxr_txtag;
2402 maxsegs = VMXNET3_TX_MAXSEGS;
2404 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
/* Only EFBIG (too many segments) is recoverable via m_collapse(). */
2405 if (error == 0 || error != EFBIG)
2408 m = m_collapse(m, M_NOWAIT, maxsegs);
2411 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2419 txq->vxtxq_sc->vmx_stats.vmst_collapsed++;
/* Undo a vmxnet3_txq_load_mbuf(): unload the DMA map. */
2425 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
2427 struct vmxnet3_txring *txr;
2429 txr = &txq->vxtxq_cmd_ring;
2430 bus_dmamap_unload(txr->vxtxr_txtag, dmap);
/*
 * Enqueue one mbuf chain on the Tx command ring: DMA-map it, verify
 * ring space, parse headers if checksum/TSO offload was requested,
 * write one descriptor per DMA segment, then flip the start-of-packet
 * descriptor's generation bit last so the device only sees a complete
 * packet. Kicks the device doorbell when the pending count reaches
 * the interrupt threshold. Returns 0 or an errno; on failure the mbuf
 * is unloaded (and *m0 handling is in the elided error paths).
 */
2434 vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
2436 struct vmxnet3_softc *sc;
2438 struct vmxnet3_txring *txr;
2439 struct vmxnet3_txdesc *txd, *sop;
2442 bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
2443 int i, gen, nsegs, etype, proto, start, error;
2449 txr = &txq->vxtxq_cmd_ring;
2450 dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
2452 error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
2458 KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
2459 ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
2461 if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
2462 txq->vxtxq_stats.vtxrs_full++;
2463 vmxnet3_txq_unload_mbuf(txq, dmap);
2465 } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
2466 error = vmxnet3_txq_offload_ctx(m, &etype, &proto, &start);
2468 txq->vxtxq_stats.vtxrs_offload_failed++;
2469 vmxnet3_txq_unload_mbuf(txq, dmap);
2476 txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m = *m0;
2477 sop = &txr->vxtxr_txd[txr->vxtxr_head];
2478 gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */
2480 for (i = 0; i < nsegs; i++) {
2481 txd = &txr->vxtxr_txd[txr->vxtxr_head];
2483 txd->addr = segs[i].ds_addr;
2484 txd->len = segs[i].ds_len;
2487 txd->offload_mode = VMXNET3_OM_NONE;
2488 txd->offload_pos = 0;
2495 if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
2496 txr->vxtxr_head = 0;
2497 txr->vxtxr_gen ^= 1;
/* Descriptors after the first use the live generation immediately. */
2499 gen = txr->vxtxr_gen;
2504 if (m->m_flags & M_VLANTAG) {
2506 sop->vtag = m->m_pkthdr.ether_vtag;
/* Offload parameters are carried on the start-of-packet descriptor. */
2509 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2510 sop->offload_mode = VMXNET3_OM_TSO;
2512 sop->offload_pos = m->m_pkthdr.tso_segsz;
2513 } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
2514 VMXNET3_CSUM_OFFLOAD_IPV6)) {
2515 sop->offload_mode = VMXNET3_OM_CSUM;
2517 sop->offload_pos = start + m->m_pkthdr.csum_data;
2520 /* Finally, change the ownership. */
2521 vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
2524 if (++txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
2525 txq->vxtxq_ts->npending = 0;
2526 vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
/*
 * Drain the send queue onto Tx queue 0 (lock held): dequeue packets
 * while ring space remains, requeueing the head packet when space runs
 * short or encapsulation fails. Flushes any still-pending descriptors
 * to the device afterward and arms the watchdog if anything was sent.
 */
2534 vmxnet3_start_locked(struct ifnet *ifp)
2536 struct vmxnet3_softc *sc;
2537 struct vmxnet3_txqueue *txq;
2538 struct vmxnet3_txring *txr;
2539 struct mbuf *m_head;
2543 txq = &sc->vmx_txq[0];
2544 txr = &txq->vxtxq_cmd_ring;
2547 VMXNET3_TXQ_LOCK_ASSERT(txq);
2549 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2550 sc->vmx_link_active == 0)
2553 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2554 if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
2557 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2561 /* Assume worse case if this mbuf is the head of a chain. */
2562 if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2563 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2567 if (vmxnet3_txq_encap(txq, &m_head) != 0) {
2569 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2574 ETHER_BPF_MTAP(ifp, m_head);
/* Kick the device for any descriptors not yet flushed by encap. */
2578 if (txq->vxtxq_ts->npending > 0) {
2579 txq->vxtxq_ts->npending = 0;
2580 vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
2583 txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
/* if_start entry point: run the locked start on Tx queue 0. */
2588 vmxnet3_start(struct ifnet *ifp)
2590 struct vmxnet3_softc *sc;
2591 struct vmxnet3_txqueue *txq;
2594 txq = &sc->vmx_txq[0];
2596 VMXNET3_TXQ_LOCK(txq);
2597 vmxnet3_start_locked(ifp);
2598 VMXNET3_TXQ_UNLOCK(txq);
/*
 * Add (add != 0) or remove a VLAN tag in the driver's cached bitvector
 * and, when hardware VLAN filtering is enabled, in the device's shared
 * filter followed by a VLAN_FILTER command. Tags 0 and >4095 are
 * ignored. idx/bit address the 32-bit word and bit for the tag.
 */
2602 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
2608 idx = (tag >> 5) & 0x7F;
2611 if (tag == 0 || tag > 4095)
2614 VMXNET3_CORE_LOCK(sc);
2616 /* Update our private VLAN bitvector. */
2618 sc->vmx_vlan_filter[idx] |= (1 << bit);
2620 sc->vmx_vlan_filter[idx] &= ~(1 << bit);
2622 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
2624 sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
2626 sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
2627 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2630 VMXNET3_CORE_UNLOCK(sc);
/* vlan_config event handler: add a VLAN tag if the event is ours. */
2634 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2637 if (ifp->if_softc == arg)
2638 vmxnet3_update_vlan_filter(arg, 1, tag);
/* vlan_unconfig event handler: remove a VLAN tag if the event is ours. */
2642 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2645 if (ifp->if_softc == arg)
2646 vmxnet3_update_vlan_filter(arg, 0, tag);
/*
 * Program the device Rx mode and multicast filter from the interface
 * flags and multicast list. The multicast table holds up to
 * VMXNET3_MULTICAST_MAX addresses; on overflow, fall back to
 * ALLMULTI. Finishes with SET_FILTER and SET_RXMODE commands.
 */
2650 vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
2653 struct vmxnet3_driver_shared *ds;
2654 struct ifmultiaddr *ifma;
2660 mode = VMXNET3_RXMODE_UCAST;
2661 if (ifp->if_flags & IFF_BROADCAST)
2662 mode |= VMXNET3_RXMODE_BCAST;
2663 if (ifp->if_flags & IFF_PROMISC)
2664 mode |= VMXNET3_RXMODE_PROMISC;
2665 if (ifp->if_flags & IFF_ALLMULTI)
2666 mode |= VMXNET3_RXMODE_ALLMULTI;
2668 int cnt = 0, overflow = 0;
2670 if_maddr_rlock(ifp);
2671 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2672 if (ifma->ifma_addr->sa_family != AF_LINK)
2674 else if (cnt == VMXNET3_MULTICAST_MAX) {
2679 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2680 &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
2683 if_maddr_runlock(ifp);
2685 if (overflow != 0) {
2687 mode |= VMXNET3_RXMODE_ALLMULTI;
2689 mode |= VMXNET3_RXMODE_MCAST;
2690 ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
2695 vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
2696 vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
/*
 * Validate and apply a new MTU; if the interface is running, force a
 * reinitialization so ring buffer sizing matches the new frame size.
 */
2700 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
2706 if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
2711 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2712 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2713 vmxnet3_init_locked(sc);
/*
 * if_ioctl entry point. Handles MTU changes, interface flag changes
 * (promisc/allmulti toggles vs. full up/down), multicast list updates,
 * media requests, and capability changes — Rx-side and VLAN capability
 * changes force a reinit. Everything else falls through to
 * ether_ioctl(). The core lock must not be held on return.
 */
2720 vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2722 struct vmxnet3_softc *sc;
2724 int reinit, mask, error;
2727 ifr = (struct ifreq *) data;
2732 if (ifp->if_mtu != ifr->ifr_mtu) {
2733 VMXNET3_CORE_LOCK(sc);
2734 error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
2735 VMXNET3_CORE_UNLOCK(sc);
2740 VMXNET3_CORE_LOCK(sc);
2741 if (ifp->if_flags & IFF_UP) {
2742 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Only a filter update is needed if just PROMISC/ALLMULTI changed. */
2743 if ((ifp->if_flags ^ sc->vmx_if_flags) &
2744 (IFF_PROMISC | IFF_ALLMULTI)) {
2745 vmxnet3_set_rxfilter(sc);
2748 vmxnet3_init_locked(sc);
2750 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2753 sc->vmx_if_flags = ifp->if_flags;
2754 VMXNET3_CORE_UNLOCK(sc);
2759 VMXNET3_CORE_LOCK(sc);
2760 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2761 vmxnet3_set_rxfilter(sc);
2762 VMXNET3_CORE_UNLOCK(sc);
2767 error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
2771 VMXNET3_CORE_LOCK(sc);
2772 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
/* Tx-side offload toggles take effect without a reinit. */
2774 if (mask & IFCAP_TXCSUM)
2775 ifp->if_capenable ^= IFCAP_TXCSUM;
2776 if (mask & IFCAP_TXCSUM_IPV6)
2777 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2778 if (mask & IFCAP_TSO4)
2779 ifp->if_capenable ^= IFCAP_TSO4;
2780 if (mask & IFCAP_TSO6)
2781 ifp->if_capenable ^= IFCAP_TSO6;
2783 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
2784 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
2785 /* Changing these features requires us to reinit. */
2788 if (mask & IFCAP_RXCSUM)
2789 ifp->if_capenable ^= IFCAP_RXCSUM;
2790 if (mask & IFCAP_RXCSUM_IPV6)
2791 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2792 if (mask & IFCAP_LRO)
2793 ifp->if_capenable ^= IFCAP_LRO;
2794 if (mask & IFCAP_VLAN_HWTAGGING)
2795 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2796 if (mask & IFCAP_VLAN_HWFILTER)
2797 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2801 if (mask & IFCAP_VLAN_HWTSO)
2802 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2804 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2805 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2806 vmxnet3_init_locked(sc);
2809 VMXNET3_CORE_UNLOCK(sc);
2810 VLAN_CAPABILITIES(ifp);
2814 error = ether_ioctl(ifp, cmd, data);
2818 VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
/*
 * Per-queue Tx watchdog check, called from the tick. Decrements the
 * counter; only when it reaches zero is a timeout declared and logged.
 * Returns non-zero on timeout (return is in the elided tail).
 */
2824 vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
2826 struct vmxnet3_softc *sc;
2830 VMXNET3_TXQ_LOCK(txq);
2831 if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
2832 VMXNET3_TXQ_UNLOCK(txq);
2835 VMXNET3_TXQ_UNLOCK(txq);
2837 if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
/* Ask the device to refresh the statistics in the shared queue areas. */
2843 vmxnet3_refresh_stats(struct vmxnet3_softc *sc)
2846 vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
/*
 * Periodic (1 Hz) callout, run with the core lock held: refresh device
 * statistics and run each Tx queue's watchdog; on any timeout, force a
 * reinit. Reschedules itself.
 */
2850 vmxnet3_tick(void *xsc)
2852 struct vmxnet3_softc *sc;
2860 VMXNET3_CORE_LOCK_ASSERT(sc);
2861 vmxnet3_refresh_stats(sc);
2863 for (i = 0; i < sc->vmx_ntxqueues; i++)
2864 timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
2866 if (timedout != 0) {
2867 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2868 vmxnet3_init_locked(sc);
2870 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
/*
 * Query link state from the device: bit 0 of the GET_LINK result is
 * up/down, the upper 16 bits carry the link speed (cached in softc).
 * Returns 1 if the link is up, 0 otherwise.
 */
2874 vmxnet3_link_is_up(struct vmxnet3_softc *sc)
2878 /* Also update the link speed while here. */
2879 status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
2880 sc->vmx_link_speed = status >> 16;
2881 return !!(status & 0x1);
/*
 * Reconcile cached link state with the device and notify the stack
 * via if_link_state_change() only on transitions.
 */
2885 vmxnet3_link_status(struct vmxnet3_softc *sc)
2891 link = vmxnet3_link_is_up(sc);
2893 if (link != 0 && sc->vmx_link_active == 0) {
2894 sc->vmx_link_active = 1;
2895 if_link_state_change(ifp, LINK_STATE_UP);
2896 } else if (link == 0 && sc->vmx_link_active != 0) {
2897 sc->vmx_link_active = 0;
2898 if_link_state_change(ifp, LINK_STATE_DOWN);
/*
 * ifmedia status callback: media is always reported as auto-select
 * Ethernet; only the ACTIVE bit reflects the real link state.
 */
2903 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2905 struct vmxnet3_softc *sc;
2909 ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
2910 ifmr->ifm_status = IFM_AVALID;
2912 VMXNET3_CORE_LOCK(sc);
2913 if (vmxnet3_link_is_up(sc) != 0)
2914 ifmr->ifm_status |= IFM_ACTIVE;
2916 ifmr->ifm_status |= IFM_NONE;
2917 VMXNET3_CORE_UNLOCK(sc);
/* ifmedia change callback; media is fixed (body elided in this view). */
2921 vmxnet3_media_change(struct ifnet *ifp)
/*
 * Write the cached MAC address to the device: bytes 0-3 little-endian
 * into MACL, bytes 4-5 into MACH.
 */
2929 vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
2933 ml = sc->vmx_lladdr[0];
2934 ml |= sc->vmx_lladdr[1] << 8;
2935 ml |= sc->vmx_lladdr[2] << 16;
2936 ml |= sc->vmx_lladdr[3] << 24;
2937 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
2939 mh = sc->vmx_lladdr[4];
2940 mh |= sc->vmx_lladdr[5] << 8;
2941 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
/*
 * Read the device MAC address (GET_MACL/GET_MACH commands) and unpack
 * it into the softc's 6-byte vmx_lladdr array.
 */
2945 vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
2949 ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
2950 mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
2952 sc->vmx_lladdr[0] = ml;
2953 sc->vmx_lladdr[1] = ml >> 8;
2954 sc->vmx_lladdr[2] = ml >> 16;
2955 sc->vmx_lladdr[3] = ml >> 24;
2956 sc->vmx_lladdr[4] = mh;
2957 sc->vmx_lladdr[5] = mh >> 8;
/*
 * Create the per-Tx-queue sysctl subtree ("txqN"): driver-side ring
 * statistics plus a "hstats" node exposing the host-updated UPT1
 * transmit statistics from the shared queue area.
 */
2961 vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
2962 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
2964 struct sysctl_oid *node, *txsnode;
2965 struct sysctl_oid_list *list, *txslist;
2966 struct vmxnet3_txq_stats *stats;
2967 struct UPT1_TxStats *txstats;
2970 stats = &txq->vxtxq_stats;
2971 txstats = &txq->vxtxq_ts->stats;
2973 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
2974 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
2975 NULL, "Transmit Queue");
2976 txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
2978 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
2979 &stats->vtxrs_full, "Tx ring full");
2980 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
2981 &stats->vtxrs_offload_failed, "Tx checksum offload failed");
2984 * Add statistics reported by the host. These are updated once
2987 txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
2988 NULL, "Host Statistics");
2989 txslist = SYSCTL_CHILDREN(txsnode);
2990 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
2991 &txstats->TSO_packets, "TSO packets");
2992 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
2993 &txstats->TSO_bytes, "TSO bytes");
2994 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
2995 &txstats->ucast_packets, "Unicast packets");
2996 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
2997 &txstats->ucast_bytes, "Unicast bytes");
2998 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
2999 &txstats->mcast_packets, "Multicast packets");
3000 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3001 &txstats->mcast_bytes, "Multicast bytes");
3002 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
3003 &txstats->error, "Errors");
3004 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
3005 &txstats->discard, "Discards");
/*
 * Create the per-Rx-queue sysctl subtree ("rxqN") with a "hstats" node
 * exposing the host-updated UPT1 receive statistics from the shared
 * queue area.
 */
3009 vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
3010 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3012 struct sysctl_oid *node, *rxsnode;
3013 struct sysctl_oid_list *list, *rxslist;
3014 struct vmxnet3_rxq_stats *stats;
3015 struct UPT1_RxStats *rxstats;
3018 stats = &rxq->vxrxq_stats;
3019 rxstats = &rxq->vxrxq_rs->stats;
3021 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
3022 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3023 NULL, "Receive Queue");
3024 rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
3027 * Add statistics reported by the host. These are updated once
3030 rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3031 NULL, "Host Statistics");
3032 rxslist = SYSCTL_CHILDREN(rxsnode);
3033 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
3034 &rxstats->LRO_packets, "LRO packets");
3035 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
3036 &rxstats->LRO_bytes, "LRO bytes");
3037 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3038 &rxstats->ucast_packets, "Unicast packets");
3039 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3040 &rxstats->ucast_bytes, "Unicast bytes");
3041 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3042 &rxstats->mcast_packets, "Multicast packets");
3043 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3044 &rxstats->mcast_bytes, "Multicast bytes");
3045 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
3046 &rxstats->bcast_packets, "Broadcast packets");
3047 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
3048 &rxstats->bcast_bytes, "Broadcast bytes");
3049 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
3050 &rxstats->nobuffer, "No buffer");
3051 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
3052 &rxstats->error, "Errors")
3055 #ifdef VMXNET3_DEBUG_SYSCTL
3057 vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
3058 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3060 struct sysctl_oid *node;
3061 struct sysctl_oid_list *list;
3064 for (i = 0; i < sc->vmx_ntxqueues; i++) {
3065 struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
3067 node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
3068 "debug", CTLFLAG_RD, NULL, "");
3069 list = SYSCTL_CHILDREN(node);
3071 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
3072 &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
3073 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
3074 &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
3075 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
3076 &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
3077 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
3078 &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
3079 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3080 &txq->vxtxq_comp_ring.vxcr_next, 0, "");
3081 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3082 &txq->vxtxq_comp_ring.vxcr_ndesc, 0,"");
3083 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3084 &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
3087 for (i = 0; i < sc->vmx_nrxqueues; i++) {
3088 struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
3090 node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
3091 "debug", CTLFLAG_RD, NULL, "");
3092 list = SYSCTL_CHILDREN(node);
3094 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
3095 &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
3096 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
3097 &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
3098 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
3099 &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
3100 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
3101 &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
3102 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
3103 &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
3104 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
3105 &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
3106 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3107 &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
3108 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3109 &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,"");
3110 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3111 &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
3117 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
3118 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3122 for (i = 0; i < sc->vmx_ntxqueues; i++)
3123 vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
3124 for (i = 0; i < sc->vmx_nrxqueues; i++)
3125 vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
3127 #ifdef VMXNET3_DEBUG_SYSCTL
3128 vmxnet3_setup_debug_sysctl(sc, ctx, child);
3133 vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
3136 struct vmxnet3_statistics *stats;
3137 struct sysctl_ctx_list *ctx;
3138 struct sysctl_oid *tree;
3139 struct sysctl_oid_list *child;
3142 ctx = device_get_sysctl_ctx(dev);
3143 tree = device_get_sysctl_tree(dev);
3144 child = SYSCTL_CHILDREN(tree);
3146 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
3147 &sc->vmx_ntxqueues, 0, "Number of Tx queues");
3148 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
3149 &sc->vmx_nrxqueues, 0, "Number of Rx queues");
3151 stats = &sc->vmx_stats;
3152 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "collapsed", CTLFLAG_RD,
3153 &stats->vmst_collapsed, 0, "Tx mbuf chains collapsed");
3154 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
3155 &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
3156 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
3157 &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
3159 vmxnet3_setup_queue_sysctl(sc, ctx, child);
3163 vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3166 bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
3170 vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
3173 return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
3177 vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3180 bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
3184 vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3187 vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
3191 vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3194 vmxnet3_write_cmd(sc, cmd);
3195 bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
3196 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3197 return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
/* Unmask interrupt vector irq (0 in the IMASK register = enabled). */
static void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}
/* Mask interrupt vector irq (1 in the IMASK register = disabled). */
static void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}
3215 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3219 sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3220 for (i = 0; i < sc->vmx_nintrs; i++)
3221 vmxnet3_enable_intr(sc, i);
3225 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3229 sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3230 for (i = 0; i < sc->vmx_nintrs; i++)
3231 vmxnet3_disable_intr(sc, i);
3235 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3237 bus_addr_t *baddr = arg;
3240 *baddr = segs->ds_addr;
3244 vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3245 struct vmxnet3_dma_alloc *dma)
3251 bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3253 error = bus_dma_tag_create(bus_get_dma_tag(dev),
3254 align, 0, /* alignment, bounds */
3255 BUS_SPACE_MAXADDR, /* lowaddr */
3256 BUS_SPACE_MAXADDR, /* highaddr */
3257 NULL, NULL, /* filter, filterarg */
3260 size, /* maxsegsize */
3261 BUS_DMA_ALLOCNOW, /* flags */
3262 NULL, /* lockfunc */
3263 NULL, /* lockfuncarg */
3266 device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
3270 error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
3271 BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
3273 device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
3277 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3278 size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
3280 device_printf(dev, "bus_dmamap_load failed: %d\n", error);
3284 dma->dma_size = size;
3288 vmxnet3_dma_free(sc, dma);
3294 vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3297 if (dma->dma_tag != NULL) {
3298 if (dma->dma_map != NULL) {
3299 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3300 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3301 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3304 if (dma->dma_vaddr != NULL) {
3305 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
3309 bus_dma_tag_destroy(dma->dma_tag);
3311 bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3315 vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
3319 snprintf(path, sizeof(path),
3320 "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
3321 TUNABLE_INT_FETCH(path, &def);
3327 * Since this is a purely paravirtualized device, we do not have
3328 * to worry about DMA coherency. But at times, we must make sure
3329 * both the compiler and CPU do not reorder memory operations.
3332 vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
3336 case VMXNET3_BARRIER_RD:
3339 case VMXNET3_BARRIER_WR:
3342 case VMXNET3_BARRIER_RDWR:
3346 panic("%s: bad barrier type %d", __func__, type);