2 * Copyright (c) 2011 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
36 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/taskqueue.h>
42 #include <sys/pciio.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pci_private.h>
46 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <net/ethernet.h>
54 #include <net/if_types.h>
55 #include <net/if_dl.h>
57 #include "common/t4_hw.h"
58 #include "common/common.h"
59 #include "common/t4_regs.h"
60 #include "common/t4_regs_values.h"
61 #include "common/t4fw_interface.h"
/*
 * NOTE(review): the embedded original line numbers in this extract are
 * discontinuous — several lines (array/struct terminators, field
 * initializers) have been elided.  Comments below describe only what is
 * visible; confirm against the full source.
 */
64 /* T4 bus driver interface */
65 static int t4_probe(device_t);
66 static int t4_attach(device_t);
67 static int t4_detach(device_t);
/* newbus method table wiring probe/attach/detach for the T4 PCI device. */
68 static device_method_t t4_methods[] = {
69 DEVMETHOD(device_probe, t4_probe),
70 DEVMETHOD(device_attach, t4_attach),
71 DEVMETHOD(device_detach, t4_detach),
72 DEVMETHOD(bus_print_child, bus_generic_print_child),
75 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* Adapter softc is a struct adapter (size passed to newbus here). */
79 static driver_t t4_driver = {
82 sizeof(struct adapter)
86 /* T4 port (cxgbe) interface */
87 static int cxgbe_probe(device_t);
88 static int cxgbe_attach(device_t);
89 static int cxgbe_detach(device_t);
/* Method table for the per-port child devices ("cxgbe"). */
90 static device_method_t cxgbe_methods[] = {
91 DEVMETHOD(device_probe, cxgbe_probe),
92 DEVMETHOD(device_attach, cxgbe_attach),
93 DEVMETHOD(device_detach, cxgbe_detach),
/* Per-port softc is a struct port_info. */
96 static driver_t cxgbe_driver = {
99 sizeof(struct port_info)
/* Character-device entry points (ioctl interface for userland tools). */
102 static d_ioctl_t t4_ioctl;
103 static d_open_t t4_open;
104 static d_close_t t4_close;
106 static struct cdevsw t4_cdevsw = {
107 .d_version = D_VERSION,
115 /* ifnet + media interface */
116 static void cxgbe_init(void *);
117 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
118 static void cxgbe_start(struct ifnet *);
119 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
120 static void cxgbe_qflush(struct ifnet *);
121 static int cxgbe_media_change(struct ifnet *);
122 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
/* Driver-private malloc(9) type used for all cxgbe allocations. */
124 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
/*
 * Loader tunables and sysctl knobs (hw.cxgbe.*).  All are CTLFLAG_RDTUN:
 * settable only at boot via loader.conf, read-only afterwards.
 */
129 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters");
/* Nonzero forces a firmware (re)install on every attach; see prep_firmware. */
131 static int force_firmware_install = 0;
132 TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
133 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
134 &force_firmware_install, 0, "install firmware on every attach.");
137 * Holdoff timer and packet counter values.
/* Candidate SGE interrupt-holdoff values, copied (clamped) into the
 * adapter's sge.timer_val/counter_val arrays during t4_attach. */
139 static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
140 static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
143 * Max # of tx and rx queues to use for each 10G and 1G port.
145 static unsigned int max_ntxq_10g = 8;
146 TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
147 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
148 &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");
150 static unsigned int max_nrxq_10g = 8;
151 TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
152 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
153 &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port).");
155 static unsigned int max_ntxq_1g = 2;
156 TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
157 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
158 &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");
160 static unsigned int max_nrxq_1g = 2;
161 TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
162 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
163 &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port).");
166 * Holdoff parameters for 10G and 1G ports.
/* Indices into the intr_timer[]/intr_pktcount[] arrays above. */
168 static unsigned int tmr_idx_10g = 1;
169 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
170 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
172 "default timer index for interrupt holdoff (10G ports).");
174 static int pktc_idx_10g = 2;
175 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
176 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
178 "default pkt counter index for interrupt holdoff (10G ports).");
180 static unsigned int tmr_idx_1g = 1;
181 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
182 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
184 "default timer index for interrupt holdoff (1G ports).");
186 static int pktc_idx_1g = 2;
187 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
188 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
190 "default pkt counter index for interrupt holdoff (1G ports).");
193 * Size (# of entries) of each tx and rx queue.
195 static unsigned int qsize_txq = TX_EQ_QSIZE;
196 TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
197 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
198 &qsize_txq, 0, "default queue size of NIC tx queues.");
200 static unsigned int qsize_rxq = RX_IQ_QSIZE;
201 TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
202 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
203 &qsize_rxq, 0, "default queue size of NIC rx queues.");
206 * Interrupt types allowed.
/* Bitmask consumed by cfg_itype_and_nqueues(); MSI-X preferred. */
208 static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
209 TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
210 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
211 "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");
214 * Force the driver to use interrupt forwarding.
216 static int intr_fwd = 0;
217 TUNABLE_INT("hw.cxgbe.interrupt_forwarding", &intr_fwd);
218 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_forwarding, CTLFLAG_RDTUN,
219 &intr_fwd, 0, "always use forwarded interrupts");
/*
 * Result of interrupt/queue sizing done by cfg_itype_and_nqueues().
 * NOTE(review): the closing brace and the enum headers for the two
 * anonymous enums below are elided in this extract.
 */
221 struct intrs_and_queues {
222 int intr_type; /* INTx, MSI, or MSI-X */
223 int nirq; /* Number of vectors */
224 int intr_fwd; /* Interrupts forwarded */
225 int ntxq10g; /* # of NIC txq's for each 10G port */
226 int nrxq10g; /* # of NIC rxq's for each 10G port */
227 int ntxq1g; /* # of NIC txq's for each 1G port */
228 int nrxq1g; /* # of NIC rxq's for each 1G port */
/* PCIe memory-window apertures/bases programmed by setup_memwin(). */
232 MEMWIN0_APERTURE = 2048,
233 MEMWIN0_BASE = 0x1b800,
234 MEMWIN1_APERTURE = 32768,
235 MEMWIN1_BASE = 0x28000,
236 MEMWIN2_APERTURE = 65536,
237 MEMWIN2_BASE = 0x30000,
/* Flag bits telling update_mac_settings() which MAC settings to apply. */
241 XGMAC_MTU = (1 << 0),
242 XGMAC_PROMISC = (1 << 1),
243 XGMAC_ALLMULTI = (1 << 2),
244 XGMAC_VLANEX = (1 << 3),
245 XGMAC_UCADDR = (1 << 4),
246 XGMAC_MCADDRS = (1 << 5),
/* Forward declarations for the file-local helpers defined below. */
251 static int map_bars(struct adapter *);
252 static void setup_memwin(struct adapter *);
253 static int cfg_itype_and_nqueues(struct adapter *, int, int,
254 struct intrs_and_queues *);
255 static int prep_firmware(struct adapter *);
256 static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
257 static int get_params(struct adapter *, struct fw_caps_config_cmd *);
258 static void t4_set_desc(struct adapter *);
259 static void build_medialist(struct port_info *);
260 static int update_mac_settings(struct port_info *, int);
/* *_locked variants expect the adapter lock held; *_synchronized do the work. */
261 static int cxgbe_init_locked(struct port_info *);
262 static int cxgbe_init_synchronized(struct port_info *);
263 static int cxgbe_uninit_locked(struct port_info *);
264 static int cxgbe_uninit_synchronized(struct port_info *);
265 static int first_port_up(struct adapter *);
266 static int last_port_down(struct adapter *);
267 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
268 iq_intr_handler_t *, void *, char *);
269 static int t4_free_irq(struct adapter *, struct irq *);
270 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
272 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
273 static void cxgbe_tick(void *);
274 static int t4_sysctls(struct adapter *);
275 static int cxgbe_sysctls(struct port_info *);
276 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
277 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
278 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
279 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
280 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
281 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
282 static inline void txq_start(struct ifnet *, struct sge_txq *);
283 static int t4_mod_event(module_t, int, void *);
/*
 * PCI device-ID table matched by t4_probe(): {device id, probe function
 * number ("mpf"), description}.  NOTE(review): the array declaration line
 * itself is elided from this extract.
 */
290 {0xa000, 0, "Chelsio Terminator 4 FPGA"},
291 {0x4400, 4, "Chelsio T440-dbg"},
292 {0x4401, 4, "Chelsio T420-CR"},
293 {0x4402, 4, "Chelsio T422-CR"},
294 {0x4403, 4, "Chelsio T440-CR"},
295 {0x4404, 4, "Chelsio T420-BCH"},
296 {0x4405, 4, "Chelsio T440-BCH"},
297 {0x4406, 4, "Chelsio T440-CH"},
298 {0x4407, 4, "Chelsio T420-SO"},
299 {0x4408, 4, "Chelsio T420-CX"},
300 {0x4409, 4, "Chelsio T420-BT"},
301 {0x440a, 4, "Chelsio T404-BT"},
/*
 * Newbus probe: match the PCI vendor/device/function against t4_pciids[]
 * and set the device description on a hit.  NOTE(review): the return type
 * line, the mismatch return, and closing braces are elided in this extract.
 */
305 t4_probe(device_t dev)
308 uint16_t v = pci_get_vendor(dev);
309 uint16_t d = pci_get_device(dev);
/* Bail out early for non-Chelsio devices. */
311 if (v != PCI_VENDOR_ID_CHELSIO)
314 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
/* Only the designated "master" PCI function (mpf) attaches the driver. */
315 if (d == t4_pciids[i].device &&
316 pci_get_function(dev) == t4_pciids[i].mpf) {
317 device_set_desc(dev, t4_pciids[i].desc);
318 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach for the T4 adapter: bring up PCI, prepare the chip and
 * firmware, size interrupts/queues, and create one "cxgbe" child device
 * per port.  NOTE(review): many lines (error checks, braces, the done:
 * label and cleanup path) are elided in this extract; comments describe
 * only the visible flow.
 */
326 t4_attach(device_t dev)
329 int rc = 0, i, n10g, n1g, rqidx, tqidx;
330 struct fw_caps_config_cmd caps;
332 struct intrs_and_queues iaq;
335 sc = device_get_softc(dev);
337 sc->pf = pci_get_function(dev);
/* Basic PCI setup: bus mastering on, max read request 4KB. */
340 pci_enable_busmaster(dev);
341 pci_set_max_read_req(dev, 4096);
342 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
343 device_get_nameunit(dev));
344 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
348 goto done; /* error message displayed already */
350 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
352 /* Prepare the adapter for operation */
/* t4_prep_adapter returns negative errno; negate to kernel convention. */
353 rc = -t4_prep_adapter(sc);
355 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
359 /* Do this really early */
/* Character device for userland ioctl access (t4_cdevsw above). */
360 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
361 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
362 sc->cdev->si_drv1 = sc;
364 /* Prepare the firmware for operation */
365 rc = prep_firmware(sc);
367 goto done; /* error message displayed already */
369 /* Get device capabilities and select which ones we'll use */
370 rc = get_capabilities(sc, &caps);
373 "failed to initialize adapter capabilities: %d.\n", rc);
377 /* Choose the global RSS mode. */
378 rc = -t4_config_glbl_rss(sc, sc->mbox,
379 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
380 F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
381 F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
384 "failed to select global RSS mode: %d.\n", rc);
388 /* These are total (sum of all ports) limits for a bus driver */
389 rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
390 64, /* max # of egress queues */
391 64, /* max # of egress Ethernet or control queues */
392 64, /* max # of ingress queues with fl/interrupt */
393 0, /* max # of ingress queues without interrupt */
394 0, /* PCIe traffic class */
395 4, /* max # of virtual interfaces */
396 M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
397 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
400 "failed to configure pf/vf resources: %d.\n", rc);
404 /* Need this before sge_init */
/* Clamp holdoff tunables to hardware limits (200us timer, threshold max). */
405 for (i = 0; i < SGE_NTIMERS; i++)
406 sc->sge.timer_val[i] = min(intr_timer[i], 200U);
407 for (i = 0; i < SGE_NCOUNTERS; i++)
408 sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);
410 /* Also need the cooked value of cclk before sge_init */
411 p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
412 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
413 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
415 device_printf(sc->dev,
416 "failed to obtain core clock value: %d.\n", rc);
419 sc->params.vpd.cclk = v;
424 * XXX: This is the place to call t4_set_filter_mode()
427 /* get basic stuff going */
428 rc = -t4_early_init(sc, sc->mbox);
430 device_printf(dev, "early init failed: %d.\n", rc);
434 rc = get_params(sc, &caps);
436 goto done; /* error message displayed already */
438 /* These are finalized by FW initialization, load their values now */
439 v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
440 sc->params.tp.tre = G_TIMERRESOLUTION(v);
441 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
442 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
444 /* tweak some settings */
445 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
446 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
447 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
448 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
452 rc = t4_create_dma_tag(sc);
454 goto done; /* error message displayed already */
457 * First pass over all the ports - allocate VIs and initialize some
458 * basic parameters like mac address, port type, etc. We also figure
459 * out whether a port is 10G or 1G and use that information when
460 * calculating how many interrupts to attempt to allocate.
463 for_each_port(sc, i) {
464 struct port_info *pi;
466 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
469 /* These must be set before t4_port_init */
473 /* Allocate the vi and initialize parameters like mac addr */
474 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
476 device_printf(dev, "unable to initialize port %d: %d\n",
/* Port init failure is non-fatal: mark the slot and continue. */
479 sc->port[i] = NULL; /* indicates init failed */
483 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
484 device_get_nameunit(dev), i);
485 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
/* Pick per-port holdoff defaults based on link speed class. */
487 if (is_10G_port(pi)) {
489 pi->tmr_idx = tmr_idx_10g;
490 pi->pktc_idx = pktc_idx_10g;
493 pi->tmr_idx = tmr_idx_1g;
494 pi->pktc_idx = pktc_idx_1g;
497 pi->xact_addr_filt = -1;
/* Queue sizes: at least 128 entries; rxq size rounded to a multiple of 8
 * (the elided loop body presumably adjusts qsize_rxq — TODO confirm). */
499 pi->qsize_rxq = max(qsize_rxq, 128);
500 while (pi->qsize_rxq & 7)
502 pi->qsize_txq = max(qsize_txq, 128);
504 if (pi->qsize_rxq != qsize_rxq) {
506 "using %d instead of %d as the rx queue size.\n",
507 pi->qsize_rxq, qsize_rxq);
509 if (pi->qsize_txq != qsize_txq) {
511 "using %d instead of %d as the tx queue size.\n",
512 pi->qsize_txq, qsize_txq);
515 pi->dev = device_add_child(dev, "cxgbe", -1);
516 if (pi->dev == NULL) {
518 "failed to add device for port %d.\n", i);
522 device_set_softc(pi->dev, pi);
524 setbit(&sc->registered_device_map, i);
527 if (sc->registered_device_map == 0) {
528 device_printf(dev, "no usable ports\n");
534 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
536 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
538 goto done; /* error message displayed already */
540 sc->intr_type = iaq.intr_type;
541 sc->intr_count = iaq.nirq;
/* Totals across all ports; used to size the arrays allocated below. */
544 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
545 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
546 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */
547 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
549 sc->flags |= INTR_FWD;
550 s->niq += NFIQ(sc); /* forwarded interrupt queues */
551 s->fiq = malloc(NFIQ(sc) * sizeof(struct sge_iq), M_CXGBE,
554 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
556 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
558 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
560 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
563 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
569 * Second pass over the ports. This time we know the number of rx and
570 * tx queues that each port should get.
573 for_each_port(sc, i) {
574 struct port_info *pi = sc->port[i];
/* Assign each port a contiguous slice of the global rxq/txq arrays. */
579 pi->first_rxq = rqidx;
580 pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;
582 pi->first_txq = tqidx;
583 pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;
589 rc = bus_generic_attach(dev);
592 "failed to attach all child ports: %d\n", rc);
598 "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
599 sc, sc->params.nports, sc->params.portvec,
600 sc->intr_type, sc->intr_count);
/*
 * Newbus detach: tear down child ports, say goodbye to the firmware, and
 * release every resource acquired in t4_attach (in roughly reverse order).
 * NOTE(review): several lines (braces, null checks) are elided.
 */
615 t4_detach(device_t dev)
618 struct port_info *pi;
621 sc = device_get_softc(dev);
624 destroy_dev(sc->cdev);
/* Detach and free each registered port child. */
626 bus_generic_detach(dev);
627 for (i = 0; i < MAX_NPORTS; i++) {
630 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
632 device_delete_child(dev, pi->dev);
634 mtx_destroy(&pi->pi_lock);
/* Only notify firmware if contact was established (FW_OK). */
639 if (sc->flags & FW_OK)
640 t4_fw_bye(sc, sc->mbox);
642 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
643 pci_release_msi(dev);
646 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
650 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
/* free(9) of NULL is a no-op, so unconditional frees are safe here. */
653 free(sc->irq, M_CXGBE);
654 free(sc->sge.rxq, M_CXGBE);
655 free(sc->sge.txq, M_CXGBE);
656 free(sc->sge.fiq, M_CXGBE);
657 free(sc->sge.iqmap, M_CXGBE);
658 free(sc->sge.eqmap, M_CXGBE);
659 t4_destroy_dma_tag(sc);
660 mtx_destroy(&sc->sc_lock);
/* Scrub the softc so a re-attach starts from a clean state. */
662 bzero(sc, sizeof(*sc));
/* Probe for a port child device: always matches; sets "Port N" description. */
669 cxgbe_probe(device_t dev)
672 struct port_info *pi = device_get_softc(dev);
674 snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
675 device_set_desc_copy(dev, buf);
677 return (BUS_PROBE_DEFAULT);
/* Full capability set advertised on the ifnet; TSO6 is masked out of the
 * default-enabled set below (elided line 682 presumably completes T4_CAP). */
680 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
681 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
683 #define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
/*
 * Attach a port: allocate and configure the ifnet, create the per-port
 * taskqueue, set up ifmedia, and attach to the ethernet stack.
 * NOTE(review): error-path lines and braces are elided in this extract.
 */
686 cxgbe_attach(device_t dev)
688 struct port_info *pi = device_get_softc(dev);
691 /* Allocate an ifnet and set it up */
692 ifp = if_alloc(IFT_ETHER);
694 device_printf(dev, "Cannot allocate ifnet\n");
700 callout_init(&pi->tick, CALLOUT_MPSAFE);
/* Single-thread taskqueue for deferred per-port work. */
701 pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT,
702 taskqueue_thread_enqueue, &pi->tq);
703 if (pi->tq == NULL) {
704 device_printf(dev, "failed to allocate port task queue\n");
708 taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq",
709 device_get_nameunit(dev));
711 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
712 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
714 ifp->if_init = cxgbe_init;
715 ifp->if_ioctl = cxgbe_ioctl;
716 ifp->if_start = cxgbe_start;
717 ifp->if_transmit = cxgbe_transmit;
718 ifp->if_qflush = cxgbe_qflush;
720 ifp->if_snd.ifq_drv_maxlen = 1024;
721 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
722 IFQ_SET_READY(&ifp->if_snd);
/* Advertise the full capability set; enable all but TSO6 by default. */
724 ifp->if_capabilities = T4_CAP;
725 ifp->if_capenable = T4_CAP_ENABLE;
726 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
728 /* Initialize ifmedia for this port */
729 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
733 ether_ifattach(ifp, pi->hw_addr);
736 device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
/*
 * Detach a port: mark it doomed so concurrent ioctl/init back off, wait
 * for in-flight operations, quiesce the port, then tear down the ifnet.
 * NOTE(review): the doom-flag manipulation lines are elided here.
 */
745 cxgbe_detach(device_t dev)
747 struct port_info *pi = device_get_softc(dev);
748 struct adapter *sc = pi->adapter;
751 /* Tell if_ioctl and if_init that the port is going away */
/* Sleep until any in-progress operation on the adapter completes. */
756 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
760 rc = cxgbe_uninit_synchronized(pi);
762 device_printf(dev, "port uninit failed: %d.\n", rc);
764 taskqueue_free(pi->tq);
766 ifmedia_removeall(&pi->media);
767 ether_ifdetach(pi->ifp);
/* Wake one waiter blocked on sc->flags (see mtx_sleep above). */
772 wakeup_one(&sc->flags);
/* if_init entry point: defer to cxgbe_init_locked(), which takes and
 * releases the adapter lock itself. */
779 cxgbe_init(void *arg)
781 struct port_info *pi = arg;
782 struct adapter *sc = pi->adapter;
785 cxgbe_init_locked(pi); /* releases adapter lock */
786 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
/*
 * if_ioctl entry point.  Handles MTU, flags, multicast, capability, and
 * media ioctls; anything else falls through to ether_ioctl().
 * NOTE(review): the switch statement, several case labels, lock/unlock
 * calls and braces are elided in this extract — the visible lines are the
 * bodies of the surviving cases.
 */
790 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
792 int rc = 0, mtu, flags;
793 struct port_info *pi = ifp->if_softc;
794 struct adapter *sc = pi->adapter;
795 struct ifreq *ifr = (struct ifreq *)data;
/* Refuse work if the port is being detached (doomed) or adapter is busy. */
801 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
/* SIOCSIFMTU (presumably): validate range, then push new fl buffer size. */
809 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
813 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
814 t4_update_fl_bufsize(ifp);
816 rc = update_mac_settings(pi, XGMAC_MTU);
/* SIOCSIFFLAGS (presumably): apply promisc/allmulti deltas or up/down. */
829 if (ifp->if_flags & IFF_UP) {
830 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831 flags = pi->if_flags;
832 if ((ifp->if_flags ^ flags) &
833 (IFF_PROMISC | IFF_ALLMULTI)) {
839 rc = update_mac_settings(pi,
840 XGMAC_PROMISC | XGMAC_ALLMULTI);
845 rc = cxgbe_init_locked(pi);
846 pi->if_flags = ifp->if_flags;
847 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
848 rc = cxgbe_uninit_locked(pi);
852 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
856 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
858 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
862 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
864 rc = update_mac_settings(pi, XGMAC_MCADDRS);
/* SIOCSIFCAP (presumably): toggle capabilities requested by userland. */
872 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
876 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
877 if (mask & IFCAP_TXCSUM) {
878 ifp->if_capenable ^= IFCAP_TXCSUM;
879 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
/* TSO requires tx checksum offload; drop TSO when txcsum goes away. */
881 if (IFCAP_TSO & ifp->if_capenable &&
882 !(IFCAP_TXCSUM & ifp->if_capenable)) {
883 ifp->if_capenable &= ~IFCAP_TSO;
884 ifp->if_hwassist &= ~CSUM_TSO;
886 "tso disabled due to -txcsum.\n");
889 if (mask & IFCAP_RXCSUM)
890 ifp->if_capenable ^= IFCAP_RXCSUM;
891 if (mask & IFCAP_TSO4) {
892 ifp->if_capenable ^= IFCAP_TSO4;
894 if (IFCAP_TSO & ifp->if_capenable) {
895 if (IFCAP_TXCSUM & ifp->if_capenable)
896 ifp->if_hwassist |= CSUM_TSO;
898 ifp->if_capenable &= ~IFCAP_TSO;
899 ifp->if_hwassist &= ~CSUM_TSO;
901 "enable txcsum first.\n");
905 ifp->if_hwassist &= ~CSUM_TSO;
907 if (mask & IFCAP_LRO) {
/* Propagate the LRO toggle to every rx queue's flag word. */
912 ifp->if_capenable ^= IFCAP_LRO;
913 for_each_rxq(pi, i, rxq) {
914 if (ifp->if_capenable & IFCAP_LRO)
915 rxq->flags |= RXQ_LRO_ENABLED;
917 rxq->flags &= ~RXQ_LRO_ENABLED;
921 #ifndef TCP_OFFLOAD_DISABLE
922 if (mask & IFCAP_TOE4) {
926 if (mask & IFCAP_VLAN_HWTAGGING) {
927 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
928 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
930 rc = update_mac_settings(pi, XGMAC_VLANEX);
934 if (mask & IFCAP_VLAN_MTU) {
935 ifp->if_capenable ^= IFCAP_VLAN_MTU;
937 /* Need to find out how to disable auto-mtu-inflation */
939 if (mask & IFCAP_VLAN_HWTSO)
940 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
941 if (mask & IFCAP_VLAN_HWCSUM)
942 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
944 #ifdef VLAN_CAPABILITIES
945 VLAN_CAPABILITIES(ifp);
/* Media ioctls (SIOCSIFMEDIA/SIOCGIFMEDIA, presumably) go to ifmedia. */
952 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
956 rc = ether_ioctl(ifp, cmd, data);
/* Legacy if_start: opportunistically drain each tx queue whose lock can
 * be taken without blocking.  NOTE(review): loop body lines are elided. */
963 cxgbe_start(struct ifnet *ifp)
965 struct port_info *pi = ifp->if_softc;
969 for_each_txq(pi, i, txq) {
970 if (TXQ_TRYLOCK(txq)) {
/*
 * if_transmit: pick a tx queue (by flowid when present), then either send
 * directly under the queue lock or enqueue on the buf_ring if the lock is
 * contended or there is a backlog.  NOTE(review): drop-path and brace
 * lines are elided in this extract.
 */
978 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
980 struct port_info *pi = ifp->if_softc;
981 struct adapter *sc = pi->adapter;
982 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
988 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
/* Spread flows across this port's tx queues using the packet's flowid. */
993 if (m->m_flags & M_FLOWID)
994 txq += (m->m_pkthdr.flowid % pi->ntxq);
997 if (TXQ_TRYLOCK(txq) == 0) {
999 * XXX: make sure that this packet really is sent out. There is
1000 * a small race where t4_eth_tx may stop draining the drbr and
1001 * goes away, just before we enqueued this mbuf.
1004 return (drbr_enqueue(ifp, br, m));
1008 * txq->m is the mbuf that is held up due to a temporary shortage of
1009 * resources and it should be put on the wire first. Then what's in
1010 * drbr and finally the mbuf that was just passed in to us.
1012 * Return code should indicate the fate of the mbuf that was passed in
1016 TXQ_LOCK_ASSERT_OWNED(txq);
1017 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1019 /* Queued for transmission. */
1021 rc = drbr_enqueue(ifp, br, m);
/* Drain the held-up mbuf (or the ring head) while we hold the lock. */
1022 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1023 (void) t4_eth_tx(ifp, txq, m);
1028 /* Direct transmission. */
1029 rc = t4_eth_tx(ifp, txq, m);
1030 if (rc != 0 && txq->m)
1031 rc = 0; /* held, will be transmitted soon (hopefully) */
/* if_qflush stub: not implemented yet; just logs. */
1038 cxgbe_qflush(struct ifnet *ifp)
1040 struct port_info *pi = ifp->if_softc;
1042 device_printf(pi->dev, "%s unimplemented.\n", __func__);
/* ifmedia change callback stub: media selection not supported yet. */
1046 cxgbe_media_change(struct ifnet *ifp)
1048 struct port_info *pi = ifp->if_softc;
1050 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1052 return (EOPNOTSUPP);
/*
 * ifmedia status callback: rebuild the media list if the port/module type
 * changed, then report link state and active media derived from the
 * firmware-reported link speed.  NOTE(review): some lines (early return
 * on link-down, the non-AUTO return path) are elided here.
 */
1056 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1058 struct port_info *pi = ifp->if_softc;
1059 struct ifmedia_entry *cur = pi->media.ifm_cur;
1060 int speed = pi->link_cfg.speed;
/* Encode port type + module type to detect transceiver changes. */
1061 int data = (pi->port_type << 8) | pi->mod_type;
1063 if (cur->ifm_data != data) {
1064 build_medialist(pi);
1065 cur = pi->media.ifm_cur;
1068 ifmr->ifm_status = IFM_AVALID;
1069 if (!pi->link_cfg.link_ok)
1072 ifmr->ifm_status |= IFM_ACTIVE;
1074 /* active and current will differ iff current media is autoselect. */
1075 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
/* Map firmware speed to an ifmedia subtype (full duplex assumed). */
1078 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1079 if (speed == SPEED_10000)
1080 ifmr->ifm_active |= IFM_10G_T;
1081 else if (speed == SPEED_1000)
1082 ifmr->ifm_active |= IFM_1000_T;
1083 else if (speed == SPEED_100)
1084 ifmr->ifm_active |= IFM_100_TX;
1085 else if (speed == SPEED_10)
1086 ifmr->ifm_active |= IFM_10_T;
1088 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
/* Fatal-error handler: stop the SGE, mask all interrupts, and log loudly. */
1093 t4_fatal_err(struct adapter *sc)
1095 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1096 t4_intr_disable(sc);
1097 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1098 device_get_nameunit(sc->dev));
/*
 * Map the two memory BARs: BAR0 (registers) and BAR4 (MSI-X tables).
 * NOTE(review): the error-return lines and the final success return are
 * elided in this extract.
 */
1102 map_bars(struct adapter *sc)
1104 sc->regs_rid = PCIR_BAR(0);
1105 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1106 &sc->regs_rid, RF_ACTIVE);
1107 if (sc->regs_res == NULL) {
1108 device_printf(sc->dev, "cannot map registers.\n");
/* Cache bus tag/handle/size for the register read/write helpers. */
1111 sc->bt = rman_get_bustag(sc->regs_res);
1112 sc->bh = rman_get_bushandle(sc->regs_res);
1113 sc->mmio_len = rman_get_size(sc->regs_res);
1115 sc->msix_rid = PCIR_BAR(4);
1116 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1117 &sc->msix_rid, RF_ACTIVE);
1118 if (sc->msix_res == NULL) {
1119 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
/*
 * Program the three PCIe memory-access windows into adapter memory.
 * Each window register takes the BAR0-relative base ORed with the BIR and
 * the log2 aperture size (relative to 1KB, hence the "- 10").
 */
1127 setup_memwin(struct adapter *sc)
1131 bar0 = rman_get_start(sc->regs_res);
1133 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1134 (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1135 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1137 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1138 (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1139 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1141 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1142 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1143 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
/*
 * Decide the interrupt type (MSI-X > MSI > INTx, filtered by the
 * intr_types tunable) and how many vectors/queues each port class gets.
 * Tries the "one vector per rxq" layout first, then falls back to
 * forwarded interrupts with fewer vectors.  Results go into *iaq.
 * NOTE(review): many lines (INTx branch, success returns, braces) are
 * elided in this extract; comments describe only the visible flow.
 */
1147 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1148 struct intrs_and_queues *iaq)
1150 int rc, itype, navail, nc, nrxq10g, nrxq1g;
1152 bzero(iaq, sizeof(*iaq));
1153 nc = mp_ncpus; /* our snapshot of the number of CPUs */
/* Iterate highest-preference type first (MSI-X, then MSI, then INTx). */
1155 for (itype = INTR_MSIX; itype; itype >>= 1) {
1157 if ((itype & intr_types) == 0)
1158 continue; /* not allowed */
1160 if (itype == INTR_MSIX)
1161 navail = pci_msix_count(sc->dev);
1162 else if (itype == INTR_MSI)
1163 navail = pci_msi_count(sc->dev);
1170 iaq->intr_type = itype;
/* Queue counts capped by both the tunables and the CPU count. */
1172 iaq->ntxq10g = min(nc, max_ntxq_10g);
1173 iaq->ntxq1g = min(nc, max_ntxq_1g);
1175 nrxq10g = min(nc, max_nrxq_10g);
1176 nrxq1g = min(nc, max_nrxq_1g);
1178 /* Extra 2 is for a) error interrupt b) firmware event */
1179 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + 2;
1180 if (iaq->nirq <= navail && intr_fwd == 0) {
/* MSI requires a power-of-2 vector count; otherwise fall through. */
1182 if (itype == INTR_MSI && !powerof2(iaq->nirq))
1185 /* One for err, one for fwq, and one for each rxq */
1188 iaq->nrxq10g = nrxq10g;
1189 iaq->nrxq1g = nrxq1g;
/* Fallback: forwarded interrupts with however many vectors exist. */
1196 if (itype == INTR_MSIX)
1199 /* navail is and must remain a pow2 for MSI */
1200 if (itype == INTR_MSI) {
1201 KASSERT(powerof2(navail),
1202 ("%d not power of 2", navail));
/* Shrink to the smallest pow2 still covering the CPU count. */
1204 while (navail / 2 > nc)
1208 iaq->nirq = navail; /* total # of interrupts */
1211 * If we have multiple vectors available reserve one
1212 * exclusively for errors. The rest will be shared by
1217 iaq->nrxq10g = min(nrxq10g, navail);
1218 iaq->nrxq1g = min(nrxq1g, navail);
/* Actually allocate the vectors; the kernel may grant fewer. */
1223 if (itype == INTR_MSIX)
1224 rc = pci_alloc_msix(sc->dev, &navail);
1225 else if (itype == INTR_MSI)
1226 rc = pci_alloc_msi(sc->dev, &navail);
1229 if (navail == iaq->nirq)
1233 * Didn't get the number requested. Use whatever number
1234 * the kernel is willing to allocate (it's in navail).
1236 pci_release_msi(sc->dev);
1240 device_printf(sc->dev,
1241 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1242 itype, rc, iaq->nirq, navail);
/* All allowed types exhausted: report what the hardware offered. */
1245 device_printf(sc->dev,
1246 "failed to find a usable interrupt type. "
1247 "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types,
1248 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1254 * Install a compatible firmware (if required), establish contact with it,
1255 * become the master, and reset the device.
/* NOTE(review): numerous lines (declarations of rc/v, braces, early
 * returns) are elided in this extract; comments describe visible flow. */
1258 prep_firmware(struct adapter *sc)
1260 const struct firmware *fw;
1262 enum dev_state state;
1264 /* Check firmware version and install a different one if necessary */
1265 rc = t4_check_fw_version(sc);
1266 if (rc != 0 || force_firmware_install) {
/* Load the firmware image module via firmware(9). */
1269 fw = firmware_get(T4_FWNAME);
1271 const struct fw_hdr *hdr = (const void *)fw->data;
1273 v = ntohl(hdr->fw_ver);
1276 * The firmware module will not be used if it isn't the
1277 * same major version as what the driver was compiled
1278 * with. This check trumps force_firmware_install.
1280 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1281 device_printf(sc->dev,
1282 "Found firmware image but version %d "
1283 "can not be used with this driver (%d)\n",
1284 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1286 firmware_put(fw, FIRMWARE_UNLOAD);
/* No usable image and the card's firmware is unusable too: give up. */
1291 if (fw == NULL && (rc < 0 || force_firmware_install)) {
1292 device_printf(sc->dev, "No usable firmware. "
1293 "card has %d.%d.%d, driver compiled with %d.%d.%d, "
1294 "force_firmware_install%s set",
1295 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1296 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1297 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1298 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1300 force_firmware_install ? "" : " not");
1305 * Always upgrade, even for minor/micro/build mismatches.
1306 * Downgrade only for a major version mismatch or if
1307 * force_firmware_install was specified.
1309 if (fw != NULL && (rc < 0 || force_firmware_install ||
1310 v > sc->params.fw_vers)) {
1311 device_printf(sc->dev,
1312 "installing firmware %d.%d.%d.%d on card.\n",
1313 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1314 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1316 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1318 device_printf(sc->dev,
1319 "failed to install firmware: %d\n", rc);
1320 firmware_put(fw, FIRMWARE_UNLOAD);
/* Refresh cached version info after a successful flash. */
1324 (void) t4_check_fw_version(sc);
1329 firmware_put(fw, FIRMWARE_UNLOAD);
1332 /* Contact firmware, request master */
1333 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1336 device_printf(sc->dev,
1337 "failed to connect to the firmware: %d.\n", rc);
/* Full device reset now that we are the master PF. */
1342 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1344 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
/* Don't say goodbye if the mailbox itself is wedged (timeout/EIO). */
1345 if (rc != ETIMEDOUT && rc != EIO)
1346 t4_fw_bye(sc, sc->mbox);
/* Record the running firmware version for sysctl/diagnostics. */
1350 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1351 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1352 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1353 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1354 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
/*
 * Read the device capabilities from the firmware, clear the NIC_VM
 * capability if it was reported, and write the adjusted capability set
 * back so the firmware knows which features the driver will use.
 * Returns 0 on success or a positive errno (t4_wr_mbox results are
 * negated).  NOTE(review): lines are elided in this excerpt.
 */
1361 get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
1365 bzero(caps, sizeof(*caps));
/* READ request: fetch the firmware's advertised capabilities. */
1366 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1367 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1368 caps->retval_len16 = htobe32(FW_LEN16(*caps));
1370 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
/* Drop FW_CAPS_CONFIG_NIC_VM from niccaps (XOR only after testing the bit). */
1374 if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
1375 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);
/* WRITE request: commit the (possibly reduced) capability set. */
1377 caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1378 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1379 rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);
/*
 * Query firmware run-time parameters and record them in the softc:
 * port vector, queue/filter ranges, and — when the corresponding
 * capability is present — TOE, RDMA, and iSCSI resource ranges.
 * NOTE(review): lines are elided in this excerpt.
 */
1385 get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
1388 uint32_t params[7], val[7];
/* Shorthand for building device-level and PF/VF-level parameter ids. */
1390 #define FW_PARAM_DEV(param) \
1391 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1392 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1393 #define FW_PARAM_PFVF(param) \
1394 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1395 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
/* Parameters every configuration needs. */
1397 params[0] = FW_PARAM_DEV(PORTVEC);
1398 params[1] = FW_PARAM_PFVF(IQFLINT_START);
1399 params[2] = FW_PARAM_PFVF(EQ_START);
1400 params[3] = FW_PARAM_PFVF(FILTER_START);
1401 params[4] = FW_PARAM_PFVF(FILTER_END);
1402 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
1404 device_printf(sc->dev,
1405 "failed to query parameters: %d.\n", rc);
1409 sc->params.portvec = val[0];
/*
 * Count the ports by popcount of the port vector: each iteration of
 * the (elided) loop clears the lowest set bit of val[0].
 */
1410 sc->params.nports = 0;
1412 sc->params.nports++;
1413 val[0] &= val[0] - 1;
1416 sc->sge.iq_start = val[1];
1417 sc->sge.eq_start = val[2];
1418 sc->tids.ftid_base = val[3];
1419 sc->tids.nftids = val[4] - val[3] + 1;	/* FILTER_END is inclusive */
1421 if (caps->toecaps) {
1422 /* query offload-related parameters */
1423 params[0] = FW_PARAM_DEV(NTID);
1424 params[1] = FW_PARAM_PFVF(SERVER_START);
1425 params[2] = FW_PARAM_PFVF(SERVER_END);
1426 params[3] = FW_PARAM_PFVF(TDDP_START);
1427 params[4] = FW_PARAM_PFVF(TDDP_END);
1428 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1429 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
1431 device_printf(sc->dev,
1432 "failed to query TOE parameters: %d.\n", rc);
1435 sc->tids.ntids = val[0];
1436 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1437 sc->tids.stid_base = val[1];
1438 sc->tids.nstids = val[2] - val[1] + 1;
1439 sc->vres.ddp.start = val[3];
1440 sc->vres.ddp.size = val[4] - val[3] + 1;
1441 sc->params.ofldq_wr_cred = val[5];
1442 sc->params.offload = 1;	/* TOE capability present */
1444 if (caps->rdmacaps) {
/* RDMA resource ranges (STag, RQ, PBL); all *_END values inclusive. */
1445 params[0] = FW_PARAM_PFVF(STAG_START);
1446 params[1] = FW_PARAM_PFVF(STAG_END);
1447 params[2] = FW_PARAM_PFVF(RQ_START);
1448 params[3] = FW_PARAM_PFVF(RQ_END);
1449 params[4] = FW_PARAM_PFVF(PBL_START);
1450 params[5] = FW_PARAM_PFVF(PBL_END);
1451 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
1453 device_printf(sc->dev,
1454 "failed to query RDMA parameters: %d.\n", rc);
1457 sc->vres.stag.start = val[0];
1458 sc->vres.stag.size = val[1] - val[0] + 1;
1459 sc->vres.rq.start = val[2];
1460 sc->vres.rq.size = val[3] - val[2] + 1;
1461 sc->vres.pbl.start = val[4];
1462 sc->vres.pbl.size = val[5] - val[4] + 1;
1464 if (caps->iscsicaps) {
/* iSCSI resource range. */
1465 params[0] = FW_PARAM_PFVF(ISCSI_START);
1466 params[1] = FW_PARAM_PFVF(ISCSI_END);
1467 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
1469 device_printf(sc->dev,
1470 "failed to query iSCSI parameters: %d.\n", rc);
1473 sc->vres.iscsi.start = val[0];
1474 sc->vres.iscsi.size = val[1] - val[0] + 1;
1476 #undef FW_PARAM_PFVF
/*
 * Build a human-readable device description (VPD id, revision, port
 * count, PCIe width, interrupt type/count, serial number, EC level)
 * and install it as the newbus device description.
 */
1484 t4_set_desc(struct adapter *sc)
1487 struct adapter_params *p = &sc->params;
1489 snprintf(buf, sizeof(buf),
1490 "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s",
/* "R" suffix marks an offload-capable (TOE) adapter. */
1491 p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
1492 p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1493 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec);
1495 device_set_desc_copy(sc->dev, buf);
/*
 * Rebuild the port's ifmedia list from its port type and (for SFP-style
 * ports) the currently inserted module type.  The ifmedia "data" word
 * encodes (port_type << 8) | mod_type.  NOTE(review): break statements
 * and some ifmedia_set calls are elided in this excerpt.
 */
1499 build_medialist(struct port_info *pi)
1501 struct ifmedia *media = &pi->media;
1506 ifmedia_removeall(media);
/* All entries below are full-duplex Ethernet. */
1508 m = IFM_ETHER | IFM_FDX;
1509 data = (pi->port_type << 8) | pi->mod_type;
1511 switch(pi->port_type) {
1512 case FW_PORT_TYPE_BT_XFI:
1513 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1516 case FW_PORT_TYPE_BT_XAUI:
1517 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1520 case FW_PORT_TYPE_BT_SGMII:
/* Tri-speed copper port; autoselect by default. */
1521 ifmedia_add(media, m | IFM_1000_T, data, NULL);
1522 ifmedia_add(media, m | IFM_100_TX, data, NULL);
1523 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
1524 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
1527 case FW_PORT_TYPE_CX4:
1528 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
1529 ifmedia_set(media, m | IFM_10G_CX4);
1532 case FW_PORT_TYPE_SFP:
1533 case FW_PORT_TYPE_FIBER_XFI:
1534 case FW_PORT_TYPE_FIBER_XAUI:
/* Media depends on the transceiver module that is plugged in. */
1535 switch (pi->mod_type) {
1537 case FW_PORT_MOD_TYPE_LR:
1538 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
1539 ifmedia_set(media, m | IFM_10G_LR);
1542 case FW_PORT_MOD_TYPE_SR:
1543 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
1544 ifmedia_set(media, m | IFM_10G_SR);
1547 case FW_PORT_MOD_TYPE_LRM:
1548 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
1549 ifmedia_set(media, m | IFM_10G_LRM);
1552 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
1553 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
1554 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
1555 ifmedia_set(media, m | IFM_10G_TWINAX);
1558 case FW_PORT_MOD_TYPE_NONE:
/* Cage is empty: no media. */
1560 ifmedia_add(media, m | IFM_NONE, data, NULL);
1561 ifmedia_set(media, m | IFM_NONE);
1564 case FW_PORT_MOD_TYPE_NA:
1565 case FW_PORT_MOD_TYPE_ER:
1567 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
1568 ifmedia_set(media, m | IFM_UNKNOWN);
1573 case FW_PORT_TYPE_KX4:
1574 case FW_PORT_TYPE_KX:
1575 case FW_PORT_TYPE_KR:
/* Backplane and any unrecognized types: report unknown media. */
1577 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
1578 ifmedia_set(media, m | IFM_UNKNOWN);
1586 * Program the port's XGMAC based on parameters in ifnet. The caller also
1587 * indicates which parameters should be programmed (the rest are left alone).
/*
 * flags is a bitmask of XGMAC_* values; -1 in the rxmode call means
 * "leave unchanged".  Must be called with the port lock held.
 * NOTE(review): lines are elided in this excerpt.
 */
1590 update_mac_settings(struct port_info *pi, int flags)
1593 struct ifnet *ifp = pi->ifp;
1594 struct adapter *sc = pi->adapter;
1595 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
1597 PORT_LOCK_ASSERT_OWNED(pi);
1598 KASSERT(flags, ("%s: not told what to update.", __func__));
/* Translate the requested flags into concrete rxmode arguments. */
1600 if (flags & XGMAC_MTU)
1603 if (flags & XGMAC_PROMISC)
1604 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
1606 if (flags & XGMAC_ALLMULTI)
1607 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
1609 if (flags & XGMAC_VLANEX)
1610 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
1612 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
1615 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
1619 if (flags & XGMAC_UCADDR) {
/* Program the interface's unicast MAC into the exact-match filter. */
1620 uint8_t ucaddr[ETHER_ADDR_LEN];
1622 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
1623 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
1624 ucaddr, true, true);
1627 if_printf(ifp, "change_mac failed: %d\n", rc);
/* On success t4_change_mac returns the filter index to reuse next time. */
1630 pi->xact_addr_filt = rc;
1635 if (flags & XGMAC_MCADDRS) {
/* Walk the interface multicast list and program each AF_LINK address. */
1636 const uint8_t *mcaddr;
1639 struct ifmultiaddr *ifma;
1641 if_maddr_rlock(ifp);
1642 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1643 if (ifma->ifma_addr->sa_family != AF_LINK)
1645 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1647 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
1648 &mcaddr, NULL, &hash, 0);
1651 if_printf(ifp, "failed to add mc address"
1652 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
1653 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
1654 mcaddr[4], mcaddr[5], rc);
/* Install the accumulated hash for addresses that didn't get a filter. */
1660 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
1662 if_printf(ifp, "failed to set mc address hash: %d", rc);
1664 if_maddr_runlock(ifp);
/*
 * Bring the port up.  Called with the adapter lock held; waits
 * (interruptibly) for the adapter to become non-busy, drops the lock
 * around the sleepable init work, and wakes the next waiter when done.
 * NOTE(review): lines are elided in this excerpt.
 */
1671 cxgbe_init_locked(struct port_info *pi)
1673 struct adapter *sc = pi->adapter;
1676 ADAPTER_LOCK_ASSERT_OWNED(sc);
/* Wait our turn; PCATCH lets a signal abort the wait. */
1678 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1679 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
1684 if (IS_DOOMED(pi)) {
1688 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1690 /* Give up the adapter lock, port init code can sleep. */
1694 rc = cxgbe_init_synchronized(pi);
1698 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
/* Hand the busy token to the next sleeper, if any. */
1700 wakeup_one(&sc->flags);
/*
 * The sleepable part of port bring-up: first-port adapter init if
 * needed, queue allocation, RSS setup, MAC programming, link start,
 * and virtual-interface enable.  Called without the adapter lock.
 * NOTE(review): lines are elided in this excerpt.
 */
1706 cxgbe_init_synchronized(struct port_info *pi)
1708 struct adapter *sc = pi->adapter;
1709 struct ifnet *ifp = pi->ifp;
1712 struct sge_rxq *rxq;
1714 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1716 if (isset(&sc->open_device_map, pi->port_id)) {
1717 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
1718 ("mismatch between open_device_map and if_drv_flags"))'
1719 return (0); /* already running */
/* First port up does the one-time adapter-wide initialization. */
1722 if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0))
1723 return (rc); /* error message displayed already */
1726 * Allocate tx/rx/fl queues for this port.
1728 rc = t4_setup_eth_queues(pi);
1730 goto done; /* error message displayed already */
1733 * Setup RSS for this port.
/* RSS table maps to the absolute ids of this port's rx queues. */
1735 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
1736 for_each_rxq(pi, i, rxq) {
1737 rss[i] = rxq->iq.abs_id;
1739 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
1743 if_printf(ifp, "rss_config failed: %d\n", rc);
1748 rc = update_mac_settings(pi, XGMAC_ALL);
1751 goto done; /* error message displayed already */
1753 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
1755 if_printf(ifp, "start_link failed: %d\n", rc);
1759 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
1761 if_printf(ifp, "enable_vi failed: %d\n", rc);
1764 pi->flags |= VI_ENABLED;
/* Mark the port open and start the 1 Hz statistics callout. */
1767 setbit(&sc->open_device_map, pi->port_id);
1768 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1769 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1771 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
/* On failure, tear down whatever was set up above. */
1774 cxgbe_uninit_synchronized(pi);
/*
 * Bring the port down.  Mirror image of cxgbe_init_locked: wait for
 * the adapter to be free, do the sleepable teardown without the lock,
 * then wake the next waiter.  NOTE(review): lines are elided here.
 */
1780 cxgbe_uninit_locked(struct port_info *pi)
1782 struct adapter *sc = pi->adapter;
1785 ADAPTER_LOCK_ASSERT_OWNED(sc);
1787 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1788 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
1793 if (IS_DOOMED(pi)) {
1797 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1801 rc = cxgbe_uninit_synchronized(pi);
1804 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1806 wakeup_one(&sc->flags);
/*
 * The sleepable part of port teardown: stop the tick callout, free the
 * port's queues, disable the virtual interface, report link down, and
 * do last-port adapter teardown if this was the only open port.
 * NOTE(review): lines are elided in this excerpt.
 */
1816 cxgbe_uninit_synchronized(struct port_info *pi)
1818 struct adapter *sc = pi->adapter;
1819 struct ifnet *ifp = pi->ifp;
1823 * taskqueue_drain may cause a deadlock if the adapter lock is held.
1825 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1828 * Clear this port's bit from the open device map, and then drain
1829 * tasks and callouts.
1831 clrbit(&sc->open_device_map, pi->port_id);
1834 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1835 callout_stop(&pi->tick);
1837 callout_drain(&pi->tick);
1840 * Stop and then free the queues' resources, including the queues
1843 * XXX: we could just stop the queues here (on ifconfig down) and free
1844 * them later (on port detach), but having up/down go through the entire
1845 * allocate/activate/deactivate/free sequence is a good way to find
1848 rc = t4_teardown_eth_queues(pi);
1850 if_printf(ifp, "teardown failed: %d\n", rc);
1852 if (pi->flags & VI_ENABLED) {
1853 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
1855 if_printf(ifp, "disable_vi failed: %d\n", rc);
1857 pi->flags &= ~VI_ENABLED;
/* Force the link state to down for the OS's benefit. */
1860 pi->link_cfg.link_ok = 0;
1861 pi->link_cfg.speed = 0;
1862 t4_os_link_changed(sc, pi->port_id, 0);
/* Last port down: presumably calls last_port_down() — body elided. */
1864 if (sc->open_device_map == 0)
/*
 * Convenience wrapper used by first_port_up(): allocate+set up one
 * interrupt and stash it in sc->irq[irqid].  The macro's continuation
 * (error handling on rc != 0) is elided in this excerpt.
 */
1870 #define T4_ALLOC_IRQ(sc, irqid, rid, handler, arg, name) do { \
1871 rc = t4_alloc_irq(sc, &sc->irq[irqid], rid, handler, arg, name); \
/*
 * One-time adapter-wide initialization done when the first port comes
 * up: create the firmware event / forwarded-interrupt queues and hook
 * up all interrupt handlers according to the interrupt mode.
 * NOTE(review): lines are elided in this excerpt.
 */
1876 first_port_up(struct adapter *sc)
1881 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1884 * The firmware event queue and the optional forwarded interrupt queues.
1886 rc = t4_setup_adapter_iqs(sc);
1893 if (sc->intr_count == 1) {
/* Single vector: one handler services errors, fwq and all data. */
1894 KASSERT(sc->flags & INTR_FWD,
1895 ("%s: single interrupt but not forwarded?", __func__));
1896 T4_ALLOC_IRQ(sc, 0, 0, t4_intr_all, sc, "all");
1898 /* Multiple interrupts. The first one is always error intr */
1899 T4_ALLOC_IRQ(sc, 0, 1, t4_intr_err, sc, "err");
1901 if (sc->flags & INTR_FWD) {
1902 /* The rest are shared by the fwq and all data intr */
1903 for (i = 1; i < sc->intr_count; i++) {
1904 snprintf(name, sizeof(name), "mux%d", i - 1);
1905 T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_fwd,
1906 &sc->sge.fiq[i - 1], name);
1909 struct port_info *pi;
/* Dedicated vectors: one for the fwq, then one per rx queue. */
1912 T4_ALLOC_IRQ(sc, 1, 2, t4_intr_evt, &sc->sge.fwq,
1917 for (i = 2; i < sc->intr_count; i++) {
1918 snprintf(name, sizeof(name), "p%dq%d", p, q);
1919 if (++q >= pi->nrxq) {
1924 T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_data,
1925 &sc->sge.rxq[i - 2], name);
1931 sc->flags |= FULL_INIT_DONE;
/*
 * Adapter-wide teardown when the last port goes down: disable
 * interrupts, destroy the adapter ingress queues, release all IRQs,
 * and clear FULL_INIT_DONE.
 */
1945 last_port_down(struct adapter *sc)
1949 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1951 t4_intr_disable(sc);
1953 t4_teardown_adapter_iqs(sc);
1955 for (i = 0; i < sc->intr_count; i++)
1956 t4_free_irq(sc, &sc->irq[i]);
1958 sc->flags &= ~FULL_INIT_DONE;
/*
 * Allocate a shareable SYS_RES_IRQ resource for the given rid, set up
 * the (filter-less, MPSAFE) interrupt handler, and attach a
 * human-readable description.  NOTE(review): error returns elided.
 */
1964 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
1965 iq_intr_handler_t *handler, void *arg, char *name)
1970 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
1971 RF_SHAREABLE | RF_ACTIVE);
1972 if (irq->res == NULL) {
1973 device_printf(sc->dev,
1974 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
/* NULL filter: handler runs in ithread context. */
1978 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
1979 NULL, handler, arg, &irq->tag);
1981 device_printf(sc->dev,
1982 "failed to setup interrupt for rid %d, name %s: %d\n",
1985 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
/*
 * Undo t4_alloc_irq: tear down the handler, release the resource, and
 * zero the struct so a later free of an unused slot is harmless.
 */
1991 t4_free_irq(struct adapter *sc, struct irq *irq)
1994 bus_teardown_intr(sc->dev, irq->res, irq->tag);
1996 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
1998 bzero(irq, sizeof(*irq));
/*
 * Copy registers [start, end] into buf at offset `start`, one 32-bit
 * read at a time.  end (second parameter line) is elided in this
 * excerpt; the range is inclusive.
 */
2004 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
/* Destination is offset by `start` so buf mirrors the register map. */
2007 uint32_t *p = (uint32_t *)(buf + start);
2009 for ( ; start <= end; start += sizeof(uint32_t))
2010 *p++ = t4_read_reg(sc, start);
/*
 * Fill in a register dump for the T4_REGDUMP ioctl.  reg_ranges is a
 * flat array of {first, last} register-address pairs (table contents
 * elided in this excerpt, orig. lines 2018-2234).
 */
2014 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
2017 static const unsigned int reg_ranges[] = {
/* Dump version: format 4, chip revision encoded in bits 10+. */
2235 regs->version = 4 | (sc->params.rev << 10);
2236 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
2237 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
/*
 * Per-port 1 Hz callout: pull the MAC statistics from the hardware and
 * mirror them into the ifnet counters, then reschedule itself.  Stops
 * rescheduling once the interface is no longer running.
 */
2241 cxgbe_tick(void *arg)
2243 struct port_info *pi = arg;
2244 struct ifnet *ifp = pi->ifp;
2245 struct sge_txq *txq;
2247 struct port_stats *s = &pi->stats;
2250 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2252 return; /* without scheduling another callout */
2255 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2257 ifp->if_opackets = s->tx_frames;
2258 ifp->if_ipackets = s->rx_frames;
2259 ifp->if_obytes = s->tx_octets;
2260 ifp->if_ibytes = s->rx_octets;
2261 ifp->if_omcasts = s->tx_mcast_frames;
2262 ifp->if_imcasts = s->rx_mcast_frames;
/* Input drops: sum of per-buffer-group overflow counters. */
2263 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
/* Output-queue drops: sum the software buf_ring drop counts. */
2267 for_each_txq(pi, i, txq)
2268 drops += txq->eq.br->br_drops;
2269 ifp->if_snd.ifq_drops = drops;
2271 ifp->if_oerrors = s->tx_error_frames;
2272 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2273 s->rx_fcs_err + s->rx_len_err;
2275 callout_schedule(&pi->tick, hz);
/*
 * Register the adapter-wide sysctl nodes under the nexus device
 * (dev.t4nex.X.*): port count, hw revision, firmware version, TOE
 * capability, core clock, and the holdoff timer/packet-count tables.
 */
2280 t4_sysctls(struct adapter *sc)
2282 struct sysctl_ctx_list *ctx;
2283 struct sysctl_oid *oid;
2284 struct sysctl_oid_list *children;
2286 ctx = device_get_sysctl_ctx(sc->dev);
2287 oid = device_get_sysctl_tree(sc->dev);
2288 children = SYSCTL_CHILDREN(oid);
2290 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2291 &sc->params.nports, 0, "# of ports");
2293 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2294 &sc->params.rev, 0, "chip hardware revision");
2296 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2297 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2299 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD,
2300 &sc->params.offload, 0, "hardware is capable of TCP offload");
2302 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2303 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");
/* Read-only dumps of the SGE interrupt holdoff configuration arrays. */
2305 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2306 CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer),
2307 sysctl_int_array, "A", "interrupt holdoff timer values (us)");
2309 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2310 CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount),
2311 sysctl_int_array, "A", "interrupt holdoff packet counter values");
/*
 * Register the per-port sysctl tree (dev.cxgbe.X.*): queue geometry,
 * tunable holdoff/queue-size knobs, and a large set of hardware MAC
 * statistics read directly from the MPS port-stat registers.
 */
2317 cxgbe_sysctls(struct port_info *pi)
2319 struct sysctl_ctx_list *ctx;
2320 struct sysctl_oid *oid;
2321 struct sysctl_oid_list *children;
2323 ctx = device_get_sysctl_ctx(pi->dev);
2328 oid = device_get_sysctl_tree(pi->dev);
2329 children = SYSCTL_CHILDREN(oid);
/* Queue geometry (read-only). */
2331 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
2332 &pi->nrxq, 0, "# of rx queues");
2333 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
2334 &pi->ntxq, 0, "# of tx queues");
2335 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
2336 &pi->first_rxq, 0, "index of first rx queue");
2337 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
2338 &pi->first_txq, 0, "index of first tx queue");
/* Writable knobs, each backed by a handler that validates the input. */
2340 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
2341 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
2342 "holdoff timer index");
2343 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
2344 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
2345 "holdoff packet counter index");
2347 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
2348 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
2350 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
2351 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
2355 * dev.cxgbe.X.stats.
2357 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
2358 NULL, "port statistics");
2359 children = SYSCTL_CHILDREN(oid);
/*
 * Each stat is a 64-bit register read on demand via
 * sysctl_handle_t4_reg64; `reg` is passed as arg2.
 */
2361 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
2362 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
2363 CTLTYPE_QUAD | CTLFLAG_RD, pi->adapter, reg, \
2364 sysctl_handle_t4_reg64, "QU", desc)
/* Transmit statistics. */
2366 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
2367 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
2368 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
2369 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
2370 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
2371 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
2372 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
2373 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
2374 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
2375 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
2376 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
2377 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
2378 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
2379 "# of tx frames in this range",
2380 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
2381 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
2382 "# of tx frames in this range",
2383 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
2384 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
2385 "# of tx frames in this range",
2386 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
2387 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
2388 "# of tx frames in this range",
2389 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
2390 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
2391 "# of tx frames in this range",
2392 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
2393 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
2394 "# of tx frames in this range",
2395 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
2396 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
2397 "# of tx frames in this range",
2398 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
2399 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
2400 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
2401 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
2402 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
2403 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
2404 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
2405 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
2406 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
2407 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
2408 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
2409 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
2410 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
2411 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
2412 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
2413 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
2414 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
2415 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
2416 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
2417 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
2418 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
/* Receive statistics. */
2420 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
2421 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
2422 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
2423 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
2424 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
2425 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
2426 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
2427 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
2428 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
2429 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
2430 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
2431 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
2432 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
2433 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
2434 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
2435 "# of frames received with bad FCS",
2436 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
2437 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
2438 "# of frames received with length error",
2439 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
2440 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
2441 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
2442 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
2443 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
2444 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
2445 "# of rx frames in this range",
2446 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
2447 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
2448 "# of rx frames in this range",
2449 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
2450 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
2451 "# of rx frames in this range",
2452 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
2453 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
2454 "# of rx frames in this range",
2455 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
2456 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
2457 "# of rx frames in this range",
2458 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
2459 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
2460 "# of rx frames in this range",
2461 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
2462 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
2463 "# of rx frames in this range",
2464 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
2465 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
2466 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
2467 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
2468 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
2469 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
2470 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
2471 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
2472 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
2473 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
2474 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
2475 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
2476 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
2477 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
2478 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
2479 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
2480 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
2481 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
2482 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
2484 #undef SYSCTL_ADD_T4_REG64
/* Soft copies kept in pi->stats, refreshed by cxgbe_tick() once a second. */
2486 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
2487 SYSCTL_ADD_QUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
2488 &pi->stats.name, desc)
2490 /* We get these from port_stats and they may be stale by upto 1s */
2491 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
2492 "# drops due to buffer-group 0 overflows");
2493 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
2494 "# drops due to buffer-group 1 overflows");
2495 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
2496 "# drops due to buffer-group 2 overflows");
2497 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
2498 "# drops due to buffer-group 3 overflows");
2499 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
2500 "# of buffer-group 0 truncated packets");
2501 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
2502 "# of buffer-group 1 truncated packets");
2503 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
2504 "# of buffer-group 2 truncated packets");
2505 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
2506 "# of buffer-group 3 truncated packets");
2508 #undef SYSCTL_ADD_T4_PORTSTAT
/*
 * Render an int array (arg1 = pointer, arg2 = size in bytes) as a
 * space-separated string for read-only sysctl consumption.
 */
2514 sysctl_int_array(SYSCTL_HANDLER_ARGS)
2519 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
2520 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
2521 sbuf_printf(&sb, "%d ", *i);
2524 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
/*
 * Read/write handler for the per-port holdoff timer index.  Validates
 * the range [0, SGE_NTIMERS) and reprograms every rx queue's interrupt
 * parameters, keeping packet-count mode if pktc_idx is in use.
 * NOTE(review): lines are elided in this excerpt.
 */
2530 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
2532 struct port_info *pi = arg1;
2533 struct adapter *sc = pi->adapter;
2534 struct sge_rxq *rxq;
2539 rc = sysctl_handle_int(oidp, &idx, 0, req);
2540 if (rc != 0 || req->newptr == NULL)
2543 if (idx < 0 || idx >= SGE_NTIMERS)
2547 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2549 for_each_rxq(pi, i, rxq) {
2550 rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
/* Keep counter mode enabled iff a packet-count index is configured. */
2551 V_QINTR_CNT_EN(pi->pktc_idx != -1);
/*
 * Read/write handler for the holdoff packet counter index.  -1 means
 * disabled; otherwise must be < SGE_NCOUNTERS.  Only changeable while
 * the port is down.  NOTE(review): lines are elided in this excerpt.
 */
2561 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
2563 struct port_info *pi = arg1;
2564 struct adapter *sc = pi->adapter;
2569 rc = sysctl_handle_int(oidp, &idx, 0, req);
2570 if (rc != 0 || req->newptr == NULL)
2573 if (idx < -1 || idx >= SGE_NCOUNTERS)
2577 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2578 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2579 rc = EBUSY; /* can be changed only when port is down */
/*
 * Read/write handler for the rx queue size.  Must be >= 128 and a
 * multiple of 8; only changeable while the port is down.
 * NOTE(review): lines are elided in this excerpt.
 */
2589 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
2591 struct port_info *pi = arg1;
2592 struct adapter *sc = pi->adapter;
2595 qsize = pi->qsize_rxq;
2597 rc = sysctl_handle_int(oidp, &qsize, 0, req);
2598 if (rc != 0 || req->newptr == NULL)
/* (qsize & 7) rejects values that are not multiples of 8. */
2601 if (qsize < 128 || (qsize & 7))
2605 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2606 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2607 rc = EBUSY; /* can be changed only when port is down */
2610 pi->qsize_rxq = qsize;
/*
 * Read/write handler for the tx queue size; only changeable while the
 * port is down.  The validity check on qsize is elided in this excerpt.
 */
2617 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
2619 struct port_info *pi = arg1;
2620 struct adapter *sc = pi->adapter;
2623 qsize = pi->qsize_txq;
2625 rc = sysctl_handle_int(oidp, &qsize, 0, req);
2626 if (rc != 0 || req->newptr == NULL)
2633 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2634 if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2635 rc = EBUSY; /* can be changed only when port is down */
2638 pi->qsize_txq = qsize;
/*
 * Read-only handler: arg1 is the adapter, arg2 (assigned to reg,
 * declaration elided) is a register address; returns the current
 * 64-bit register value.
 */
2645 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
2647 struct adapter *sc = arg1;
2651 val = t4_read_reg64(sc, reg);
2653 return (sysctl_handle_quad(oidp, &val, 0, req));
/*
 * Transmit from a tx queue with the queue lock held: a held-over frame
 * in txq->m takes priority over the buf_ring.  The dequeue loop's
 * surrounding control flow is elided in this excerpt.
 */
2657 txq_start(struct ifnet *ifp, struct sge_txq *txq)
2659 struct buf_ring *br;
2662 TXQ_LOCK_ASSERT_OWNED(txq);
2665 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
2667 t4_eth_tx(ifp, txq, m);
/*
 * Taskqueue handler that kicks transmission on a tx queue; `count` is
 * the standard task-pending count and is unused here.
 */
2671 cxgbe_txq_start(void *arg, int count)
2673 struct sge_txq *txq = arg;
2676 txq_start(txq->ifp, txq);
/*
 * OS glue: walk the PCI capability list in config space and return the
 * offset of capability `cap` (0 if absent, per the elided paths).
 * Mirrors pci_find_cap() but keyed off the cached pci_devinfo header
 * type.
 */
2681 t4_os_find_pci_capability(struct adapter *sc, int cap)
2684 struct pci_devinfo *dinfo;
2690 dinfo = device_get_ivars(dev);
/* No capability list at all if STATUS.CAP bit is clear. */
2693 status = pci_read_config(dev, PCIR_STATUS, 2);
2694 if (!(status & PCIM_STATUS_CAPPRESENT))
/* Start of the list depends on the header type. */
2697 switch (cfg->hdrtype & PCIM_HDRTYPE) {
2703 ptr = PCIR_CAP_PTR_2;
2709 ptr = pci_read_config(dev, ptr, 1);
/* Follow the linked list until the id matches or the chain ends. */
2712 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
2714 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
/* OS glue: snapshot the device's PCI config space (for reset paths). */
2721 t4_os_pci_save_state(struct adapter *sc)
2724 struct pci_devinfo *dinfo;
2727 dinfo = device_get_ivars(dev);
2729 pci_cfg_save(dev, dinfo, 0);
/* OS glue: restore the PCI config space saved by t4_os_pci_save_state. */
2734 t4_os_pci_restore_state(struct adapter *sc)
2737 struct pci_devinfo *dinfo;
2740 dinfo = device_get_ivars(dev);
2742 pci_cfg_restore(dev, dinfo);
/*
 * Called by common code when a port's transceiver module changes; log
 * what happened.  mod_str is indexed by FW_PORT_MOD_TYPE_* (slot 0 is
 * NONE, handled separately).
 */
2747 t4_os_portmod_changed(const struct adapter *sc, int idx)
2749 struct port_info *pi = sc->port[idx];
2750 static const char *mod_str[] = {
2751 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2754 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2755 if_printf(pi->ifp, "transceiver unplugged.\n");
2756 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2757 if_printf(pi->ifp, "unknown transceiver inserted.\n");
2758 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2759 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
2760 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
2761 if_printf(pi->ifp, "%s transceiver inserted.\n",
2762 mod_str[pi->mod_type]);
/* Fallback: module type known to fw but not in our string table. */
2764 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
/*
 * Called by common code on a link state change: update the ifnet
 * baudrate and notify the network stack of the new link state.
 */
2770 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
2772 struct port_info *pi = sc->port[idx];
2773 struct ifnet *ifp = pi->ifp;
2776 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
2777 if_link_state_change(ifp, LINK_STATE_UP);
2779 if_link_state_change(ifp, LINK_STATE_DOWN);
/* cdev open handler for /dev/t4nex* (body elided in this excerpt). */
2783 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
/* cdev close handler for /dev/t4nex* (body elided in this excerpt). */
2789 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
/*
 * ioctl handler for the control device.  Requires PRIV_DRIVER.
 * Visible commands: 32-bit register get/set (address must be 4-byte
 * aligned and inside the BAR) and a full register dump copied out to
 * userland.  NOTE(review): lines are elided in this excerpt.
 */
2795 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
2799 struct adapter *sc = dev->si_drv1;
2801 rc = priv_check(td, PRIV_DRIVER);
2806 case CHELSIO_T4_GETREG32: {
2807 struct t4_reg32 *edata = (struct t4_reg32 *)data;
/* Reject unaligned or out-of-BAR register addresses. */
2808 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2810 edata->val = t4_read_reg(sc, edata->addr);
2813 case CHELSIO_T4_SETREG32: {
2814 struct t4_reg32 *edata = (struct t4_reg32 *)data;
2815 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2817 t4_write_reg(sc, edata->addr, edata->val);
2820 case CHELSIO_T4_REGDUMP: {
2821 struct t4_regdump *regs = (struct t4_regdump *)data;
2822 int reglen = T4_REGDUMP_SIZE;
2825 if (regs->len < reglen) {
2826 regs->len = reglen; /* hint to the caller */
/* Dump into a kernel buffer, then copy out to the user pointer. */
2831 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
2832 t4_get_regs(sc, regs, buf);
2833 rc = copyout(buf, regs->data, reglen);
/*
 * Module event handler; on MOD_LOAD it performs one-time module
 * initialization (body elided in this excerpt).
 */
2845 t4_mod_event(module_t mod, int cmd, void *arg)
2848 if (cmd == MOD_LOAD)
/*
 * newbus glue: register the t4nex nexus driver on the pci bus (with
 * t4_mod_event as its module event handler) and the cxgbe port driver
 * as a child of t4nex.
 */
2854 static devclass_t t4_devclass;
2855 static devclass_t cxgbe_devclass;
2857 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
2858 MODULE_VERSION(t4nex, 1);
2860 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
2861 MODULE_VERSION(cxgbe, 1);