2 * Copyright (c) 2011 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
36 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/taskqueue.h>
42 #include <sys/pciio.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pci_private.h>
46 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <net/ethernet.h>
54 #include <net/if_types.h>
55 #include <net/if_dl.h>
56 #include <net/if_vlan_var.h>
58 #include "common/common.h"
59 #include "common/t4_msg.h"
60 #include "common/t4_regs.h"
61 #include "common/t4_regs_values.h"
65 /* T4 bus driver interface */
66 static int t4_probe(device_t);
67 static int t4_attach(device_t);
68 static int t4_detach(device_t);
69 static device_method_t t4_methods[] = {
70 DEVMETHOD(device_probe, t4_probe),
71 DEVMETHOD(device_attach, t4_attach),
72 DEVMETHOD(device_detach, t4_detach),
76 static driver_t t4_driver = {
79 sizeof(struct adapter)
83 /* T4 port (cxgbe) interface */
84 static int cxgbe_probe(device_t);
85 static int cxgbe_attach(device_t);
86 static int cxgbe_detach(device_t);
87 static device_method_t cxgbe_methods[] = {
88 DEVMETHOD(device_probe, cxgbe_probe),
89 DEVMETHOD(device_attach, cxgbe_attach),
90 DEVMETHOD(device_detach, cxgbe_detach),
93 static driver_t cxgbe_driver = {
96 sizeof(struct port_info)
99 static d_ioctl_t t4_ioctl;
100 static d_open_t t4_open;
101 static d_close_t t4_close;
103 static struct cdevsw t4_cdevsw = {
104 .d_version = D_VERSION,
112 /* ifnet + media interface */
113 static void cxgbe_init(void *);
114 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
115 static void cxgbe_start(struct ifnet *);
116 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
117 static void cxgbe_qflush(struct ifnet *);
118 static int cxgbe_media_change(struct ifnet *);
119 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
121 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
123 static struct mtx t4_list_lock;
124 static SLIST_HEAD(, adapter) t4_list;
125 #ifndef TCP_OFFLOAD_DISABLE
126 static struct mtx t4_uld_list_lock;
127 static SLIST_HEAD(, uld_info) t4_uld_list;
131 * Tunables. See tweak_tunables() too.
135 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
138 static int t4_ntxq10g = -1;
139 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
142 static int t4_nrxq10g = -1;
143 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
146 static int t4_ntxq1g = -1;
147 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
150 static int t4_nrxq1g = -1;
151 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
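/*
 * Illustrative only (hypothetical values): these are loader tunables and are
 * normally set in /boot/loader.conf before the driver is loaded, e.g.
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.ntxq1g="2"
 *	hw.cxgbe.nrxq1g="2"
 * The default of -1 leaves the choice to the driver (see tweak_tunables()).
 */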
153 #ifndef TCP_OFFLOAD_DISABLE
154 #define NOFLDTXQ_10G 8
155 static int t4_nofldtxq10g = -1;
156 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
158 #define NOFLDRXQ_10G 2
159 static int t4_nofldrxq10g = -1;
160 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
162 #define NOFLDTXQ_1G 2
163 static int t4_nofldtxq1g = -1;
164 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
166 #define NOFLDRXQ_1G 1
167 static int t4_nofldrxq1g = -1;
168 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
172 * Holdoff parameters for 10G and 1G ports.
174 #define TMR_IDX_10G 1
175 static int t4_tmr_idx_10g = TMR_IDX_10G;
176 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
178 #define PKTC_IDX_10G 2
179 static int t4_pktc_idx_10g = PKTC_IDX_10G;
180 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
183 static int t4_tmr_idx_1g = TMR_IDX_1G;
184 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
186 #define PKTC_IDX_1G 2
187 static int t4_pktc_idx_1g = PKTC_IDX_1G;
188 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
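/*
 * Illustrative only: e.g. hw.cxgbe.holdoff_timer_idx_10G="0" and
 * hw.cxgbe.holdoff_pktc_idx_10G="2" in /boot/loader.conf. These are indices
 * into the SGE holdoff timer/packet-count tables, not raw microsecond or
 * packet values, and they can also be adjusted per port at runtime through
 * the holdoff sysctls installed by cxgbe_sysctls() (see
 * sysctl_holdoff_tmr_idx/sysctl_holdoff_pktc_idx).
 */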
191 * Size (# of entries) of each tx and rx queue.
193 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
194 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
196 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
197 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
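/*
 * Illustrative only: e.g. hw.cxgbe.qsize_txq="1024" in /boot/loader.conf.
 * These defaults are copied into each port at attach time (pi->qsize_txq,
 * pi->qsize_rxq) and can then be tuned per port via the qsize sysctls
 * (see sysctl_qsize_txq/sysctl_qsize_rxq).
 */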
200 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
202 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
203 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
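/*
 * For example, hw.cxgbe.interrupt_types="4" in /boot/loader.conf restricts
 * the driver to MSI-X, while the default of 7 allows all three types;
 * cfg_itype_and_nqueues() tries the allowed types most capable first.
 */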
206 * Configuration file.
208 static char t4_cfg_file[32] = "default";
209 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
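/*
 * For example, hw.cxgbe.config_file="xyz" (illustrative name) makes the
 * master driver look for a firmware(9) module named t4fw_cfg_xyz; if that
 * module can't be located the default config file is used instead (see
 * prep_firmware() and partition_resources()).
 */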
212 * ASIC features that will be used. Disable the ones you don't want so that the
213 * chip resources aren't wasted on features that will not be used.
215 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
216 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
218 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
219 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
221 static int t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
222 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
224 static int t4_rdmacaps_allowed = 0;
225 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
227 static int t4_iscsicaps_allowed = 0;
228 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
230 static int t4_fcoecaps_allowed = 0;
231 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
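/*
 * Example (hypothetical): hw.cxgbe.toecaps_allowed="0" in /boot/loader.conf
 * tells the firmware not to provision TOE resources for this driver. The
 * masks are applied to the config file's capabilities via LIMIT_CAPS() in
 * partition_resources().
 */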
233 struct intrs_and_queues {
234 int intr_type; /* INTx, MSI, or MSI-X */
235 int nirq; /* Number of vectors */
237 int ntxq10g; /* # of NIC txq's for each 10G port */
238 int nrxq10g; /* # of NIC rxq's for each 10G port */
239 int ntxq1g; /* # of NIC txq's for each 1G port */
240 int nrxq1g; /* # of NIC rxq's for each 1G port */
241 #ifndef TCP_OFFLOAD_DISABLE
242 int nofldtxq10g; /* # of TOE txq's for each 10G port */
243 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
244 int nofldtxq1g; /* # of TOE txq's for each 1G port */
245 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
249 struct filter_entry {
250 uint32_t valid:1; /* filter allocated and valid */
251 uint32_t locked:1; /* filter is administratively locked */
252 uint32_t pending:1; /* filter action is pending firmware reply */
253 uint32_t smtidx:8; /* Source MAC Table index for smac */
254 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
256 struct t4_filter_specification fs;
260 XGMAC_MTU = (1 << 0),
261 XGMAC_PROMISC = (1 << 1),
262 XGMAC_ALLMULTI = (1 << 2),
263 XGMAC_VLANEX = (1 << 3),
264 XGMAC_UCADDR = (1 << 4),
265 XGMAC_MCADDRS = (1 << 5),
270 static int map_bars(struct adapter *);
271 static void setup_memwin(struct adapter *);
272 static int cfg_itype_and_nqueues(struct adapter *, int, int,
273 struct intrs_and_queues *);
274 static int prep_firmware(struct adapter *);
275 static int upload_config_file(struct adapter *, const struct firmware *,
276 uint32_t *, uint32_t *);
277 static int partition_resources(struct adapter *, const struct firmware *);
278 static int get_params__pre_init(struct adapter *);
279 static int get_params__post_init(struct adapter *);
280 static void t4_set_desc(struct adapter *);
281 static void build_medialist(struct port_info *);
282 static int update_mac_settings(struct port_info *, int);
283 static int cxgbe_init_locked(struct port_info *);
284 static int cxgbe_init_synchronized(struct port_info *);
285 static int cxgbe_uninit_locked(struct port_info *);
286 static int cxgbe_uninit_synchronized(struct port_info *);
287 static int adapter_full_init(struct adapter *);
288 static int adapter_full_uninit(struct adapter *);
289 static int port_full_init(struct port_info *);
290 static int port_full_uninit(struct port_info *);
291 static void quiesce_eq(struct adapter *, struct sge_eq *);
292 static void quiesce_iq(struct adapter *, struct sge_iq *);
293 static void quiesce_fl(struct adapter *, struct sge_fl *);
294 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
295 driver_intr_t *, void *, char *);
296 static int t4_free_irq(struct adapter *, struct irq *);
297 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
299 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
300 static void cxgbe_tick(void *);
301 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
303 static int t4_sysctls(struct adapter *);
304 static int cxgbe_sysctls(struct port_info *);
305 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
306 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
307 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
308 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
309 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
310 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
311 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
313 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
314 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
315 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
316 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
317 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
318 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
319 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
320 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
321 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
322 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
323 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
324 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
325 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
326 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
327 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
329 static inline void txq_start(struct ifnet *, struct sge_txq *);
330 static uint32_t fconf_to_mode(uint32_t);
331 static uint32_t mode_to_fconf(uint32_t);
332 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
333 static int get_filter_mode(struct adapter *, uint32_t *);
334 static int set_filter_mode(struct adapter *, uint32_t);
335 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
336 static int get_filter(struct adapter *, struct t4_filter *);
337 static int set_filter(struct adapter *, struct t4_filter *);
338 static int del_filter(struct adapter *, struct t4_filter *);
339 static void clear_filter(struct filter_entry *);
340 static int set_filter_wr(struct adapter *, int);
341 static int del_filter_wr(struct adapter *, int);
342 static int filter_rpl(struct sge_iq *, const struct rss_header *,
344 static int get_sge_context(struct adapter *, struct t4_sge_context *);
345 static int read_card_mem(struct adapter *, struct t4_mem_range *);
346 #ifndef TCP_OFFLOAD_DISABLE
347 static int toe_capability(struct port_info *, int);
348 static int activate_uld(struct adapter *, int, struct uld_softc *);
349 static int deactivate_uld(struct uld_softc *);
351 static int t4_mod_event(module_t, int, void *);
358 {0xa000, 0, "Chelsio Terminator 4 FPGA"},
359 {0x4400, 4, "Chelsio T440-dbg"},
360 {0x4401, 4, "Chelsio T420-CR"},
361 {0x4402, 4, "Chelsio T422-CR"},
362 {0x4403, 4, "Chelsio T440-CR"},
363 {0x4404, 4, "Chelsio T420-BCH"},
364 {0x4405, 4, "Chelsio T440-BCH"},
365 {0x4406, 4, "Chelsio T440-CH"},
366 {0x4407, 4, "Chelsio T420-SO"},
367 {0x4408, 4, "Chelsio T420-CX"},
368 {0x4409, 4, "Chelsio T420-BT"},
369 {0x440a, 4, "Chelsio T404-BT"},
372 #ifndef TCP_OFFLOAD_DISABLE
373 /* This is used in service_iq() to get to the fl associated with an iq. */
374 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
378 t4_probe(device_t dev)
381 uint16_t v = pci_get_vendor(dev);
382 uint16_t d = pci_get_device(dev);
384 if (v != PCI_VENDOR_ID_CHELSIO)
387 for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
388 if (d == t4_pciids[i].device &&
389 pci_get_function(dev) == t4_pciids[i].mpf) {
390 device_set_desc(dev, t4_pciids[i].desc);
391 return (BUS_PROBE_DEFAULT);
399 t4_attach(device_t dev)
402 int rc = 0, i, n10g, n1g, rqidx, tqidx;
403 struct intrs_and_queues iaq;
405 #ifndef TCP_OFFLOAD_DISABLE
406 int ofld_rqidx, ofld_tqidx;
409 sc = device_get_softc(dev);
411 sc->pf = pci_get_function(dev);
414 pci_enable_busmaster(dev);
415 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
418 pci_set_max_read_req(dev, 4096);
419 v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
420 v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
421 pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
424 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
425 device_get_nameunit(dev));
426 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
427 mtx_lock(&t4_list_lock);
428 SLIST_INSERT_HEAD(&t4_list, sc, link);
429 mtx_unlock(&t4_list_lock);
431 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
432 TAILQ_INIT(&sc->sfl);
433 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
437 goto done; /* error message displayed already */
439 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
440 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++)
441 sc->cpl_handler[i] = cpl_not_handled;
442 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, filter_rpl);
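/*
 * Every CPL opcode starts out routed to cpl_not_handled(); consumers
 * register a real handler (like filter_rpl here) for the opcodes they
 * care about via t4_register_cpl_handler().
 */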
444 /* Prepare the adapter for operation */
445 rc = -t4_prep_adapter(sc);
447 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
452 * Do this really early, with the memory windows set up even before the
453 * character device. The userland tool's register i/o and mem read
454 * will work even in "recovery mode".
457 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
458 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
459 sc->cdev->si_drv1 = sc;
461 /* Go no further if recovery mode has been requested. */
462 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
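/*
 * Recovery mode is requested by setting the loader tunable hw.cxgbe.sos
 * to a non-zero value (e.g. hw.cxgbe.sos="1" in /boot/loader.conf).
 * Attach stops here so the adapter can be examined and recovered through
 * the control device created above.
 */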
463 device_printf(dev, "recovery mode.\n");
467 /* Prepare the firmware for operation */
468 rc = prep_firmware(sc);
470 goto done; /* error message displayed already */
472 rc = get_params__pre_init(sc);
474 goto done; /* error message displayed already */
476 rc = t4_sge_init(sc);
478 goto done; /* error message displayed already */
480 if (sc->flags & MASTER_PF) {
481 /* get basic stuff going */
482 rc = -t4_fw_initialize(sc, sc->mbox);
484 device_printf(dev, "early init failed: %d.\n", rc);
489 rc = get_params__post_init(sc);
491 goto done; /* error message displayed already */
493 if (sc->flags & MASTER_PF) {
495 /* final tweaks to some settings */
497 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
499 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
500 t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
501 F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
502 t4_set_reg_field(sc, A_TP_PARA_REG5,
503 V_INDICATESIZE(M_INDICATESIZE) |
504 F_REARMDDPOFFSET | F_RESETDDPOFFSET,
505 V_INDICATESIZE(M_INDICATESIZE) |
506 F_REARMDDPOFFSET | F_RESETDDPOFFSET);
509 * XXX: Verify that we can live with whatever the master driver
510 * has done so far, and hope that it doesn't change any global
511 * setting from underneath us in the future.
515 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
518 for (i = 0; i < NCHAN; i++)
519 sc->params.tp.tx_modq[i] = i;
521 rc = t4_create_dma_tag(sc);
523 goto done; /* error message displayed already */
526 * First pass over all the ports - allocate VIs and initialize some
527 * basic parameters like mac address, port type, etc. We also figure
528 * out whether a port is 10G or 1G and use that information when
529 * calculating how many interrupts to attempt to allocate.
532 for_each_port(sc, i) {
533 struct port_info *pi;
535 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
538 /* These must be set before t4_port_init */
542 /* Allocate the VI and initialize parameters like the MAC address */
543 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
545 device_printf(dev, "unable to initialize port %d: %d\n",
552 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
553 device_get_nameunit(dev), i);
554 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
556 if (is_10G_port(pi)) {
558 pi->tmr_idx = t4_tmr_idx_10g;
559 pi->pktc_idx = t4_pktc_idx_10g;
562 pi->tmr_idx = t4_tmr_idx_1g;
563 pi->pktc_idx = t4_pktc_idx_1g;
566 pi->xact_addr_filt = -1;
568 pi->qsize_rxq = t4_qsize_rxq;
569 pi->qsize_txq = t4_qsize_txq;
571 pi->dev = device_add_child(dev, "cxgbe", -1);
572 if (pi->dev == NULL) {
574 "failed to add device for port %d.\n", i);
578 device_set_softc(pi->dev, pi);
582 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
584 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
586 goto done; /* error message displayed already */
588 sc->intr_type = iaq.intr_type;
589 sc->intr_count = iaq.nirq;
590 sc->flags |= iaq.intr_flags;
593 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
594 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
595 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
596 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
597 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
599 #ifndef TCP_OFFLOAD_DISABLE
600 if (is_offload(sc)) {
602 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
603 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
604 s->neq += s->nofldtxq + s->nofldrxq;
605 s->niq += s->nofldrxq;
607 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
608 M_CXGBE, M_ZERO | M_WAITOK);
609 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
610 M_CXGBE, M_ZERO | M_WAITOK);
614 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
616 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
618 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
620 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
622 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
625 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
628 t4_init_l2t(sc, M_WAITOK);
631 * Second pass over the ports. This time we know the number of rx and
632 * tx queues that each port should get.
635 #ifndef TCP_OFFLOAD_DISABLE
636 ofld_rqidx = ofld_tqidx = 0;
638 for_each_port(sc, i) {
639 struct port_info *pi = sc->port[i];
644 pi->first_rxq = rqidx;
645 pi->first_txq = tqidx;
646 if (is_10G_port(pi)) {
647 pi->nrxq = iaq.nrxq10g;
648 pi->ntxq = iaq.ntxq10g;
650 pi->nrxq = iaq.nrxq1g;
651 pi->ntxq = iaq.ntxq1g;
657 #ifndef TCP_OFFLOAD_DISABLE
658 if (is_offload(sc)) {
659 pi->first_ofld_rxq = ofld_rqidx;
660 pi->first_ofld_txq = ofld_tqidx;
661 if (is_10G_port(pi)) {
662 pi->nofldrxq = iaq.nofldrxq10g;
663 pi->nofldtxq = iaq.nofldtxq10g;
665 pi->nofldrxq = iaq.nofldrxq1g;
666 pi->nofldtxq = iaq.nofldtxq1g;
668 ofld_rqidx += pi->nofldrxq;
669 ofld_tqidx += pi->nofldtxq;
674 rc = bus_generic_attach(dev);
677 "failed to attach all child ports: %d\n", rc);
682 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
683 sc->params.pci.width, sc->params.nports, sc->intr_count,
684 sc->intr_type == INTR_MSIX ? "MSI-X" :
685 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
686 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
691 if (rc != 0 && sc->cdev) {
692 /* cdev was created and so cxgbetool works; recover that way. */
694 "error during attach, adapter is now in recovery mode.\n");
710 t4_detach(device_t dev)
713 struct port_info *pi;
716 sc = device_get_softc(dev);
718 if (sc->flags & FULL_INIT_DONE)
722 destroy_dev(sc->cdev);
726 rc = bus_generic_detach(dev);
729 "failed to detach child devices: %d\n", rc);
733 for (i = 0; i < MAX_NPORTS; i++) {
736 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
738 device_delete_child(dev, pi->dev);
740 mtx_destroy(&pi->pi_lock);
745 if (sc->flags & FULL_INIT_DONE)
746 adapter_full_uninit(sc);
748 if (sc->flags & FW_OK)
749 t4_fw_bye(sc, sc->mbox);
751 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
752 pci_release_msi(dev);
755 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
759 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
763 t4_free_l2t(sc->l2t);
765 #ifndef TCP_OFFLOAD_DISABLE
766 free(sc->sge.ofld_rxq, M_CXGBE);
767 free(sc->sge.ofld_txq, M_CXGBE);
769 free(sc->irq, M_CXGBE);
770 free(sc->sge.rxq, M_CXGBE);
771 free(sc->sge.txq, M_CXGBE);
772 free(sc->sge.ctrlq, M_CXGBE);
773 free(sc->sge.iqmap, M_CXGBE);
774 free(sc->sge.eqmap, M_CXGBE);
775 free(sc->tids.ftid_tab, M_CXGBE);
776 t4_destroy_dma_tag(sc);
777 if (mtx_initialized(&sc->sc_lock)) {
778 mtx_lock(&t4_list_lock);
779 SLIST_REMOVE(&t4_list, sc, adapter, link);
780 mtx_unlock(&t4_list_lock);
781 mtx_destroy(&sc->sc_lock);
784 if (mtx_initialized(&sc->sfl_lock))
785 mtx_destroy(&sc->sfl_lock);
787 bzero(sc, sizeof(*sc));
794 cxgbe_probe(device_t dev)
797 struct port_info *pi = device_get_softc(dev);
799 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
800 device_set_desc_copy(dev, buf);
802 return (BUS_PROBE_DEFAULT);
805 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
806 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
808 #define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
811 cxgbe_attach(device_t dev)
813 struct port_info *pi = device_get_softc(dev);
816 /* Allocate an ifnet and set it up */
817 ifp = if_alloc(IFT_ETHER);
819 device_printf(dev, "Cannot allocate ifnet\n");
825 callout_init(&pi->tick, CALLOUT_MPSAFE);
827 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
828 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
830 ifp->if_init = cxgbe_init;
831 ifp->if_ioctl = cxgbe_ioctl;
832 ifp->if_start = cxgbe_start;
833 ifp->if_transmit = cxgbe_transmit;
834 ifp->if_qflush = cxgbe_qflush;
836 ifp->if_snd.ifq_drv_maxlen = 1024;
837 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
838 IFQ_SET_READY(&ifp->if_snd);
840 ifp->if_capabilities = T4_CAP;
841 #ifndef TCP_OFFLOAD_DISABLE
842 if (is_offload(pi->adapter))
843 ifp->if_capabilities |= IFCAP_TOE4;
845 ifp->if_capenable = T4_CAP_ENABLE;
846 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
848 /* Initialize ifmedia for this port */
849 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
853 ether_ifattach(ifp, pi->hw_addr);
855 #ifndef TCP_OFFLOAD_DISABLE
856 if (is_offload(pi->adapter)) {
858 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
859 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
862 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
870 cxgbe_detach(device_t dev)
872 struct port_info *pi = device_get_softc(dev);
873 struct adapter *sc = pi->adapter;
874 struct ifnet *ifp = pi->ifp;
876 /* Tell if_ioctl and if_init that the port is going away */
881 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
886 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
887 callout_stop(&pi->tick);
889 callout_drain(&pi->tick);
891 /* Let detach proceed even if these fail. */
892 cxgbe_uninit_synchronized(pi);
893 port_full_uninit(pi);
895 ifmedia_removeall(&pi->media);
896 ether_ifdetach(pi->ifp);
901 wakeup_one(&sc->flags);
908 cxgbe_init(void *arg)
910 struct port_info *pi = arg;
911 struct adapter *sc = pi->adapter;
914 cxgbe_init_locked(pi); /* releases adapter lock */
915 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
919 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
921 int rc = 0, mtu, flags;
922 struct port_info *pi = ifp->if_softc;
923 struct adapter *sc = pi->adapter;
924 struct ifreq *ifr = (struct ifreq *)data;
930 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
938 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
942 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
943 t4_update_fl_bufsize(ifp);
945 rc = update_mac_settings(pi, XGMAC_MTU);
958 if (ifp->if_flags & IFF_UP) {
959 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
960 flags = pi->if_flags;
961 if ((ifp->if_flags ^ flags) &
962 (IFF_PROMISC | IFF_ALLMULTI)) {
968 rc = update_mac_settings(pi,
969 XGMAC_PROMISC | XGMAC_ALLMULTI);
974 rc = cxgbe_init_locked(pi);
975 pi->if_flags = ifp->if_flags;
976 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
977 rc = cxgbe_uninit_locked(pi);
981 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
985 case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
987 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
991 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
993 rc = update_mac_settings(pi, XGMAC_MCADDRS);
1001 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
1005 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1006 if (mask & IFCAP_TXCSUM) {
1007 ifp->if_capenable ^= IFCAP_TXCSUM;
1008 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1010 if (IFCAP_TSO & ifp->if_capenable &&
1011 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1012 ifp->if_capenable &= ~IFCAP_TSO;
1013 ifp->if_hwassist &= ~CSUM_TSO;
1015 "tso disabled due to -txcsum.\n");
1018 if (mask & IFCAP_RXCSUM)
1019 ifp->if_capenable ^= IFCAP_RXCSUM;
1020 if (mask & IFCAP_TSO4) {
1021 ifp->if_capenable ^= IFCAP_TSO4;
1023 if (IFCAP_TSO & ifp->if_capenable) {
1024 if (IFCAP_TXCSUM & ifp->if_capenable)
1025 ifp->if_hwassist |= CSUM_TSO;
1027 ifp->if_capenable &= ~IFCAP_TSO;
1028 ifp->if_hwassist &= ~CSUM_TSO;
1030 "enable txcsum first.\n");
1035 ifp->if_hwassist &= ~CSUM_TSO;
1037 if (mask & IFCAP_LRO) {
1040 struct sge_rxq *rxq;
1042 ifp->if_capenable ^= IFCAP_LRO;
1043 for_each_rxq(pi, i, rxq) {
1044 if (ifp->if_capenable & IFCAP_LRO)
1045 rxq->iq.flags |= IQ_LRO_ENABLED;
1047 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1051 #ifndef TCP_OFFLOAD_DISABLE
1052 if (mask & IFCAP_TOE) {
1053 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1055 rc = toe_capability(pi, enable);
1059 ifp->if_capenable ^= mask;
1062 if (mask & IFCAP_VLAN_HWTAGGING) {
1063 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1064 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1066 rc = update_mac_settings(pi, XGMAC_VLANEX);
1070 if (mask & IFCAP_VLAN_MTU) {
1071 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1073 /* Need to find out how to disable auto-mtu-inflation */
1075 if (mask & IFCAP_VLAN_HWTSO)
1076 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1077 if (mask & IFCAP_VLAN_HWCSUM)
1078 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1080 #ifdef VLAN_CAPABILITIES
1081 VLAN_CAPABILITIES(ifp);
1088 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1092 rc = ether_ioctl(ifp, cmd, data);
1099 cxgbe_start(struct ifnet *ifp)
1101 struct port_info *pi = ifp->if_softc;
1102 struct sge_txq *txq;
1105 for_each_txq(pi, i, txq) {
1106 if (TXQ_TRYLOCK(txq)) {
1107 txq_start(ifp, txq);
1114 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1116 struct port_info *pi = ifp->if_softc;
1117 struct adapter *sc = pi->adapter;
1118 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1119 struct buf_ring *br;
1124 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1129 if (m->m_flags & M_FLOWID)
1130 txq += (m->m_pkthdr.flowid % pi->ntxq);
1133 if (TXQ_TRYLOCK(txq) == 0) {
1134 struct sge_eq *eq = &txq->eq;
1137 * It is possible that t4_eth_tx finishes up and releases the
1138 * lock between the TRYLOCK above and the drbr_enqueue here. We
1139 * need to make sure that this mbuf doesn't just sit there in
1143 rc = drbr_enqueue(ifp, br, m);
1144 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1145 !(eq->flags & EQ_DOOMED))
1146 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1151 * txq->m is the mbuf that is held up due to a temporary shortage of
1152 * resources and it should be put on the wire first. Then what's in
1153 * drbr and finally the mbuf that was just passed in to us.
1155 * Return code should indicate the fate of the mbuf that was passed in
1159 TXQ_LOCK_ASSERT_OWNED(txq);
1160 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1162 /* Queued for transmission. */
1164 rc = drbr_enqueue(ifp, br, m);
1165 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1166 (void) t4_eth_tx(ifp, txq, m);
1171 /* Direct transmission. */
1172 rc = t4_eth_tx(ifp, txq, m);
1173 if (rc != 0 && txq->m)
1174 rc = 0; /* held, will be transmitted soon (hopefully) */
1181 cxgbe_qflush(struct ifnet *ifp)
1183 struct port_info *pi = ifp->if_softc;
1184 struct sge_txq *txq;
1188 /* queues do not exist if !PORT_INIT_DONE. */
1189 if (pi->flags & PORT_INIT_DONE) {
1190 for_each_txq(pi, i, txq) {
1194 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1203 cxgbe_media_change(struct ifnet *ifp)
1205 struct port_info *pi = ifp->if_softc;
1207 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1209 return (EOPNOTSUPP);
1213 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1215 struct port_info *pi = ifp->if_softc;
1216 struct ifmedia_entry *cur = pi->media.ifm_cur;
1217 int speed = pi->link_cfg.speed;
1218 int data = (pi->port_type << 8) | pi->mod_type;
1220 if (cur->ifm_data != data) {
1221 build_medialist(pi);
1222 cur = pi->media.ifm_cur;
1225 ifmr->ifm_status = IFM_AVALID;
1226 if (!pi->link_cfg.link_ok)
1229 ifmr->ifm_status |= IFM_ACTIVE;
1231 /* active and current will differ iff current media is autoselect. */
1232 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1235 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1236 if (speed == SPEED_10000)
1237 ifmr->ifm_active |= IFM_10G_T;
1238 else if (speed == SPEED_1000)
1239 ifmr->ifm_active |= IFM_1000_T;
1240 else if (speed == SPEED_100)
1241 ifmr->ifm_active |= IFM_100_TX;
1242 else if (speed == SPEED_10)
1243 ifmr->ifm_active |= IFM_10_T;
1245 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1250 t4_fatal_err(struct adapter *sc)
1252 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1253 t4_intr_disable(sc);
1254 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1255 device_get_nameunit(sc->dev));
1259 map_bars(struct adapter *sc)
1261 sc->regs_rid = PCIR_BAR(0);
1262 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1263 &sc->regs_rid, RF_ACTIVE);
1264 if (sc->regs_res == NULL) {
1265 device_printf(sc->dev, "cannot map registers.\n");
1268 sc->bt = rman_get_bustag(sc->regs_res);
1269 sc->bh = rman_get_bushandle(sc->regs_res);
1270 sc->mmio_len = rman_get_size(sc->regs_res);
1272 sc->msix_rid = PCIR_BAR(4);
1273 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1274 &sc->msix_rid, RF_ACTIVE);
1275 if (sc->msix_res == NULL) {
1276 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1284 setup_memwin(struct adapter *sc)
1288 bar0 = rman_get_start(sc->regs_res);
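/*
 * Program the three fixed memory access windows. Each register gets the
 * bus address of the window within BAR0 and the window size encoded as
 * log2(aperture) - 10.
 */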
1290 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1291 (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1292 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1294 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1295 (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1296 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1298 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1299 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1300 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1304 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1305 struct intrs_and_queues *iaq)
1307 int rc, itype, navail, nrxq10g, nrxq1g, n;
1308 int nofldrxq10g = 0, nofldrxq1g = 0;
1310 bzero(iaq, sizeof(*iaq));
1312 iaq->ntxq10g = t4_ntxq10g;
1313 iaq->ntxq1g = t4_ntxq1g;
1314 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1315 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1316 #ifndef TCP_OFFLOAD_DISABLE
1317 iaq->nofldtxq10g = t4_nofldtxq10g;
1318 iaq->nofldtxq1g = t4_nofldtxq1g;
1319 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1320 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1323 for (itype = INTR_MSIX; itype; itype >>= 1) {
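/*
 * Try the allowed interrupt types most desirable first: MSI-X, then MSI,
 * then INTx.
 */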
1325 if ((itype & t4_intr_types) == 0)
1326 continue; /* not allowed */
1328 if (itype == INTR_MSIX)
1329 navail = pci_msix_count(sc->dev);
1330 else if (itype == INTR_MSI)
1331 navail = pci_msi_count(sc->dev);
1338 iaq->intr_type = itype;
1339 iaq->intr_flags = 0;
1342 * Best option: an interrupt vector for errors, one for the
1343 * firmware event queue, and one each for each rxq (NIC as well
1346 iaq->nirq = T4_EXTRA_INTR;
1347 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1348 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1349 if (iaq->nirq <= navail &&
1350 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1351 iaq->intr_flags |= INTR_DIRECT;
1356 * Second best option: an interrupt vector for errors, one for
1357 * the firmware event queue, and one each for either NIC or
1360 iaq->nirq = T4_EXTRA_INTR;
1361 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1362 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1363 if (iaq->nirq <= navail &&
1364 (itype != INTR_MSI || powerof2(iaq->nirq)))
1368 * Next best option: an interrupt vector for errors, one for the
1369 * firmware event queue, and at least one per port. At this
1370 * point we know we'll have to downsize nrxq or nofldrxq to fit
1371 * what's available to us.
1373 iaq->nirq = T4_EXTRA_INTR;
1374 iaq->nirq += n10g + n1g;
1375 if (iaq->nirq <= navail) {
1376 int leftover = navail - iaq->nirq;
1379 int target = max(nrxq10g, nofldrxq10g);
1382 while (n < target && leftover >= n10g) {
1387 iaq->nrxq10g = min(n, nrxq10g);
1388 #ifndef TCP_OFFLOAD_DISABLE
1389 iaq->nofldrxq10g = min(n, nofldrxq10g);
1394 int target = max(nrxq1g, nofldrxq1g);
1397 while (n < target && leftover >= n1g) {
1402 iaq->nrxq1g = min(n, nrxq1g);
1403 #ifndef TCP_OFFLOAD_DISABLE
1404 iaq->nofldrxq1g = min(n, nofldrxq1g);
1408 if (itype != INTR_MSI || powerof2(iaq->nirq))
1413 * Least desirable option: one interrupt vector for everything.
1415 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1416 #ifndef TCP_OFFLOAD_DISABLE
1417 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1423 if (itype == INTR_MSIX)
1424 rc = pci_alloc_msix(sc->dev, &navail);
1425 else if (itype == INTR_MSI)
1426 rc = pci_alloc_msi(sc->dev, &navail);
1429 if (navail == iaq->nirq)
1433 * Didn't get the number requested. Use whatever number
1434 * the kernel is willing to allocate (it's in navail).
1436 device_printf(sc->dev, "fewer vectors than requested, "
1437 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1438 itype, iaq->nirq, navail);
1439 pci_release_msi(sc->dev);
1443 device_printf(sc->dev,
1444 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1445 itype, rc, iaq->nirq, navail);
1448 device_printf(sc->dev,
1449 "failed to find a usable interrupt type. "
1450 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1451 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1457 * Install a compatible firmware (if required), establish contact with it (by
1458 * saying hello), and reset the device. If we end up as the master driver,
1459 * partition adapter resources by providing a configuration file to the
1463 prep_firmware(struct adapter *sc)
1465 const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
1467 enum dev_state state;
1469 default_cfg = firmware_get(T4_CFGNAME);
1471 /* Check firmware version and install a different one if necessary */
1472 rc = t4_check_fw_version(sc);
1476 fw = firmware_get(T4_FWNAME);
1478 const struct fw_hdr *hdr = (const void *)fw->data;
1480 v = ntohl(hdr->fw_ver);
1483 * The firmware module will not be used if it isn't the
1484 * same major version as what the driver was compiled
1487 if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
1488 device_printf(sc->dev,
1489 "Found firmware image but version %d "
1490 "can not be used with this driver (%d)\n",
1491 G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);
1493 firmware_put(fw, FIRMWARE_UNLOAD);
1498 if (fw == NULL && rc < 0) {
1499 device_printf(sc->dev, "No usable firmware. "
1500 "card has %d.%d.%d, driver compiled with %d.%d.%d",
1501 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1502 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1503 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1504 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1511 * Always upgrade, even for minor/micro/build mismatches.
1512 * Downgrade only for a major version mismatch or if
1513 * force_firmware_install was specified.
1515 if (fw != NULL && (rc < 0 || v > sc->params.fw_vers)) {
1516 device_printf(sc->dev,
1517 "installing firmware %d.%d.%d.%d on card.\n",
1518 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1519 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1521 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1523 device_printf(sc->dev,
1524 "failed to install firmware: %d\n", rc);
1528 (void) t4_check_fw_version(sc);
1533 /* Contact firmware. */
1534 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1537 device_printf(sc->dev,
1538 "failed to connect to the firmware: %d.\n", rc);
1542 sc->flags |= MASTER_PF;
1545 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1547 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1548 if (rc != ETIMEDOUT && rc != EIO)
1549 t4_fw_bye(sc, sc->mbox);
1553 /* Partition adapter resources as specified in the config file. */
1554 if (sc->flags & MASTER_PF) {
1555 if (strncmp(t4_cfg_file, "default", sizeof(t4_cfg_file))) {
1558 snprintf(s, sizeof(s), "t4fw_cfg_%s", t4_cfg_file);
1559 cfg = firmware_get(s);
1561 device_printf(sc->dev,
1562 "unable to locate %s module, "
1563 "will use default config file.\n", s);
1567 rc = partition_resources(sc, cfg ? cfg : default_cfg);
1569 goto done; /* error message displayed already */
1572 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1573 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1574 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1575 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1576 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1581 firmware_put(fw, FIRMWARE_UNLOAD);
1583 firmware_put(cfg, FIRMWARE_UNLOAD);
1584 if (default_cfg != NULL)
1585 firmware_put(default_cfg, FIRMWARE_UNLOAD);
1590 #define FW_PARAM_DEV(param) \
1591 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1592 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1593 #define FW_PARAM_PFVF(param) \
1594 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1595 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
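/*
 * Convenience macros for building the 32-bit identifiers passed to
 * t4_query_params(): FW_PARAM_DEV() names a device-wide parameter,
 * FW_PARAM_PFVF() a per-PF/VF parameter.
 */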
1598 * Upload configuration file to card's memory.
1601 upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
1605 uint32_t param, val, mtype, maddr, bar, off, win, remaining;
1608 /* Figure out where the firmware wants us to upload it. */
1609 param = FW_PARAM_DEV(CF);
1610 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
1612 /* Firmwares without config file support will fail this way */
1613 device_printf(sc->dev,
1614 "failed to query config file location: %d.\n", rc);
1617 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1618 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1621 device_printf(sc->dev,
1622 "cannot upload config file (type %u, addr %x).\n",
1627 /* Translate mtype/maddr to an address suitable for the PCIe window */
1628 val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1629 val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
1631 case FW_MEMTYPE_CF_EDC0:
1632 if (!(val & F_EDRAM0_ENABLE))
1634 bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1635 maddr += G_EDRAM0_BASE(bar) << 20;
1638 case FW_MEMTYPE_CF_EDC1:
1639 if (!(val & F_EDRAM1_ENABLE))
1641 bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1642 maddr += G_EDRAM1_BASE(bar) << 20;
1645 case FW_MEMTYPE_CF_EXTMEM:
1646 if (!(val & F_EXT_MEM_ENABLE))
1648 bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1649 maddr += G_EXT_MEM_BASE(bar) << 20;
1654 device_printf(sc->dev,
1655 "cannot upload config file (type %u, enabled %u).\n",
1661 * Position the PCIe window (we use memwin2) to the 16B aligned area
1662 * just at/before the upload location.
1665 off = maddr - win; /* offset from the start of the window. */
1666 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
1667 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
1669 remaining = fw->datasize;
1670 if (remaining > FLASH_CFG_MAX_SIZE ||
1671 remaining > MEMWIN2_APERTURE - off) {
1672 device_printf(sc->dev, "cannot upload config file all at once "
1673 "(size %u, max %u, room %u).\n",
1674 remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
1679 * XXX: sheer laziness. We deliberately added 4 bytes of useless
1680 * stuffing/comments at the end of the config file so it's ok to simply
1681 * throw away the last remaining bytes when the config file is not an
1682 * exact multiple of 4.
1685 for (i = 0; remaining >= 4; i += 4, remaining -= 4)
1686 t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);
1692 * Partition chip resources for use between various PFs, VFs, etc. This is done
1693 * by uploading the firmware configuration file to the adapter and instructing
1694 * the firmware to process it.
1697 partition_resources(struct adapter *sc, const struct firmware *cfg)
1700 struct fw_caps_config_cmd caps;
1701 uint32_t mtype, maddr, finicsum, cfcsum;
1703 rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
1705 mtype = FW_MEMTYPE_CF_FLASH;
1706 maddr = t4_flash_cfg_addr(sc);
1709 bzero(&caps, sizeof(caps));
1710 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1711 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1712 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1713 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1714 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1715 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1717 device_printf(sc->dev,
1718 "failed to pre-process config file: %d.\n", rc);
1722 finicsum = be32toh(caps.finicsum);
1723 cfcsum = be32toh(caps.cfcsum);
1724 if (finicsum != cfcsum) {
1725 device_printf(sc->dev,
1726 "WARNING: config file checksum mismatch: %08x %08x\n",
1729 sc->cfcsum = cfcsum;
1731 #define LIMIT_CAPS(x) do { \
1732 caps.x &= htobe16(t4_##x##_allowed); \
1733 sc->x = htobe16(caps.x); \
1737 * Let the firmware know what features will (not) be used so it can tune
1738 * things accordingly.
1740 LIMIT_CAPS(linkcaps);
1741 LIMIT_CAPS(niccaps);
1742 LIMIT_CAPS(toecaps);
1743 LIMIT_CAPS(rdmacaps);
1744 LIMIT_CAPS(iscsicaps);
1745 LIMIT_CAPS(fcoecaps);
1748 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1749 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1750 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1751 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
1753 device_printf(sc->dev,
1754 "failed to process config file: %d.\n", rc);
1762 * Retrieve parameters that are needed (or nice to have) prior to calling
1763 * t4_sge_init and t4_fw_initialize.
1766 get_params__pre_init(struct adapter *sc)
1769 uint32_t param[2], val[2];
1770 struct fw_devlog_cmd cmd;
1771 struct devlog_params *dlog = &sc->params.devlog;
1773 param[0] = FW_PARAM_DEV(PORTVEC);
1774 param[1] = FW_PARAM_DEV(CCLK);
1775 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1777 device_printf(sc->dev,
1778 "failed to query parameters (pre_init): %d.\n", rc);
1782 sc->params.portvec = val[0];
1783 sc->params.nports = 0;
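/* Each bit set in the port vector is a port; clear one bit per iteration. */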
1785 sc->params.nports++;
1786 val[0] &= val[0] - 1;
1789 sc->params.vpd.cclk = val[1];
1791 /* Read device log parameters. */
1792 bzero(&cmd, sizeof(cmd));
1793 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1794 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1795 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
1796 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
1798 device_printf(sc->dev,
1799 "failed to get devlog parameters: %d.\n", rc);
1800 bzero(dlog, sizeof (*dlog));
1801 rc = 0; /* devlog isn't critical for device operation */
1803 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
1804 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1805 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1806 dlog->size = be32toh(cmd.memsize_devlog);
1813 * Retrieve various parameters that are of interest to the driver. The device
1814 * has been initialized by the firmware at this point.
1817 get_params__post_init(struct adapter *sc)
1820 uint32_t param[7], val[7];
1821 struct fw_caps_config_cmd caps;
1823 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1824 param[1] = FW_PARAM_PFVF(EQ_START);
1825 param[2] = FW_PARAM_PFVF(FILTER_START);
1826 param[3] = FW_PARAM_PFVF(FILTER_END);
1827 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
1829 device_printf(sc->dev,
1830 "failed to query parameters (post_init): %d.\n", rc);
1834 sc->sge.iq_start = val[0];
1835 sc->sge.eq_start = val[1];
1836 sc->tids.ftid_base = val[2];
1837 sc->tids.nftids = val[3] - val[2] + 1;
1839 /* get capabilities */
1840 bzero(&caps, sizeof(caps));
1841 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1842 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1843 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1844 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1846 device_printf(sc->dev,
1847 "failed to get card capabilities: %d.\n", rc);
1852 /* query offload-related parameters */
1853 param[0] = FW_PARAM_DEV(NTID);
1854 param[1] = FW_PARAM_PFVF(SERVER_START);
1855 param[2] = FW_PARAM_PFVF(SERVER_END);
1856 param[3] = FW_PARAM_PFVF(TDDP_START);
1857 param[4] = FW_PARAM_PFVF(TDDP_END);
1858 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1859 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1861 device_printf(sc->dev,
1862 "failed to query TOE parameters: %d.\n", rc);
1865 sc->tids.ntids = val[0];
1866 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1867 sc->tids.stid_base = val[1];
1868 sc->tids.nstids = val[2] - val[1] + 1;
1869 sc->vres.ddp.start = val[3];
1870 sc->vres.ddp.size = val[4] - val[3] + 1;
1871 sc->params.ofldq_wr_cred = val[5];
1872 sc->params.offload = 1;
1874 if (caps.rdmacaps) {
1875 param[0] = FW_PARAM_PFVF(STAG_START);
1876 param[1] = FW_PARAM_PFVF(STAG_END);
1877 param[2] = FW_PARAM_PFVF(RQ_START);
1878 param[3] = FW_PARAM_PFVF(RQ_END);
1879 param[4] = FW_PARAM_PFVF(PBL_START);
1880 param[5] = FW_PARAM_PFVF(PBL_END);
1881 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1883 device_printf(sc->dev,
1884 "failed to query RDMA parameters(1): %d.\n", rc);
1887 sc->vres.stag.start = val[0];
1888 sc->vres.stag.size = val[1] - val[0] + 1;
1889 sc->vres.rq.start = val[2];
1890 sc->vres.rq.size = val[3] - val[2] + 1;
1891 sc->vres.pbl.start = val[4];
1892 sc->vres.pbl.size = val[5] - val[4] + 1;
1894 param[0] = FW_PARAM_PFVF(SQRQ_START);
1895 param[1] = FW_PARAM_PFVF(SQRQ_END);
1896 param[2] = FW_PARAM_PFVF(CQ_START);
1897 param[3] = FW_PARAM_PFVF(CQ_END);
1898 param[4] = FW_PARAM_PFVF(OCQ_START);
1899 param[5] = FW_PARAM_PFVF(OCQ_END);
1900 rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
1902 device_printf(sc->dev,
1903 "failed to query RDMA parameters(2): %d.\n", rc);
1906 sc->vres.qp.start = val[0];
1907 sc->vres.qp.size = val[1] - val[0] + 1;
1908 sc->vres.cq.start = val[2];
1909 sc->vres.cq.size = val[3] - val[2] + 1;
1910 sc->vres.ocq.start = val[4];
1911 sc->vres.ocq.size = val[5] - val[4] + 1;
1913 if (caps.iscsicaps) {
1914 param[0] = FW_PARAM_PFVF(ISCSI_START);
1915 param[1] = FW_PARAM_PFVF(ISCSI_END);
1916 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1918 device_printf(sc->dev,
1919 "failed to query iSCSI parameters: %d.\n", rc);
1922 sc->vres.iscsi.start = val[0];
1923 sc->vres.iscsi.size = val[1] - val[0] + 1;
1926 /* These are finalized by FW initialization, load their values now */
1927 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1928 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1929 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1930 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1935 #undef FW_PARAM_PFVF
1939 t4_set_desc(struct adapter *sc)
1942 struct adapter_params *p = &sc->params;
1944 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
1945 p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
1947 device_set_desc_copy(sc->dev, buf);
1951 build_medialist(struct port_info *pi)
1953 struct ifmedia *media = &pi->media;
1958 ifmedia_removeall(media);
1960 m = IFM_ETHER | IFM_FDX;
1961 data = (pi->port_type << 8) | pi->mod_type;
1963 switch(pi->port_type) {
1964 case FW_PORT_TYPE_BT_XFI:
1965 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1968 case FW_PORT_TYPE_BT_XAUI:
1969 ifmedia_add(media, m | IFM_10G_T, data, NULL);
1972 case FW_PORT_TYPE_BT_SGMII:
1973 ifmedia_add(media, m | IFM_1000_T, data, NULL);
1974 ifmedia_add(media, m | IFM_100_TX, data, NULL);
1975 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
1976 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
1979 case FW_PORT_TYPE_CX4:
1980 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
1981 ifmedia_set(media, m | IFM_10G_CX4);
1984 case FW_PORT_TYPE_SFP:
1985 case FW_PORT_TYPE_FIBER_XFI:
1986 case FW_PORT_TYPE_FIBER_XAUI:
1987 switch (pi->mod_type) {
1989 case FW_PORT_MOD_TYPE_LR:
1990 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
1991 ifmedia_set(media, m | IFM_10G_LR);
1994 case FW_PORT_MOD_TYPE_SR:
1995 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
1996 ifmedia_set(media, m | IFM_10G_SR);
1999 case FW_PORT_MOD_TYPE_LRM:
2000 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2001 ifmedia_set(media, m | IFM_10G_LRM);
2004 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2005 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2006 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2007 ifmedia_set(media, m | IFM_10G_TWINAX);
2010 case FW_PORT_MOD_TYPE_NONE:
2012 ifmedia_add(media, m | IFM_NONE, data, NULL);
2013 ifmedia_set(media, m | IFM_NONE);
2016 case FW_PORT_MOD_TYPE_NA:
2017 case FW_PORT_MOD_TYPE_ER:
2019 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2020 ifmedia_set(media, m | IFM_UNKNOWN);
2025 case FW_PORT_TYPE_KX4:
2026 case FW_PORT_TYPE_KX:
2027 case FW_PORT_TYPE_KR:
2029 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2030 ifmedia_set(media, m | IFM_UNKNOWN);
2038 * Program the port's XGMAC based on parameters in ifnet. The caller also
2039 * indicates which parameters should be programmed (the rest are left alone).
2042 update_mac_settings(struct port_info *pi, int flags)
2045 struct ifnet *ifp = pi->ifp;
2046 struct adapter *sc = pi->adapter;
2047 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2049 PORT_LOCK_ASSERT_OWNED(pi);
2050 KASSERT(flags, ("%s: not told what to update.", __func__));
2052 if (flags & XGMAC_MTU)
2055 if (flags & XGMAC_PROMISC)
2056 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2058 if (flags & XGMAC_ALLMULTI)
2059 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2061 if (flags & XGMAC_VLANEX)
2062 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2064 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2067 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2071 if (flags & XGMAC_UCADDR) {
2072 uint8_t ucaddr[ETHER_ADDR_LEN];
2074 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2075 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2076 ucaddr, true, true);
2079 if_printf(ifp, "change_mac failed: %d\n", rc);
2082 pi->xact_addr_filt = rc;
2087 if (flags & XGMAC_MCADDRS) {
2088 const uint8_t *mcaddr;
2091 struct ifmultiaddr *ifma;
2093 if_maddr_rlock(ifp);
2094 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2095 if (ifma->ifma_addr->sa_family != AF_LINK)
2097 mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2099 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
2100 &mcaddr, NULL, &hash, 0);
2103 if_printf(ifp, "failed to add mc address"
2104 " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
2105 mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
2106 mcaddr[4], mcaddr[5], rc);
2112 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2114 if_printf(ifp, "failed to set mc address hash: %d", rc);
2116 if_maddr_runlock(ifp);
2123 cxgbe_init_locked(struct port_info *pi)
2125 struct adapter *sc = pi->adapter;
2128 ADAPTER_LOCK_ASSERT_OWNED(sc);
2130 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
2131 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
2136 if (IS_DOOMED(pi)) {
2140 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2142 /* Give up the adapter lock, port init code can sleep. */
2146 rc = cxgbe_init_synchronized(pi);
2150 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2152 wakeup_one(&sc->flags);
2158 cxgbe_init_synchronized(struct port_info *pi)
2160 struct adapter *sc = pi->adapter;
2161 struct ifnet *ifp = pi->ifp;
2164 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2166 if (isset(&sc->open_device_map, pi->port_id)) {
2167 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2168 ("mismatch between open_device_map and if_drv_flags"));
2169 return (0); /* already running */
2172 if (!(sc->flags & FULL_INIT_DONE) &&
2173 ((rc = adapter_full_init(sc)) != 0))
2174 return (rc); /* error message displayed already */
2176 if (!(pi->flags & PORT_INIT_DONE) &&
2177 ((rc = port_full_init(pi)) != 0))
2178 return (rc); /* error message displayed already */
2181 rc = update_mac_settings(pi, XGMAC_ALL);
2184 goto done; /* error message displayed already */
2186 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2188 if_printf(ifp, "start_link failed: %d\n", rc);
2192 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2194 if_printf(ifp, "enable_vi failed: %d\n", rc);
2199 setbit(&sc->open_device_map, pi->port_id);
2200 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2202 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2205 cxgbe_uninit_synchronized(pi);
2211 cxgbe_uninit_locked(struct port_info *pi)
2213 struct adapter *sc = pi->adapter;
2216 ADAPTER_LOCK_ASSERT_OWNED(sc);
2218 while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
2219 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
2224 if (IS_DOOMED(pi)) {
2228 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2232 rc = cxgbe_uninit_synchronized(pi);
2235 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2237 wakeup_one(&sc->flags);
2247 cxgbe_uninit_synchronized(struct port_info *pi)
2249 struct adapter *sc = pi->adapter;
2250 struct ifnet *ifp = pi->ifp;
2253 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2256 * Disable the VI so that all its data in either direction is discarded
2257 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2258 * tick) intact as the TP can deliver negative advice or data that it's
2259 * holding in its RAM (for an offloaded connection) even after the VI is
2262 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2264 if_printf(ifp, "disable_vi failed: %d\n", rc);
2268 clrbit(&sc->open_device_map, pi->port_id);
2269 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2271 pi->link_cfg.link_ok = 0;
2272 pi->link_cfg.speed = 0;
2273 t4_os_link_changed(sc, pi->port_id, 0);
2278 #define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
2279 rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
2285 adapter_full_init(struct adapter *sc)
2287 int rc, i, rid, p, q;
2290 struct port_info *pi;
2291 struct sge_rxq *rxq;
2292 #ifndef TCP_OFFLOAD_DISABLE
2293 struct sge_ofld_rxq *ofld_rxq;
2296 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2297 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
2298 ("%s: FULL_INIT_DONE already", __func__));
2301 * queues that belong to the adapter (not any particular port).
2303 rc = t4_setup_adapter_queues(sc);
2307 for (i = 0; i < ARRAY_SIZE(sc->tq); i++) {
2308 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
2309 taskqueue_thread_enqueue, &sc->tq[i]);
2310 if (sc->tq[i] == NULL) {
2311 device_printf(sc->dev,
2312 "failed to allocate task queue %d\n", i);
2316 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
2317 device_get_nameunit(sc->dev), i);
2324 rid = sc->intr_type == INTR_INTX ? 0 : 1;
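/*
 * Vector layout: with a single vector one handler services everything;
 * with multiple vectors the first is for errors, the second for the
 * firmware event queue, and the rest go to the rx queues (NIC and/or TOE,
 * depending on INTR_DIRECT).
 */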
2325 if (sc->intr_count == 1) {
2326 KASSERT(!(sc->flags & INTR_DIRECT),
2327 ("%s: single interrupt && INTR_DIRECT?", __func__));
2329 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
2331 /* Multiple interrupts. */
2332 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2333 ("%s: too few intr.", __func__));
2335 /* The first one is always error intr */
2336 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
2340 /* The second one is always the firmware event queue */
2341 T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
2346 * Note that if INTR_DIRECT is not set then either the NIC rx
2347 * queues or (exclusive or) the TOE rx queues will be taking
2348 * direct interrupts.
2350 * There is no need to check for is_offload(sc) as nofldrxq
2351 * will be 0 if offload is disabled.
2353 for_each_port(sc, p) {
2356 #ifndef TCP_OFFLOAD_DISABLE
2358 * Skip over the NIC queues if they aren't taking direct
2361 if (!(sc->flags & INTR_DIRECT) &&
2362 pi->nofldrxq > pi->nrxq)
2365 rxq = &sc->sge.rxq[pi->first_rxq];
2366 for (q = 0; q < pi->nrxq; q++, rxq++) {
2367 snprintf(s, sizeof(s), "%d.%d", p, q);
2368 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, rxq, s);
2373 #ifndef TCP_OFFLOAD_DISABLE
2375 * Skip over the offload queues if they aren't taking
2376 * direct interrupts.
2378 if (!(sc->flags & INTR_DIRECT))
2381 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
2382 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
2383 snprintf(s, sizeof(s), "%d,%d", p, q);
2384 T4_ALLOC_IRQ(sc, irq, rid, t4_intr, ofld_rxq, s);
2393 sc->flags |= FULL_INIT_DONE;
2396 adapter_full_uninit(sc);
2403 adapter_full_uninit(struct adapter *sc)
2407 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2409 t4_teardown_adapter_queues(sc);
2411 for (i = 0; i < sc->intr_count; i++)
2412 t4_free_irq(sc, &sc->irq[i]);
2414 for (i = 0; i < ARRAY_SIZE(sc->tq) && sc->tq[i]; i++) {
2415 taskqueue_free(sc->tq[i]);
2419 sc->flags &= ~FULL_INIT_DONE;
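/*
 * Per-port initialization: allocate the port's tx/rx/fl queues and program
 * the RSS indirection table with the absolute IDs of its rx queues.  Sets
 * PORT_INIT_DONE on success and unwinds via port_full_uninit on failure.
 */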
2425 port_full_init(struct port_info *pi)
2427 struct adapter *sc = pi->adapter;
2428 struct ifnet *ifp = pi->ifp;
2430 struct sge_rxq *rxq;
2433 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2434 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
2435 ("%s: PORT_INIT_DONE already", __func__));
2437 sysctl_ctx_init(&pi->ctx);
2438 pi->flags |= PORT_SYSCTL_CTX;
2441 * Allocate tx/rx/fl queues for this port.
2443 rc = t4_setup_port_queues(pi);
2445 goto done; /* error message displayed already */
2448 * Setup RSS for this port.
2450 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
2452 for_each_rxq(pi, i, rxq) {
2453 rss[i] = rxq->iq.abs_id;
2455 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2456 pi->rss_size, rss, pi->nrxq);
2459 if_printf(ifp, "rss_config failed: %d\n", rc);
2463 pi->flags |= PORT_INIT_DONE;
2466 port_full_uninit(pi);
2475 port_full_uninit(struct port_info *pi)
2477 struct adapter *sc = pi->adapter;
2479 struct sge_rxq *rxq;
2480 struct sge_txq *txq;
2481 #ifndef TCP_OFFLOAD_DISABLE
2482 struct sge_ofld_rxq *ofld_rxq;
2483 struct sge_wrq *ofld_txq;
2486 if (pi->flags & PORT_INIT_DONE) {
2488 /* Need to quiesce queues. XXX: ctrl queues? */
2490 for_each_txq(pi, i, txq) {
2491 quiesce_eq(sc, &txq->eq);
2494 #ifndef TCP_OFFLOAD_DISABLE
2495 for_each_ofld_txq(pi, i, ofld_txq) {
2496 quiesce_eq(sc, &ofld_txq->eq);
2500 for_each_rxq(pi, i, rxq) {
2501 quiesce_iq(sc, &rxq->iq);
2502 quiesce_fl(sc, &rxq->fl);
2505 #ifndef TCP_OFFLOAD_DISABLE
2506 for_each_ofld_rxq(pi, i, ofld_rxq) {
2507 quiesce_iq(sc, &ofld_rxq->iq);
2508 quiesce_fl(sc, &ofld_rxq->fl);
2513 t4_teardown_port_queues(pi);
2514 pi->flags &= ~PORT_INIT_DONE;
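/*
 * Mark the egress queue doomed, wait for any outstanding credit flush to
 * complete, and drain the tx callout and task so nothing touches the eq
 * once it is freed.
 */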
2520 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
2523 eq->flags |= EQ_DOOMED;
2526 * Wait for the response to a credit flush if one's
2529 while (eq->flags & EQ_CRFLUSHED)
2530 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
2533 callout_drain(&eq->tx_callout); /* XXX: iffy */
2534 pause("callout", 10); /* Still iffy */
2536 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
2540 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
2542 (void) sc; /* unused */
2544 /* Synchronize with the interrupt handler */
2545 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
2550 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
2552 mtx_lock(&sc->sfl_lock);
2554 fl->flags |= FL_DOOMED;
2556 mtx_unlock(&sc->sfl_lock);
2558 callout_drain(&sc->sfl_callout);
2559 KASSERT((fl->flags & FL_STARVING) == 0,
2560 ("%s: still starving", __func__));
2564 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2565 driver_intr_t *handler, void *arg, char *name)
2570 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2571 RF_SHAREABLE | RF_ACTIVE);
2572 if (irq->res == NULL) {
2573 device_printf(sc->dev,
2574 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
2578 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
2579 NULL, handler, arg, &irq->tag);
2581 device_printf(sc->dev,
2582 "failed to setup interrupt for rid %d, name %s: %d\n",
2585 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
2591 t4_free_irq(struct adapter *sc, struct irq *irq)
2594 bus_teardown_intr(sc->dev, irq->res, irq->tag);
2596 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
2598 bzero(irq, sizeof(*irq));
2604 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
2607 uint32_t *p = (uint32_t *)(buf + start);
2609 for ( ; start <= end; start += sizeof(uint32_t))
2610 *p++ = t4_read_reg(sc, start);
2614 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
2617 static const unsigned int reg_ranges[] = {
2835 regs->version = 4 | (sc->params.rev << 10);
2836 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
2837 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
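/*
 * 1 Hz per-port callout: refresh the ifnet counters from the MAC statistics
 * and reschedule itself for as long as the interface is running.
 */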
2841 cxgbe_tick(void *arg)
2843 struct port_info *pi = arg;
2844 struct ifnet *ifp = pi->ifp;
2845 struct sge_txq *txq;
2847 struct port_stats *s = &pi->stats;
2850 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2852 return; /* without scheduling another callout */
2855 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
2857 ifp->if_opackets = s->tx_frames - s->tx_pause;
2858 ifp->if_ipackets = s->rx_frames - s->rx_pause;
2859 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
2860 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
2861 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
2862 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
2863 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2867 for_each_txq(pi, i, txq)
2868 drops += txq->br->br_drops;
2869 ifp->if_snd.ifq_drops = drops;
2871 ifp->if_oerrors = s->tx_error_frames;
2872 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
2873 s->rx_fcs_err + s->rx_len_err;
2875 callout_schedule(&pi->tick, hz);
2880 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2883 panic("%s: opcode %02x on iq %p with payload %p",
2884 __func__, rss->opcode, iq, m);
2886 log(LOG_ERR, "%s: opcode %02x on iq %p with payload %p",
2887 __func__, rss->opcode, iq, m);
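/*
 * Install (or, with h == NULL, reset to cpl_not_handled) the handler for a
 * CPL opcode.  A release-ordered pointer store keeps this safe against rx
 * paths dispatching on the table concurrently.
 */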
2894 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2896 uintptr_t *loc, new;
2898 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2901 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
2902 loc = (uintptr_t *) &sc->cpl_handler[opcode];
2903 atomic_store_rel_ptr(loc, new);
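/*
 * Adapter-wide sysctls under dev.t4nex.X: basic properties, a "misc" node
 * with hardware logs and statistics, and TOE tunables when offload is
 * available.
 */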
2909 t4_sysctls(struct adapter *sc)
2911 struct sysctl_ctx_list *ctx;
2912 struct sysctl_oid *oid;
2913 struct sysctl_oid_list *children, *c0;
2914 static char *caps[] = {
2915 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
2916 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
2917 "\20\1TOE", /* caps[2] toecaps */
2918 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
2919 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
2920 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
2921 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
2922 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
2925 ctx = device_get_sysctl_ctx(sc->dev);
2930 oid = device_get_sysctl_tree(sc->dev);
2931 c0 = children = SYSCTL_CHILDREN(oid);
2933 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
2934 &sc->params.nports, 0, "# of ports");
2936 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
2937 &sc->params.rev, 0, "chip hardware revision");
2939 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
2940 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
2942 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
2943 CTLFLAG_RD, &t4_cfg_file, 0, "configuration file");
2945 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
2946 &sc->cfcsum, 0, "config file checksum");
2948 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
2949 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
2950 sysctl_bitfield, "A", "available link capabilities");
2952 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
2953 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
2954 sysctl_bitfield, "A", "available NIC capabilities");
2956 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
2957 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
2958 sysctl_bitfield, "A", "available TCP offload capabilities");
2960 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
2961 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
2962 sysctl_bitfield, "A", "available RDMA capabilities");
2964 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
2965 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
2966 sysctl_bitfield, "A", "available iSCSI capabilities");
2968 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
2969 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
2970 sysctl_bitfield, "A", "available FCoE capabilities");
2972 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
2973 &sc->params.vpd.cclk, 0, "core clock frequency (in kHz)");
2975 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
2976 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
2977 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
2978 "interrupt holdoff timer values (us)");
2980 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
2981 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
2982 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
2983 "interrupt holdoff packet counter values");
2987 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
2989 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
2990 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
2991 "logs and miscellaneous information");
2992 children = SYSCTL_CHILDREN(oid);
2994 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
2995 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
2996 sysctl_cctrl, "A", "congestion control");
2998 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
2999 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3000 sysctl_cpl_stats, "A", "CPL statistics");
3002 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
3003 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3004 sysctl_ddp_stats, "A", "DDP statistics");
3006 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
3007 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3008 sysctl_devlog, "A", "firmware's device log");
3010 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
3011 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3012 sysctl_fcoe_stats, "A", "FCoE statistics");
3014 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
3015 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3016 sysctl_hw_sched, "A", "hardware scheduler");
3018 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
3019 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3020 sysctl_l2t, "A", "hardware L2 table");
3022 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
3023 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3024 sysctl_lb_stats, "A", "loopback statistics");
3026 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
3027 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3028 sysctl_meminfo, "A", "memory regions");
3030 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
3031 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3032 sysctl_path_mtus, "A", "path MTUs");
3034 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
3035 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3036 sysctl_pm_stats, "A", "PM statistics");
3038 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
3039 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3040 sysctl_rdma_stats, "A", "RDMA statistics");
3042 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
3043 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3044 sysctl_tcp_stats, "A", "TCP statistics");
3046 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
3047 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3048 sysctl_tids, "A", "TID information");
3050 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
3051 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3052 sysctl_tp_err_stats, "A", "TP error statistics");
3054 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
3055 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3056 sysctl_tx_rate, "A", "Tx rate");
3059 #ifndef TCP_OFFLOAD_DISABLE
3060 if (is_offload(sc)) {
3064 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
3065 NULL, "TOE parameters");
3066 children = SYSCTL_CHILDREN(oid);
3068 sc->tt.sndbuf = 256 * 1024;
3069 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
3070 &sc->tt.sndbuf, 0, "max hardware send buffer size");
3073 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
3074 &sc->tt.ddp, 0, "DDP allowed");
3075 sc->tt.indsz = M_INDICATESIZE;
3076 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
3077 &sc->tt.indsz, 0, "DDP max indicate size allowed");
3078 sc->tt.ddp_thres = 3*4096;
3079 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
3080 &sc->tt.ddp_thres, 0, "DDP threshold");
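/*
 * Per-port sysctls under dev.cxgbe.X: queue counts, sizing and holdoff
 * knobs, plus a "stats" node whose entries read the MPS port statistics
 * registers directly.
 */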
3089 cxgbe_sysctls(struct port_info *pi)
3091 struct sysctl_ctx_list *ctx;
3092 struct sysctl_oid *oid;
3093 struct sysctl_oid_list *children;
3095 ctx = device_get_sysctl_ctx(pi->dev);
3100 oid = device_get_sysctl_tree(pi->dev);
3101 children = SYSCTL_CHILDREN(oid);
3103 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
3104 &pi->nrxq, 0, "# of rx queues");
3105 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
3106 &pi->ntxq, 0, "# of tx queues");
3107 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
3108 &pi->first_rxq, 0, "index of first rx queue");
3109 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
3110 &pi->first_txq, 0, "index of first tx queue");
3112 #ifndef TCP_OFFLOAD_DISABLE
3113 if (is_offload(pi->adapter)) {
3114 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
3116 "# of rx queues for offloaded TCP connections");
3117 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
3119 "# of tx queues for offloaded TCP connections");
3120 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
3121 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
3122 "index of first TOE rx queue");
3123 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
3124 CTLFLAG_RD, &pi->first_ofld_txq, 0,
3125 "index of first TOE tx queue");
3129 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
3130 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
3131 "holdoff timer index");
3132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
3133 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
3134 "holdoff packet counter index");
3136 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
3137 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
3139 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
3140 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
3144 * dev.cxgbe.X.stats.
3146 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
3147 NULL, "port statistics");
3148 children = SYSCTL_CHILDREN(oid);
3150 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
3151 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
3152 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
3153 sysctl_handle_t4_reg64, "QU", desc)
3155 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
3156 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
3157 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
3158 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
3159 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
3160 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
3161 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
3162 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
3163 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
3164 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
3165 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
3166 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
3167 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
3168 "# of tx frames in this range",
3169 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
3170 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
3171 "# of tx frames in this range",
3172 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
3173 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
3174 "# of tx frames in this range",
3175 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
3176 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
3177 "# of tx frames in this range",
3178 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
3179 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
3180 "# of tx frames in this range",
3181 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
3182 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
3183 "# of tx frames in this range",
3184 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
3185 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
3186 "# of tx frames in this range",
3187 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
3188 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
3189 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
3190 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
3191 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
3192 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
3193 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
3194 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
3195 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
3196 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
3197 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
3198 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
3199 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
3200 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
3201 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
3202 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
3203 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
3204 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
3205 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
3206 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
3207 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
3209 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
3210 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
3211 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
3212 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
3213 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
3214 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
3215 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
3216 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
3217 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
3218 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
3219 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
3220 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
3221 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
3222 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
3223 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
3224 "# of frames received with bad FCS",
3225 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
3226 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
3227 "# of frames received with length error",
3228 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
3229 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
3230 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
3231 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
3232 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
3233 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
3234 "# of rx frames in this range",
3235 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
3236 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
3237 "# of rx frames in this range",
3238 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
3239 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
3240 "# of rx frames in this range",
3241 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
3242 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
3243 "# of rx frames in this range",
3244 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
3245 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
3246 "# of rx frames in this range",
3247 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
3248 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
3249 "# of rx frames in this range",
3250 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
3251 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
3252 "# of rx frames in this range",
3253 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
3254 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
3255 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
3256 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
3257 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
3258 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
3259 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
3260 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
3261 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
3262 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
3263 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
3264 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
3265 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
3266 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
3267 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
3268 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
3269 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
3270 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
3271 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
3273 #undef SYSCTL_ADD_T4_REG64
3275 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
3276 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
3277 &pi->stats.name, desc)
3279 /* We get these from port_stats and they may be stale by up to 1s */
3280 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
3281 "# drops due to buffer-group 0 overflows");
3282 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
3283 "# drops due to buffer-group 1 overflows");
3284 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
3285 "# drops due to buffer-group 2 overflows");
3286 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
3287 "# drops due to buffer-group 3 overflows");
3288 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
3289 "# of buffer-group 0 truncated packets");
3290 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
3291 "# of buffer-group 1 truncated packets");
3292 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
3293 "# of buffer-group 2 truncated packets");
3294 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
3295 "# of buffer-group 3 truncated packets");
3297 #undef SYSCTL_ADD_T4_PORTSTAT
3303 sysctl_int_array(SYSCTL_HANDLER_ARGS)
3308 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
3309 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
3310 sbuf_printf(&sb, "%d ", *i);
3313 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3319 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
3324 rc = sysctl_wire_old_buffer(req, 0);
3328 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3332 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
3333 rc = sbuf_finish(sb);
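/*
 * Validate the requested holdoff timer index and apply it to the interrupt
 * parameters of every rx queue on the port.
 */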
3340 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
3342 struct port_info *pi = arg1;
3343 struct adapter *sc = pi->adapter;
3348 rc = sysctl_handle_int(oidp, &idx, 0, req);
3349 if (rc != 0 || req->newptr == NULL)
3352 if (idx < 0 || idx >= SGE_NTIMERS)
3356 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3358 struct sge_rxq *rxq;
3361 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
3362 for_each_rxq(pi, i, rxq) {
3363 #ifdef atomic_store_rel_8
3364 atomic_store_rel_8(&rxq->iq.intr_params, v);
3366 rxq->iq.intr_params = v;
3377 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
3379 struct port_info *pi = arg1;
3380 struct adapter *sc = pi->adapter;
3385 rc = sysctl_handle_int(oidp, &idx, 0, req);
3386 if (rc != 0 || req->newptr == NULL)
3389 if (idx < -1 || idx >= SGE_NCOUNTERS)
3393 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3394 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3395 rc = EBUSY; /* cannot be changed once the queues are created */
3405 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
3407 struct port_info *pi = arg1;
3408 struct adapter *sc = pi->adapter;
3411 qsize = pi->qsize_rxq;
3413 rc = sysctl_handle_int(oidp, &qsize, 0, req);
3414 if (rc != 0 || req->newptr == NULL)
3417 if (qsize < 128 || (qsize & 7))
3421 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3422 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3423 rc = EBUSY; /* cannot be changed once the queues are created */
3426 pi->qsize_rxq = qsize;
3433 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
3435 struct port_info *pi = arg1;
3436 struct adapter *sc = pi->adapter;
3439 qsize = pi->qsize_txq;
3441 rc = sysctl_handle_int(oidp, &qsize, 0, req);
3442 if (rc != 0 || req->newptr == NULL)
3449 rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
3450 if (rc == 0 && pi->flags & PORT_INIT_DONE)
3451 rc = EBUSY; /* cannot be changed once the queues are created */
3454 pi->qsize_txq = qsize;
3461 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
3463 struct adapter *sc = arg1;
3467 val = t4_read_reg64(sc, reg);
3469 return (sysctl_handle_64(oidp, &val, 0, req));
3474 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
3476 struct adapter *sc = arg1;
3479 uint16_t incr[NMTUS][NCCTRL_WIN];
3480 static const char *dec_fac[] = {
3481 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
3485 rc = sysctl_wire_old_buffer(req, 0);
3489 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3493 t4_read_cong_tbl(sc, incr);
3495 for (i = 0; i < NCCTRL_WIN; ++i) {
3496 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
3497 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
3498 incr[5][i], incr[6][i], incr[7][i]);
3499 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
3500 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
3501 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
3502 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
3505 rc = sbuf_finish(sb);
3512 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
3514 struct adapter *sc = arg1;
3517 struct tp_cpl_stats stats;
3519 rc = sysctl_wire_old_buffer(req, 0);
3523 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3527 t4_tp_get_cpl_stats(sc, &stats);
3529 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
3531 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
3532 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
3533 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
3534 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
3536 rc = sbuf_finish(sb);
3543 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
3545 struct adapter *sc = arg1;
3548 struct tp_usm_stats stats;
3550 rc = sysctl_wire_old_buffer(req, 0);
3554 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3558 t4_get_usm_stats(sc, &stats);
3560 sbuf_printf(sb, "Frames: %u\n", stats.frames);
3561 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
3562 sbuf_printf(sb, "Drops: %u", stats.drops);
3564 rc = sbuf_finish(sb);
3570 const char *devlog_level_strings[] = {
3571 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
3572 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
3573 [FW_DEVLOG_LEVEL_ERR] = "ERR",
3574 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
3575 [FW_DEVLOG_LEVEL_INFO] = "INFO",
3576 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
3579 const char *devlog_facility_strings[] = {
3580 [FW_DEVLOG_FACILITY_CORE] = "CORE",
3581 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
3582 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
3583 [FW_DEVLOG_FACILITY_RES] = "RES",
3584 [FW_DEVLOG_FACILITY_HW] = "HW",
3585 [FW_DEVLOG_FACILITY_FLR] = "FLR",
3586 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
3587 [FW_DEVLOG_FACILITY_PHY] = "PHY",
3588 [FW_DEVLOG_FACILITY_MAC] = "MAC",
3589 [FW_DEVLOG_FACILITY_PORT] = "PORT",
3590 [FW_DEVLOG_FACILITY_VI] = "VI",
3591 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
3592 [FW_DEVLOG_FACILITY_ACL] = "ACL",
3593 [FW_DEVLOG_FACILITY_TM] = "TM",
3594 [FW_DEVLOG_FACILITY_QFC] = "QFC",
3595 [FW_DEVLOG_FACILITY_DCB] = "DCB",
3596 [FW_DEVLOG_FACILITY_ETH] = "ETH",
3597 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
3598 [FW_DEVLOG_FACILITY_RI] = "RI",
3599 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
3600 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
3601 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
3602 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
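/*
 * The firmware's device log is a circular buffer in card memory.  Read the
 * whole thing, byte-swap the entries, find the oldest one (smallest
 * timestamp), and print the log in order starting there.
 */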
3606 sysctl_devlog(SYSCTL_HANDLER_ARGS)
3608 struct adapter *sc = arg1;
3609 struct devlog_params *dparams = &sc->params.devlog;
3610 struct fw_devlog_e *buf, *e;
3611 int i, j, rc, nentries, first = 0;
3613 uint64_t ftstamp = UINT64_MAX;
3615 if (dparams->start == 0)
3618 nentries = dparams->size / sizeof(struct fw_devlog_e);
3620 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
3624 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
3629 for (i = 0; i < nentries; i++) {
3632 if (e->timestamp == 0)
3635 e->timestamp = be64toh(e->timestamp);
3636 e->seqno = be32toh(e->seqno);
3637 for (j = 0; j < 8; j++)
3638 e->params[j] = be32toh(e->params[j]);
3640 if (e->timestamp < ftstamp) {
3641 ftstamp = e->timestamp;
3646 if (buf[first].timestamp == 0)
3647 goto done; /* nothing in the log */
3649 rc = sysctl_wire_old_buffer(req, 0);
3653 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3658 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
3659 "Seq#", "Tstamp", "Level", "Facility", "Message");
3664 if (e->timestamp == 0)
3667 sbuf_printf(sb, "%10d %15ju %8s %8s ",
3668 e->seqno, e->timestamp,
3669 (e->level < ARRAY_SIZE(devlog_level_strings) ?
3670 devlog_level_strings[e->level] : "UNKNOWN"),
3671 (e->facility < ARRAY_SIZE(devlog_facility_strings) ?
3672 devlog_facility_strings[e->facility] : "UNKNOWN"));
3673 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
3674 e->params[2], e->params[3], e->params[4],
3675 e->params[5], e->params[6], e->params[7]);
3677 if (++i == nentries)
3679 } while (i != first);
3681 rc = sbuf_finish(sb);
3689 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
3691 struct adapter *sc = arg1;
3694 struct tp_fcoe_stats stats[4];
3696 rc = sysctl_wire_old_buffer(req, 0);
3700 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3704 t4_get_fcoe_stats(sc, 0, &stats[0]);
3705 t4_get_fcoe_stats(sc, 1, &stats[1]);
3706 t4_get_fcoe_stats(sc, 2, &stats[2]);
3707 t4_get_fcoe_stats(sc, 3, &stats[3]);
3709 sbuf_printf(sb, " channel 0 channel 1 "
3710 "channel 2 channel 3\n");
3711 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
3712 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
3713 stats[3].octetsDDP);
3714 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
3715 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
3716 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
3717 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
3718 stats[3].framesDrop);
3720 rc = sbuf_finish(sb);
3727 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
3729 struct adapter *sc = arg1;
3732 unsigned int map, kbps, ipg, mode;
3733 unsigned int pace_tab[NTX_SCHED];
3735 rc = sysctl_wire_old_buffer(req, 0);
3739 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
3743 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
3744 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
3745 t4_read_pace_tbl(sc, pace_tab);
3747 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
3748 "Class IPG (0.1 ns) Flow IPG (us)");
3750 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
3751 t4_get_tx_sched(sc, i, &kbps, &ipg);
3752 sbuf_printf(sb, "\n %u %-5s %u ", i,
3753 (mode & (1 << i)) ? "flow" : "class", map & 3);
3755 sbuf_printf(sb, "%9u ", kbps);
3757 sbuf_printf(sb, " disabled ");
3760 sbuf_printf(sb, "%13u ", ipg);
3762 sbuf_printf(sb, " disabled ");
3765 sbuf_printf(sb, "%10u", pace_tab[i]);
3767 sbuf_printf(sb, " disabled");
3770 rc = sbuf_finish(sb);
3777 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
3779 struct adapter *sc = arg1;
3783 struct lb_port_stats s[2];
3784 static const char *stat_name[] = {
3785 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
3786 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
3787 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
3788 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
3789 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
3790 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
3791 "BG2FramesTrunc:", "BG3FramesTrunc:"
3794 rc = sysctl_wire_old_buffer(req, 0);
3798 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3802 memset(s, 0, sizeof(s));
3804 for (i = 0; i < 4; i += 2) {
3805 t4_get_lb_stats(sc, i, &s[0]);
3806 t4_get_lb_stats(sc, i + 1, &s[1]);
3810 sbuf_printf(sb, "%s Loopback %u"
3811 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
3813 for (j = 0; j < ARRAY_SIZE(stat_name); j++)
3814 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
3818 rc = sbuf_finish(sb);
3831 mem_desc_cmp(const void *a, const void *b)
3833 return ((const struct mem_desc *)a)->base -
3834 ((const struct mem_desc *)b)->base;
3838 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
3843 size = to - from + 1;
3847 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
3848 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
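/*
 * Report the card's memory layout: which memories (EDC0/EDC1/MC) are
 * enabled, where the various hardware regions live within them, any holes
 * in between, and page usage counters.
 */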
3852 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
3854 struct adapter *sc = arg1;
3858 static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
3859 static const char *region[] = {
3860 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
3861 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
3862 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
3863 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
3864 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
3865 "ULPTX state:", "On-chip queues:"
3867 struct mem_desc avail[3];
3868 struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
3869 struct mem_desc *md = mem;
3871 rc = sysctl_wire_old_buffer(req, 0);
3875 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3879 for (i = 0; i < ARRAY_SIZE(mem); i++) {
3884 /* Find and sort the populated memory ranges */
3886 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3887 if (lo & F_EDRAM0_ENABLE) {
3888 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3889 avail[i].base = G_EDRAM0_BASE(hi) << 20;
3890 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
3894 if (lo & F_EDRAM1_ENABLE) {
3895 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3896 avail[i].base = G_EDRAM1_BASE(hi) << 20;
3897 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
3901 if (lo & F_EXT_MEM_ENABLE) {
3902 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3903 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
3904 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
3908 if (!i) /* no memory available */
3910 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
3912 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
3913 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
3914 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
3915 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
3916 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
3917 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
3918 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
3919 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
3920 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
3922 /* the next few have explicit upper bounds */
3923 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
3924 md->limit = md->base - 1 +
3925 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
3926 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
3929 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
3930 md->limit = md->base - 1 +
3931 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
3932 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
3935 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
3936 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
3937 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
3938 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
3941 md->idx = ARRAY_SIZE(region); /* hide it */
3945 #define ulp_region(reg) \
3946 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
3947 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
3949 ulp_region(RX_ISCSI);
3950 ulp_region(RX_TDDP);
3952 ulp_region(RX_STAG);
3954 ulp_region(RX_RQUDP);
3959 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
3960 md->limit = md->base + sc->tids.ntids - 1;
3962 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
3963 md->limit = md->base + sc->tids.ntids - 1;
3966 md->base = sc->vres.ocq.start;
3967 if (sc->vres.ocq.size)
3968 md->limit = md->base + sc->vres.ocq.size - 1;
3970 md->idx = ARRAY_SIZE(region); /* hide it */
3973 /* add any address-space holes, there can be up to 3 */
3974 for (n = 0; n < i - 1; n++)
3975 if (avail[n].limit < avail[n + 1].base)
3976 (md++)->base = avail[n].limit;
3978 (md++)->base = avail[n].limit;
3981 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
3983 for (lo = 0; lo < i; lo++)
3984 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
3985 avail[lo].limit - 1);
3987 sbuf_printf(sb, "\n");
3988 for (i = 0; i < n; i++) {
3989 if (mem[i].idx >= ARRAY_SIZE(region))
3990 continue; /* skip holes */
3992 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
3993 mem_region_show(sb, region[mem[i].idx], mem[i].base,
3997 sbuf_printf(sb, "\n");
3998 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
3999 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
4000 mem_region_show(sb, "uP RAM:", lo, hi);
4002 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
4003 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
4004 mem_region_show(sb, "uP Extmem2:", lo, hi);
4006 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
4007 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
4009 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
4010 (lo & F_PMRXNUMCHN) ? 2 : 1);
4012 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
4013 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
4014 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
4016 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
4017 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
4018 sbuf_printf(sb, "%u p-structs\n",
4019 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
4021 for (i = 0; i < 4; i++) {
4022 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
4023 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
4024 i, G_USED(lo), G_ALLOC(lo));
4026 for (i = 0; i < 4; i++) {
4027 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
4029 "\nLoopback %d using %u pages out of %u allocated",
4030 i, G_USED(lo), G_ALLOC(lo));
4033 rc = sbuf_finish(sb);
4040 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
4042 struct adapter *sc = arg1;
4045 uint16_t mtus[NMTUS];
4047 rc = sysctl_wire_old_buffer(req, 0);
4051 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4055 t4_read_mtu_tbl(sc, mtus, NULL);
4057 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
4058 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
4059 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
4060 mtus[14], mtus[15]);
4062 rc = sbuf_finish(sb);
4069 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
4071 struct adapter *sc = arg1;
4074 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
4075 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
4076 static const char *pm_stats[] = {
4077 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
4080 rc = sysctl_wire_old_buffer(req, 0);
4084 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4088 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
4089 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
4091 sbuf_printf(sb, " Tx count Tx cycles "
4092 "Rx count Rx cycles");
4093 for (i = 0; i < PM_NSTATS; i++)
4094 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
4095 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
4097 rc = sbuf_finish(sb);
4104 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
4106 struct adapter *sc = arg1;
4109 struct tp_rdma_stats stats;
4111 rc = sysctl_wire_old_buffer(req, 0);
4115 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4119 t4_tp_get_rdma_stats(sc, &stats);
4120 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
4121 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
4123 rc = sbuf_finish(sb);
4130 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
4132 struct adapter *sc = arg1;
4135 struct tp_tcp_stats v4, v6;
4137 rc = sysctl_wire_old_buffer(req, 0);
4141 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4145 t4_tp_get_tcp_stats(sc, &v4, &v6);
4148 sbuf_printf(sb, "OutRsts: %20u %20u\n",
4149 v4.tcpOutRsts, v6.tcpOutRsts);
4150 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
4151 v4.tcpInSegs, v6.tcpInSegs);
4152 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
4153 v4.tcpOutSegs, v6.tcpOutSegs);
4154 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
4155 v4.tcpRetransSegs, v6.tcpRetransSegs);
4157 rc = sbuf_finish(sb);
4164 sysctl_tids(SYSCTL_HANDLER_ARGS)
4166 struct adapter *sc = arg1;
4169 struct tid_info *t = &sc->tids;
4171 rc = sysctl_wire_old_buffer(req, 0);
4175 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4180 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
4185 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
4186 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
4189 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
4190 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4193 sbuf_printf(sb, "TID range: %u-%u",
4194 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4198 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
4199 sbuf_printf(sb, ", in use: %u\n",
4200 atomic_load_acq_int(&t->tids_in_use));
4204 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
4205 t->stid_base + t->nstids - 1, t->stids_in_use);
4209 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
4210 t->ftid_base + t->nftids - 1);
4213 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
4214 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
4215 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
4217 rc = sbuf_finish(sb);
4224 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
4226 struct adapter *sc = arg1;
4229 struct tp_err_stats stats;
4231 rc = sysctl_wire_old_buffer(req, 0);
4235 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4239 t4_tp_get_err_stats(sc, &stats);
4241 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4243 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
4244 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
4245 stats.macInErrs[3]);
4246 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
4247 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
4248 stats.hdrInErrs[3]);
4249 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
4250 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
4251 stats.tcpInErrs[3]);
4252 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
4253 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
4254 stats.tcp6InErrs[3]);
4255 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
4256 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
4257 stats.tnlCongDrops[3]);
4258 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
4259 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
4260 stats.tnlTxDrops[3]);
4261 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
4262 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
4263 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
4264 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
4265 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
4266 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
4267 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
4268 stats.ofldNoNeigh, stats.ofldCongDefer);
4270 rc = sbuf_finish(sb);
4277 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
4279 struct adapter *sc = arg1;
4282 u64 nrate[NCHAN], orate[NCHAN];
4284 rc = sysctl_wire_old_buffer(req, 0);
4288 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4292 t4_get_chan_txrate(sc, nrate, orate);
4293 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4295 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
4296 nrate[0], nrate[1], nrate[2], nrate[3]);
4297 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
4298 orate[0], orate[1], orate[2], orate[3]);
4300 rc = sbuf_finish(sb);
4308 txq_start(struct ifnet *ifp, struct sge_txq *txq)
4310 struct buf_ring *br;
4313 TXQ_LOCK_ASSERT_OWNED(txq);
4316 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
4318 t4_eth_tx(ifp, txq, m);
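/*
 * Runs while an egress queue is stalled: rearms itself for another tick if
 * transmission still can't resume, otherwise restarts tx (directly or by
 * queueing the eq's tx task).
 */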
4322 t4_tx_callout(void *arg)
4324 struct sge_eq *eq = arg;
4327 if (EQ_TRYLOCK(eq) == 0)
4330 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
4333 if (__predict_true(!(eq->flags & EQ_DOOMED)))
4334 callout_schedule(&eq->tx_callout, 1);
4338 EQ_LOCK_ASSERT_OWNED(eq);
4340 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
4342 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4343 struct sge_txq *txq = arg;
4344 struct port_info *pi = txq->ifp->if_softc;
4348 struct sge_wrq *wrq = arg;
4353 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
4360 t4_tx_task(void *arg, int count)
4362 struct sge_eq *eq = arg;
4365 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4366 struct sge_txq *txq = arg;
4367 txq_start(txq->ifp, txq);
4369 struct sge_wrq *wrq = arg;
4370 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
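/*
 * Translate between the TP's compressed filter tuple configuration (F_*
 * fconf bits) and the T4_FILTER_* mode bits used by the driver's ioctl
 * interface.
 */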
4376 fconf_to_mode(uint32_t fconf)
4380 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
4381 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
4383 if (fconf & F_FRAGMENTATION)
4384 mode |= T4_FILTER_IP_FRAGMENT;
4386 if (fconf & F_MPSHITTYPE)
4387 mode |= T4_FILTER_MPS_HIT_TYPE;
4389 if (fconf & F_MACMATCH)
4390 mode |= T4_FILTER_MAC_IDX;
4392 if (fconf & F_ETHERTYPE)
4393 mode |= T4_FILTER_ETH_TYPE;
4395 if (fconf & F_PROTOCOL)
4396 mode |= T4_FILTER_IP_PROTO;
4399 mode |= T4_FILTER_IP_TOS;
4402 mode |= T4_FILTER_VLAN;
4404 if (fconf & F_VNIC_ID)
4405 mode |= T4_FILTER_VNIC;
4408 mode |= T4_FILTER_PORT;
4411 mode |= T4_FILTER_FCoE;
4417 mode_to_fconf(uint32_t mode)
4421 if (mode & T4_FILTER_IP_FRAGMENT)
4422 fconf |= F_FRAGMENTATION;
4424 if (mode & T4_FILTER_MPS_HIT_TYPE)
4425 fconf |= F_MPSHITTYPE;
4427 if (mode & T4_FILTER_MAC_IDX)
4428 fconf |= F_MACMATCH;
4430 if (mode & T4_FILTER_ETH_TYPE)
4431 fconf |= F_ETHERTYPE;
4433 if (mode & T4_FILTER_IP_PROTO)
4434 fconf |= F_PROTOCOL;
4436 if (mode & T4_FILTER_IP_TOS)
4439 if (mode & T4_FILTER_VLAN)
4442 if (mode & T4_FILTER_VNIC)
4445 if (mode & T4_FILTER_PORT)
4448 if (mode & T4_FILTER_FCoE)
4455 fspec_to_fconf(struct t4_filter_specification *fs)
4459 if (fs->val.frag || fs->mask.frag)
4460 fconf |= F_FRAGMENTATION;
4462 if (fs->val.matchtype || fs->mask.matchtype)
4463 fconf |= F_MPSHITTYPE;
4465 if (fs->val.macidx || fs->mask.macidx)
4466 fconf |= F_MACMATCH;
4468 if (fs->val.ethtype || fs->mask.ethtype)
4469 fconf |= F_ETHERTYPE;
4471 if (fs->val.proto || fs->mask.proto)
4472 fconf |= F_PROTOCOL;
4474 if (fs->val.tos || fs->mask.tos)
4477 if (fs->val.vlan_vld || fs->mask.vlan_vld)
4480 if (fs->val.vnic_vld || fs->mask.vnic_vld)
4483 if (fs->val.iport || fs->mask.iport)
4486 if (fs->val.fcoe || fs->mask.fcoe)
4493 get_filter_mode(struct adapter *sc, uint32_t *mode)
4497 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
4500 if (sc->filter_mode != fconf) {
4501 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
4502 device_get_nameunit(sc->dev), sc->filter_mode, fconf);
4503 sc->filter_mode = fconf;
4506 *mode = fconf_to_mode(sc->filter_mode);
4512 set_filter_mode(struct adapter *sc, uint32_t mode)
4517 fconf = mode_to_fconf(mode);
4525 if (sc->tids.ftids_in_use > 0) {
4530 #ifndef TCP_OFFLOAD_DISABLE
4531 if (sc->offload_map) {
4538 rc = -t4_set_filter_mode(sc, fconf);
4540 sc->filter_mode = fconf;
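/*
 * The hit count lives in the filter's TCB; point PCIe memory window 0 at
 * the TCB and read the 64-bit counter out of it.
 */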
4550 static inline uint64_t
4551 get_filter_hits(struct adapter *sc, uint32_t fid)
4553 uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
4556 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
4557 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
4558 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
4559 hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);
4561 return (be64toh(hits));
4565 get_filter(struct adapter *sc, struct t4_filter *t)
4567 int i, nfilters = sc->tids.nftids;
4568 struct filter_entry *f;
4570 ADAPTER_LOCK_ASSERT_OWNED(sc);
4575 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
4576 t->idx >= nfilters) {
4577 t->idx = 0xffffffff;
4581 f = &sc->tids.ftid_tab[t->idx];
4582 for (i = t->idx; i < nfilters; i++, f++) {
4585 t->l2tidx = f->l2t ? f->l2t->idx : 0;
4586 t->smtidx = f->smtidx;
4588 t->hits = get_filter_hits(sc, t->idx);
4590 t->hits = UINT64_MAX;
4597 t->idx = 0xffffffff;
4602 set_filter(struct adapter *sc, struct t4_filter *t)
4604 unsigned int nfilters, nports;
4605 struct filter_entry *f;
4608 ADAPTER_LOCK_ASSERT_OWNED(sc);
4610 nfilters = sc->tids.nftids;
4611 nports = sc->params.nports;
4616 if (!(sc->flags & FULL_INIT_DONE))
4619 if (t->idx >= nfilters)
4622 /* Validate against the global filter mode */
4623 if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode)
4626 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
4629 if (t->fs.val.iport >= nports)
4632 /* Can't specify an iq if not steering to it */
4633 if (!t->fs.dirsteer && t->fs.iq)
4636 /* IPv6 filter idx must be 4 aligned */
4637 if (t->fs.type == 1 &&
4638 ((t->idx & 0x3) || t->idx + 4 >= nfilters))
4641 if (sc->tids.ftid_tab == NULL) {
4642 KASSERT(sc->tids.ftids_in_use == 0,
4643 ("%s: no memory allocated but filters_in_use > 0",
4646 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
4647 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
4648 if (sc->tids.ftid_tab == NULL)
4652 for (i = 0; i < 4; i++) {
4653 f = &sc->tids.ftid_tab[t->idx + i];
4655 if (f->pending || f->valid)
4660 if (t->fs.type == 0)
4664 f = &sc->tids.ftid_tab[t->idx];
4667 return set_filter_wr(sc, t->idx);
4671 del_filter(struct adapter *sc, struct t4_filter *t)
4673 unsigned int nfilters;
4674 struct filter_entry *f;
4676 ADAPTER_LOCK_ASSERT_OWNED(sc);
4681 nfilters = sc->tids.nftids;
4686 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
4690 if (!(sc->flags & FULL_INIT_DONE))
4693 f = &sc->tids.ftid_tab[t->idx];
4701 t->fs = f->fs; /* extra info for the caller */
4702 return del_filter_wr(sc, t->idx);
4709 clear_filter(struct filter_entry *f)
4712 t4_l2t_release(f->l2t);
4714 bzero(f, sizeof (*f));
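/*
 * Build a FW_FILTER_WR for the filter at the given index and hand it to the
 * firmware.  Filters that rewrite the destination MAC or VLAN get an L2T
 * switching entry first.  Completion is reported asynchronously via
 * filter_rpl.
 */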
4718 set_filter_wr(struct adapter *sc, int fidx)
4720 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4722 struct fw_filter_wr *fwr;
4725 ADAPTER_LOCK_ASSERT_OWNED(sc);
4727 if (f->fs.newdmac || f->fs.newvlan) {
4728 /* This filter needs an L2T entry; allocate one. */
4729 f->l2t = t4_l2t_alloc_switching(sc->l2t);
4732 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
4734 t4_l2t_release(f->l2t);
4740 ftid = sc->tids.ftid_base + fidx;
4742 m = m_gethdr(M_NOWAIT, MT_DATA);
4746 fwr = mtod(m, struct fw_filter_wr *);
4747 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
4748 bzero(fwr, sizeof (*fwr));
4750 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
4751 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
4753 htobe32(V_FW_FILTER_WR_TID(ftid) |
4754 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
4755 V_FW_FILTER_WR_NOREPLY(0) |
4756 V_FW_FILTER_WR_IQ(f->fs.iq));
4757 fwr->del_filter_to_l2tix =
4758 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
4759 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
4760 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
4761 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
4762 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
4763 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
4764 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
4765 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
4766 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
4767 f->fs.newvlan == VLAN_REWRITE) |
4768 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
4769 f->fs.newvlan == VLAN_REWRITE) |
4770 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
4771 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
4772 V_FW_FILTER_WR_PRIO(f->fs.prio) |
4773 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
4774 fwr->ethtype = htobe16(f->fs.val.ethtype);
4775 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
4776 fwr->frag_to_ovlan_vldm =
4777 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
4778 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
4779 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
4780 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
4781 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
4782 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
4784 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
4785 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
4786 fwr->maci_to_matchtypem =
4787 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
4788 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
4789 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
4790 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
4791 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
4792 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
4793 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
4794 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
4795 fwr->ptcl = f->fs.val.proto;
4796 fwr->ptclm = f->fs.mask.proto;
4797 fwr->ttyp = f->fs.val.tos;
4798 fwr->ttypm = f->fs.mask.tos;
4799 fwr->ivlan = htobe16(f->fs.val.vlan);
4800 fwr->ivlanm = htobe16(f->fs.mask.vlan);
4801 fwr->ovlan = htobe16(f->fs.val.vnic);
4802 fwr->ovlanm = htobe16(f->fs.mask.vnic);
4803 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
4804 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
4805 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
4806 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
4807 fwr->lp = htobe16(f->fs.val.dport);
4808 fwr->lpm = htobe16(f->fs.mask.dport);
4809 fwr->fp = htobe16(f->fs.val.sport);
4810 fwr->fpm = htobe16(f->fs.mask.sport);
4812 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
4815 sc->tids.ftids_in_use++;
4822 del_filter_wr(struct adapter *sc, int fidx)
4824 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
4826 struct fw_filter_wr *fwr;
4829 ADAPTER_LOCK_ASSERT_OWNED(sc);
4831 ftid = sc->tids.ftid_base + fidx;
4833 m = m_gethdr(M_NOWAIT, MT_DATA);
4837 fwr = mtod(m, struct fw_filter_wr *);
4838 m->m_len = m->m_pkthdr.len = sizeof(*fwr);
4839 bzero(fwr, sizeof (*fwr));
4841 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
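/*
 * Handler for the CPL_SET_TCB_RPL generated by a filter work request: a
 * successful add marks the filter valid, anything else clears the entry
 * (and logs an error unless it was a clean delete).
 */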
4849 filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4851 struct adapter *sc = iq->adapter;
4852 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
4853 unsigned int idx = GET_TID(rpl);
4855 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4858 if (idx >= sc->tids.ftid_base &&
4859 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
4860 unsigned int rc = G_COOKIE(rpl->cookie);
4861 struct filter_entry *f = &sc->tids.ftid_tab[idx];
4863 if (rc == FW_FILTER_WR_FLT_ADDED) {
4864 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
4865 f->pending = 0; /* asynchronous setup completed */
4870 if (rc != FW_FILTER_WR_FLT_DELETED) {
4871 /* Add or delete failed, need to display an error */
4872 device_printf(sc->dev,
4873 "filter %u setup failed with error %u\n", idx, rc);
4878 sc->tids.ftids_in_use--;
4886 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
4890 if (cntxt->cid > M_CTXTQID)
4893 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
4894 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
4897 if (sc->flags & FW_OK) {
4898 ADAPTER_LOCK(sc); /* Avoid parallel t4_wr_mbox */
4899 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
4905 /* Read via firmware failed or wasn't even attempted */
4907 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
4915 read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
4917 uint32_t base, size, lo, hi, win, off, remaining, i, n;
4921 /* reads are in multiples of 32 bits */
4922 if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
4926 * We don't want to deal with potential holes so we mandate that the
4927 * requested region must lie entirely within one of the 3 memories.
4929 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4930 if (lo & F_EDRAM0_ENABLE) {
4931 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4932 base = G_EDRAM0_BASE(hi) << 20;
4933 size = G_EDRAM0_SIZE(hi) << 20;
4935 mr->addr >= base && mr->addr < base + size &&
4936 mr->addr + mr->len <= base + size)
4939 if (lo & F_EDRAM1_ENABLE) {
4940 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4941 base = G_EDRAM1_BASE(hi) << 20;
4942 size = G_EDRAM1_SIZE(hi) << 20;
4944 mr->addr >= base && mr->addr < base + size &&
4945 mr->addr + mr->len <= base + size)
4948 if (lo & F_EXT_MEM_ENABLE) {
4949 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4950 base = G_EXT_MEM_BASE(hi) << 20;
4951 size = G_EXT_MEM_SIZE(hi) << 20;
4953 mr->addr >= base && mr->addr < base + size &&
4954 mr->addr + mr->len <= base + size)
4960 buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);
4962 /*
4963 * Position the PCIe window (we use memwin2) to the 16B aligned area
4964 * just at/before the requested region.
4965 */
4966 win = mr->addr & ~0xf;
4967 off = mr->addr - win; /* offset of the requested region in the win */
4968 remaining = mr->len;
4971 t4_write_reg(sc,
4972 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
4973 t4_read_reg(sc, /* read back to flush the window update */
4974 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
4976 /* number of bytes that we'll copy in the inner loop */
4977 n = min(remaining, MEMWIN2_APERTURE - off);
4979 for (i = 0; i < n; i += 4, remaining -= 4)
4980 *b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);
4982 win += MEMWIN2_APERTURE;
4986 rc = copyout(buf, mr->data, mr->len);
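/*
 * The t4_os_* routines below are small OS-facing helpers, presumably called
 * from the shared common/ code: PCI capability lookup and config-space
 * save/restore are simply delegated to the FreeBSD PCI layer.
 */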
4993 t4_os_find_pci_capability(struct adapter *sc, int cap)
4997 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
5001 t4_os_pci_save_state(struct adapter *sc)
5004 struct pci_devinfo *dinfo;
5007 dinfo = device_get_ivars(dev);
5009 pci_cfg_save(dev, dinfo, 0);
5014 t4_os_pci_restore_state(struct adapter *sc)
5017 struct pci_devinfo *dinfo;
5020 dinfo = device_get_ivars(dev);
5022 pci_cfg_restore(dev, dinfo);
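/*
 * Report a transceiver (module) change on the given port.  Known module types
 * are printed by name from mod_str[]; unplugged, unknown, and unsupported
 * modules get explicit messages, and anything else falls back to printing the
 * raw type number.
 */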
5027 t4_os_portmod_changed(const struct adapter *sc, int idx)
5029 struct port_info *pi = sc->port[idx];
5030 static const char *mod_str[] = {
5031 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
5034 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
5035 if_printf(pi->ifp, "transceiver unplugged.\n");
5036 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
5037 if_printf(pi->ifp, "unknown transceiver inserted.\n");
5038 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
5039 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
5040 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
5041 if_printf(pi->ifp, "%s transceiver inserted.\n",
5042 mod_str[pi->mod_type]);
5044 if_printf(pi->ifp, "transceiver (type %d) inserted.\n", pi->mod_type);
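/*
 * Propagate a link state change to the ifnet: on link up the interface
 * baudrate is refreshed from the negotiated speed before the state change is
 * announced; on link down only the state change is reported.
 */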
5050 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
5052 struct port_info *pi = sc->port[idx];
5053 struct ifnet *ifp = pi->ifp;
5056 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
5057 if_link_state_change(ifp, LINK_STATE_UP);
5059 if_link_state_change(ifp, LINK_STATE_DOWN);
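/*
 * Invoke func once for every adapter on the global list, with the list lock
 * held across the walk.
 */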
5063 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
5067 mtx_lock(&t4_list_lock);
5068 SLIST_FOREACH(sc, &t4_list, link) {
5069 /*
5070 * func should not make any assumptions about what state sc is
5071 * in - the only guarantee is that sc->sc_lock is a valid lock.
5072 */
5075 mtx_unlock(&t4_list_lock);
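/*
 * Character-device entry points for the control node registered in t4_cdevsw.
 * open and close are no-ops; t4_ioctl requires PRIV_DRIVER and then dispatches
 * on the CHELSIO_T4_* command.  Illustrative userland usage (a sketch only --
 * the "/dev/t4nex0" path and the exact t4_reg layout are assumed here, not
 * taken from this file):
 *
 *	struct t4_reg r = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd != -1 && ioctl(fd, CHELSIO_T4_GETREG, &r) == 0)
 *		printf("0x%x = 0x%jx\n", r.addr, (uintmax_t)r.val);
 */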
5079 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
5085 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
5091 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
5095 struct adapter *sc = dev->si_drv1;
5097 rc = priv_check(td, PRIV_DRIVER);
5102 case CHELSIO_T4_GETREG: {
5103 struct t4_reg *edata = (struct t4_reg *)data;
5105 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
5108 if (edata->size == 4)
5109 edata->val = t4_read_reg(sc, edata->addr);
5110 else if (edata->size == 8)
5111 edata->val = t4_read_reg64(sc, edata->addr);
5117 case CHELSIO_T4_SETREG: {
5118 struct t4_reg *edata = (struct t4_reg *)data;
5120 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
5123 if (edata->size == 4) {
5124 if (edata->val & 0xffffffff00000000)
5126 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
5127 } else if (edata->size == 8)
5128 t4_write_reg64(sc, edata->addr, edata->val);
5133 case CHELSIO_T4_REGDUMP: {
5134 struct t4_regdump *regs = (struct t4_regdump *)data;
5135 int reglen = T4_REGDUMP_SIZE;
5138 if (regs->len < reglen) {
5139 regs->len = reglen; /* hint to the caller */
5144 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
5145 t4_get_regs(sc, regs, buf);
5146 rc = copyout(buf, regs->data, reglen);
5150 case CHELSIO_T4_GET_FILTER_MODE:
5151 rc = get_filter_mode(sc, (uint32_t *)data);
5153 case CHELSIO_T4_SET_FILTER_MODE:
5154 rc = set_filter_mode(sc, *(uint32_t *)data);
5156 case CHELSIO_T4_GET_FILTER:
5158 rc = get_filter(sc, (struct t4_filter *)data);
5161 case CHELSIO_T4_SET_FILTER:
5163 rc = set_filter(sc, (struct t4_filter *)data);
5166 case CHELSIO_T4_DEL_FILTER:
5168 rc = del_filter(sc, (struct t4_filter *)data);
5171 case CHELSIO_T4_GET_SGE_CONTEXT:
5172 rc = get_sge_context(sc, (struct t4_sge_context *)data);
5174 case CHELSIO_T4_LOAD_FW: {
5175 struct t4_data *fw = (struct t4_data *)data;
5178 if (sc->flags & FULL_INIT_DONE)
5181 fw_data = malloc(fw->len, M_CXGBE, M_NOWAIT);
5182 if (fw_data == NULL)
5185 rc = copyin(fw->data, fw_data, fw->len);
5187 rc = -t4_load_fw(sc, fw_data, fw->len);
5189 free(fw_data, M_CXGBE);
5192 case CHELSIO_T4_GET_MEM:
5193 rc = read_card_mem(sc, (struct t4_mem_range *)data);
5202 #ifndef TCP_OFFLOAD_DISABLE
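/*
 * Enable or disable TOE (TCP offload) on a port.  The first port to enable
 * offload activates the TOM upper layer driver for the adapter; the bitmap
 * sc->offload_map tracks which ports have it enabled, and the TOM ULD is
 * deactivated again when the last port turns offload off.
 */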
5204 toe_capability(struct port_info *pi, int enable)
5207 struct adapter *sc = pi->adapter;
5209 ADAPTER_LOCK_ASSERT_OWNED(sc);
5211 if (!is_offload(sc))
5215 if (isset(&sc->offload_map, pi->port_id))
5218 if (sc->offload_map == 0) {
5219 rc = activate_uld(sc, ULD_TOM, &sc->tom);
5224 setbit(&sc->offload_map, pi->port_id);
5226 if (!isset(&sc->offload_map, pi->port_id))
5229 clrbit(&sc->offload_map, pi->port_id);
5231 if (sc->offload_map == 0) {
5232 rc = deactivate_uld(&sc->tom);
5234 setbit(&sc->offload_map, pi->port_id);
5243 /*
5244 * Add an upper layer driver to the global list.
5245 */
5247 t4_register_uld(struct uld_info *ui)
5252 mtx_lock(&t4_uld_list_lock);
5253 SLIST_FOREACH(u, &t4_uld_list, link) {
5254 if (u->uld_id == ui->uld_id) {
5260 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
5263 mtx_unlock(&t4_uld_list_lock);
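/*
 * A ULD advertises itself by filling in a struct uld_info and calling
 * t4_register_uld().  Illustrative sketch only -- tom_uld_info, tom_attach,
 * and tom_detach are hypothetical names; the real layout of struct uld_info
 * lives in the driver headers, not in this file:
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.attach = tom_attach,
 *		.detach = tom_detach,
 *	};
 *
 *	rc = t4_register_uld(&tom_uld_info);
 */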
5268 t4_unregister_uld(struct uld_info *ui)
5273 mtx_lock(&t4_uld_list_lock);
5275 SLIST_FOREACH(u, &t4_uld_list, link) {
5277 if (ui->refcount > 0) {
5282 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
5288 mtx_unlock(&t4_uld_list_lock);
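/*
 * activate_uld and deactivate_uld (below) look up a registered ULD by id and
 * call its attach/detach callbacks, adjusting the ULD's refcount so that
 * t4_unregister_uld() can refuse to remove a ULD that is still in use.
 */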
5293 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
5296 struct uld_info *ui;
5298 mtx_lock(&t4_uld_list_lock);
5300 SLIST_FOREACH(ui, &t4_uld_list, link) {
5301 if (ui->uld_id == id) {
5302 rc = ui->attach(sc, &usc->softc);
5304 KASSERT(usc->softc != NULL,
5305 ("%s: ULD %d has no state", __func__, id));
5313 mtx_unlock(&t4_uld_list_lock);
5319 deactivate_uld(struct uld_softc *usc)
5323 mtx_lock(&t4_uld_list_lock);
5325 if (usc->uld == NULL || usc->softc == NULL) {
5330 rc = usc->uld->detach(usc->softc);
5332 KASSERT(usc->uld->refcount > 0,
5333 ("%s: ULD has bad refcount", __func__));
5334 usc->uld->refcount--;
5339 mtx_unlock(&t4_uld_list_lock);
5345 /*
5346 * Come up with reasonable defaults for some of the tunables, provided they're
5347 * not set by the user (in which case we'll use the values as is).
5348 */
5350 tweak_tunables(void)
5352 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
5354 if (t4_ntxq10g < 1)
5355 t4_ntxq10g = min(nc, NTXQ_10G);
5357 if (t4_ntxq1g < 1)
5358 t4_ntxq1g = min(nc, NTXQ_1G);
5360 if (t4_nrxq10g < 1)
5361 t4_nrxq10g = min(nc, NRXQ_10G);
5363 if (t4_nrxq1g < 1)
5364 t4_nrxq1g = min(nc, NRXQ_1G);
5366 #ifndef TCP_OFFLOAD_DISABLE
5367 if (t4_nofldtxq10g < 1)
5368 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
5370 if (t4_nofldtxq1g < 1)
5371 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
5373 if (t4_nofldrxq10g < 1)
5374 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
5376 if (t4_nofldrxq1g < 1)
5377 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
5380 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
5381 t4_tmr_idx_10g = TMR_IDX_10G;
5383 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
5384 t4_pktc_idx_10g = PKTC_IDX_10G;
5386 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
5387 t4_tmr_idx_1g = TMR_IDX_1G;
5389 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
5390 t4_pktc_idx_1g = PKTC_IDX_1G;
5392 if (t4_qsize_txq < 128)
5393 t4_qsize_txq = 128;
5395 if (t4_qsize_rxq < 128)
5396 t4_qsize_rxq = 128;
5397 while (t4_qsize_rxq & 7)
5398 t4_qsize_rxq++;
5400 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
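/*
 * Module event handler: on load it initializes the global adapter list (and,
 * with TCP offload enabled, the ULD list), and presumably applies the tunable
 * defaults above; on unload it refuses to go away while any adapter or ULD is
 * still on a list, and tears the locks down otherwise.
 */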
5404 t4_mod_event(module_t mod, int cmd, void *arg)
5411 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
5412 SLIST_INIT(&t4_list);
5413 #ifndef TCP_OFFLOAD_DISABLE
5414 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
5415 SLIST_INIT(&t4_uld_list);
5421 #ifndef TCP_OFFLOAD_DISABLE
5422 mtx_lock(&t4_uld_list_lock);
5423 if (!SLIST_EMPTY(&t4_uld_list)) {
5425 mtx_unlock(&t4_uld_list_lock);
5428 mtx_unlock(&t4_uld_list_lock);
5429 mtx_destroy(&t4_uld_list_lock);
5431 mtx_lock(&t4_list_lock);
5432 if (!SLIST_EMPTY(&t4_list)) {
5434 mtx_unlock(&t4_list_lock);
5437 mtx_unlock(&t4_list_lock);
5438 mtx_destroy(&t4_list_lock);
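/*
 * Newbus glue: t4nex (the adapter nexus) attaches to the PCI bus with
 * t4_mod_event as its module event handler, and the cxgbe port devices attach
 * underneath t4nex.
 */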
5445 static devclass_t t4_devclass;
5446 static devclass_t cxgbe_devclass;
5448 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
5449 MODULE_VERSION(t4nex, 1);
5451 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
5452 MODULE_VERSION(cxgbe, 1);