/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
34 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
59 #include "common/common.h"
60 #include "common/t4_msg.h"
61 #include "common/t4_regs.h"
62 #include "common/t4_regs_values.h"
66 /* T4 bus driver interface */
67 static int t4_probe(device_t);
68 static int t4_attach(device_t);
69 static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
84 /* T4 port (cxgbe) interface */
85 static int cxgbe_probe(device_t);
86 static int cxgbe_attach(device_t);
87 static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),

	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};
100 static d_ioctl_t t4_ioctl;
101 static d_open_t t4_open;
102 static d_close_t t4_close;
104 static struct cdevsw t4_cdevsw = {
105 .d_version = D_VERSION,
113 /* ifnet + media interface */
114 static void cxgbe_init(void *);
115 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
116 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
117 static void cxgbe_qflush(struct ifnet *);
118 static int cxgbe_media_change(struct ifnet *);
119 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
121 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
127 static struct mtx t4_list_lock;
128 static SLIST_HEAD(, adapter) t4_list;
130 static struct mtx t4_uld_list_lock;
131 static SLIST_HEAD(, uld_info) t4_uld_list;
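/*
 * Illustrative sketch only (not part of the driver): traversing the adapter
 * list while honoring the lock order documented above -- t4_list_lock first,
 * then the per-adapter lock.  ADAPTER_LOCK/ADAPTER_UNLOCK are assumed to be
 * the usual wrappers around sc->sc_lock; t4_uld_list_lock, if needed, would
 * be taken last.
 */
#ifdef notyet
static void
t4_list_walk_example(void)
{
	struct adapter *sc;

	mtx_lock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		ADAPTER_LOCK(sc);
		/* ... examine or poke sc here ... */
		ADAPTER_UNLOCK(sc);
	}
	mtx_unlock(&t4_list_lock);
}
#endif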
135 * Tunables. See tweak_tunables() too.
139 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
142 static int t4_ntxq10g = -1;
143 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
146 static int t4_nrxq10g = -1;
147 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
150 static int t4_ntxq1g = -1;
151 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
154 static int t4_nrxq1g = -1;
155 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
158 #define NOFLDTXQ_10G 8
159 static int t4_nofldtxq10g = -1;
160 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
162 #define NOFLDRXQ_10G 2
163 static int t4_nofldrxq10g = -1;
164 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
166 #define NOFLDTXQ_1G 2
167 static int t4_nofldtxq1g = -1;
168 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
170 #define NOFLDRXQ_1G 1
171 static int t4_nofldrxq1g = -1;
172 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
176 * Holdoff parameters for 10G and 1G ports.
178 #define TMR_IDX_10G 1
179 static int t4_tmr_idx_10g = TMR_IDX_10G;
180 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
182 #define PKTC_IDX_10G (-1)
183 static int t4_pktc_idx_10g = PKTC_IDX_10G;
184 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
187 static int t4_tmr_idx_1g = TMR_IDX_1G;
188 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
190 #define PKTC_IDX_1G (-1)
191 static int t4_pktc_idx_1g = PKTC_IDX_1G;
192 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
195 * Size (# of entries) of each tx and rx queue.
197 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
198 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
200 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
201 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
204 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
206 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
207 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
210 * Configuration file.
212 static char t4_cfg_file[32] = "default";
213 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
216 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
217 * encouraged respectively).
219 static unsigned int t4_fw_install = 1;
220 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
223 * ASIC features that will be used. Disable the ones you don't want so that the
224 * chip resources aren't wasted on features that will not be used.
226 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
227 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
229 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
230 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
232 static int t4_toecaps_allowed = -1;
233 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
235 static int t4_rdmacaps_allowed = 0;
236 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
238 static int t4_iscsicaps_allowed = 0;
239 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
241 static int t4_fcoecaps_allowed = 0;
242 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
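/*
 * Example (hypothetical values): all of the knobs above are loader tunables,
 * so they are normally set in /boot/loader.conf before the module is loaded,
 * e.g.:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.holdoff_timer_idx_10G="2"
 *	hw.cxgbe.interrupt_types="2"	(MSI only; 4 = MSI-X, 1 = INTx)
 *	hw.cxgbe.config_file="myconfig"
 *	hw.cxgbe.toecaps_allowed="0"
 */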
244 struct intrs_and_queues {
245 int intr_type; /* INTx, MSI, or MSI-X */
246 int nirq; /* Number of vectors */
248 int ntxq10g; /* # of NIC txq's for each 10G port */
249 int nrxq10g; /* # of NIC rxq's for each 10G port */
250 int ntxq1g; /* # of NIC txq's for each 1G port */
251 int nrxq1g; /* # of NIC rxq's for each 1G port */
253 int nofldtxq10g; /* # of TOE txq's for each 10G port */
254 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
255 int nofldtxq1g; /* # of TOE txq's for each 1G port */
256 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
260 struct filter_entry {
261 uint32_t valid:1; /* filter allocated and valid */
262 uint32_t locked:1; /* filter is administratively locked */
263 uint32_t pending:1; /* filter action is pending firmware reply */
264 uint32_t smtidx:8; /* Source MAC Table index for smac */
265 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
	struct t4_filter_specification fs;
};

enum {
	XGMAC_MTU	= (1 << 0),
272 XGMAC_PROMISC = (1 << 1),
273 XGMAC_ALLMULTI = (1 << 2),
274 XGMAC_VLANEX = (1 << 3),
275 XGMAC_UCADDR = (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};
281 static int map_bars(struct adapter *);
282 static void setup_memwin(struct adapter *);
283 static int cfg_itype_and_nqueues(struct adapter *, int, int,
284 struct intrs_and_queues *);
285 static int prep_firmware(struct adapter *);
286 static int upload_config_file(struct adapter *, const struct firmware *,
287 uint32_t *, uint32_t *);
288 static int partition_resources(struct adapter *, const struct firmware *);
289 static int get_params__pre_init(struct adapter *);
290 static int get_params__post_init(struct adapter *);
291 static int set_params__post_init(struct adapter *);
292 static void t4_set_desc(struct adapter *);
293 static void build_medialist(struct port_info *);
294 static int update_mac_settings(struct port_info *, int);
295 static int cxgbe_init_synchronized(struct port_info *);
296 static int cxgbe_uninit_synchronized(struct port_info *);
297 static int setup_intr_handlers(struct adapter *);
298 static int adapter_full_init(struct adapter *);
299 static int adapter_full_uninit(struct adapter *);
300 static int port_full_init(struct port_info *);
301 static int port_full_uninit(struct port_info *);
302 static void quiesce_eq(struct adapter *, struct sge_eq *);
303 static void quiesce_iq(struct adapter *, struct sge_iq *);
304 static void quiesce_fl(struct adapter *, struct sge_fl *);
305 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
306 driver_intr_t *, void *, char *);
307 static int t4_free_irq(struct adapter *, struct irq *);
308 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
310 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
311 static void cxgbe_tick(void *);
312 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
313 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
315 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
316 static int fw_msg_not_handled(struct adapter *, const __be64 *);
317 static int t4_sysctls(struct adapter *);
318 static int cxgbe_sysctls(struct port_info *);
319 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
320 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
321 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
322 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
323 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
324 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
325 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
327 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
328 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
329 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
330 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
331 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
332 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
333 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
334 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
335 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
336 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
337 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
338 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
339 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
340 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
341 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
342 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
343 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
344 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
346 static inline void txq_start(struct ifnet *, struct sge_txq *);
347 static uint32_t fconf_to_mode(uint32_t);
348 static uint32_t mode_to_fconf(uint32_t);
349 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
350 static int get_filter_mode(struct adapter *, uint32_t *);
351 static int set_filter_mode(struct adapter *, uint32_t);
352 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
353 static int get_filter(struct adapter *, struct t4_filter *);
354 static int set_filter(struct adapter *, struct t4_filter *);
355 static int del_filter(struct adapter *, struct t4_filter *);
356 static void clear_filter(struct filter_entry *);
357 static int set_filter_wr(struct adapter *, int);
358 static int del_filter_wr(struct adapter *, int);
359 static int get_sge_context(struct adapter *, struct t4_sge_context *);
360 static int load_fw(struct adapter *, struct t4_data *);
361 static int read_card_mem(struct adapter *, struct t4_mem_range *);
362 static int read_i2c(struct adapter *, struct t4_i2c_data *);
364 static int toe_capability(struct port_info *, int);
366 static int t4_mod_event(module_t, int, void *);
372 {0xa000, "Chelsio Terminator 4 FPGA"},
373 {0x4400, "Chelsio T440-dbg"},
374 {0x4401, "Chelsio T420-CR"},
375 {0x4402, "Chelsio T422-CR"},
376 {0x4403, "Chelsio T440-CR"},
377 {0x4404, "Chelsio T420-BCH"},
378 {0x4405, "Chelsio T440-BCH"},
379 {0x4406, "Chelsio T440-CH"},
380 {0x4407, "Chelsio T420-SO"},
381 {0x4408, "Chelsio T420-CX"},
382 {0x4409, "Chelsio T420-BT"},
383 {0x440a, "Chelsio T404-BT"},
384 {0x440e, "Chelsio T440-LP-CR"},
/*
 * service_iq() is given an iq and needs to locate the associated fl, so the
 * offset of the fl from the iq must be exactly the same in both rxq and
 * ofld_rxq.
 */
392 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
393 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
396 /* No easy way to include t4_msg.h before adapter.h so we check this way */
397 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
398 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
401 t4_probe(device_t dev)
404 uint16_t v = pci_get_vendor(dev);
405 uint16_t d = pci_get_device(dev);
406 uint8_t f = pci_get_function(dev);
408 if (v != PCI_VENDOR_ID_CHELSIO)
411 /* Attach only to PF0 of the FPGA */
412 if (d == 0xa000 && f != 0)
415 for (i = 0; i < nitems(t4_pciids); i++) {
416 if (d == t4_pciids[i].device) {
417 device_set_desc(dev, t4_pciids[i].desc);
418 return (BUS_PROBE_DEFAULT);
426 t4_attach(device_t dev)
429 int rc = 0, i, n10g, n1g, rqidx, tqidx;
430 struct intrs_and_queues iaq;
433 int ofld_rqidx, ofld_tqidx;
436 sc = device_get_softc(dev);
439 pci_enable_busmaster(dev);
440 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
443 pci_set_max_read_req(dev, 4096);
444 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
445 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
446 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
449 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
450 device_get_nameunit(dev));
451 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
452 mtx_lock(&t4_list_lock);
453 SLIST_INSERT_HEAD(&t4_list, sc, link);
454 mtx_unlock(&t4_list_lock);
456 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
457 TAILQ_INIT(&sc->sfl);
458 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
462 goto done; /* error message displayed already */
465 * This is the real PF# to which we're attaching. Works from within PCI
466 * passthrough environments too, where pci_get_function() could return a
467 * different PF# depending on the passthrough configuration. We need to
468 * use the real PF# in all our communication with the firmware.
470 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
473 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
474 sc->an_handler = an_not_handled;
475 for (i = 0; i < nitems(sc->cpl_handler); i++)
476 sc->cpl_handler[i] = cpl_not_handled;
477 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
478 sc->fw_msg_handler[i] = fw_msg_not_handled;
479 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
481 /* Prepare the adapter for operation */
482 rc = -t4_prep_adapter(sc);
484 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
489 * Do this really early, with the memory windows set up even before the
490 * character device. The userland tool's register i/o and mem read
491 * will work even in "recovery mode".
494 sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
495 GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
496 sc->cdev->si_drv1 = sc;
498 /* Go no further if recovery mode has been requested. */
499 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
500 device_printf(dev, "recovery mode.\n");
504 /* Prepare the firmware for operation */
505 rc = prep_firmware(sc);
507 goto done; /* error message displayed already */
509 rc = get_params__pre_init(sc);
511 goto done; /* error message displayed already */
513 rc = t4_sge_init(sc);
515 goto done; /* error message displayed already */
517 if (sc->flags & MASTER_PF) {
518 /* get basic stuff going */
519 rc = -t4_fw_initialize(sc, sc->mbox);
521 device_printf(dev, "early init failed: %d.\n", rc);
526 rc = get_params__post_init(sc);
528 goto done; /* error message displayed already */
530 rc = set_params__post_init(sc);
532 goto done; /* error message displayed already */
534 if (sc->flags & MASTER_PF) {
535 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
537 /* final tweaks to some settings */
539 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
541 /* 4K, 16K, 64K, 256K DDP "page sizes" */
542 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(0) | V_HPZ1(2) |
543 V_HPZ2(4) | V_HPZ3(6));
544 t4_set_reg_field(sc, A_ULP_RX_CTL, F_TDDPTAGTCB, F_TDDPTAGTCB);
545 t4_set_reg_field(sc, A_TP_PARA_REG5,
546 V_INDICATESIZE(M_INDICATESIZE) |
547 F_REARMDDPOFFSET | F_RESETDDPOFFSET,
548 V_INDICATESIZE(indsz) |
549 F_REARMDDPOFFSET | F_RESETDDPOFFSET);
552 * XXX: Verify that we can live with whatever the master driver
553 * has done so far, and hope that it doesn't change any global
554 * setting from underneath us in the future.
558 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
561 for (i = 0; i < NCHAN; i++)
562 sc->params.tp.tx_modq[i] = i;
564 rc = t4_create_dma_tag(sc);
566 goto done; /* error message displayed already */
569 * First pass over all the ports - allocate VIs and initialize some
570 * basic parameters like mac address, port type, etc. We also figure
571 * out whether a port is 10G or 1G and use that information when
572 * calculating how many interrupts to attempt to allocate.
575 for_each_port(sc, i) {
576 struct port_info *pi;
578 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
581 /* These must be set before t4_port_init */
585 /* Allocate the vi and initialize parameters like mac addr */
586 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
588 device_printf(dev, "unable to initialize port %d: %d\n",
595 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
596 device_get_nameunit(dev), i);
597 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
599 if (is_10G_port(pi)) {
601 pi->tmr_idx = t4_tmr_idx_10g;
602 pi->pktc_idx = t4_pktc_idx_10g;
605 pi->tmr_idx = t4_tmr_idx_1g;
606 pi->pktc_idx = t4_pktc_idx_1g;
609 pi->xact_addr_filt = -1;
611 pi->qsize_rxq = t4_qsize_rxq;
612 pi->qsize_txq = t4_qsize_txq;
614 pi->dev = device_add_child(dev, "cxgbe", -1);
615 if (pi->dev == NULL) {
617 "failed to add device for port %d.\n", i);
621 device_set_softc(pi->dev, pi);
625 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
627 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
629 goto done; /* error message displayed already */
631 sc->intr_type = iaq.intr_type;
632 sc->intr_count = iaq.nirq;
633 sc->flags |= iaq.intr_flags;
636 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
637 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
638 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
639 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
640 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
643 if (is_offload(sc)) {
645 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
646 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
647 s->neq += s->nofldtxq + s->nofldrxq;
648 s->niq += s->nofldrxq;
650 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
651 M_CXGBE, M_ZERO | M_WAITOK);
652 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
653 M_CXGBE, M_ZERO | M_WAITOK);
657 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
659 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
661 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
663 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
665 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
668 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
671 t4_init_l2t(sc, M_WAITOK);
674 * Second pass over the ports. This time we know the number of rx and
675 * tx queues that each port should get.
679 ofld_rqidx = ofld_tqidx = 0;
681 for_each_port(sc, i) {
682 struct port_info *pi = sc->port[i];
687 pi->first_rxq = rqidx;
688 pi->first_txq = tqidx;
689 if (is_10G_port(pi)) {
690 pi->nrxq = iaq.nrxq10g;
691 pi->ntxq = iaq.ntxq10g;
693 pi->nrxq = iaq.nrxq1g;
694 pi->ntxq = iaq.ntxq1g;
701 if (is_offload(sc)) {
702 pi->first_ofld_rxq = ofld_rqidx;
703 pi->first_ofld_txq = ofld_tqidx;
704 if (is_10G_port(pi)) {
705 pi->nofldrxq = iaq.nofldrxq10g;
706 pi->nofldtxq = iaq.nofldtxq10g;
708 pi->nofldrxq = iaq.nofldrxq1g;
709 pi->nofldtxq = iaq.nofldtxq1g;
711 ofld_rqidx += pi->nofldrxq;
712 ofld_tqidx += pi->nofldtxq;
717 rc = setup_intr_handlers(sc);
720 "failed to setup interrupt handlers: %d\n", rc);
724 rc = bus_generic_attach(dev);
727 "failed to attach all child ports: %d\n", rc);
732 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
733 sc->params.pci.width, sc->params.nports, sc->intr_count,
734 sc->intr_type == INTR_MSIX ? "MSI-X" :
735 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
736 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
741 if (rc != 0 && sc->cdev) {
742 /* cdev was created and so cxgbetool works; recover that way. */
744 "error during attach, adapter is now in recovery mode.\n");
760 t4_detach(device_t dev)
763 struct port_info *pi;
766 sc = device_get_softc(dev);
768 if (sc->flags & FULL_INIT_DONE)
772 destroy_dev(sc->cdev);
776 rc = bus_generic_detach(dev);
779 "failed to detach child devices: %d\n", rc);
783 for (i = 0; i < sc->intr_count; i++)
784 t4_free_irq(sc, &sc->irq[i]);
786 for (i = 0; i < MAX_NPORTS; i++) {
789 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
791 device_delete_child(dev, pi->dev);
793 mtx_destroy(&pi->pi_lock);
798 if (sc->flags & FULL_INIT_DONE)
799 adapter_full_uninit(sc);
801 if (sc->flags & FW_OK)
802 t4_fw_bye(sc, sc->mbox);
804 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
805 pci_release_msi(dev);
808 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
812 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
816 t4_free_l2t(sc->l2t);
819 free(sc->sge.ofld_rxq, M_CXGBE);
820 free(sc->sge.ofld_txq, M_CXGBE);
822 free(sc->irq, M_CXGBE);
823 free(sc->sge.rxq, M_CXGBE);
824 free(sc->sge.txq, M_CXGBE);
825 free(sc->sge.ctrlq, M_CXGBE);
826 free(sc->sge.iqmap, M_CXGBE);
827 free(sc->sge.eqmap, M_CXGBE);
828 free(sc->tids.ftid_tab, M_CXGBE);
829 t4_destroy_dma_tag(sc);
830 if (mtx_initialized(&sc->sc_lock)) {
831 mtx_lock(&t4_list_lock);
832 SLIST_REMOVE(&t4_list, sc, adapter, link);
833 mtx_unlock(&t4_list_lock);
834 mtx_destroy(&sc->sc_lock);
837 if (mtx_initialized(&sc->tids.ftid_lock))
838 mtx_destroy(&sc->tids.ftid_lock);
839 if (mtx_initialized(&sc->sfl_lock))
840 mtx_destroy(&sc->sfl_lock);
842 bzero(sc, sizeof(*sc));
849 cxgbe_probe(device_t dev)
852 struct port_info *pi = device_get_softc(dev);
854 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
855 device_set_desc_copy(dev, buf);
857 return (BUS_PROBE_DEFAULT);
860 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
861 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
862 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
863 #define T4_CAP_ENABLE (T4_CAP)
866 cxgbe_attach(device_t dev)
868 struct port_info *pi = device_get_softc(dev);
871 /* Allocate an ifnet and set it up */
872 ifp = if_alloc(IFT_ETHER);
874 device_printf(dev, "Cannot allocate ifnet\n");
880 callout_init(&pi->tick, CALLOUT_MPSAFE);
882 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
883 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
885 ifp->if_init = cxgbe_init;
886 ifp->if_ioctl = cxgbe_ioctl;
887 ifp->if_transmit = cxgbe_transmit;
888 ifp->if_qflush = cxgbe_qflush;
890 ifp->if_capabilities = T4_CAP;
892 if (is_offload(pi->adapter))
893 ifp->if_capabilities |= IFCAP_TOE;
895 ifp->if_capenable = T4_CAP_ENABLE;
896 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
897 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
899 /* Initialize ifmedia for this port */
900 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
904 pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
905 EVENTHANDLER_PRI_ANY);
907 ether_ifattach(ifp, pi->hw_addr);
910 if (is_offload(pi->adapter)) {
912 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
913 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
916 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
924 cxgbe_detach(device_t dev)
926 struct port_info *pi = device_get_softc(dev);
927 struct adapter *sc = pi->adapter;
928 struct ifnet *ifp = pi->ifp;
930 /* Tell if_ioctl and if_init that the port is going away */
935 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
938 sc->last_op = "t4detach";
939 sc->last_op_thr = curthread;
944 EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
947 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
948 callout_stop(&pi->tick);
950 callout_drain(&pi->tick);
952 /* Let detach proceed even if these fail. */
953 cxgbe_uninit_synchronized(pi);
954 port_full_uninit(pi);
956 ifmedia_removeall(&pi->media);
957 ether_ifdetach(pi->ifp);
969 cxgbe_init(void *arg)
971 struct port_info *pi = arg;
972 struct adapter *sc = pi->adapter;
974 if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
976 cxgbe_init_synchronized(pi);
977 end_synchronized_op(sc, 0);
981 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
983 int rc = 0, mtu, flags;
984 struct port_info *pi = ifp->if_softc;
985 struct adapter *sc = pi->adapter;
986 struct ifreq *ifr = (struct ifreq *)data;
992 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
995 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
999 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1000 t4_update_fl_bufsize(ifp);
1001 rc = update_mac_settings(pi, XGMAC_MTU);
1003 end_synchronized_op(sc, 0);
1007 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1011 if (ifp->if_flags & IFF_UP) {
1012 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1013 flags = pi->if_flags;
1014 if ((ifp->if_flags ^ flags) &
1015 (IFF_PROMISC | IFF_ALLMULTI)) {
1016 rc = update_mac_settings(pi,
1017 XGMAC_PROMISC | XGMAC_ALLMULTI);
1020 rc = cxgbe_init_synchronized(pi);
1021 pi->if_flags = ifp->if_flags;
1022 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1023 rc = cxgbe_uninit_synchronized(pi);
1024 end_synchronized_op(sc, 0);
1028 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1029 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1032 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1033 rc = update_mac_settings(pi, XGMAC_MCADDRS);
1034 end_synchronized_op(sc, LOCK_HELD);
1038 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1042 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1043 if (mask & IFCAP_TXCSUM) {
1044 ifp->if_capenable ^= IFCAP_TXCSUM;
1045 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1047 if (IFCAP_TSO4 & ifp->if_capenable &&
1048 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1049 ifp->if_capenable &= ~IFCAP_TSO4;
1051 "tso4 disabled due to -txcsum.\n");
1054 if (mask & IFCAP_TXCSUM_IPV6) {
1055 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1056 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1058 if (IFCAP_TSO6 & ifp->if_capenable &&
1059 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1060 ifp->if_capenable &= ~IFCAP_TSO6;
1062 "tso6 disabled due to -txcsum6.\n");
1065 if (mask & IFCAP_RXCSUM)
1066 ifp->if_capenable ^= IFCAP_RXCSUM;
1067 if (mask & IFCAP_RXCSUM_IPV6)
1068 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1071 * Note that we leave CSUM_TSO alone (it is always set). The
1072 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1073 * sending a TSO request our way, so it's sufficient to toggle
1076 if (mask & IFCAP_TSO4) {
1077 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1078 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1079 if_printf(ifp, "enable txcsum first.\n");
1083 ifp->if_capenable ^= IFCAP_TSO4;
1085 if (mask & IFCAP_TSO6) {
1086 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1087 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1088 if_printf(ifp, "enable txcsum6 first.\n");
1092 ifp->if_capenable ^= IFCAP_TSO6;
1094 if (mask & IFCAP_LRO) {
1095 #if defined(INET) || defined(INET6)
1097 struct sge_rxq *rxq;
1099 ifp->if_capenable ^= IFCAP_LRO;
1100 for_each_rxq(pi, i, rxq) {
1101 if (ifp->if_capenable & IFCAP_LRO)
1102 rxq->iq.flags |= IQ_LRO_ENABLED;
1104 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1109 if (mask & IFCAP_TOE) {
1110 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1112 rc = toe_capability(pi, enable);
1116 ifp->if_capenable ^= mask;
1119 if (mask & IFCAP_VLAN_HWTAGGING) {
1120 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1121 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1122 rc = update_mac_settings(pi, XGMAC_VLANEX);
1124 if (mask & IFCAP_VLAN_MTU) {
1125 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1127 /* Need to find out how to disable auto-mtu-inflation */
1129 if (mask & IFCAP_VLAN_HWTSO)
1130 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1131 if (mask & IFCAP_VLAN_HWCSUM)
1132 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1134 #ifdef VLAN_CAPABILITIES
1135 VLAN_CAPABILITIES(ifp);
1138 end_synchronized_op(sc, 0);
1143 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1147 rc = ether_ioctl(ifp, cmd, data);
1154 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1156 struct port_info *pi = ifp->if_softc;
1157 struct adapter *sc = pi->adapter;
1158 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1159 struct buf_ring *br;
1164 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1169 if (m->m_flags & M_FLOWID)
1170 txq += (m->m_pkthdr.flowid % pi->ntxq);
1173 if (TXQ_TRYLOCK(txq) == 0) {
1174 struct sge_eq *eq = &txq->eq;
1177 * It is possible that t4_eth_tx finishes up and releases the
1178 * lock between the TRYLOCK above and the drbr_enqueue here. We
1179 * need to make sure that this mbuf doesn't just sit there in
1183 rc = drbr_enqueue(ifp, br, m);
1184 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1185 !(eq->flags & EQ_DOOMED))
1186 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1191 * txq->m is the mbuf that is held up due to a temporary shortage of
1192 * resources and it should be put on the wire first. Then what's in
1193 * drbr and finally the mbuf that was just passed in to us.
1195 * Return code should indicate the fate of the mbuf that was passed in
1199 TXQ_LOCK_ASSERT_OWNED(txq);
1200 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1202 /* Queued for transmission. */
1204 rc = drbr_enqueue(ifp, br, m);
1205 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1206 (void) t4_eth_tx(ifp, txq, m);
1211 /* Direct transmission. */
1212 rc = t4_eth_tx(ifp, txq, m);
1213 if (rc != 0 && txq->m)
1214 rc = 0; /* held, will be transmitted soon (hopefully) */
1221 cxgbe_qflush(struct ifnet *ifp)
1223 struct port_info *pi = ifp->if_softc;
1224 struct sge_txq *txq;
1228 /* queues do not exist if !PORT_INIT_DONE. */
1229 if (pi->flags & PORT_INIT_DONE) {
1230 for_each_txq(pi, i, txq) {
1234 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1243 cxgbe_media_change(struct ifnet *ifp)
1245 struct port_info *pi = ifp->if_softc;
1247 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1249 return (EOPNOTSUPP);
1253 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1255 struct port_info *pi = ifp->if_softc;
1256 struct ifmedia_entry *cur = pi->media.ifm_cur;
1257 int speed = pi->link_cfg.speed;
1258 int data = (pi->port_type << 8) | pi->mod_type;
1260 if (cur->ifm_data != data) {
1261 build_medialist(pi);
1262 cur = pi->media.ifm_cur;
1265 ifmr->ifm_status = IFM_AVALID;
1266 if (!pi->link_cfg.link_ok)
1269 ifmr->ifm_status |= IFM_ACTIVE;
1271 /* active and current will differ iff current media is autoselect. */
1272 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1275 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1276 if (speed == SPEED_10000)
1277 ifmr->ifm_active |= IFM_10G_T;
1278 else if (speed == SPEED_1000)
1279 ifmr->ifm_active |= IFM_1000_T;
1280 else if (speed == SPEED_100)
1281 ifmr->ifm_active |= IFM_100_TX;
1282 else if (speed == SPEED_10)
1283 ifmr->ifm_active |= IFM_10_T;
1285 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1290 t4_fatal_err(struct adapter *sc)
1292 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1293 t4_intr_disable(sc);
1294 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1295 device_get_nameunit(sc->dev));
1299 map_bars(struct adapter *sc)
1301 sc->regs_rid = PCIR_BAR(0);
1302 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1303 &sc->regs_rid, RF_ACTIVE);
1304 if (sc->regs_res == NULL) {
1305 device_printf(sc->dev, "cannot map registers.\n");
1308 sc->bt = rman_get_bustag(sc->regs_res);
1309 sc->bh = rman_get_bushandle(sc->regs_res);
1310 sc->mmio_len = rman_get_size(sc->regs_res);
1312 sc->msix_rid = PCIR_BAR(4);
1313 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1314 &sc->msix_rid, RF_ACTIVE);
1315 if (sc->msix_res == NULL) {
1316 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1324 setup_memwin(struct adapter *sc)
1329 * Read low 32b of bar0 indirectly via the hardware backdoor mechanism.
1330 * Works from within PCI passthrough environments too, where
1331 * rman_get_start() can return a different value. We need to program
1332 * the memory window decoders with the actual addresses that will be
1333 * coming across the PCIe link.
1335 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1336 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1338 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1339 (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1340 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1342 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1343 (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1344 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1346 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1347 (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1348 V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1351 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1355 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1356 struct intrs_and_queues *iaq)
1358 int rc, itype, navail, nrxq10g, nrxq1g, n;
1359 int nofldrxq10g = 0, nofldrxq1g = 0;
1361 bzero(iaq, sizeof(*iaq));
1363 iaq->ntxq10g = t4_ntxq10g;
1364 iaq->ntxq1g = t4_ntxq1g;
1365 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1366 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1368 if (is_offload(sc)) {
1369 iaq->nofldtxq10g = t4_nofldtxq10g;
1370 iaq->nofldtxq1g = t4_nofldtxq1g;
1371 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1372 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1376 for (itype = INTR_MSIX; itype; itype >>= 1) {
1378 if ((itype & t4_intr_types) == 0)
1379 continue; /* not allowed */
1381 if (itype == INTR_MSIX)
1382 navail = pci_msix_count(sc->dev);
1383 else if (itype == INTR_MSI)
1384 navail = pci_msi_count(sc->dev);
1391 iaq->intr_type = itype;
1392 iaq->intr_flags = 0;
1395 * Best option: an interrupt vector for errors, one for the
1396 * firmware event queue, and one each for each rxq (NIC as well
1399 iaq->nirq = T4_EXTRA_INTR;
1400 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1401 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
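		/*
		 * Worked example (hypothetical card, assuming T4_EXTRA_INTR
		 * is 2 for the error and firmware event queue interrupts):
		 * 2 x 10G ports with nrxq10g = 8 and nofldrxq10g = 2 need
		 * nirq = 2 + 2 * (8 + 2) = 22 vectors for this best option.
		 */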
1402 if (iaq->nirq <= navail &&
1403 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1404 iaq->intr_flags |= INTR_DIRECT;
1409 * Second best option: an interrupt vector for errors, one for
1410 * the firmware event queue, and one each for either NIC or
1413 iaq->nirq = T4_EXTRA_INTR;
1414 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1415 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1416 if (iaq->nirq <= navail &&
1417 (itype != INTR_MSI || powerof2(iaq->nirq)))
1421 * Next best option: an interrupt vector for errors, one for the
1422 * firmware event queue, and at least one per port. At this
1423 * point we know we'll have to downsize nrxq or nofldrxq to fit
1424 * what's available to us.
1426 iaq->nirq = T4_EXTRA_INTR;
1427 iaq->nirq += n10g + n1g;
1428 if (iaq->nirq <= navail) {
1429 int leftover = navail - iaq->nirq;
1432 int target = max(nrxq10g, nofldrxq10g);
1435 while (n < target && leftover >= n10g) {
1440 iaq->nrxq10g = min(n, nrxq10g);
1443 iaq->nofldrxq10g = min(n, nofldrxq10g);
1448 int target = max(nrxq1g, nofldrxq1g);
1451 while (n < target && leftover >= n1g) {
1456 iaq->nrxq1g = min(n, nrxq1g);
1459 iaq->nofldrxq1g = min(n, nofldrxq1g);
1463 if (itype != INTR_MSI || powerof2(iaq->nirq))
1468 * Least desirable option: one interrupt vector for everything.
1470 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1473 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1479 if (itype == INTR_MSIX)
1480 rc = pci_alloc_msix(sc->dev, &navail);
1481 else if (itype == INTR_MSI)
1482 rc = pci_alloc_msi(sc->dev, &navail);
1485 if (navail == iaq->nirq)
1489 * Didn't get the number requested. Use whatever number
1490 * the kernel is willing to allocate (it's in navail).
1492 device_printf(sc->dev, "fewer vectors than requested, "
1493 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1494 itype, iaq->nirq, navail);
1495 pci_release_msi(sc->dev);
1499 device_printf(sc->dev,
1500 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1501 itype, rc, iaq->nirq, navail);
1504 device_printf(sc->dev,
1505 "failed to find a usable interrupt type. "
1506 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1507 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1513 * Is the given firmware compatible with the one the driver was compiled with?
1516 fw_compatible(const struct fw_hdr *hdr)
1519 if (hdr->fw_ver == htonl(FW_VERSION))
1523 * XXX: Is this too conservative? Perhaps I should limit this to the
1524 * features that are supported in the driver.
1526 if (hdr->intfver_nic == FW_HDR_INTFVER_NIC &&
1527 hdr->intfver_vnic == FW_HDR_INTFVER_VNIC &&
1528 hdr->intfver_ofld == FW_HDR_INTFVER_OFLD &&
1529 hdr->intfver_ri == FW_HDR_INTFVER_RI &&
1530 hdr->intfver_iscsipdu == FW_HDR_INTFVER_ISCSIPDU &&
1531 hdr->intfver_iscsi == FW_HDR_INTFVER_ISCSI &&
1532 hdr->intfver_fcoepdu == FW_HDR_INTFVER_FCOEPDU &&
	    hdr->intfver_fcoe == FW_HDR_INTFVER_FCOE)
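/*
 * For reference (a sketch, not something the driver relies on at runtime):
 * FW_VERSION itself is assembled from the FW_VERSION_MAJOR/MINOR/MICRO/BUILD
 * constants using the V_FW_HDR_FW_VER_* field macros, roughly:
 *
 *	V_FW_HDR_FW_VER_MAJOR(FW_VERSION_MAJOR) |
 *	V_FW_HDR_FW_VER_MINOR(FW_VERSION_MINOR) |
 *	V_FW_HDR_FW_VER_MICRO(FW_VERSION_MICRO) |
 *	V_FW_HDR_FW_VER_BUILD(FW_VERSION_BUILD)
 *
 * which is why the exact-match check against htonl(FW_VERSION) at the top of
 * this function works.
 */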
1540 * Install a compatible firmware (if required), establish contact with it (by
1541 * saying hello), and reset the device. If we end up as the master driver,
1542 * partition adapter resources by providing a configuration file to the
1546 prep_firmware(struct adapter *sc)
1548 const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
1549 int rc, card_fw_usable, kld_fw_usable;
1550 enum dev_state state;
1551 struct fw_hdr *card_fw;
1552 const struct fw_hdr *kld_fw;
1554 default_cfg = firmware_get(T4_CFGNAME);
1556 /* Read the header of the firmware on the card */
1557 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
1558 rc = -t4_read_flash(sc, FLASH_FW_START,
1559 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
1561 card_fw_usable = fw_compatible((const void*)card_fw);
1563 device_printf(sc->dev,
1564 "Unable to read card's firmware header: %d\n", rc);
1568 /* This is the firmware in the KLD */
1569 fw = firmware_get(T4_FWNAME);
1571 kld_fw = (const void *)fw->data;
1572 kld_fw_usable = fw_compatible(kld_fw);
1579 * Short circuit for the common case: the firmware on the card is an
1580 * exact match and the KLD is an exact match too, or it's
1581 * absent/incompatible, or we're prohibited from using it. Note that
1582 * t4_fw_install = 2 is ignored here -- use cxgbetool loadfw if you want
1583 * to reinstall the same firmware as the one on the card.
1585 if (card_fw_usable && card_fw->fw_ver == htonl(FW_VERSION) &&
1586 (!kld_fw_usable || kld_fw->fw_ver == htonl(FW_VERSION) ||
1587 t4_fw_install == 0))
1590 if (kld_fw_usable && (!card_fw_usable ||
1591 ntohl(kld_fw->fw_ver) > ntohl(card_fw->fw_ver) ||
1592 (t4_fw_install == 2 && kld_fw->fw_ver != card_fw->fw_ver))) {
1593 uint32_t v = ntohl(kld_fw->fw_ver);
1595 device_printf(sc->dev,
1596 "installing firmware %d.%d.%d.%d on card.\n",
1597 G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
1598 G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
1600 rc = -t4_load_fw(sc, fw->data, fw->datasize);
1602 device_printf(sc->dev,
1603 "failed to install firmware: %d\n", rc);
1607 /* Installed successfully, update the cached header too. */
1608 memcpy(card_fw, kld_fw, sizeof(*card_fw));
1612 if (!card_fw_usable) {
1615 c = ntohl(card_fw->fw_ver);
1616 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
1618 device_printf(sc->dev, "Cannot find a usable firmware: "
1619 "fw_install %d, driver compiled with %d.%d.%d.%d, "
1620 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
1622 G_FW_HDR_FW_VER_MAJOR(FW_VERSION),
1623 G_FW_HDR_FW_VER_MINOR(FW_VERSION),
1624 G_FW_HDR_FW_VER_MICRO(FW_VERSION),
1625 G_FW_HDR_FW_VER_BUILD(FW_VERSION),
1626 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1627 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
1628 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1629 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1634 /* We're using whatever's on the card and it's known to be good. */
1635 sc->params.fw_vers = ntohl(card_fw->fw_ver);
1636 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
1637 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
1638 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
1639 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
1640 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
1642 /* Contact firmware. */
1643 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1646 device_printf(sc->dev,
1647 "failed to connect to the firmware: %d.\n", rc);
1651 sc->flags |= MASTER_PF;
1654 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1656 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
1657 if (rc != ETIMEDOUT && rc != EIO)
1658 t4_fw_bye(sc, sc->mbox);
1662 /* Partition adapter resources as specified in the config file. */
1663 if (sc->flags & MASTER_PF) {
1664 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s",
1665 pci_get_device(sc->dev) == 0x440a ? "uwire" : t4_cfg_file);
1666 if (strncmp(sc->cfg_file, "default", sizeof(sc->cfg_file))) {
1669 snprintf(s, sizeof(s), "t4fw_cfg_%s", sc->cfg_file);
1670 cfg = firmware_get(s);
1672 device_printf(sc->dev,
1673 "unable to locate %s module, "
1674 "will use default config file.\n", s);
1675 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
1680 rc = partition_resources(sc, cfg ? cfg : default_cfg);
1682 goto done; /* error message displayed already */
1684 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "notme");
1685 sc->cfcsum = (u_int)-1;
1691 free(card_fw, M_CXGBE);
1693 firmware_put(fw, FIRMWARE_UNLOAD);
1695 firmware_put(cfg, FIRMWARE_UNLOAD);
1696 if (default_cfg != NULL)
1697 firmware_put(default_cfg, FIRMWARE_UNLOAD);
1702 #define FW_PARAM_DEV(param) \
1703 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1704 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1705 #define FW_PARAM_PFVF(param) \
1706 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1707 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
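/*
 * Usage sketch: this is how a single device parameter is queried with the
 * helpers above (it mirrors the calls made later in this file).
 *
 *	uint32_t param = FW_PARAM_DEV(PORTVEC), val;
 *
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *	if (rc == 0)
 *		... val now holds the port vector ...
 */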
1710 * Upload configuration file to card's memory.
1713 upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
1717 uint32_t param, val, mtype, maddr, bar, off, win, remaining;
1720 /* Figure out where the firmware wants us to upload it. */
1721 param = FW_PARAM_DEV(CF);
1722 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
1724 /* Firmwares without config file support will fail this way */
1725 device_printf(sc->dev,
1726 "failed to query config file location: %d.\n", rc);
1729 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1730 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1733 device_printf(sc->dev,
1734 "cannot upload config file (type %u, addr %x).\n",
1739 /* Translate mtype/maddr to an address suitable for the PCIe window */
1740 val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1741 val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
1743 case FW_MEMTYPE_CF_EDC0:
1744 if (!(val & F_EDRAM0_ENABLE))
1746 bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1747 maddr += G_EDRAM0_BASE(bar) << 20;
1750 case FW_MEMTYPE_CF_EDC1:
1751 if (!(val & F_EDRAM1_ENABLE))
1753 bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1754 maddr += G_EDRAM1_BASE(bar) << 20;
1757 case FW_MEMTYPE_CF_EXTMEM:
1758 if (!(val & F_EXT_MEM_ENABLE))
1760 bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1761 maddr += G_EXT_MEM_BASE(bar) << 20;
1766 device_printf(sc->dev,
1767 "cannot upload config file (type %u, enabled %u).\n",
1773 * Position the PCIe window (we use memwin2) to the 16B aligned area
1774 * just at/before the upload location.
1777 off = maddr - win; /* offset from the start of the window. */
1778 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
1779 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
1781 remaining = fw->datasize;
1782 if (remaining > FLASH_CFG_MAX_SIZE ||
1783 remaining > MEMWIN2_APERTURE - off) {
1784 device_printf(sc->dev, "cannot upload config file all at once "
1785 "(size %u, max %u, room %u).\n",
1786 remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
1791 * XXX: sheer laziness. We deliberately added 4 bytes of useless
1792 * stuffing/comments at the end of the config file so it's ok to simply
1793 * throw away the last remaining bytes when the config file is not an
1794 * exact multiple of 4.
1797 for (i = 0; remaining >= 4; i += 4, remaining -= 4)
1798 t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);
1804 * Partition chip resources for use between various PFs, VFs, etc. This is done
1805 * by uploading the firmware configuration file to the adapter and instructing
1806 * the firmware to process it.
1809 partition_resources(struct adapter *sc, const struct firmware *cfg)
1812 struct fw_caps_config_cmd caps;
1813 uint32_t mtype, maddr, finicsum, cfcsum;
1815 rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
1817 mtype = FW_MEMTYPE_CF_FLASH;
1818 maddr = t4_flash_cfg_addr(sc);
1821 bzero(&caps, sizeof(caps));
1822 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1823 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1824 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1825 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1826 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1827 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1829 device_printf(sc->dev,
1830 "failed to pre-process config file: %d.\n", rc);
1834 finicsum = be32toh(caps.finicsum);
1835 cfcsum = be32toh(caps.cfcsum);
1836 if (finicsum != cfcsum) {
1837 device_printf(sc->dev,
1838 "WARNING: config file checksum mismatch: %08x %08x\n",
1841 sc->cfcsum = cfcsum;
1843 #define LIMIT_CAPS(x) do { \
1844 caps.x &= htobe16(t4_##x##_allowed); \
1845 sc->x = htobe16(caps.x); \
1849 * Let the firmware know what features will (not) be used so it can tune
1850 * things accordingly.
1852 LIMIT_CAPS(linkcaps);
1853 LIMIT_CAPS(niccaps);
1854 LIMIT_CAPS(toecaps);
1855 LIMIT_CAPS(rdmacaps);
1856 LIMIT_CAPS(iscsicaps);
1857 LIMIT_CAPS(fcoecaps);
1860 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1861 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1862 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1863 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
1865 device_printf(sc->dev,
1866 "failed to process config file: %d.\n", rc);
1874 * Retrieve parameters that are needed (or nice to have) prior to calling
1875 * t4_sge_init and t4_fw_initialize.
1878 get_params__pre_init(struct adapter *sc)
1881 uint32_t param[2], val[2];
1882 struct fw_devlog_cmd cmd;
1883 struct devlog_params *dlog = &sc->params.devlog;
1885 param[0] = FW_PARAM_DEV(PORTVEC);
1886 param[1] = FW_PARAM_DEV(CCLK);
1887 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1889 device_printf(sc->dev,
1890 "failed to query parameters (pre_init): %d.\n", rc);
1894 sc->params.portvec = val[0];
1895 sc->params.nports = bitcount32(val[0]);
1896 sc->params.vpd.cclk = val[1];
1898 /* Read device log parameters. */
1899 bzero(&cmd, sizeof(cmd));
1900 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1901 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1902 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
1903 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
1905 device_printf(sc->dev,
1906 "failed to get devlog parameters: %d.\n", rc);
1907 bzero(dlog, sizeof (*dlog));
1908 rc = 0; /* devlog isn't critical for device operation */
1910 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
1911 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1912 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1913 dlog->size = be32toh(cmd.memsize_devlog);
1920 * Retrieve various parameters that are of interest to the driver. The device
1921 * has been initialized by the firmware at this point.
1924 get_params__post_init(struct adapter *sc)
1927 uint32_t param[7], val[7];
1928 struct fw_caps_config_cmd caps;
1930 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1931 param[1] = FW_PARAM_PFVF(EQ_START);
1932 param[2] = FW_PARAM_PFVF(FILTER_START);
1933 param[3] = FW_PARAM_PFVF(FILTER_END);
1934 param[4] = FW_PARAM_PFVF(L2T_START);
1935 param[5] = FW_PARAM_PFVF(L2T_END);
1936 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1938 device_printf(sc->dev,
1939 "failed to query parameters (post_init): %d.\n", rc);
1943 sc->sge.iq_start = val[0];
1944 sc->sge.eq_start = val[1];
1945 sc->tids.ftid_base = val[2];
1946 sc->tids.nftids = val[3] - val[2] + 1;
1947 sc->vres.l2t.start = val[4];
1948 sc->vres.l2t.size = val[5] - val[4] + 1;
1949 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
1950 ("%s: L2 table size (%u) larger than expected (%u)",
1951 __func__, sc->vres.l2t.size, L2T_SIZE));
	/* get capabilities */
1954 bzero(&caps, sizeof(caps));
1955 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1956 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1957 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
1958 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
1960 device_printf(sc->dev,
1961 "failed to get card capabilities: %d.\n", rc);
1966 /* query offload-related parameters */
1967 param[0] = FW_PARAM_DEV(NTID);
1968 param[1] = FW_PARAM_PFVF(SERVER_START);
1969 param[2] = FW_PARAM_PFVF(SERVER_END);
1970 param[3] = FW_PARAM_PFVF(TDDP_START);
1971 param[4] = FW_PARAM_PFVF(TDDP_END);
1972 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1973 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1975 device_printf(sc->dev,
1976 "failed to query TOE parameters: %d.\n", rc);
1979 sc->tids.ntids = val[0];
1980 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1981 sc->tids.stid_base = val[1];
1982 sc->tids.nstids = val[2] - val[1] + 1;
1983 sc->vres.ddp.start = val[3];
1984 sc->vres.ddp.size = val[4] - val[3] + 1;
1985 sc->params.ofldq_wr_cred = val[5];
1986 sc->params.offload = 1;
1988 if (caps.rdmacaps) {
1989 param[0] = FW_PARAM_PFVF(STAG_START);
1990 param[1] = FW_PARAM_PFVF(STAG_END);
1991 param[2] = FW_PARAM_PFVF(RQ_START);
1992 param[3] = FW_PARAM_PFVF(RQ_END);
1993 param[4] = FW_PARAM_PFVF(PBL_START);
1994 param[5] = FW_PARAM_PFVF(PBL_END);
1995 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1997 device_printf(sc->dev,
1998 "failed to query RDMA parameters(1): %d.\n", rc);
2001 sc->vres.stag.start = val[0];
2002 sc->vres.stag.size = val[1] - val[0] + 1;
2003 sc->vres.rq.start = val[2];
2004 sc->vres.rq.size = val[3] - val[2] + 1;
2005 sc->vres.pbl.start = val[4];
2006 sc->vres.pbl.size = val[5] - val[4] + 1;
2008 param[0] = FW_PARAM_PFVF(SQRQ_START);
2009 param[1] = FW_PARAM_PFVF(SQRQ_END);
2010 param[2] = FW_PARAM_PFVF(CQ_START);
2011 param[3] = FW_PARAM_PFVF(CQ_END);
2012 param[4] = FW_PARAM_PFVF(OCQ_START);
2013 param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2016 device_printf(sc->dev,
2017 "failed to query RDMA parameters(2): %d.\n", rc);
2020 sc->vres.qp.start = val[0];
2021 sc->vres.qp.size = val[1] - val[0] + 1;
2022 sc->vres.cq.start = val[2];
2023 sc->vres.cq.size = val[3] - val[2] + 1;
2024 sc->vres.ocq.start = val[4];
2025 sc->vres.ocq.size = val[5] - val[4] + 1;
2027 if (caps.iscsicaps) {
2028 param[0] = FW_PARAM_PFVF(ISCSI_START);
2029 param[1] = FW_PARAM_PFVF(ISCSI_END);
2030 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2032 device_printf(sc->dev,
2033 "failed to query iSCSI parameters: %d.\n", rc);
2036 sc->vres.iscsi.start = val[0];
2037 sc->vres.iscsi.size = val[1] - val[0] + 1;
2040 /* These are finalized by FW initialization, load their values now */
2041 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
2042 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
2043 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
2044 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
2050 set_params__post_init(struct adapter *sc)
2052 uint32_t param, val;
2055 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2056 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
2058 /* ask for encapsulated CPLs */
2059 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2061 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
2063 device_printf(sc->dev,
2064 "failed to set parameter (post_init): %d.\n", rc);
2067 } else if (rc != FW_EINVAL) {
2068 device_printf(sc->dev,
2069 "failed to check for encapsulated CPLs: %d.\n", rc);
2071 rc = 0; /* the firmware doesn't support the param, no worries */
2076 #undef FW_PARAM_PFVF
2080 t4_set_desc(struct adapter *sc)
2083 struct adapter_params *p = &sc->params;
2085 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2086 p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
2088 device_set_desc_copy(sc->dev, buf);
2092 build_medialist(struct port_info *pi)
2094 struct ifmedia *media = &pi->media;
2099 ifmedia_removeall(media);
2101 m = IFM_ETHER | IFM_FDX;
2102 data = (pi->port_type << 8) | pi->mod_type;
2104 switch(pi->port_type) {
2105 case FW_PORT_TYPE_BT_XFI:
2106 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2109 case FW_PORT_TYPE_BT_XAUI:
2110 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2113 case FW_PORT_TYPE_BT_SGMII:
2114 ifmedia_add(media, m | IFM_1000_T, data, NULL);
2115 ifmedia_add(media, m | IFM_100_TX, data, NULL);
2116 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2117 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2120 case FW_PORT_TYPE_CX4:
2121 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2122 ifmedia_set(media, m | IFM_10G_CX4);
2125 case FW_PORT_TYPE_SFP:
2126 case FW_PORT_TYPE_FIBER_XFI:
2127 case FW_PORT_TYPE_FIBER_XAUI:
2128 switch (pi->mod_type) {
2130 case FW_PORT_MOD_TYPE_LR:
2131 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2132 ifmedia_set(media, m | IFM_10G_LR);
2135 case FW_PORT_MOD_TYPE_SR:
2136 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2137 ifmedia_set(media, m | IFM_10G_SR);
2140 case FW_PORT_MOD_TYPE_LRM:
2141 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2142 ifmedia_set(media, m | IFM_10G_LRM);
2145 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2146 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2147 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2148 ifmedia_set(media, m | IFM_10G_TWINAX);
2151 case FW_PORT_MOD_TYPE_NONE:
2153 ifmedia_add(media, m | IFM_NONE, data, NULL);
2154 ifmedia_set(media, m | IFM_NONE);
2157 case FW_PORT_MOD_TYPE_NA:
2158 case FW_PORT_MOD_TYPE_ER:
2160 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2161 ifmedia_set(media, m | IFM_UNKNOWN);
2166 case FW_PORT_TYPE_KX4:
2167 case FW_PORT_TYPE_KX:
2168 case FW_PORT_TYPE_KR:
2170 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2171 ifmedia_set(media, m | IFM_UNKNOWN);
2178 #define FW_MAC_EXACT_CHUNK 7
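/*
 * With FW_MAC_EXACT_CHUNK == 7, the XGMAC_MCADDRS path below hands the
 * firmware exact-match multicast addresses seven at a time (plus one final
 * partial chunk); addresses that can't get an exact slot are accumulated
 * into 'hash' and programmed via t4_set_addr_hash() at the end.
 */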
2181 * Program the port's XGMAC based on parameters in ifnet. The caller also
2182 * indicates which parameters should be programmed (the rest are left alone).
2185 update_mac_settings(struct port_info *pi, int flags)
2188 struct ifnet *ifp = pi->ifp;
2189 struct adapter *sc = pi->adapter;
2190 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2192 ASSERT_SYNCHRONIZED_OP(sc);
2193 KASSERT(flags, ("%s: not told what to update.", __func__));
2195 if (flags & XGMAC_MTU)
2198 if (flags & XGMAC_PROMISC)
2199 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2201 if (flags & XGMAC_ALLMULTI)
2202 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2204 if (flags & XGMAC_VLANEX)
2205 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2207 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2210 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2214 if (flags & XGMAC_UCADDR) {
2215 uint8_t ucaddr[ETHER_ADDR_LEN];
2217 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2218 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2219 ucaddr, true, true);
2222 if_printf(ifp, "change_mac failed: %d\n", rc);
2225 pi->xact_addr_filt = rc;
2230 if (flags & XGMAC_MCADDRS) {
2231 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2234 struct ifmultiaddr *ifma;
2237 if_maddr_rlock(ifp);
2238 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2239 if (ifma->ifma_addr->sa_family != AF_LINK)
2242 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2244 if (i == FW_MAC_EXACT_CHUNK) {
2245 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2246 del, i, mcaddr, NULL, &hash, 0);
2249 for (j = 0; j < i; j++) {
2251 "failed to add mc address"
2253 "%02x:%02x:%02x rc=%d\n",
2254 mcaddr[j][0], mcaddr[j][1],
2255 mcaddr[j][2], mcaddr[j][3],
2256 mcaddr[j][4], mcaddr[j][5],
2266 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2267 del, i, mcaddr, NULL, &hash, 0);
2270 for (j = 0; j < i; j++) {
2272 "failed to add mc address"
2274 "%02x:%02x:%02x rc=%d\n",
2275 mcaddr[j][0], mcaddr[j][1],
2276 mcaddr[j][2], mcaddr[j][3],
2277 mcaddr[j][4], mcaddr[j][5],
2284 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2286 if_printf(ifp, "failed to set mc address hash: %d", rc);
2288 if_maddr_runlock(ifp);
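/*
 * Mark the adapter busy for a synchronized operation.  flags may include
 * SLEEP_OK, INTR_OK, and HOLD_LOCK; the operation is ended with a matching
 * call to end_synchronized_op.
 */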
2295 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2301 /* the caller thinks it's ok to sleep, but is it really? */
2302 if (flags & SLEEP_OK)
2303 pause("t4slptst", 1);
2314 if (pi && IS_DOOMED(pi)) {
2324 if (!(flags & SLEEP_OK)) {
2329 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2335 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2338 sc->last_op = wmesg;
2339 sc->last_op_thr = curthread;
2343 if (!(flags & HOLD_LOCK) || rc)
2350 end_synchronized_op(struct adapter *sc, int flags)
2353 if (flags & LOCK_HELD)
2354 ADAPTER_LOCK_ASSERT_OWNED(sc);
2358 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2365 cxgbe_init_synchronized(struct port_info *pi)
2367 struct adapter *sc = pi->adapter;
2368 struct ifnet *ifp = pi->ifp;
2371 ASSERT_SYNCHRONIZED_OP(sc);
2373 if (isset(&sc->open_device_map, pi->port_id)) {
2374 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2375 ("mismatch between open_device_map and if_drv_flags"));
2376 return (0); /* already running */
2379 if (!(sc->flags & FULL_INIT_DONE) &&
2380 ((rc = adapter_full_init(sc)) != 0))
2381 return (rc); /* error message displayed already */
2383 if (!(pi->flags & PORT_INIT_DONE) &&
2384 ((rc = port_full_init(pi)) != 0))
2385 return (rc); /* error message displayed already */
2387 rc = update_mac_settings(pi, XGMAC_ALL);
2389 goto done; /* error message displayed already */
2391 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2393 if_printf(ifp, "start_link failed: %d\n", rc);
2397 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2399 if_printf(ifp, "enable_vi failed: %d\n", rc);
2404 setbit(&sc->open_device_map, pi->port_id);
2406 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2409 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2412 cxgbe_uninit_synchronized(pi);
2421 cxgbe_uninit_synchronized(struct port_info *pi)
2423 struct adapter *sc = pi->adapter;
2424 struct ifnet *ifp = pi->ifp;
2427 ASSERT_SYNCHRONIZED_OP(sc);
2430 * Disable the VI so that all its data in either direction is discarded
2431 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2432 * tick) intact as the TP can deliver negative advice or data that it's
2433 * holding in its RAM (for an offloaded connection) even after the VI is shut down.
2436 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2438 if_printf(ifp, "disable_vi failed: %d\n", rc);
2442 clrbit(&sc->open_device_map, pi->port_id);
2444 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2447 pi->link_cfg.link_ok = 0;
2448 pi->link_cfg.speed = 0;
2449 t4_os_link_changed(sc, pi->port_id, 0);
2455 * It is ok for this function to fail midway and return right away. t4_detach
2456 * will walk the entire sc->irq list and clean up whatever is valid.
2459 setup_intr_handlers(struct adapter *sc)
2464 struct port_info *pi;
2465 struct sge_rxq *rxq;
2467 struct sge_ofld_rxq *ofld_rxq;
2474 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2475 if (sc->intr_count == 1) {
2476 KASSERT(!(sc->flags & INTR_DIRECT),
2477 ("%s: single interrupt && INTR_DIRECT?", __func__));
2479 rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2483 /* Multiple interrupts. */
2484 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2485 ("%s: too few intr.", __func__));
2487 /* The first one is always error intr */
2488 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
2494 /* The second one is always the firmware event queue */
2495 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
2503 * Note that if INTR_DIRECT is not set then either the NIC rx
2504 * queues or (exclusive or) the TOE rx queues will be taking
2505 * direct interrupts.
2507 * There is no need to check for is_offload(sc) as nofldrxq
2508 * will be 0 if offload is disabled.
2510 for_each_port(sc, p) {
2515 * Skip over the NIC queues if they aren't taking direct interrupts.
2518 if (!(sc->flags & INTR_DIRECT) &&
2519 pi->nofldrxq > pi->nrxq)
2522 rxq = &sc->sge.rxq[pi->first_rxq];
2523 for (q = 0; q < pi->nrxq; q++, rxq++) {
2524 snprintf(s, sizeof(s), "%d.%d", p, q);
2525 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
2535 * Skip over the offload queues if they aren't taking
2536 * direct interrupts.
2538 if (!(sc->flags & INTR_DIRECT))
2541 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
2542 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
2543 snprintf(s, sizeof(s), "%d,%d", p, q);
2544 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
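/*
 * First-time setup of adapter-wide resources: the queues that belong to
 * the adapter itself and the per-channel taskqueues.  Runs before the
 * first port is brought up (FULL_INIT_DONE is set on success).
 */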
2559 adapter_full_init(struct adapter *sc)
2563 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2564 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
2565 ("%s: FULL_INIT_DONE already", __func__));
2568 * queues that belong to the adapter (not any particular port).
2570 rc = t4_setup_adapter_queues(sc);
2574 for (i = 0; i < nitems(sc->tq); i++) {
2575 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
2576 taskqueue_thread_enqueue, &sc->tq[i]);
2577 if (sc->tq[i] == NULL) {
2578 device_printf(sc->dev,
2579 "failed to allocate task queue %d\n", i);
2583 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
2584 device_get_nameunit(sc->dev), i);
2588 sc->flags |= FULL_INIT_DONE;
2591 adapter_full_uninit(sc);
2597 adapter_full_uninit(struct adapter *sc)
2601 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2603 t4_teardown_adapter_queues(sc);
2605 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
2606 taskqueue_free(sc->tq[i]);
2610 sc->flags &= ~FULL_INIT_DONE;
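/*
 * First-time setup of per-port resources: the port's tx/rx/fl queues and
 * its RSS indirection table (PORT_INIT_DONE is set on success).
 */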
2616 port_full_init(struct port_info *pi)
2618 struct adapter *sc = pi->adapter;
2619 struct ifnet *ifp = pi->ifp;
2621 struct sge_rxq *rxq;
2624 ASSERT_SYNCHRONIZED_OP(sc);
2625 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
2626 ("%s: PORT_INIT_DONE already", __func__));
2628 sysctl_ctx_init(&pi->ctx);
2629 pi->flags |= PORT_SYSCTL_CTX;
2632 * Allocate tx/rx/fl queues for this port.
2634 rc = t4_setup_port_queues(pi);
2636 goto done; /* error message displayed already */
2639 * Setup RSS for this port.
2641 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
2643 for_each_rxq(pi, i, rxq) {
2644 rss[i] = rxq->iq.abs_id;
2646 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2647 pi->rss_size, rss, pi->nrxq);
2650 if_printf(ifp, "rss_config failed: %d\n", rc);
2654 pi->flags |= PORT_INIT_DONE;
2657 port_full_uninit(pi);
2666 port_full_uninit(struct port_info *pi)
2668 struct adapter *sc = pi->adapter;
2670 struct sge_rxq *rxq;
2671 struct sge_txq *txq;
2673 struct sge_ofld_rxq *ofld_rxq;
2674 struct sge_wrq *ofld_txq;
2677 if (pi->flags & PORT_INIT_DONE) {
2679 /* Need to quiesce queues. XXX: ctrl queues? */
2681 for_each_txq(pi, i, txq) {
2682 quiesce_eq(sc, &txq->eq);
2686 for_each_ofld_txq(pi, i, ofld_txq) {
2687 quiesce_eq(sc, &ofld_txq->eq);
2691 for_each_rxq(pi, i, rxq) {
2692 quiesce_iq(sc, &rxq->iq);
2693 quiesce_fl(sc, &rxq->fl);
2697 for_each_ofld_rxq(pi, i, ofld_rxq) {
2698 quiesce_iq(sc, &ofld_rxq->iq);
2699 quiesce_fl(sc, &ofld_rxq->fl);
2704 t4_teardown_port_queues(pi);
2705 pi->flags &= ~PORT_INIT_DONE;
2711 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
2714 eq->flags |= EQ_DOOMED;
2717 * Wait for the response to a credit flush if one's pending.
2720 while (eq->flags & EQ_CRFLUSHED)
2721 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
2724 callout_drain(&eq->tx_callout); /* XXX: iffy */
2725 pause("callout", 10); /* Still iffy */
2727 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
2731 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
2733 (void) sc; /* unused */
2735 /* Synchronize with the interrupt handler */
2736 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
2741 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
2743 mtx_lock(&sc->sfl_lock);
2745 fl->flags |= FL_DOOMED;
2747 mtx_unlock(&sc->sfl_lock);
2749 callout_drain(&sc->sfl_callout);
2750 KASSERT((fl->flags & FL_STARVING) == 0,
2751 ("%s: still starving", __func__));
2755 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
2756 driver_intr_t *handler, void *arg, char *name)
2761 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
2762 RF_SHAREABLE | RF_ACTIVE);
2763 if (irq->res == NULL) {
2764 device_printf(sc->dev,
2765 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
2769 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
2770 NULL, handler, arg, &irq->tag);
2772 device_printf(sc->dev,
2773 "failed to setup interrupt for rid %d, name %s: %d\n",
2776 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
2782 t4_free_irq(struct adapter *sc, struct irq *irq)
2785 bus_teardown_intr(sc->dev, irq->res, irq->tag);
2787 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
2789 bzero(irq, sizeof(*irq));
2795 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
2798 uint32_t *p = (uint32_t *)(buf + start);
2800 for ( ; start <= end; start += sizeof(uint32_t))
2801 *p++ = t4_read_reg(sc, start);
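/*
 * Dump the register ranges listed in reg_ranges into the caller-supplied
 * buffer as a versioned register snapshot.
 */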
2805 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
2808 static const unsigned int reg_ranges[] = {
3028 regs->version = 4 | (sc->params.rev << 10);
3029 for (i = 0; i < nitems(reg_ranges); i += 2)
3030 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
3034 cxgbe_tick(void *arg)
3036 struct port_info *pi = arg;
3037 struct ifnet *ifp = pi->ifp;
3038 struct sge_txq *txq;
3040 struct port_stats *s = &pi->stats;
3043 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3045 return; /* without scheduling another callout */
3048 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
3050 ifp->if_opackets = s->tx_frames - s->tx_pause;
3051 ifp->if_ipackets = s->rx_frames - s->rx_pause;
3052 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
3053 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
3054 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
3055 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
3056 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3057 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
3061 for_each_txq(pi, i, txq)
3062 drops += txq->br->br_drops;
3063 ifp->if_snd.ifq_drops = drops;
3065 ifp->if_oerrors = s->tx_error_frames;
3066 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
3067 s->rx_fcs_err + s->rx_len_err;
3069 callout_schedule(&pi->tick, hz);
3074 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
3078 if (arg != ifp || ifp->if_type != IFT_ETHER)
3081 vlan = VLAN_DEVAT(ifp, vid);
3082 VLAN_SETCOOKIE(vlan, ifp);
3086 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
3090 panic("%s: opcode 0x%02x on iq %p with payload %p",
3091 __func__, rss->opcode, iq, m);
3093 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
3094 __func__, rss->opcode, iq, m);
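/*
 * Install a handler for a CPL opcode (or restore the default "not handled"
 * handler when h is NULL).  The table entry is updated with a single atomic
 * store, so concurrent dispatch sees either the old or the new handler.
 */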
3101 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3103 uintptr_t *loc, new;
3105 if (opcode >= nitems(sc->cpl_handler))
3108 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
3109 loc = (uintptr_t *) &sc->cpl_handler[opcode];
3110 atomic_store_rel_ptr(loc, new);
3116 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
3120 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
3122 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
3123 __func__, iq, ctrl);
3129 t4_register_an_handler(struct adapter *sc, an_handler_t h)
3131 uintptr_t *loc, new;
3133 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
3134 loc = (uintptr_t *) &sc->an_handler;
3135 atomic_store_rel_ptr(loc, new);
3141 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
3143 const struct cpl_fw6_msg *cpl =
3144 __containerof(rpl, struct cpl_fw6_msg, data[0]);
3147 panic("%s: fw_msg type %d", __func__, cpl->type);
3149 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
3155 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
3157 uintptr_t *loc, new;
3159 if (type >= nitems(sc->fw_msg_handler))
3163 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
3164 * handler dispatch table. Reject any attempt to install a handler for these messages.
3167 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
3170 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
3171 loc = (uintptr_t *) &sc->fw_msg_handler[type];
3172 atomic_store_rel_ptr(loc, new);
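/*
 * Create the adapter-wide sysctl tree (dev.t4nex.X): device parameters,
 * capability bitfields, diagnostic/log nodes under "misc", and TOE tunables
 * when offload is available.
 */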
3178 t4_sysctls(struct adapter *sc)
3180 struct sysctl_ctx_list *ctx;
3181 struct sysctl_oid *oid;
3182 struct sysctl_oid_list *children, *c0;
3183 static char *caps[] = {
3184 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
3185 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
3186 "\20\1TOE", /* caps[2] toecaps */
3187 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
3188 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
3189 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
3190 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
3191 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
3194 ctx = device_get_sysctl_ctx(sc->dev);
3199 oid = device_get_sysctl_tree(sc->dev);
3200 c0 = children = SYSCTL_CHILDREN(oid);
3202 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
3203 &sc->params.nports, 0, "# of ports");
3205 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
3206 &sc->params.rev, 0, "chip hardware revision");
3208 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
3209 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
3211 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
3212 CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
3214 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
3215 &sc->cfcsum, 0, "config file checksum");
3217 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
3218 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
3219 sysctl_bitfield, "A", "available link capabilities");
3221 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
3222 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
3223 sysctl_bitfield, "A", "available NIC capabilities");
3225 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
3226 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
3227 sysctl_bitfield, "A", "available TCP offload capabilities");
3229 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
3230 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
3231 sysctl_bitfield, "A", "available RDMA capabilities");
3233 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
3234 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
3235 sysctl_bitfield, "A", "available iSCSI capabilities");
3237 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
3238 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
3239 sysctl_bitfield, "A", "available FCoE capabilities");
3241 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
3242 &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");
3244 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
3245 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
3246 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
3247 "interrupt holdoff timer values (us)");
3249 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
3250 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
3251 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
3252 "interrupt holdoff packet counter values");
3256 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
3258 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
3259 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
3260 "logs and miscellaneous information");
3261 children = SYSCTL_CHILDREN(oid);
3263 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
3264 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3265 sysctl_cctrl, "A", "congestion control");
3267 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
3268 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3269 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
3271 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
3272 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
3273 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
3275 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
3276 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
3277 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
3279 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
3280 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
3281 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
3283 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
3284 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
3285 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
3287 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
3288 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
3289 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
3291 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
3292 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3293 sysctl_cim_la, "A", "CIM logic analyzer");
3295 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
3296 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
3297 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
3299 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
3300 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
3301 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
3303 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
3304 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
3305 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
3307 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
3308 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
3309 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
3311 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
3312 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
3313 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
3315 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
3316 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
3317 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
3319 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
3320 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3321 sysctl_cim_qcfg, "A", "CIM queue configuration");
3323 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
3324 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3325 sysctl_cpl_stats, "A", "CPL statistics");
3327 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
3328 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3329 sysctl_ddp_stats, "A", "DDP statistics");
3331 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
3332 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3333 sysctl_devlog, "A", "firmware's device log");
3335 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
3336 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3337 sysctl_fcoe_stats, "A", "FCoE statistics");
3339 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
3340 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3341 sysctl_hw_sched, "A", "hardware scheduler");
3343 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
3344 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3345 sysctl_l2t, "A", "hardware L2 table");
3347 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
3348 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3349 sysctl_lb_stats, "A", "loopback statistics");
3351 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
3352 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3353 sysctl_meminfo, "A", "memory regions");
3355 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
3356 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3357 sysctl_path_mtus, "A", "path MTUs");
3359 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
3360 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3361 sysctl_pm_stats, "A", "PM statistics");
3363 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
3364 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3365 sysctl_rdma_stats, "A", "RDMA statistics");
3367 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
3368 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3369 sysctl_tcp_stats, "A", "TCP statistics");
3371 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
3372 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3373 sysctl_tids, "A", "TID information");
3375 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
3376 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3377 sysctl_tp_err_stats, "A", "TP error statistics");
3379 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
3380 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
3381 sysctl_tx_rate, "A", "Tx rate");
3385 if (is_offload(sc)) {
3389 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
3390 NULL, "TOE parameters");
3391 children = SYSCTL_CHILDREN(oid);
3393 sc->tt.sndbuf = 256 * 1024;
3394 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
3395 &sc->tt.sndbuf, 0, "max hardware send buffer size");
3398 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
3399 &sc->tt.ddp, 0, "DDP allowed");
3401 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
3402 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
3403 &sc->tt.indsz, 0, "DDP max indicate size allowed");
3406 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
3407 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
3408 &sc->tt.ddp_thres, 0, "DDP threshold");
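/*
 * Create the per-port sysctl tree (dev.cxgbe.X): queue configuration,
 * holdoff tunables, and MPS port statistics under "stats".
 */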
3417 cxgbe_sysctls(struct port_info *pi)
3419 struct sysctl_ctx_list *ctx;
3420 struct sysctl_oid *oid;
3421 struct sysctl_oid_list *children;
3423 ctx = device_get_sysctl_ctx(pi->dev);
3428 oid = device_get_sysctl_tree(pi->dev);
3429 children = SYSCTL_CHILDREN(oid);
3431 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
3432 &pi->nrxq, 0, "# of rx queues");
3433 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
3434 &pi->ntxq, 0, "# of tx queues");
3435 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
3436 &pi->first_rxq, 0, "index of first rx queue");
3437 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
3438 &pi->first_txq, 0, "index of first tx queue");
3441 if (is_offload(pi->adapter)) {
3442 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
3444 "# of rx queues for offloaded TCP connections");
3445 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
3447 "# of tx queues for offloaded TCP connections");
3448 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
3449 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
3450 "index of first TOE rx queue");
3451 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
3452 CTLFLAG_RD, &pi->first_ofld_txq, 0,
3453 "index of first TOE tx queue");
3457 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
3458 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
3459 "holdoff timer index");
3460 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
3461 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
3462 "holdoff packet counter index");
3464 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
3465 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
3467 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
3468 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
3472 * dev.cxgbe.X.stats.
3474 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
3475 NULL, "port statistics");
3476 children = SYSCTL_CHILDREN(oid);
3478 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
3479 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
3480 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
3481 sysctl_handle_t4_reg64, "QU", desc)
3483 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
3484 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
3485 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
3486 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
3487 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
3488 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
3489 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
3490 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
3491 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
3492 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
3493 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
3494 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
3495 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
3496 "# of tx frames in this range",
3497 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
3498 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
3499 "# of tx frames in this range",
3500 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
3501 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
3502 "# of tx frames in this range",
3503 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
3504 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
3505 "# of tx frames in this range",
3506 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
3507 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
3508 "# of tx frames in this range",
3509 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
3510 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
3511 "# of tx frames in this range",
3512 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
3513 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
3514 "# of tx frames in this range",
3515 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
3516 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
3517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
3518 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
3519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
3520 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
3521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
3522 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
3523 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
3524 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
3525 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
3526 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
3527 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
3528 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
3529 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
3530 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
3531 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
3532 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
3533 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
3534 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
3535 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
3537 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
3538 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
3539 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
3540 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
3541 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
3542 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
3543 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
3544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
3545 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
3546 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
3547 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
3548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
3549 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
3550 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
3551 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
3552 "# of frames received with bad FCS",
3553 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
3554 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
3555 "# of frames received with length error",
3556 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
3557 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
3558 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
3559 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
3560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
3561 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
3562 "# of rx frames in this range",
3563 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
3564 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
3565 "# of rx frames in this range",
3566 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
3567 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
3568 "# of rx frames in this range",
3569 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
3570 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
3571 "# of rx frames in this range",
3572 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
3573 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
3574 "# of rx frames in this range",
3575 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
3576 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
3577 "# of rx frames in this range",
3578 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
3579 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
3580 "# of rx frames in this range",
3581 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
3582 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
3583 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
3584 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
3585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
3586 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
3587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
3588 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
3589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
3590 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
3591 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
3592 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
3593 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
3594 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
3595 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
3596 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
3597 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
3598 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
3599 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
3601 #undef SYSCTL_ADD_T4_REG64
3603 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
3604 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
3605 &pi->stats.name, desc)
3607 /* We get these from port_stats and they may be stale by up to 1s */
3608 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
3609 "# drops due to buffer-group 0 overflows");
3610 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
3611 "# drops due to buffer-group 1 overflows");
3612 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
3613 "# drops due to buffer-group 2 overflows");
3614 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
3615 "# drops due to buffer-group 3 overflows");
3616 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
3617 "# of buffer-group 0 truncated packets");
3618 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
3619 "# of buffer-group 1 truncated packets");
3620 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
3621 "# of buffer-group 2 truncated packets");
3622 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
3623 "# of buffer-group 3 truncated packets");
3625 #undef SYSCTL_ADD_T4_PORTSTAT
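/*
 * sysctl handler that formats an array of ints (arg1 = array, arg2 = size
 * in bytes) as a space-separated string.
 */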
3631 sysctl_int_array(SYSCTL_HANDLER_ARGS)
3636 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
3637 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
3638 sbuf_printf(&sb, "%d ", *i);
3641 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3647 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
3652 rc = sysctl_wire_old_buffer(req, 0);
3656 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3660 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
3661 rc = sbuf_finish(sb);
3668 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
3670 struct port_info *pi = arg1;
3671 struct adapter *sc = pi->adapter;
3673 struct sge_rxq *rxq;
3678 rc = sysctl_handle_int(oidp, &idx, 0, req);
3679 if (rc != 0 || req->newptr == NULL)
3682 if (idx < 0 || idx >= SGE_NTIMERS)
3685 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
3690 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
3691 for_each_rxq(pi, i, rxq) {
3692 #ifdef atomic_store_rel_8
3693 atomic_store_rel_8(&rxq->iq.intr_params, v);
3695 rxq->iq.intr_params = v;
3700 end_synchronized_op(sc, LOCK_HELD);
3705 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
3707 struct port_info *pi = arg1;
3708 struct adapter *sc = pi->adapter;
3713 rc = sysctl_handle_int(oidp, &idx, 0, req);
3714 if (rc != 0 || req->newptr == NULL)
3717 if (idx < -1 || idx >= SGE_NCOUNTERS)
3720 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
3725 if (pi->flags & PORT_INIT_DONE)
3726 rc = EBUSY; /* cannot be changed once the queues are created */
3730 end_synchronized_op(sc, LOCK_HELD);
3735 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
3737 struct port_info *pi = arg1;
3738 struct adapter *sc = pi->adapter;
3741 qsize = pi->qsize_rxq;
3743 rc = sysctl_handle_int(oidp, &qsize, 0, req);
3744 if (rc != 0 || req->newptr == NULL)
3747 if (qsize < 128 || (qsize & 7))
3750 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
3755 if (pi->flags & PORT_INIT_DONE)
3756 rc = EBUSY; /* cannot be changed once the queues are created */
3758 pi->qsize_rxq = qsize;
3760 end_synchronized_op(sc, LOCK_HELD);
3765 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
3767 struct port_info *pi = arg1;
3768 struct adapter *sc = pi->adapter;
3771 qsize = pi->qsize_txq;
3773 rc = sysctl_handle_int(oidp, &qsize, 0, req);
3774 if (rc != 0 || req->newptr == NULL)
3777 /* bufring size must be a power of 2 */
3778 if (qsize < 128 || !powerof2(qsize))
3781 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
3786 if (pi->flags & PORT_INIT_DONE)
3787 rc = EBUSY; /* cannot be changed once the queues are created */
3789 pi->qsize_txq = qsize;
3791 end_synchronized_op(sc, LOCK_HELD);
3796 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
3798 struct adapter *sc = arg1;
3802 val = t4_read_reg64(sc, reg);
3804 return (sysctl_handle_64(oidp, &val, 0, req));
3809 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
3811 struct adapter *sc = arg1;
3814 uint16_t incr[NMTUS][NCCTRL_WIN];
3815 static const char *dec_fac[] = {
3816 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
3820 rc = sysctl_wire_old_buffer(req, 0);
3824 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3828 t4_read_cong_tbl(sc, incr);
3830 for (i = 0; i < NCCTRL_WIN; ++i) {
3831 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
3832 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
3833 incr[5][i], incr[6][i], incr[7][i]);
3834 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
3835 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
3836 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
3837 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
3840 rc = sbuf_finish(sb);
3846 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ] = {
3847 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
3848 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI" /* obq's */
3852 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
3854 struct adapter *sc = arg1;
3856 int rc, i, n, qid = arg2;
3860 KASSERT(qid >= 0 && qid < nitems(qname),
3861 ("%s: bad qid %d\n", __func__, qid));
3863 if (qid < CIM_NUM_IBQ) {
3866 n = 4 * CIM_IBQ_SIZE;
3867 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
3868 rc = t4_read_cim_ibq(sc, qid, buf, n);
3870 /* outbound queue */
3873 n = 4 * 6 * CIM_OBQ_SIZE;
3874 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
3875 rc = t4_read_cim_obq(sc, qid, buf, n);
3882 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
3884 rc = sysctl_wire_old_buffer(req, 0);
3888 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3894 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
3895 for (i = 0, p = buf; i < n; i += 16, p += 4)
3896 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
3899 rc = sbuf_finish(sb);
3907 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
3909 struct adapter *sc = arg1;
3915 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
3919 rc = sysctl_wire_old_buffer(req, 0);
3923 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3927 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
3930 rc = -t4_cim_read_la(sc, buf, NULL);
3934 sbuf_printf(sb, "Status Data PC%s",
3935 cfg & F_UPDBGLACAPTPCONLY ? "" :
3936 " LS0Stat LS0Addr LS0Data");
3938 KASSERT((sc->params.cim_la_size & 7) == 0,
3939 ("%s: p will walk off the end of buf", __func__));
3941 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
3942 if (cfg & F_UPDBGLACAPTPCONLY) {
3943 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
3945 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
3946 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
3947 p[4] & 0xff, p[5] >> 8);
3948 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
3949 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
3950 p[1] & 0xf, p[2] >> 4);
3953 "\n %02x %x%07x %x%07x %08x %08x "
3955 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
3956 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
3961 rc = sbuf_finish(sb);
3969 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
3971 struct adapter *sc = arg1;
3974 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ];
3975 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ];
3976 uint16_t thres[CIM_NUM_IBQ];
3977 uint32_t obq_wr[2 * CIM_NUM_OBQ], *wr = obq_wr;
3978 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ)], *p = stat;
3980 rc = -t4_cim_read(sc, A_UP_IBQ_0_RDADDR, nitems(stat), stat);
3982 rc = -t4_cim_read(sc, A_UP_OBQ_0_REALADDR, nitems(obq_wr),
3987 t4_read_cimq_cfg(sc, base, size, thres);
3989 rc = sysctl_wire_old_buffer(req, 0);
3993 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
3997 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
3999 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
4000 sbuf_printf(sb, "\n%5s %5x %5u %4u %6x %4x %4u %4u %5u",
4001 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
4002 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
4003 G_QUEREMFLITS(p[2]) * 16);
4004 for ( ; i < CIM_NUM_IBQ + CIM_NUM_OBQ; i++, p += 4, wr += 2)
4005 sbuf_printf(sb, "\n%5s %5x %5u %11x %4x %4u %4u %5u", qname[i],
4006 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
4007 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
4008 G_QUEREMFLITS(p[2]) * 16);
4010 rc = sbuf_finish(sb);
4017 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
4019 struct adapter *sc = arg1;
4022 struct tp_cpl_stats stats;
4024 rc = sysctl_wire_old_buffer(req, 0);
4028 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4032 t4_tp_get_cpl_stats(sc, &stats);
4034 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4036 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
4037 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
4038 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
4039 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
4041 rc = sbuf_finish(sb);
4048 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
4050 struct adapter *sc = arg1;
4053 struct tp_usm_stats stats;
4055 rc = sysctl_wire_old_buffer(req, 0);
4059 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4063 t4_get_usm_stats(sc, &stats);
4065 sbuf_printf(sb, "Frames: %u\n", stats.frames);
4066 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
4067 sbuf_printf(sb, "Drops: %u", stats.drops);
4069 rc = sbuf_finish(sb);
4075 const char *devlog_level_strings[] = {
4076 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
4077 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
4078 [FW_DEVLOG_LEVEL_ERR] = "ERR",
4079 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
4080 [FW_DEVLOG_LEVEL_INFO] = "INFO",
4081 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
4084 const char *devlog_facility_strings[] = {
4085 [FW_DEVLOG_FACILITY_CORE] = "CORE",
4086 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
4087 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
4088 [FW_DEVLOG_FACILITY_RES] = "RES",
4089 [FW_DEVLOG_FACILITY_HW] = "HW",
4090 [FW_DEVLOG_FACILITY_FLR] = "FLR",
4091 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
4092 [FW_DEVLOG_FACILITY_PHY] = "PHY",
4093 [FW_DEVLOG_FACILITY_MAC] = "MAC",
4094 [FW_DEVLOG_FACILITY_PORT] = "PORT",
4095 [FW_DEVLOG_FACILITY_VI] = "VI",
4096 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
4097 [FW_DEVLOG_FACILITY_ACL] = "ACL",
4098 [FW_DEVLOG_FACILITY_TM] = "TM",
4099 [FW_DEVLOG_FACILITY_QFC] = "QFC",
4100 [FW_DEVLOG_FACILITY_DCB] = "DCB",
4101 [FW_DEVLOG_FACILITY_ETH] = "ETH",
4102 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
4103 [FW_DEVLOG_FACILITY_RI] = "RI",
4104 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
4105 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
4106 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
4107 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
4111 sysctl_devlog(SYSCTL_HANDLER_ARGS)
4113 struct adapter *sc = arg1;
4114 struct devlog_params *dparams = &sc->params.devlog;
4115 struct fw_devlog_e *buf, *e;
4116 int i, j, rc, nentries, first = 0;
4118 uint64_t ftstamp = UINT64_MAX;
4120 if (dparams->start == 0)
4123 nentries = dparams->size / sizeof(struct fw_devlog_e);
4125 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
4129 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
4134 for (i = 0; i < nentries; i++) {
4137 if (e->timestamp == 0)
4140 e->timestamp = be64toh(e->timestamp);
4141 e->seqno = be32toh(e->seqno);
4142 for (j = 0; j < 8; j++)
4143 e->params[j] = be32toh(e->params[j]);
4145 if (e->timestamp < ftstamp) {
4146 ftstamp = e->timestamp;
4151 if (buf[first].timestamp == 0)
4152 goto done; /* nothing in the log */
4154 rc = sysctl_wire_old_buffer(req, 0);
4158 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4163 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
4164 "Seq#", "Tstamp", "Level", "Facility", "Message");
4169 if (e->timestamp == 0)
4172 sbuf_printf(sb, "%10d %15ju %8s %8s ",
4173 e->seqno, e->timestamp,
4174 (e->level < nitems(devlog_level_strings) ?
4175 devlog_level_strings[e->level] : "UNKNOWN"),
4176 (e->facility < nitems(devlog_facility_strings) ?
4177 devlog_facility_strings[e->facility] : "UNKNOWN"));
4178 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
4179 e->params[2], e->params[3], e->params[4],
4180 e->params[5], e->params[6], e->params[7]);
4182 if (++i == nentries)
4184 } while (i != first);
4186 rc = sbuf_finish(sb);
4194 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
4196 struct adapter *sc = arg1;
4199 struct tp_fcoe_stats stats[4];
4201 rc = sysctl_wire_old_buffer(req, 0);
4205 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4209 t4_get_fcoe_stats(sc, 0, &stats[0]);
4210 t4_get_fcoe_stats(sc, 1, &stats[1]);
4211 t4_get_fcoe_stats(sc, 2, &stats[2]);
4212 t4_get_fcoe_stats(sc, 3, &stats[3]);
4214 sbuf_printf(sb, " channel 0 channel 1 "
4215 "channel 2 channel 3\n");
4216 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
4217 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
4218 stats[3].octetsDDP);
4219 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
4220 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
4221 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
4222 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
4223 stats[3].framesDrop);
4225 rc = sbuf_finish(sb);
4232 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
4234 struct adapter *sc = arg1;
4237 unsigned int map, kbps, ipg, mode;
4238 unsigned int pace_tab[NTX_SCHED];
4240 rc = sysctl_wire_old_buffer(req, 0);
4244 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4248 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
4249 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
4250 t4_read_pace_tbl(sc, pace_tab);
4252 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
4253 "Class IPG (0.1 ns) Flow IPG (us)");
4255 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
4256 t4_get_tx_sched(sc, i, &kbps, &ipg);
4257 sbuf_printf(sb, "\n %u %-5s %u ", i,
4258 (mode & (1 << i)) ? "flow" : "class", map & 3);
4260 sbuf_printf(sb, "%9u ", kbps);
4262 sbuf_printf(sb, " disabled ");
4265 sbuf_printf(sb, "%13u ", ipg);
4267 sbuf_printf(sb, " disabled ");
4270 sbuf_printf(sb, "%10u", pace_tab[i]);
4272 sbuf_printf(sb, " disabled");
4275 rc = sbuf_finish(sb);
4282 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
4284 struct adapter *sc = arg1;
4288 struct lb_port_stats s[2];
4289 static const char *stat_name[] = {
4290 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
4291 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
4292 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
4293 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
4294 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
4295 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
4296 "BG2FramesTrunc:", "BG3FramesTrunc:"
4299 rc = sysctl_wire_old_buffer(req, 0);
4303 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4307 memset(s, 0, sizeof(s));
4309 for (i = 0; i < 4; i += 2) {
4310 t4_get_lb_stats(sc, i, &s[0]);
4311 t4_get_lb_stats(sc, i + 1, &s[1]);
4315 sbuf_printf(sb, "%s Loopback %u"
4316 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
4318 for (j = 0; j < nitems(stat_name); j++)
4319 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
4323 rc = sbuf_finish(sb);
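/*
 * Comparator used to sort struct mem_desc entries by base address when
 * building the memory-region report in sysctl_meminfo.
 */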
4336 mem_desc_cmp(const void *a, const void *b)
4338 return ((const struct mem_desc *)a)->base -
4339 ((const struct mem_desc *)b)->base;
4343 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
4348 size = to - from + 1;
4352 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
4353 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
4357 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
4359 struct adapter *sc = arg1;
4363 static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
4364 static const char *region[] = {
4365 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
4366 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
4367 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
4368 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
4369 "RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
4370 "ULPTX state:", "On-chip queues:"
4372 struct mem_desc avail[3];
4373 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
4374 struct mem_desc *md = mem;
4376 rc = sysctl_wire_old_buffer(req, 0);
4380 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4384 for (i = 0; i < nitems(mem); i++) {
4389 /* Find and sort the populated memory ranges */
4391 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4392 if (lo & F_EDRAM0_ENABLE) {
4393 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4394 avail[i].base = G_EDRAM0_BASE(hi) << 20;
4395 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
4399 if (lo & F_EDRAM1_ENABLE) {
4400 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4401 avail[i].base = G_EDRAM1_BASE(hi) << 20;
4402 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
4406 if (lo & F_EXT_MEM_ENABLE) {
4407 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4408 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
4409 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
4413 if (!i) /* no memory available */
4415 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
4417 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
4418 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
4419 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
4420 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
4421 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
4422 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
4423 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
4424 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
4425 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
4427 /* the next few have explicit upper bounds */
4428 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
4429 md->limit = md->base - 1 +
4430 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
4431 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
4434 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
4435 md->limit = md->base - 1 +
4436 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
4437 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
4440 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
4441 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
4442 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
4443 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
4446 md->idx = nitems(region); /* hide it */
4450 #define ulp_region(reg) \
4451 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
4452 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
4454 ulp_region(RX_ISCSI);
4455 ulp_region(RX_TDDP);
4457 ulp_region(RX_STAG);
4459 ulp_region(RX_RQUDP);
4464 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
4465 md->limit = md->base + sc->tids.ntids - 1;
4467 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
4468 md->limit = md->base + sc->tids.ntids - 1;
4471 md->base = sc->vres.ocq.start;
4472 if (sc->vres.ocq.size)
4473 md->limit = md->base + sc->vres.ocq.size - 1;
4475 md->idx = nitems(region); /* hide it */
4478 /* add any address-space holes, there can be up to 3 */
4479 for (n = 0; n < i - 1; n++)
4480 if (avail[n].limit < avail[n + 1].base)
4481 (md++)->base = avail[n].limit;
4483 (md++)->base = avail[n].limit;
4486 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
4488 for (lo = 0; lo < i; lo++)
4489 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
4490 avail[lo].limit - 1);
4492 sbuf_printf(sb, "\n");
4493 for (i = 0; i < n; i++) {
4494 if (mem[i].idx >= nitems(region))
4495 continue; /* skip holes */
4497 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
4498 mem_region_show(sb, region[mem[i].idx], mem[i].base,
4502 sbuf_printf(sb, "\n");
4503 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
4504 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
4505 mem_region_show(sb, "uP RAM:", lo, hi);
4507 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
4508 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
4509 mem_region_show(sb, "uP Extmem2:", lo, hi);
4511 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
4512 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
4514 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
4515 (lo & F_PMRXNUMCHN) ? 2 : 1);
4517 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
4518 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
4519 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
4521 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
4522 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
4523 sbuf_printf(sb, "%u p-structs\n",
4524 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
4526 for (i = 0; i < 4; i++) {
4527 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
4528 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
4529 i, G_USED(lo), G_ALLOC(lo));
4531 for (i = 0; i < 4; i++) {
4532 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
4534 "\nLoopback %d using %u pages out of %u allocated",
4535 i, G_USED(lo), G_ALLOC(lo));
4538 rc = sbuf_finish(sb);
4545 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
4547 struct adapter *sc = arg1;
4550 uint16_t mtus[NMTUS];
4552 rc = sysctl_wire_old_buffer(req, 0);
4556 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4560 t4_read_mtu_tbl(sc, mtus, NULL);
4562 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
4563 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
4564 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
4565 mtus[14], mtus[15]);
4567 rc = sbuf_finish(sb);
4574 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
4576 struct adapter *sc = arg1;
4579 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
4580 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
4581 static const char *pm_stats[] = {
4582 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
4585 rc = sysctl_wire_old_buffer(req, 0);
4589 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4593 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
4594 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
4596 sbuf_printf(sb, " Tx count Tx cycles "
4597 "Rx count Rx cycles");
4598 for (i = 0; i < PM_NSTATS; i++)
4599 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
4600 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
4602 rc = sbuf_finish(sb);
4609 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
4611 struct adapter *sc = arg1;
4614 struct tp_rdma_stats stats;
4616 rc = sysctl_wire_old_buffer(req, 0);
4620 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4624 t4_tp_get_rdma_stats(sc, &stats);
4625 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
4626 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
4628 rc = sbuf_finish(sb);
4635 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
4637 struct adapter *sc = arg1;
4640 struct tp_tcp_stats v4, v6;
4642 rc = sysctl_wire_old_buffer(req, 0);
4646 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4650 t4_tp_get_tcp_stats(sc, &v4, &v6);
4653 sbuf_printf(sb, "OutRsts: %20u %20u\n",
4654 v4.tcpOutRsts, v6.tcpOutRsts);
4655 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
4656 v4.tcpInSegs, v6.tcpInSegs);
4657 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
4658 v4.tcpOutSegs, v6.tcpOutSegs);
4659 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
4660 v4.tcpRetransSegs, v6.tcpRetransSegs);
4662 rc = sbuf_finish(sb);
4669 sysctl_tids(SYSCTL_HANDLER_ARGS)
4671 struct adapter *sc = arg1;
4674 struct tid_info *t = &sc->tids;
4676 rc = sysctl_wire_old_buffer(req, 0);
4680 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4685 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
4690 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
4691 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
4694 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
4695 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4698 sbuf_printf(sb, "TID range: %u-%u",
4699 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
4703 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
4704 sbuf_printf(sb, ", in use: %u\n",
4705 atomic_load_acq_int(&t->tids_in_use));
4709 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
4710 t->stid_base + t->nstids - 1, t->stids_in_use);
4714 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
4715 t->ftid_base + t->nftids - 1);
4718 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
4719 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
4720 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
4722 rc = sbuf_finish(sb);
4729 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
4731 struct adapter *sc = arg1;
4734 struct tp_err_stats stats;
4736 rc = sysctl_wire_old_buffer(req, 0);
4740 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4744 t4_tp_get_err_stats(sc, &stats);
4746 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4748 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
4749 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
4750 stats.macInErrs[3]);
4751 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
4752 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
4753 stats.hdrInErrs[3]);
4754 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
4755 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
4756 stats.tcpInErrs[3]);
4757 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
4758 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
4759 stats.tcp6InErrs[3]);
4760 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
4761 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
4762 stats.tnlCongDrops[3]);
4763 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
4764 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
4765 stats.tnlTxDrops[3]);
4766 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
4767 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
4768 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
4769 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
4770 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
4771 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
4772 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
4773 stats.ofldNoNeigh, stats.ofldCongDefer);
4775 rc = sbuf_finish(sb);
4782 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
4784 struct adapter *sc = arg1;
4787 u64 nrate[NCHAN], orate[NCHAN];
4789 rc = sysctl_wire_old_buffer(req, 0);
4793 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
4797 t4_get_chan_txrate(sc, nrate, orate);
4798 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
4800 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
4801 nrate[0], nrate[1], nrate[2], nrate[3]);
4802 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
4803 orate[0], orate[1], orate[2], orate[3]);
4805 rc = sbuf_finish(sb);
4813 txq_start(struct ifnet *ifp, struct sge_txq *txq)
4815 struct buf_ring *br;
4818 TXQ_LOCK_ASSERT_OWNED(txq);
4821 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
4823 t4_eth_tx(ifp, txq, m);
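/*
 * Restart path for a stalled egress queue (a summary added for clarity, based
 * on the code below): t4_tx_callout fires periodically while the EQ is
 * stalled; once transmission can resume (or the EQ is being torn down) it
 * hands the work to t4_tx_task via the adapter's taskqueue, which then calls
 * txq_start() for NIC tx queues or t4_wrq_tx_locked() for work-request queues.
 */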
4827 t4_tx_callout(void *arg)
4829 struct sge_eq *eq = arg;
4832 if (EQ_TRYLOCK(eq) == 0)
4835 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
4838 if (__predict_true(!(eq->flags & EQ_DOOMED)))
4839 callout_schedule(&eq->tx_callout, 1);
4843 EQ_LOCK_ASSERT_OWNED(eq);
4845 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
4847 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4848 struct sge_txq *txq = arg;
4849 struct port_info *pi = txq->ifp->if_softc;
4853 struct sge_wrq *wrq = arg;
4858 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
4865 t4_tx_task(void *arg, int count)
4867 struct sge_eq *eq = arg;
4870 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
4871 struct sge_txq *txq = arg;
4872 txq_start(txq->ifp, txq);
4874 struct sge_wrq *wrq = arg;
4875 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
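/*
 * The next three helpers translate between the hardware filter configuration
 * (the F_* bits programmed into the TP ingress config) and the driver's
 * T4_FILTER_* mode flags: fconf_to_mode() and mode_to_fconf() convert in each
 * direction, and fspec_to_fconf() computes which configuration bits a given
 * filter specification actually needs, so that it can be validated against
 * the global filter mode in set_filter().
 */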
4881 fconf_to_mode(uint32_t fconf)
4885 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
4886 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
4888 if (fconf & F_FRAGMENTATION)
4889 mode |= T4_FILTER_IP_FRAGMENT;
4891 if (fconf & F_MPSHITTYPE)
4892 mode |= T4_FILTER_MPS_HIT_TYPE;
4894 if (fconf & F_MACMATCH)
4895 mode |= T4_FILTER_MAC_IDX;
4897 if (fconf & F_ETHERTYPE)
4898 mode |= T4_FILTER_ETH_TYPE;
4900 if (fconf & F_PROTOCOL)
4901 mode |= T4_FILTER_IP_PROTO;
4904 mode |= T4_FILTER_IP_TOS;
4907 mode |= T4_FILTER_VLAN;
4909 if (fconf & F_VNIC_ID)
4910 mode |= T4_FILTER_VNIC;
4913 mode |= T4_FILTER_PORT;
4916 mode |= T4_FILTER_FCoE;
4922 mode_to_fconf(uint32_t mode)
4926 if (mode & T4_FILTER_IP_FRAGMENT)
4927 fconf |= F_FRAGMENTATION;
4929 if (mode & T4_FILTER_MPS_HIT_TYPE)
4930 fconf |= F_MPSHITTYPE;
4932 if (mode & T4_FILTER_MAC_IDX)
4933 fconf |= F_MACMATCH;
4935 if (mode & T4_FILTER_ETH_TYPE)
4936 fconf |= F_ETHERTYPE;
4938 if (mode & T4_FILTER_IP_PROTO)
4939 fconf |= F_PROTOCOL;
4941 if (mode & T4_FILTER_IP_TOS)
4944 if (mode & T4_FILTER_VLAN)
4947 if (mode & T4_FILTER_VNIC)
4950 if (mode & T4_FILTER_PORT)
4953 if (mode & T4_FILTER_FCoE)
4960 fspec_to_fconf(struct t4_filter_specification *fs)
4964 if (fs->val.frag || fs->mask.frag)
4965 fconf |= F_FRAGMENTATION;
4967 if (fs->val.matchtype || fs->mask.matchtype)
4968 fconf |= F_MPSHITTYPE;
4970 if (fs->val.macidx || fs->mask.macidx)
4971 fconf |= F_MACMATCH;
4973 if (fs->val.ethtype || fs->mask.ethtype)
4974 fconf |= F_ETHERTYPE;
4976 if (fs->val.proto || fs->mask.proto)
4977 fconf |= F_PROTOCOL;
4979 if (fs->val.tos || fs->mask.tos)
4982 if (fs->val.vlan_vld || fs->mask.vlan_vld)
4985 if (fs->val.vnic_vld || fs->mask.vnic_vld)
4988 if (fs->val.iport || fs->mask.iport)
4991 if (fs->val.fcoe || fs->mask.fcoe)
4998 get_filter_mode(struct adapter *sc, uint32_t *mode)
5003 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
5008 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
5011 if (sc->filter_mode != fconf) {
5012 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
5013 device_get_nameunit(sc->dev), sc->filter_mode, fconf);
5014 sc->filter_mode = fconf;
5017 *mode = fconf_to_mode(sc->filter_mode);
5019 end_synchronized_op(sc, LOCK_HELD);
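/*
 * set_filter_mode (below) is the write side of the CHELSIO_T4_SET_FILTER_MODE
 * ioctl; get_filter_mode above serves the GET side.  The mode can only be
 * changed while no filters are in use and no port has offload enabled, and
 * the cached sc->filter_mode is refreshed to match what was programmed.
 */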
5024 set_filter_mode(struct adapter *sc, uint32_t mode)
5029 fconf = mode_to_fconf(mode);
5031 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
5036 if (sc->tids.ftids_in_use > 0) {
5042 if (sc->offload_map) {
5049 rc = -t4_set_filter_mode(sc, fconf);
5051 sc->filter_mode = fconf;
5057 end_synchronized_op(sc, LOCK_HELD);
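/*
 * get_filter_hits (below) reads a filter's 64-bit hit counter straight out of
 * the TCB for that filter: memory window 0 is pointed at the filter's TCB
 * (tcb_base + tid * TCB_SIZE), the write is read back to flush it, and the
 * counter is read at a small fixed offset into the window, then converted
 * from big-endian.
 */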
5061 static inline uint64_t
5062 get_filter_hits(struct adapter *sc, uint32_t fid)
5064 uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5067 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
5068 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
5069 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
5070 hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);
5072 return (be64toh(hits));
5076 get_filter(struct adapter *sc, struct t4_filter *t)
5078 int i, rc, nfilters = sc->tids.nftids;
5079 struct filter_entry *f;
5081 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
5086 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
5087 t->idx >= nfilters) {
5088 t->idx = 0xffffffff;
5092 f = &sc->tids.ftid_tab[t->idx];
5093 for (i = t->idx; i < nfilters; i++, f++) {
5096 t->l2tidx = f->l2t ? f->l2t->idx : 0;
5097 t->smtidx = f->smtidx;
5099 t->hits = get_filter_hits(sc, t->idx);
5101 t->hits = UINT64_MAX;
5108 t->idx = 0xffffffff;
5110 end_synchronized_op(sc, LOCK_HELD);
5115 set_filter(struct adapter *sc, struct t4_filter *t)
5117 unsigned int nfilters, nports;
5118 struct filter_entry *f;
5121 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
5125 nfilters = sc->tids.nftids;
5126 nports = sc->params.nports;
5128 if (nfilters == 0) {
5133 if (!(sc->flags & FULL_INIT_DONE)) {
5138 if (t->idx >= nfilters) {
5143 /* Validate against the global filter mode */
5144 if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
5149 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
5154 if (t->fs.val.iport >= nports) {
5159 /* Can't specify an iq if not steering to it */
5160 if (!t->fs.dirsteer && t->fs.iq) {
5165 /* IPv6 filter idx must be 4-aligned */
5166 if (t->fs.type == 1 &&
5167 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
5172 if (sc->tids.ftid_tab == NULL) {
5173 KASSERT(sc->tids.ftids_in_use == 0,
5174 ("%s: no memory allocated but filters_in_use > 0",
5177 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
5178 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
5179 if (sc->tids.ftid_tab == NULL) {
5183 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
5186 for (i = 0; i < 4; i++) {
5187 f = &sc->tids.ftid_tab[t->idx + i];
5189 if (f->pending || f->valid) {
5198 if (t->fs.type == 0)
5202 f = &sc->tids.ftid_tab[t->idx];
5205 rc = set_filter_wr(sc, t->idx);
5207 end_synchronized_op(sc, 0);
5210 mtx_lock(&sc->tids.ftid_lock);
5212 if (f->pending == 0) {
5213 rc = f->valid ? 0 : EIO;
5217 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
5218 PCATCH, "t4setfw", 0)) {
5223 mtx_unlock(&sc->tids.ftid_lock);
5229 del_filter(struct adapter *sc, struct t4_filter *t)
5231 unsigned int nfilters;
5232 struct filter_entry *f;
5235 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
5239 nfilters = sc->tids.nftids;
5241 if (nfilters == 0) {
5246 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
5247 t->idx >= nfilters) {
5252 if (!(sc->flags & FULL_INIT_DONE)) {
5257 f = &sc->tids.ftid_tab[t->idx];
5269 t->fs = f->fs; /* extra info for the caller */
5270 rc = del_filter_wr(sc, t->idx);
5274 end_synchronized_op(sc, 0);
5277 mtx_lock(&sc->tids.ftid_lock);
5279 if (f->pending == 0) {
5280 rc = f->valid ? EIO : 0;
5284 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
5285 PCATCH, "t4delfw", 0)) {
5290 mtx_unlock(&sc->tids.ftid_lock);
5297 clear_filter(struct filter_entry *f)
5300 t4_l2t_release(f->l2t);
5302 bzero(f, sizeof (*f));
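/*
 * set_filter_wr/del_filter_wr build a FW_FILTER_WR work request from the
 * filter specification and send it to the firmware on the management queue.
 * The reply arrives asynchronously as a CPL_SET_TCB_RPL and is handled by
 * t4_filter_rpl() further below, which clears f->pending and wakes up any
 * thread sleeping in set_filter()/del_filter().
 */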
5306 set_filter_wr(struct adapter *sc, int fidx)
5308 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
5310 struct fw_filter_wr *fwr;
5313 ASSERT_SYNCHRONIZED_OP(sc);
5315 if (f->fs.newdmac || f->fs.newvlan) {
5316 /* This filter needs an L2T entry; allocate one. */
5317 f->l2t = t4_l2t_alloc_switching(sc->l2t);
5320 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
5322 t4_l2t_release(f->l2t);
5328 ftid = sc->tids.ftid_base + fidx;
5330 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
5335 bzero(fwr, sizeof (*fwr));
5337 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
5338 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
5340 htobe32(V_FW_FILTER_WR_TID(ftid) |
5341 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
5342 V_FW_FILTER_WR_NOREPLY(0) |
5343 V_FW_FILTER_WR_IQ(f->fs.iq));
5344 fwr->del_filter_to_l2tix =
5345 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
5346 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
5347 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
5348 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
5349 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
5350 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
5351 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
5352 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
5353 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
5354 f->fs.newvlan == VLAN_REWRITE) |
5355 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
5356 f->fs.newvlan == VLAN_REWRITE) |
5357 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
5358 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
5359 V_FW_FILTER_WR_PRIO(f->fs.prio) |
5360 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
5361 fwr->ethtype = htobe16(f->fs.val.ethtype);
5362 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
5363 fwr->frag_to_ovlan_vldm =
5364 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
5365 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
5366 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
5367 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
5368 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
5369 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
5371 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
5372 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
5373 fwr->maci_to_matchtypem =
5374 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
5375 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
5376 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
5377 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
5378 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
5379 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
5380 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
5381 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
5382 fwr->ptcl = f->fs.val.proto;
5383 fwr->ptclm = f->fs.mask.proto;
5384 fwr->ttyp = f->fs.val.tos;
5385 fwr->ttypm = f->fs.mask.tos;
5386 fwr->ivlan = htobe16(f->fs.val.vlan);
5387 fwr->ivlanm = htobe16(f->fs.mask.vlan);
5388 fwr->ovlan = htobe16(f->fs.val.vnic);
5389 fwr->ovlanm = htobe16(f->fs.mask.vnic);
5390 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
5391 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
5392 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
5393 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
5394 fwr->lp = htobe16(f->fs.val.dport);
5395 fwr->lpm = htobe16(f->fs.mask.dport);
5396 fwr->fp = htobe16(f->fs.val.sport);
5397 fwr->fpm = htobe16(f->fs.mask.sport);
5399 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
5402 sc->tids.ftids_in_use++;
5409 del_filter_wr(struct adapter *sc, int fidx)
5411 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
5413 struct fw_filter_wr *fwr;
5416 ftid = sc->tids.ftid_base + fidx;
5418 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
5422 bzero(fwr, sizeof (*fwr));
5424 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
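/*
 * t4_filter_rpl (below) handles the CPL_SET_TCB_RPL replies generated by
 * filter work requests.  The firmware's status is carried in the reply
 * cookie: FW_FILTER_WR_FLT_ADDED is the success case for an add (the SMT
 * index is recorded from the old TCB value), FW_FILTER_WR_FLT_DELETED is the
 * success case for a delete, and anything else is logged as a failure.
 */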
5432 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
5434 struct adapter *sc = iq->adapter;
5435 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
5436 unsigned int idx = GET_TID(rpl);
5438 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
5441 if (idx >= sc->tids.ftid_base &&
5442 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
5443 unsigned int rc = G_COOKIE(rpl->cookie);
5444 struct filter_entry *f = &sc->tids.ftid_tab[idx];
5446 mtx_lock(&sc->tids.ftid_lock);
5447 if (rc == FW_FILTER_WR_FLT_ADDED) {
5448 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
5450 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
5451 f->pending = 0; /* asynchronous setup completed */
5454 if (rc != FW_FILTER_WR_FLT_DELETED) {
5455 /* Add or delete failed, display an error */
5457 "filter %u setup failed with error %u\n",
5462 sc->tids.ftids_in_use--;
5464 wakeup(&sc->tids.ftid_tab);
5465 mtx_unlock(&sc->tids.ftid_lock);
5472 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
5476 if (cntxt->cid > M_CTXTQID)
5479 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
5480 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
5483 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
5487 if (sc->flags & FW_OK) {
5488 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
5495 * Read via firmware failed or wasn't even attempted. Read directly via the backdoor.
5498 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
5500 end_synchronized_op(sc, 0);
5505 load_fw(struct adapter *sc, struct t4_data *fw)
5510 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
5514 if (sc->flags & FULL_INIT_DONE) {
5519 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
5520 if (fw_data == NULL) {
5525 rc = copyin(fw->data, fw_data, fw->len);
5527 rc = -t4_load_fw(sc, fw_data, fw->len);
5529 free(fw_data, M_CXGBE);
5531 end_synchronized_op(sc, 0);
5536 read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
5538 uint32_t base, size, lo, hi, win, off, remaining, i, n;
5542 /* reads are in multiples of 32 bits */
5543 if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
5547 * We don't want to deal with potential holes so we mandate that the
5548 * requested region must lie entirely within one of the 3 memories.
5550 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5551 if (lo & F_EDRAM0_ENABLE) {
5552 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5553 base = G_EDRAM0_BASE(hi) << 20;
5554 size = G_EDRAM0_SIZE(hi) << 20;
5556 mr->addr >= base && mr->addr < base + size &&
5557 mr->addr + mr->len <= base + size)
5560 if (lo & F_EDRAM1_ENABLE) {
5561 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5562 base = G_EDRAM1_BASE(hi) << 20;
5563 size = G_EDRAM1_SIZE(hi) << 20;
5565 mr->addr >= base && mr->addr < base + size &&
5566 mr->addr + mr->len <= base + size)
5569 if (lo & F_EXT_MEM_ENABLE) {
5570 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5571 base = G_EXT_MEM_BASE(hi) << 20;
5572 size = G_EXT_MEM_SIZE(hi) << 20;
5574 mr->addr >= base && mr->addr < base + size &&
5575 mr->addr + mr->len <= base + size)
5581 buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);
5584 * Position the PCIe window (we use memwin2) to the 16B aligned area
5585 * just at/before the requested region.
5587 win = mr->addr & ~0xf;
5588 off = mr->addr - win; /* offset of the requested region in the win */
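/*
 * Worked example (illustrative numbers only): for mr->addr 0x12344 and
 * mr->len 0x40, win starts at 0x12340 and off is 4, so the first pass through
 * the loop below reads from MEMWIN2_BASE + 4 onwards before the window is
 * advanced by MEMWIN2_APERTURE.
 */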
5589 remaining = mr->len;
5593 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
5595 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
5597 /* number of bytes that we'll copy in the inner loop */
5598 n = min(remaining, MEMWIN2_APERTURE - off);
5600 for (i = 0; i < n; i += 4, remaining -= 4)
5601 *b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);
5603 win += MEMWIN2_APERTURE;
5607 rc = copyout(buf, mr->data, mr->len);
5614 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
5618 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
5621 if (i2cd->len > 1) {
5622 /* XXX: need fw support for longer reads in one go */
5626 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
5629 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
5630 i2cd->offset, &i2cd->data[0]);
5631 end_synchronized_op(sc, 0);
5637 t4_os_find_pci_capability(struct adapter *sc, int cap)
5641 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
5645 t4_os_pci_save_state(struct adapter *sc)
5648 struct pci_devinfo *dinfo;
5651 dinfo = device_get_ivars(dev);
5653 pci_cfg_save(dev, dinfo, 0);
5658 t4_os_pci_restore_state(struct adapter *sc)
5661 struct pci_devinfo *dinfo;
5664 dinfo = device_get_ivars(dev);
5666 pci_cfg_restore(dev, dinfo);
5671 t4_os_portmod_changed(const struct adapter *sc, int idx)
5673 struct port_info *pi = sc->port[idx];
5674 static const char *mod_str[] = {
5675 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
5678 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
5679 if_printf(pi->ifp, "transceiver unplugged.\n");
5680 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
5681 if_printf(pi->ifp, "unknown transceiver inserted.\n");
5682 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
5683 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
5684 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
5685 if_printf(pi->ifp, "%s transceiver inserted.\n",
5686 mod_str[pi->mod_type]);
5688 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
5694 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
5696 struct port_info *pi = sc->port[idx];
5697 struct ifnet *ifp = pi->ifp;
5700 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
5701 if_link_state_change(ifp, LINK_STATE_UP);
5703 if_link_state_change(ifp, LINK_STATE_DOWN);
5707 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
5711 mtx_lock(&t4_list_lock);
5712 SLIST_FOREACH(sc, &t4_list, link) {
5714 * func should not make any assumptions about what state sc is
5715 * in - the only guarantee is that sc->sc_lock is a valid lock.
5719 mtx_unlock(&t4_list_lock);
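/*
 * Example use of t4_iterate() (a sketch; count_adapters is hypothetical and
 * not part of this driver):
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 */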
5723 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
5729 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
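/*
 * t4_ioctl (below) implements the CHELSIO_T4_* control interface on the
 * adapter's character device.  A minimal userland sketch, assuming the ioctl
 * definitions come from the driver's ioctl header and that the nexus device
 * node appears as /dev/t4nex0 (both are assumptions, not confirmed here):
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */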
5735 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
5739 struct adapter *sc = dev->si_drv1;
5741 rc = priv_check(td, PRIV_DRIVER);
5746 case CHELSIO_T4_GETREG: {
5747 struct t4_reg *edata = (struct t4_reg *)data;
5749 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
5752 if (edata->size == 4)
5753 edata->val = t4_read_reg(sc, edata->addr);
5754 else if (edata->size == 8)
5755 edata->val = t4_read_reg64(sc, edata->addr);
5761 case CHELSIO_T4_SETREG: {
5762 struct t4_reg *edata = (struct t4_reg *)data;
5764 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
5767 if (edata->size == 4) {
5768 if (edata->val & 0xffffffff00000000)
5770 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
5771 } else if (edata->size == 8)
5772 t4_write_reg64(sc, edata->addr, edata->val);
5777 case CHELSIO_T4_REGDUMP: {
5778 struct t4_regdump *regs = (struct t4_regdump *)data;
5779 int reglen = T4_REGDUMP_SIZE;
5782 if (regs->len < reglen) {
5783 regs->len = reglen; /* hint to the caller */
5788 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
5789 t4_get_regs(sc, regs, buf);
5790 rc = copyout(buf, regs->data, reglen);
5794 case CHELSIO_T4_GET_FILTER_MODE:
5795 rc = get_filter_mode(sc, (uint32_t *)data);
5797 case CHELSIO_T4_SET_FILTER_MODE:
5798 rc = set_filter_mode(sc, *(uint32_t *)data);
5800 case CHELSIO_T4_GET_FILTER:
5801 rc = get_filter(sc, (struct t4_filter *)data);
5803 case CHELSIO_T4_SET_FILTER:
5804 rc = set_filter(sc, (struct t4_filter *)data);
5806 case CHELSIO_T4_DEL_FILTER:
5807 rc = del_filter(sc, (struct t4_filter *)data);
5809 case CHELSIO_T4_GET_SGE_CONTEXT:
5810 rc = get_sge_context(sc, (struct t4_sge_context *)data);
5812 case CHELSIO_T4_LOAD_FW:
5813 rc = load_fw(sc, (struct t4_data *)data);
5815 case CHELSIO_T4_GET_MEM:
5816 rc = read_card_mem(sc, (struct t4_mem_range *)data);
5818 case CHELSIO_T4_GET_I2C:
5819 rc = read_i2c(sc, (struct t4_i2c_data *)data);
5821 case CHELSIO_T4_CLEAR_STATS: {
5823 u_int port_id = *(uint32_t *)data;
5824 struct port_info *pi;
5826 if (port_id >= sc->params.nports)
5830 t4_clr_port_stats(sc, port_id);
5832 pi = sc->port[port_id];
5833 if (pi->flags & PORT_INIT_DONE) {
5834 struct sge_rxq *rxq;
5835 struct sge_txq *txq;
5836 struct sge_wrq *wrq;
5838 for_each_rxq(pi, i, rxq) {
5839 #if defined(INET) || defined(INET6)
5840 rxq->lro.lro_queued = 0;
5841 rxq->lro.lro_flushed = 0;
5844 rxq->vlan_extraction = 0;
5847 for_each_txq(pi, i, txq) {
5850 txq->vlan_insertion = 0;
5854 txq->txpkts_wrs = 0;
5855 txq->txpkts_pkts = 0;
5856 txq->br->br_drops = 0;
5862 /* nothing to clear for each ofld_rxq */
5864 for_each_ofld_txq(pi, i, wrq) {
5869 wrq = &sc->sge.ctrlq[pi->port_id];
5884 toe_capability(struct port_info *pi, int enable)
5887 struct adapter *sc = pi->adapter;
5889 ASSERT_SYNCHRONIZED_OP(sc);
5891 if (!is_offload(sc))
5895 if (!(sc->flags & FULL_INIT_DONE)) {
5896 rc = cxgbe_init_synchronized(pi);
5901 if (isset(&sc->offload_map, pi->port_id))
5904 if (!(sc->flags & TOM_INIT_DONE)) {
5905 rc = t4_activate_uld(sc, ULD_TOM);
5908 "You must kldload t4_tom.ko before trying "
5909 "to enable TOE on a cxgbe interface.\n");
5913 KASSERT(sc->tom_softc != NULL,
5914 ("%s: TOM activated but softc NULL", __func__));
5915 KASSERT(sc->flags & TOM_INIT_DONE,
5916 ("%s: TOM activated but flag not set", __func__));
5919 setbit(&sc->offload_map, pi->port_id);
5921 if (!isset(&sc->offload_map, pi->port_id))
5924 KASSERT(sc->flags & TOM_INIT_DONE,
5925 ("%s: TOM never initialized?", __func__));
5926 clrbit(&sc->offload_map, pi->port_id);
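/*
 * Usage note (hedged): toe_capability() toggles TOE per port, and the message
 * above spells out the prerequisite that t4_tom.ko be loaded before TOE can
 * be enabled on a cxgbe interface.  The toggle itself is presumably driven
 * from the interface-capability ioctl path.
 */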
5933 * Add an upper layer driver to the global list.
5936 t4_register_uld(struct uld_info *ui)
5941 mtx_lock(&t4_uld_list_lock);
5942 SLIST_FOREACH(u, &t4_uld_list, link) {
5943 if (u->uld_id == ui->uld_id) {
5949 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
5952 mtx_unlock(&t4_uld_list_lock);
5957 t4_unregister_uld(struct uld_info *ui)
5962 mtx_lock(&t4_uld_list_lock);
5964 SLIST_FOREACH(u, &t4_uld_list, link) {
5966 if (ui->refcount > 0) {
5971 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
5977 mtx_unlock(&t4_uld_list_lock);
5982 t4_activate_uld(struct adapter *sc, int id)
5985 struct uld_info *ui;
5987 ASSERT_SYNCHRONIZED_OP(sc);
5989 mtx_lock(&t4_uld_list_lock);
5991 SLIST_FOREACH(ui, &t4_uld_list, link) {
5992 if (ui->uld_id == id) {
5993 rc = ui->activate(sc);
6000 mtx_unlock(&t4_uld_list_lock);
6006 t4_deactivate_uld(struct adapter *sc, int id)
6009 struct uld_info *ui;
6011 ASSERT_SYNCHRONIZED_OP(sc);
6013 mtx_lock(&t4_uld_list_lock);
6015 SLIST_FOREACH(ui, &t4_uld_list, link) {
6016 if (ui->uld_id == id) {
6017 rc = ui->deactivate(sc);
6024 mtx_unlock(&t4_uld_list_lock);
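/*
 * Sketch of how an upper layer driver would use this interface (illustrative
 * only; the actual uld_info setup for t4_tom lives in that module, and the
 * callback names below are hypothetical):
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = t4_tom_activate,
 *		.deactivate = t4_tom_deactivate,
 *	};
 *	...
 *	t4_register_uld(&tom_uld_info);
 */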
6031 * Come up with reasonable defaults for some of the tunables, provided they're
6032 * not set by the user (in which case we'll use the values as is).
6035 tweak_tunables(void)
6037 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
6040 t4_ntxq10g = min(nc, NTXQ_10G);
6043 t4_ntxq1g = min(nc, NTXQ_1G);
6046 t4_nrxq10g = min(nc, NRXQ_10G);
6049 t4_nrxq1g = min(nc, NRXQ_1G);
6052 if (t4_nofldtxq10g < 1)
6053 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
6055 if (t4_nofldtxq1g < 1)
6056 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
6058 if (t4_nofldrxq10g < 1)
6059 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
6061 if (t4_nofldrxq1g < 1)
6062 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
6064 if (t4_toecaps_allowed == -1)
6065 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
6067 if (t4_toecaps_allowed == -1)
6068 t4_toecaps_allowed = 0;
6071 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
6072 t4_tmr_idx_10g = TMR_IDX_10G;
6074 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
6075 t4_pktc_idx_10g = PKTC_IDX_10G;
6077 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
6078 t4_tmr_idx_1g = TMR_IDX_1G;
6080 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
6081 t4_pktc_idx_1g = PKTC_IDX_1G;
6083 if (t4_qsize_txq < 128)
6086 if (t4_qsize_rxq < 128)
6088 while (t4_qsize_rxq & 7)
6091 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
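/*
 * Note (an addition for clarity): each adjustment above only takes effect when
 * the corresponding t4_* variable was left at its default, so an administrator
 * can pin any of these values with the driver's loader tunables (set in
 * /boot/loader.conf before the module loads).  The tunable declarations
 * themselves appear earlier in this file.
 */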
6095 t4_mod_event(module_t mod, int cmd, void *arg)
6102 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
6103 SLIST_INIT(&t4_list);
6105 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
6106 SLIST_INIT(&t4_uld_list);
6113 mtx_lock(&t4_uld_list_lock);
6114 if (!SLIST_EMPTY(&t4_uld_list)) {
6116 mtx_unlock(&t4_uld_list_lock);
6119 mtx_unlock(&t4_uld_list_lock);
6120 mtx_destroy(&t4_uld_list_lock);
6122 mtx_lock(&t4_list_lock);
6123 if (!SLIST_EMPTY(&t4_list)) {
6125 mtx_unlock(&t4_list_lock);
6128 mtx_unlock(&t4_list_lock);
6129 mtx_destroy(&t4_list_lock);
6136 static devclass_t t4_devclass;
6137 static devclass_t cxgbe_devclass;
6139 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
6140 MODULE_VERSION(t4nex, 1);
6142 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
6143 MODULE_VERSION(cxgbe, 1);