2 * Copyright (c) 2011 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
34 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
/*
 * Newbus and character-device glue for the T4/T5 adapters and their ports.
 * NOTE(review): this extract is missing lines (driver name fields,
 * DEVMETHOD_END terminators, remaining cdevsw members) — verify against
 * the full source before relying on the exact contents of these tables.
 */
70 /* T4 bus driver interface */
71 static int t4_probe(device_t);
72 static int t4_attach(device_t);
73 static int t4_detach(device_t);
74 static device_method_t t4_methods[] = {
75 DEVMETHOD(device_probe, t4_probe),
76 DEVMETHOD(device_attach, t4_attach),
77 DEVMETHOD(device_detach, t4_detach),
/* softc for the bus driver is the full adapter state */
81 static driver_t t4_driver = {
84 sizeof(struct adapter)
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93 DEVMETHOD(device_probe, cxgbe_probe),
94 DEVMETHOD(device_attach, cxgbe_attach),
95 DEVMETHOD(device_detach, cxgbe_detach),
/* softc for a port child device is the per-port state */
98 static driver_t cxgbe_driver = {
101 sizeof(struct port_info)
/* /dev node entry points (nexus char device used by cxgbetool) */
104 static d_ioctl_t t4_ioctl;
105 static d_open_t t4_open;
106 static d_close_t t4_close;
108 static struct cdevsw t4_cdevsw = {
109 .d_version = D_VERSION,
/*
 * T5 reuses the T4 attach/detach methods; only probe differs
 * (different PCI device IDs).
 */
117 /* T5 bus driver interface */
118 static int t5_probe(device_t);
119 static device_method_t t5_methods[] = {
120 DEVMETHOD(device_probe, t5_probe),
121 DEVMETHOD(device_attach, t4_attach),
122 DEVMETHOD(device_detach, t4_detach),
126 static driver_t t5_driver = {
129 sizeof(struct adapter)
133 /* T5 port (cxl) interface */
134 static driver_t cxl_driver = {
137 sizeof(struct port_info)
140 static struct cdevsw t5_cdevsw = {
141 .d_version = D_VERSION,
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
/* malloc(9) type used for all driver allocations */
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
160 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161 * then ADAPTER_LOCK, then t4_uld_list_lock.
/* Global list of all attached adapters, protected by t4_list_lock. */
163 static struct mtx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
/* Global list of registered upper-layer drivers (TOE etc.). */
166 static struct mtx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
171 * Tunables. See tweak_tunables() too.
173 * Each tunable is set to a default value here if it's known at compile-time.
174 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175 * provide a reasonable default when the driver is loaded.
177 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
178 * T5 are under hw.cxl.
182 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
/* -1 = let tweak_tunables() pick a default at load time. */
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
/* Offload (TOE) queue counts; the *_10G/_1G macros look like the
 * compile-time defaults used by tweak_tunables() — confirm in full source. */
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
219 * Holdoff parameters for 10G and 1G ports.
/* Interrupt holdoff timer/packet-count indices (copied into each port
 * in t4_attach; -1 pktc index means "disabled"). */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
/* NOTE(review): the #define for TMR_IDX_1G is not visible in this extract. */
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
238 * Size (# of entries) of each tx and rx queue.
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
247 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
/* All three interrupt types allowed by default; MSI-X preferred. */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
253 * Configuration file.
255 #define DEFAULT_CF "default"
256 #define FLASH_CF "flash"
257 #define UWIRE_CF "uwire"
258 #define FPGA_CF "fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
263 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264 * encouraged respectively).
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
270 * ASIC features that will be used. Disable the ones you don't want so that the
271 * chip resources aren't wasted on features that will not be used.
273 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
/* -1 = decided later (tweak_tunables), based on TOE support. */
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
/* T5-only: write-combined doorbell BAR access, off by default. */
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
/*
 * Result of interrupt/queue sizing (filled in by cfg_itype_and_nqueues
 * and consumed by t4_attach).
 */
294 struct intrs_and_queues {
295 int intr_type; /* INTx, MSI, or MSI-X */
296 int nirq; /* Number of vectors */
298 int ntxq10g; /* # of NIC txq's for each 10G port */
299 int nrxq10g; /* # of NIC rxq's for each 10G port */
300 int ntxq1g; /* # of NIC txq's for each 1G port */
301 int nrxq1g; /* # of NIC rxq's for each 1G port */
303 int nofldtxq10g; /* # of TOE txq's for each 10G port */
304 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
305 int nofldtxq1g; /* # of TOE txq's for each 1G port */
306 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
/*
 * Software state for one hardware filter.
 * NOTE(review): extract is missing some members/closing lines of this struct.
 */
310 struct filter_entry {
311 uint32_t valid:1; /* filter allocated and valid */
312 uint32_t locked:1; /* filter is administratively locked */
313 uint32_t pending:1; /* filter action is pending firmware reply */
314 uint32_t smtidx:8; /* Source MAC Table index for smac */
315 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
317 struct t4_filter_specification fs;
/* Bit flags selecting which MAC settings update_mac_settings() applies. */
321 XGMAC_MTU = (1 << 0),
322 XGMAC_PROMISC = (1 << 1),
323 XGMAC_ALLMULTI = (1 << 2),
324 XGMAC_VLANEX = (1 << 3),
325 XGMAC_UCADDR = (1 << 4),
326 XGMAC_MCADDRS = (1 << 5),
/*
 * Forward declarations for all file-local helpers: BAR mapping, memory
 * windows, interrupt/queue configuration, firmware bring-up, port
 * init/teardown, sysctl handlers, filter management, and ioctl backends.
 */
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
337 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338 static uint32_t position_memwin(struct adapter *, int, uint32_t);
339 static int cfg_itype_and_nqueues(struct adapter *, int, int,
340 struct intrs_and_queues *);
341 static int prep_firmware(struct adapter *);
342 static int partition_resources(struct adapter *, const struct firmware *,
344 static int get_params__pre_init(struct adapter *);
345 static int get_params__post_init(struct adapter *);
346 static int set_params__post_init(struct adapter *);
347 static void t4_set_desc(struct adapter *);
348 static void build_medialist(struct port_info *);
349 static int update_mac_settings(struct port_info *, int);
350 static int cxgbe_init_synchronized(struct port_info *);
351 static int cxgbe_uninit_synchronized(struct port_info *);
352 static int setup_intr_handlers(struct adapter *);
353 static int adapter_full_init(struct adapter *);
354 static int adapter_full_uninit(struct adapter *);
355 static int port_full_init(struct port_info *);
356 static int port_full_uninit(struct port_info *);
357 static void quiesce_eq(struct adapter *, struct sge_eq *);
358 static void quiesce_iq(struct adapter *, struct sge_iq *);
359 static void quiesce_fl(struct adapter *, struct sge_fl *);
360 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361 driver_intr_t *, void *, char *);
362 static int t4_free_irq(struct adapter *, struct irq *);
363 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
365 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366 static void cxgbe_tick(void *);
367 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
/* Default (catch-all) handlers installed in t4_attach for unclaimed
 * CPL messages, async notifications, and firmware messages. */
368 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
370 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371 static int fw_msg_not_handled(struct adapter *, const __be64 *);
372 static int t4_sysctls(struct adapter *);
373 static int cxgbe_sysctls(struct port_info *);
374 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
377 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
378 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
379 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
380 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
381 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
383 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
384 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
385 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
387 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
388 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
389 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
390 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
391 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
392 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
393 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
394 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
395 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
396 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
397 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
398 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
399 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
400 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
401 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
402 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
403 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
404 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
405 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
406 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
/* Filter mode / filter table manipulation used by the t4_ioctl backend. */
408 static inline void txq_start(struct ifnet *, struct sge_txq *);
409 static uint32_t fconf_to_mode(uint32_t);
410 static uint32_t mode_to_fconf(uint32_t);
411 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
412 static int get_filter_mode(struct adapter *, uint32_t *);
413 static int set_filter_mode(struct adapter *, uint32_t);
414 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
415 static int get_filter(struct adapter *, struct t4_filter *);
416 static int set_filter(struct adapter *, struct t4_filter *);
417 static int del_filter(struct adapter *, struct t4_filter *);
418 static void clear_filter(struct filter_entry *);
419 static int set_filter_wr(struct adapter *, int);
420 static int del_filter_wr(struct adapter *, int);
421 static int get_sge_context(struct adapter *, struct t4_sge_context *);
422 static int load_fw(struct adapter *, struct t4_data *);
423 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
424 static int read_i2c(struct adapter *, struct t4_i2c_data *);
426 static int toe_capability(struct port_info *, int);
428 static int mod_event(module_t, int, void *);
/*
 * PCI device ID -> description tables used by t4_probe()/t5_probe().
 * NOTE(review): the array declaration lines themselves are missing from
 * this extract; entries below belong to t4_pciids[] then t5_pciids[].
 */
434 {0xa000, "Chelsio Terminator 4 FPGA"},
435 {0x4400, "Chelsio T440-dbg"},
436 {0x4401, "Chelsio T420-CR"},
437 {0x4402, "Chelsio T422-CR"},
438 {0x4403, "Chelsio T440-CR"},
439 {0x4404, "Chelsio T420-BCH"},
440 {0x4405, "Chelsio T440-BCH"},
441 {0x4406, "Chelsio T440-CH"},
442 {0x4407, "Chelsio T420-SO"},
443 {0x4408, "Chelsio T420-CX"},
444 {0x4409, "Chelsio T420-BT"},
445 {0x440a, "Chelsio T404-BT"},
446 {0x440e, "Chelsio T440-LP-CR"},
448 {0xb000, "Chelsio Terminator 5 FPGA"},
449 {0x5400, "Chelsio T580-dbg"},
450 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
451 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
452 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
453 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
454 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
455 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
456 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
457 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
458 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
459 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
460 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
461 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
/* These T5 entries appear after a gap — presumably #ifdef'ed out or
 * commented in the full source; verify before assuming they probe. */
463 {0x5404, "Chelsio T520-BCH"},
464 {0x5405, "Chelsio T540-BCH"},
465 {0x5406, "Chelsio T540-CH"},
466 {0x5408, "Chelsio T520-CX"},
467 {0x540b, "Chelsio B520-SR"},
468 {0x540c, "Chelsio B504-BT"},
469 {0x540f, "Chelsio Amsterdam"},
470 {0x5413, "Chelsio T580-CHR"},
476 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be
477 * exactly the same for both rxq and ofld_rxq.
479 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
480 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
483 /* No easy way to include t4_msg.h before adapter.h so we check this way */
484 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
485 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
/*
 * Newbus probe for T4 adapters: match the Chelsio vendor ID against the
 * t4_pciids[] table and set the device description on a hit.
 * NOTE(review): extract is missing the return type line, the `int i;`
 * declaration, the ENXIO failure returns, and closing braces.
 */
488 t4_probe(device_t dev)
491 uint16_t v = pci_get_vendor(dev);
492 uint16_t d = pci_get_device(dev);
493 uint8_t f = pci_get_function(dev);
495 if (v != PCI_VENDOR_ID_CHELSIO)
498 /* Attach only to PF0 of the FPGA */
499 if (d == 0xa000 && f != 0)
502 for (i = 0; i < nitems(t4_pciids); i++) {
503 if (d == t4_pciids[i].device) {
504 device_set_desc(dev, t4_pciids[i].desc);
505 return (BUS_PROBE_DEFAULT);
/*
 * Newbus probe for T5 adapters — identical in structure to t4_probe()
 * but matches against t5_pciids[] and the 0xb000 T5 FPGA ID.
 * NOTE(review): same missing lines as t4_probe (return type, `int i;`,
 * failure returns, braces).
 */
513 t5_probe(device_t dev)
516 uint16_t v = pci_get_vendor(dev);
517 uint16_t d = pci_get_device(dev);
518 uint8_t f = pci_get_function(dev);
520 if (v != PCI_VENDOR_ID_CHELSIO)
523 /* Attach only to PF0 of the FPGA */
524 if (d == 0xb000 && f != 0)
527 for (i = 0; i < nitems(t5_pciids); i++) {
528 if (d == t5_pciids[i].device) {
529 device_set_desc(dev, t5_pciids[i].desc);
530 return (BUS_PROBE_DEFAULT);
/*
 * Adapter attach (shared by T4 and T5): enables PCI, initializes adapter
 * locks/lists, maps BARs, creates the nexus char device, brings up
 * firmware, sizes interrupts/queues, creates one child device per port,
 * and attaches the children.
 * NOTE(review): this extract has many lines elided (declarations of sc/v,
 * else branches, error checks, closing braces, the `done:` label) — do
 * not treat the visible sequence as complete control flow.
 */
538 t4_attach(device_t dev)
541 int rc = 0, i, n10g, n1g, rqidx, tqidx;
542 struct intrs_and_queues iaq;
545 int ofld_rqidx, ofld_tqidx;
548 sc = device_get_softc(dev);
/* Enable bus mastering and, on PCIe, bump MRRS and relaxed ordering. */
551 pci_enable_busmaster(dev);
552 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
555 pci_set_max_read_req(dev, 4096);
556 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
557 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
558 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
/* Adapter lock + registration on the global t4_list. */
561 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
562 device_get_nameunit(dev));
563 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
564 mtx_lock(&t4_list_lock);
565 SLIST_INSERT_HEAD(&t4_list, sc, link);
566 mtx_unlock(&t4_list_lock);
/* State for the starving-freelist refill callout. */
568 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
569 TAILQ_INIT(&sc->sfl);
570 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
572 rc = map_bars_0_and_4(sc);
574 goto done; /* error message displayed already */
577 * This is the real PF# to which we're attaching. Works from within PCI
578 * passthrough environments too, where pci_get_function() could return a
579 * different PF# depending on the passthrough configuration. We need to
580 * use the real PF# in all our communication with the firmware.
582 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
/* Install catch-all message handlers before any interrupts can fire. */
585 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
586 sc->an_handler = an_not_handled;
587 for (i = 0; i < nitems(sc->cpl_handler); i++)
588 sc->cpl_handler[i] = cpl_not_handled;
589 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
590 sc->fw_msg_handler[i] = fw_msg_not_handled;
591 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
592 t4_init_sge_cpl_handlers(sc);
594 /* Prepare the adapter for operation */
595 rc = -t4_prep_adapter(sc);
597 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
602 * Do this really early, with the memory windows set up even before the
603 * character device. The userland tool's register i/o and mem read
604 * will work even in "recovery mode".
607 sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
608 device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
609 device_get_nameunit(dev));
610 if (sc->cdev == NULL)
611 device_printf(dev, "failed to create nexus char device.\n");
613 sc->cdev->si_drv1 = sc;
615 /* Go no further if recovery mode has been requested. */
616 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
617 device_printf(dev, "recovery mode.\n");
621 /* Prepare the firmware for operation */
622 rc = prep_firmware(sc);
624 goto done; /* error message displayed already */
626 rc = get_params__post_init(sc);
628 goto done; /* error message displayed already */
630 rc = set_params__post_init(sc);
632 goto done; /* error message displayed already */
636 goto done; /* error message displayed already */
638 rc = t4_create_dma_tag(sc);
640 goto done; /* error message displayed already */
643 * First pass over all the ports - allocate VIs and initialize some
644 * basic parameters like mac address, port type, etc. We also figure
645 * out whether a port is 10G or 1G and use that information when
646 * calculating how many interrupts to attempt to allocate.
649 for_each_port(sc, i) {
650 struct port_info *pi;
652 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
655 /* These must be set before t4_port_init */
659 /* Allocate the vi and initialize parameters like mac addr */
660 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
662 device_printf(dev, "unable to initialize port %d: %d\n",
669 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
670 device_get_nameunit(dev), i);
671 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
/* Seed per-port holdoff/queue-size defaults from the tunables. */
673 if (is_10G_port(pi) || is_40G_port(pi)) {
675 pi->tmr_idx = t4_tmr_idx_10g;
676 pi->pktc_idx = t4_pktc_idx_10g;
679 pi->tmr_idx = t4_tmr_idx_1g;
680 pi->pktc_idx = t4_pktc_idx_1g;
683 pi->xact_addr_filt = -1;
686 pi->qsize_rxq = t4_qsize_rxq;
687 pi->qsize_txq = t4_qsize_txq;
/* Child device name is "cxgbe" on T4 hardware, "cxl" on T5. */
689 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
690 if (pi->dev == NULL) {
692 "failed to add device for port %d.\n", i);
696 device_set_softc(pi->dev, pi);
700 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
702 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
704 goto done; /* error message displayed already */
706 sc->intr_type = iaq.intr_type;
707 sc->intr_count = iaq.nirq;
708 sc->flags |= iaq.intr_flags;
/* Totals for the SGE: NIC queues, then offload queues if TOE-capable. */
711 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
712 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
713 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
714 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
715 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
718 if (is_offload(sc)) {
720 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
721 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
722 s->neq += s->nofldtxq + s->nofldrxq;
723 s->niq += s->nofldrxq;
725 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
726 M_CXGBE, M_ZERO | M_WAITOK);
727 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
728 M_CXGBE, M_ZERO | M_WAITOK);
/* Queue arrays and iq/eq lookup maps (M_WAITOK flags elided in extract). */
732 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
734 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
736 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
738 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
740 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
743 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
746 t4_init_l2t(sc, M_WAITOK);
749 * Second pass over the ports. This time we know the number of rx and
750 * tx queues that each port should get.
754 ofld_rqidx = ofld_tqidx = 0;
756 for_each_port(sc, i) {
757 struct port_info *pi = sc->port[i];
762 pi->first_rxq = rqidx;
763 pi->first_txq = tqidx;
764 if (is_10G_port(pi) || is_40G_port(pi)) {
765 pi->nrxq = iaq.nrxq10g;
766 pi->ntxq = iaq.ntxq10g;
768 pi->nrxq = iaq.nrxq1g;
769 pi->ntxq = iaq.ntxq1g;
776 if (is_offload(sc)) {
777 pi->first_ofld_rxq = ofld_rqidx;
778 pi->first_ofld_txq = ofld_tqidx;
779 if (is_10G_port(pi) || is_40G_port(pi)) {
780 pi->nofldrxq = iaq.nofldrxq10g;
781 pi->nofldtxq = iaq.nofldtxq10g;
783 pi->nofldrxq = iaq.nofldrxq1g;
784 pi->nofldtxq = iaq.nofldtxq1g;
786 ofld_rqidx += pi->nofldrxq;
787 ofld_tqidx += pi->nofldtxq;
792 rc = setup_intr_handlers(sc);
795 "failed to setup interrupt handlers: %d\n", rc);
799 rc = bus_generic_attach(dev);
802 "failed to attach all child ports: %d\n", rc);
/* Attach summary banner. */
807 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
808 sc->params.pci.width, sc->params.nports, sc->intr_count,
809 sc->intr_type == INTR_MSIX ? "MSI-X" :
810 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
811 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
816 if (rc != 0 && sc->cdev) {
817 /* cdev was created and so cxgbetool works; recover that way. */
819 "error during attach, adapter is now in recovery mode.\n");
/*
 * Adapter detach: tears down in roughly reverse order of t4_attach —
 * char device, child port devices, IRQs, VIs, firmware session, PCI
 * resources, L2T, queue arrays, and finally the locks/list membership.
 * NOTE(review): extract elides declarations, NULL checks, and braces.
 */
835 t4_detach(device_t dev)
838 struct port_info *pi;
841 sc = device_get_softc(dev);
843 if (sc->flags & FULL_INIT_DONE)
847 destroy_dev(sc->cdev);
851 rc = bus_generic_detach(dev);
854 "failed to detach child devices: %d\n", rc);
858 for (i = 0; i < sc->intr_count; i++)
859 t4_free_irq(sc, &sc->irq[i]);
/* Per-port teardown: free the VI, delete the child, destroy its lock. */
861 for (i = 0; i < MAX_NPORTS; i++) {
864 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
866 device_delete_child(dev, pi->dev);
868 mtx_destroy(&pi->pi_lock);
873 if (sc->flags & FULL_INIT_DONE)
874 adapter_full_uninit(sc);
/* Say goodbye to the firmware only if we ever got it running. */
876 if (sc->flags & FW_OK)
877 t4_fw_bye(sc, sc->mbox);
879 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
880 pci_release_msi(dev);
/* Release the memory BARs (regs, user doorbells, MSI-X table). */
883 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
887 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
891 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
895 t4_free_l2t(sc->l2t);
898 free(sc->sge.ofld_rxq, M_CXGBE);
899 free(sc->sge.ofld_txq, M_CXGBE);
901 free(sc->irq, M_CXGBE);
902 free(sc->sge.rxq, M_CXGBE);
903 free(sc->sge.txq, M_CXGBE);
904 free(sc->sge.ctrlq, M_CXGBE);
905 free(sc->sge.iqmap, M_CXGBE);
906 free(sc->sge.eqmap, M_CXGBE);
907 free(sc->tids.ftid_tab, M_CXGBE);
908 t4_destroy_dma_tag(sc);
/* Unlink from the global adapter list before destroying the lock. */
909 if (mtx_initialized(&sc->sc_lock)) {
910 mtx_lock(&t4_list_lock);
911 SLIST_REMOVE(&t4_list, sc, adapter, link);
912 mtx_unlock(&t4_list_lock);
913 mtx_destroy(&sc->sc_lock);
916 if (mtx_initialized(&sc->tids.ftid_lock))
917 mtx_destroy(&sc->tids.ftid_lock);
918 if (mtx_initialized(&sc->sfl_lock))
919 mtx_destroy(&sc->sfl_lock);
/* Scrub the softc so a re-attach starts from a clean slate. */
921 bzero(sc, sizeof(*sc));
/*
 * Port (cxgbe/cxl child) probe: always succeeds with a "port N"
 * description — the parent only creates children for real ports.
 */
928 cxgbe_probe(device_t dev)
931 struct port_info *pi = device_get_softc(dev);
933 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
934 device_set_desc_copy(dev, buf);
936 return (BUS_PROBE_DEFAULT);
/* Interface capabilities advertised (and enabled by default) on each port. */
939 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
940 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
941 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
942 #define T4_CAP_ENABLE (T4_CAP)
/*
 * Port attach: allocate and configure the ifnet, set up ifmedia, hook the
 * VLAN config event, and call ether_ifattach().
 * NOTE(review): extract elides several lines (ifp NULL handling, media
 * status callback argument, sysctl setup, return).
 */
945 cxgbe_attach(device_t dev)
947 struct port_info *pi = device_get_softc(dev);
950 /* Allocate an ifnet and set it up */
951 ifp = if_alloc(IFT_ETHER);
953 device_printf(dev, "Cannot allocate ifnet\n");
959 callout_init(&pi->tick, CALLOUT_MPSAFE);
961 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
962 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
964 ifp->if_init = cxgbe_init;
965 ifp->if_ioctl = cxgbe_ioctl;
966 ifp->if_transmit = cxgbe_transmit;
967 ifp->if_qflush = cxgbe_qflush;
969 ifp->if_capabilities = T4_CAP;
/* Advertise TOE only on offload-capable adapters. */
971 if (is_offload(pi->adapter))
972 ifp->if_capabilities |= IFCAP_TOE;
974 ifp->if_capenable = T4_CAP_ENABLE;
975 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
976 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
978 /* Initialize ifmedia for this port */
979 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
983 pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
984 EVENTHANDLER_PRI_ANY);
986 ether_ifattach(ifp, pi->hw_addr);
/* Report the queue layout chosen in t4_attach. */
989 if (is_offload(pi->adapter)) {
991 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
992 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
995 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
/*
 * Port detach: marks the port as going away (waking anyone sleeping in
 * a synchronized op), deregisters the VLAN hook, stops the tick callout,
 * quiesces the hardware, and detaches the ifnet.
 * NOTE(review): extract elides the flag-setting/locking lines around the
 * mtx_sleep and the trailing frees/return.
 */
1003 cxgbe_detach(device_t dev)
1005 struct port_info *pi = device_get_softc(dev);
1006 struct adapter *sc = pi->adapter;
1007 struct ifnet *ifp = pi->ifp;
1009 /* Tell if_ioctl and if_init that the port is going away */
/* Waits here until in-progress synchronized operations drain. */
1014 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1017 sc->last_op = "t4detach";
1018 sc->last_op_thr = curthread;
1023 EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1026 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1027 callout_stop(&pi->tick);
1029 callout_drain(&pi->tick);
1031 /* Let detach proceed even if these fail. */
1032 cxgbe_uninit_synchronized(pi);
1033 port_full_uninit(pi);
1035 ifmedia_removeall(&pi->media);
1036 ether_ifdetach(pi->ifp);
/*
 * if_init handler: runs the real initialization under the adapter's
 * synchronized-operation protocol; silently returns if the op can't begin.
 */
1048 cxgbe_init(void *arg)
1050 struct port_info *pi = arg;
1051 struct adapter *sc = pi->adapter;
1053 if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1055 cxgbe_init_synchronized(pi);
1056 end_synchronized_op(sc, 0);
/*
 * if_ioctl handler: MTU changes, interface flags, multicast updates,
 * capability toggles, media queries. Each hardware-touching case is
 * bracketed by begin/end_synchronized_op.
 * NOTE(review): extract elides the switch statement itself, several case
 * labels, rc checks after begin_synchronized_op, else branches, and the
 * final return — read the visible lines as excerpts, not full flow.
 */
1060 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1062 int rc = 0, mtu, flags;
1063 struct port_info *pi = ifp->if_softc;
1064 struct adapter *sc = pi->adapter;
1065 struct ifreq *ifr = (struct ifreq *)data;
/* SIOCSIFMTU (case label elided): validate, then resize fl buffers. */
1071 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1074 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1078 if (pi->flags & PORT_INIT_DONE) {
1079 t4_update_fl_bufsize(ifp);
1080 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1081 rc = update_mac_settings(pi, XGMAC_MTU);
1083 end_synchronized_op(sc, 0);
/* SIOCSIFFLAGS (case label elided): bring up/down, sync promisc/allmulti. */
1087 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1091 if (ifp->if_flags & IFF_UP) {
1092 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1093 flags = pi->if_flags;
1094 if ((ifp->if_flags ^ flags) &
1095 (IFF_PROMISC | IFF_ALLMULTI)) {
1096 rc = update_mac_settings(pi,
1097 XGMAC_PROMISC | XGMAC_ALLMULTI);
1100 rc = cxgbe_init_synchronized(pi);
1101 pi->if_flags = ifp->if_flags;
1102 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1103 rc = cxgbe_uninit_synchronized(pi);
1104 end_synchronized_op(sc, 0);
1108 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1109 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1112 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1113 rc = update_mac_settings(pi, XGMAC_MCADDRS);
1114 end_synchronized_op(sc, LOCK_HELD);
/* SIOCSIFCAP (case label elided): toggle capabilities one bit at a time. */
1118 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1122 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1123 if (mask & IFCAP_TXCSUM) {
1124 ifp->if_capenable ^= IFCAP_TXCSUM;
1125 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
/* TSO requires tx checksum; drop TSO4 if txcsum was just disabled. */
1127 if (IFCAP_TSO4 & ifp->if_capenable &&
1128 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1129 ifp->if_capenable &= ~IFCAP_TSO4;
1131 "tso4 disabled due to -txcsum.\n");
1134 if (mask & IFCAP_TXCSUM_IPV6) {
1135 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1136 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1138 if (IFCAP_TSO6 & ifp->if_capenable &&
1139 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1140 ifp->if_capenable &= ~IFCAP_TSO6;
1142 "tso6 disabled due to -txcsum6.\n");
1145 if (mask & IFCAP_RXCSUM)
1146 ifp->if_capenable ^= IFCAP_RXCSUM;
1147 if (mask & IFCAP_RXCSUM_IPV6)
1148 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1151 * Note that we leave CSUM_TSO alone (it is always set). The
1152 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1153 * sending a TSO request our way, so it's sufficient to toggle
1156 if (mask & IFCAP_TSO4) {
1157 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1158 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1159 if_printf(ifp, "enable txcsum first.\n");
1163 ifp->if_capenable ^= IFCAP_TSO4;
1165 if (mask & IFCAP_TSO6) {
1166 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1167 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1168 if_printf(ifp, "enable txcsum6 first.\n");
1172 ifp->if_capenable ^= IFCAP_TSO6;
/* LRO toggle is propagated to every NIC rx queue's flag. */
1174 if (mask & IFCAP_LRO) {
1175 #if defined(INET) || defined(INET6)
1177 struct sge_rxq *rxq;
1179 ifp->if_capenable ^= IFCAP_LRO;
1180 for_each_rxq(pi, i, rxq) {
1181 if (ifp->if_capenable & IFCAP_LRO)
1182 rxq->iq.flags |= IQ_LRO_ENABLED;
1184 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1189 if (mask & IFCAP_TOE) {
1190 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1192 rc = toe_capability(pi, enable);
1196 ifp->if_capenable ^= mask;
1199 if (mask & IFCAP_VLAN_HWTAGGING) {
1200 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1201 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1202 rc = update_mac_settings(pi, XGMAC_VLANEX);
1204 if (mask & IFCAP_VLAN_MTU) {
1205 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1207 /* Need to find out how to disable auto-mtu-inflation */
1209 if (mask & IFCAP_VLAN_HWTSO)
1210 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1211 if (mask & IFCAP_VLAN_HWCSUM)
1212 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1214 #ifdef VLAN_CAPABILITIES
1215 VLAN_CAPABILITIES(ifp);
1218 end_synchronized_op(sc, 0);
/* SIOCSIFMEDIA/SIOCGIFMEDIA (case labels elided). */
1223 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
/* default: hand anything else to the generic ethernet ioctl. */
1227 rc = ether_ioctl(ifp, cmd, data);
/*
 * if_transmit method: queue (or directly transmit) an mbuf on one of the
 * port's tx queues.  Selects the txq from the mbuf's flowid when present.
 * NOTE(review): this extract is missing lines (drops, early-returns, lock
 * release paths) between the visible statements — do not read it as complete.
 */
1234 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1236 struct port_info *pi = ifp->if_softc;
1237 struct adapter *sc = pi->adapter;
1238 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1239 struct buf_ring *br;
/* Drop early if the link is down (missing branch body not shown here). */
1244 if (__predict_false(pi->link_cfg.link_ok == 0)) {
/* Spread flows across the port's txqs using the mbuf's flow id. */
1249 if (m->m_flags & M_FLOWID)
1250 txq += (m->m_pkthdr.flowid % pi->ntxq);
/* Couldn't get the txq lock: enqueue on the buf_ring instead of spinning. */
1253 if (TXQ_TRYLOCK(txq) == 0) {
1254 struct sge_eq *eq = &txq->eq;
1257 * It is possible that t4_eth_tx finishes up and releases the
1258 * lock between the TRYLOCK above and the drbr_enqueue here. We
1259 * need to make sure that this mbuf doesn't just sit there in
1263 rc = drbr_enqueue(ifp, br, m);
/* Arm a one-tick callout so the enqueued mbuf gets picked up soon. */
1264 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1265 !(eq->flags & EQ_DOOMED))
1266 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1271 * txq->m is the mbuf that is held up due to a temporary shortage of
1272 * resources and it should be put on the wire first. Then what's in
1273 * drbr and finally the mbuf that was just passed in to us.
1275 * Return code should indicate the fate of the mbuf that was passed in
1279 TXQ_LOCK_ASSERT_OWNED(txq);
/* Ordering: held mbuf (txq->m) first, then drbr backlog, then this mbuf. */
1280 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1282 /* Queued for transmission. */
1284 rc = drbr_enqueue(ifp, br, m);
1285 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1286 (void) t4_eth_tx(ifp, txq, m);
1291 /* Direct transmission. */
1292 rc = t4_eth_tx(ifp, txq, m);
1293 if (rc != 0 && txq->m)
1294 rc = 0; /* held, will be transmitted soon (hopefully) */
/*
 * if_qflush method: discard every mbuf still sitting on the software
 * buf_rings of this port's tx queues.
 */
1301 cxgbe_qflush(struct ifnet *ifp)
1303 struct port_info *pi = ifp->if_softc;
1304 struct sge_txq *txq;
1308 /* queues do not exist if !PORT_INIT_DONE. */
1309 if (pi->flags & PORT_INIT_DONE) {
1310 for_each_txq(pi, i, txq) {
/* Drain the ring; each dequeued mbuf is freed (free call lost in extract). */
1314 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
/*
 * ifmedia change callback.  Deliberately unimplemented: logs a message and
 * returns EOPNOTSUPP to the caller.
 */
1323 cxgbe_media_change(struct ifnet *ifp)
1325 struct port_info *pi = ifp->if_softc;
1327 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1329 return (EOPNOTSUPP);
/*
 * ifmedia status callback: report link validity/activity and translate the
 * port's current speed into the matching IFM_* active media word.
 */
1333 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1335 struct port_info *pi = ifp->if_softc;
1336 struct ifmedia_entry *cur = pi->media.ifm_cur;
1337 int speed = pi->link_cfg.speed;
/* ifm_data encodes (port_type << 8 | mod_type); rebuild the media list if
 * the transceiver module has changed since the list was last built. */
1338 int data = (pi->port_type << 8) | pi->mod_type;
1340 if (cur->ifm_data != data) {
1341 build_medialist(pi);
1342 cur = pi->media.ifm_cur;
1345 ifmr->ifm_status = IFM_AVALID;
/* NOTE(review): an early return on !link_ok is missing from this extract. */
1346 if (!pi->link_cfg.link_ok)
1349 ifmr->ifm_status |= IFM_ACTIVE;
1351 /* active and current will differ iff current media is autoselect. */
1352 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1355 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
/* Map link speed to the corresponding ifmedia subtype. */
1356 if (speed == SPEED_10000)
1357 ifmr->ifm_active |= IFM_10G_T;
1358 else if (speed == SPEED_1000)
1359 ifmr->ifm_active |= IFM_1000_T;
1360 else if (speed == SPEED_100)
1361 ifmr->ifm_active |= IFM_100_TX;
1362 else if (speed == SPEED_10)
1363 ifmr->ifm_active |= IFM_10_T;
/* Link reported up but speed is not one of the known values: panic. */
1365 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
/*
 * Fatal-error handler: stop the SGE (clear GLOBALENABLE), mask all
 * interrupts, and log an emergency message.  The adapter is dead after this.
 */
1370 t4_fatal_err(struct adapter *sc)
1372 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1373 t4_intr_disable(sc);
1374 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1375 device_get_nameunit(sc->dev));
/*
 * Map PCI BAR0 (register space, also the kernel doorbell region) and BAR4
 * (MSI-X table).  Returns non-zero on failure (error paths trimmed here).
 */
1379 map_bars_0_and_4(struct adapter *sc)
1381 sc->regs_rid = PCIR_BAR(0);
1382 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1383 &sc->regs_rid, RF_ACTIVE);
1384 if (sc->regs_res == NULL) {
1385 device_printf(sc->dev, "cannot map registers.\n");
/* Cache bus tag/handle/size for the register window. */
1388 sc->bt = rman_get_bustag(sc->regs_res);
1389 sc->bh = rman_get_bushandle(sc->regs_res);
1390 sc->mmio_len = rman_get_size(sc->regs_res);
/* BAR0 carries the kernel doorbell; record its availability. */
1391 setbit(&sc->doorbells, DOORBELL_KDB);
1393 sc->msix_rid = PCIR_BAR(4);
1394 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1395 &sc->msix_rid, RF_ACTIVE);
1396 if (sc->msix_res == NULL) {
1397 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
/*
 * Map PCI BAR2 (userspace doorbells).  Skipped entirely on T4 when RDMA is
 * disabled.  On i386/amd64 with T5, additionally tries to enable write
 * combining on the region for faster doorbell/WC writes.
 */
1405 map_bar_2(struct adapter *sc)
1409 * T4: only iWARP driver uses the userspace doorbells. There is no need
1410 * to map it if RDMA is disabled.
1412 if (is_t4(sc) && sc->rdmacaps == 0)
1415 sc->udbs_rid = PCIR_BAR(2);
1416 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1417 &sc->udbs_rid, RF_ACTIVE);
1418 if (sc->udbs_res == NULL) {
1419 device_printf(sc->dev, "cannot map doorbell BAR.\n");
1422 sc->udbs_base = rman_get_virtual(sc->udbs_res);
1425 setbit(&sc->doorbells, DOORBELL_UDB);
1426 #if defined(__i386__) || defined(__amd64__)
1427 if (t5_write_combine) {
1431 * Enable write combining on BAR2. This is the
1432 * userspace doorbell BAR and is split into 128B
1433 * (UDBS_SEG_SIZE) doorbell regions, each associated
1434 * with an egress queue. The first 64B has the doorbell
1435 * and the second 64B can be used to submit a tx work
1436 * request with an implicit doorbell.
/* Change the page attributes of the whole mapped region to WC. */
1439 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1440 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
/* On success: plain UDB is replaced by the write-combined variants. */
1442 clrbit(&sc->doorbells, DOORBELL_UDB);
1443 setbit(&sc->doorbells, DOORBELL_WCWR);
1444 setbit(&sc->doorbells, DOORBELL_UDBWC);
1446 device_printf(sc->dev,
1447 "couldn't enable write combining: %d\n",
/* Configure SGE stats source/mode (T5 path — guard lost in extract). */
1451 t4_write_reg(sc, A_SGE_STAT_CFG,
1452 V_STATSOURCE_T5(7) | V_STATMODE(0));
/*
 * Memory window layouts (base address, aperture size) — one table per chip
 * generation.  Window 2's base/aperture differ between T4 and T5.
 */
1460 static const struct memwin t4_memwin[] = {
1461 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1462 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1463 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1466 static const struct memwin t5_memwin[] = {
1467 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1468 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1469 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
/*
 * Program the chip's PCIe memory-window decoders from the per-chip memwin
 * table.  T4 windows are absolute (BAR0 base + offset); T5 uses offsets
 * relative to the BAR.
 */
1473 setup_memwin(struct adapter *sc)
1475 const struct memwin *mw;
1481 * Read low 32b of bar0 indirectly via the hardware backdoor
1482 * mechanism. Works from within PCI passthrough environments
1483 * too, where rman_get_start() can return a different value. We
1484 * need to program the T4 memory window decoders with the actual
1485 * addresses that will be coming across the PCIe link.
1487 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1488 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1491 n = nitems(t4_memwin);
1493 /* T5 uses the relative offset inside the PCIe BAR */
1497 n = nitems(t5_memwin);
/* Write base|BIR|log2(aperture)-10 into each window's decoder register. */
1500 for (i = 0; i < n; i++, mw++) {
1502 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1503 (mw->base + bar0) | V_BIR(0) |
1504 V_WINDOW(ilog2(mw->aperture) - 10));
/* Read back window 2 to flush the writes (posted-write ordering). */
1508 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1512 * Verify that the memory range specified by the addr/len pair is valid and lies
1513 * entirely within a single region (EDCx or MCx).
/*
 * Validate that [addr, addr+len) is 4B-aligned, non-empty, and lies wholly
 * inside exactly one enabled memory region (EDRAM0, EDRAM1, EXT_MEM, or —
 * on T5+ — EXT_MEM1).  Success returns lost in extract; tail not shown.
 */
1516 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1518 uint32_t em, addr_len, maddr, mlen;
1520 /* Memory can only be accessed in naturally aligned 4 byte units */
1521 if (addr & 3 || len & 3 || len == 0)
1524 /* Enabled memories */
1525 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
/* Each region's BAR register packs base and size in 1MB (<<20) units. */
1526 if (em & F_EDRAM0_ENABLE) {
1527 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1528 maddr = G_EDRAM0_BASE(addr_len) << 20;
1529 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1530 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1531 addr + len <= maddr + mlen)
1534 if (em & F_EDRAM1_ENABLE) {
1535 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1536 maddr = G_EDRAM1_BASE(addr_len) << 20;
1537 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1538 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1539 addr + len <= maddr + mlen)
1542 if (em & F_EXT_MEM_ENABLE) {
1543 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1544 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1545 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1546 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1547 addr + len <= maddr + mlen)
/* EXT_MEM1 exists only on T5 and later chips. */
1550 if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1551 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1552 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1553 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1554 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1555 addr + len <= maddr + mlen)
1563 * Verify that the memory range specified by the memtype/offset/len pair is
1564 * valid and lies entirely within the memtype specified. The global address of
1565 * the start of the range is returned in addr.
/*
 * Validate (mtype, off, len) against the named memory region and, on
 * success, return the global chip address of the range via *addr.
 * NOTE(review): the switch statement and its case labels for the four
 * memory types were dropped from this extract; only the case bodies remain.
 */
1568 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1571 uint32_t em, addr_len, maddr, mlen;
1573 /* Memory can only be accessed in naturally aligned 4 byte units */
1574 if (off & 3 || len & 3 || len == 0)
1577 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
/* EDRAM0 case: region must be enabled; base/size are in 1MB units. */
1580 if (!(em & F_EDRAM0_ENABLE))
1582 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1583 maddr = G_EDRAM0_BASE(addr_len) << 20;
1584 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1587 if (!(em & F_EDRAM1_ENABLE))
1589 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1590 maddr = G_EDRAM1_BASE(addr_len) << 20;
1591 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1594 if (!(em & F_EXT_MEM_ENABLE))
1596 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1597 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1598 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
/* EXT_MEM1: not present on T4. */
1601 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1603 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1604 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1605 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
/* Range check within the selected region; compute the global address. */
1611 if (mlen > 0 && off < mlen && off + len <= mlen) {
1612 *addr = maddr + off; /* global address */
/*
 * Return the base address and/or aperture of memory window 'win', looked up
 * in the chip-appropriate table (t4_memwin vs t5_memwin).
 */
1620 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1622 const struct memwin *mw;
1625 KASSERT(win >= 0 && win < nitems(t4_memwin),
1626 ("%s: incorrect memwin# (%d)", __func__, win));
1627 mw = &t4_memwin[win];
1629 KASSERT(win >= 0 && win < nitems(t5_memwin),
1630 ("%s: incorrect memwin# (%d)", __func__, win));
1631 mw = &t5_memwin[win];
/* Both out-params are optional (base check lost in extract). */
1636 if (aperture != NULL)
1637 *aperture = mw->aperture;
1641 * Positions the memory window such that it can be used to access the specified
1642 * address in the chip's address space. The return value is the offset of addr
1643 * from the start of the window.
/*
 * Slide memory window n so that 'addr' is accessible through it; return the
 * offset of addr from the (aligned) window start.  Alignment differs by
 * chip: 16B on one path, 128B (plus PFNUM) on the other.
 */
1646 position_memwin(struct adapter *sc, int n, uint32_t addr)
1651 KASSERT(n >= 0 && n <= 3,
1652 ("%s: invalid window %d.", __func__, n));
1653 KASSERT((addr & 3) == 0,
1654 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1658 start = addr & ~0xf; /* start must be 16B aligned */
/* Other chip generation: 128B alignment and the PF number in the register. */
1660 pf = V_PFNUM(sc->pf);
1661 start = addr & ~0x7f; /* start must be 128B aligned */
1663 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1665 t4_write_reg(sc, reg, start | pf);
/* Read back to flush the posted write before the window is used. */
1666 t4_read_reg(sc, reg);
1668 return (addr - start);
/*
 * Decide the interrupt type (MSI-X > MSI > INTx, filtered by t4_intr_types)
 * and the number of queues/vectors, trying progressively less desirable
 * layouts until one fits the vectors the system can provide.  Fills *iaq.
 */
1672 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1673 struct intrs_and_queues *iaq)
1675 int rc, itype, navail, nrxq10g, nrxq1g, n;
1676 int nofldrxq10g = 0, nofldrxq1g = 0;
1678 bzero(iaq, sizeof(*iaq));
/* Start from the tunable defaults. */
1680 iaq->ntxq10g = t4_ntxq10g;
1681 iaq->ntxq1g = t4_ntxq1g;
1682 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1683 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1685 if (is_offload(sc)) {
1686 iaq->nofldtxq10g = t4_nofldtxq10g;
1687 iaq->nofldtxq1g = t4_nofldtxq1g;
1688 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1689 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
/* Walk interrupt types from best (MSI-X) to worst (INTx). */
1693 for (itype = INTR_MSIX; itype; itype >>= 1) {
1695 if ((itype & t4_intr_types) == 0)
1696 continue; /* not allowed */
1698 if (itype == INTR_MSIX)
1699 navail = pci_msix_count(sc->dev);
1700 else if (itype == INTR_MSI)
1701 navail = pci_msi_count(sc->dev);
1708 iaq->intr_type = itype;
1709 iaq->intr_flags = 0;
1712 * Best option: an interrupt vector for errors, one for the
1713 * firmware event queue, and one each for each rxq (NIC as well
1716 iaq->nirq = T4_EXTRA_INTR;
1717 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1718 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
/* MSI requires a power-of-2 vector count. */
1719 if (iaq->nirq <= navail &&
1720 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1721 iaq->intr_flags |= INTR_DIRECT;
1726 * Second best option: an interrupt vector for errors, one for
1727 * the firmware event queue, and one each for either NIC or
1730 iaq->nirq = T4_EXTRA_INTR;
1731 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1732 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1733 if (iaq->nirq <= navail &&
1734 (itype != INTR_MSI || powerof2(iaq->nirq)))
1738 * Next best option: an interrupt vector for errors, one for the
1739 * firmware event queue, and at least one per port. At this
1740 * point we know we'll have to downsize nrxq or nofldrxq to fit
1741 * what's available to us.
1743 iaq->nirq = T4_EXTRA_INTR;
1744 iaq->nirq += n10g + n1g;
1745 if (iaq->nirq <= navail) {
1746 int leftover = navail - iaq->nirq;
/* Distribute leftover vectors to 10G ports first, doubling n each pass. */
1749 int target = max(nrxq10g, nofldrxq10g);
1752 while (n < target && leftover >= n10g) {
1757 iaq->nrxq10g = min(n, nrxq10g);
1760 iaq->nofldrxq10g = min(n, nofldrxq10g);
/* Then the same for 1G ports. */
1765 int target = max(nrxq1g, nofldrxq1g);
1768 while (n < target && leftover >= n1g) {
1773 iaq->nrxq1g = min(n, nrxq1g);
1776 iaq->nofldrxq1g = min(n, nofldrxq1g);
1780 if (itype != INTR_MSI || powerof2(iaq->nirq))
1785 * Least desirable option: one interrupt vector for everything.
1787 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1790 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
/* Actually allocate the vectors chosen above. */
1796 if (itype == INTR_MSIX)
1797 rc = pci_alloc_msix(sc->dev, &navail);
1798 else if (itype == INTR_MSI)
1799 rc = pci_alloc_msi(sc->dev, &navail);
1802 if (navail == iaq->nirq)
1806 * Didn't get the number requested. Use whatever number
1807 * the kernel is willing to allocate (it's in navail).
1809 device_printf(sc->dev, "fewer vectors than requested, "
1810 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1811 itype, iaq->nirq, navail);
/* Release and retry the sizing loop with the smaller navail. */
1812 pci_release_msi(sc->dev);
1816 device_printf(sc->dev,
1817 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1818 itype, rc, iaq->nirq, navail);
/* All interrupt types exhausted. */
1821 device_printf(sc->dev,
1822 "failed to find a usable interrupt type. "
1823 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1824 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
/*
 * Build a packed 32-bit firmware version word (major/minor/micro/build) and
 * an interface-version accessor from the per-chip (T4/T5) macro constants.
 */
1829 #define FW_VERSION(chip) ( \
1830 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1831 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1832 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1833 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1834 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
/*
 * Per-chip firmware information table entries: KLD/module names, chip id,
 * expected firmware version, and the interface versions the driver was
 * compiled against.  NOTE(review): the array declaration and entry braces
 * were dropped from this extract.
 */
1840 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
1844 .kld_name = "t4fw_cfg",
1845 .fw_mod_name = "t4fw",
1847 .chip = FW_HDR_CHIP_T4,
1848 .fw_ver = htobe32_const(FW_VERSION(T4)),
1849 .intfver_nic = FW_INTFVER(T4, NIC),
1850 .intfver_vnic = FW_INTFVER(T4, VNIC),
1851 .intfver_ofld = FW_INTFVER(T4, OFLD),
1852 .intfver_ri = FW_INTFVER(T4, RI),
1853 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1854 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
1855 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1856 .intfver_fcoe = FW_INTFVER(T4, FCOE),
1860 .kld_name = "t5fw_cfg",
1861 .fw_mod_name = "t5fw",
1863 .chip = FW_HDR_CHIP_T5,
1864 .fw_ver = htobe32_const(FW_VERSION(T5)),
1865 .intfver_nic = FW_INTFVER(T5, NIC),
1866 .intfver_vnic = FW_INTFVER(T5, VNIC),
1867 .intfver_ofld = FW_INTFVER(T5, OFLD),
1868 .intfver_ri = FW_INTFVER(T5, RI),
1869 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1870 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
1871 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1872 .intfver_fcoe = FW_INTFVER(T5, FCOE),
/*
 * Linear lookup of the fw_info entry matching 'chip'; returns NULL when no
 * entry matches (fall-through return lost in extract).
 */
1877 static struct fw_info *
1878 find_fw_info(int chip)
1882 for (i = 0; i < nitems(fw_info); i++) {
1883 if (fw_info[i].chip == chip)
1884 return (&fw_info[i]);
1890 * Is the given firmware API compatible with the one the driver was compiled
/*
 * Decide whether two firmware headers are API-compatible: either the exact
 * same chip+version, or the same chip with every interface version equal.
 */
1894 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1897 /* short circuit if it's the exact same firmware version */
1898 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1902 * XXX: Is this too conservative? Perhaps I should limit this to the
1903 * features that are supported in the driver.
1905 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1906 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1907 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1908 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1916 * The firmware in the KLD is usable, but should it be installed? This routine
1917 * explains itself in detail if it indicates the KLD firmware should be
/*
 * Decide whether the KLD-bundled firmware (version k) should replace the
 * firmware on the card (version c), honoring the t4_fw_install tunable
 * (0 = never, 2 = force if different).  Prints its reasoning.
 */
1921 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1925 if (!card_fw_usable) {
1926 reason = "incompatible or unusable";
1931 reason = "older than the version bundled with this driver";
1935 if (t4_fw_install == 2 && k != c) {
1936 reason = "different than the version bundled with this driver";
/* Install wanted, but the tunable forbids it: explain and refuse. */
1943 if (t4_fw_install == 0) {
1944 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1945 "but the driver is prohibited from installing a different "
1946 "firmware on the card.\n",
1947 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1948 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
/* Announce the upgrade that is about to happen. */
1953 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1954 "installing firmware %u.%u.%u.%u on card.\n",
1955 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1956 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1957 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1958 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1963 * Establish contact with the firmware and determine if we are the master driver
1964 * or not, and whether we are responsible for chip initialization.
/*
 * Contact the firmware, determine master/initialization responsibility,
 * possibly flash the KLD-bundled firmware onto the card, reset, read early
 * parameters, and (if master on an uninitialized device) partition
 * resources and initialize the firmware.
 */
1967 prep_firmware(struct adapter *sc)
1969 const struct firmware *fw = NULL, *default_cfg;
1970 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1971 enum dev_state state;
1972 struct fw_info *fw_info;
1973 struct fw_hdr *card_fw; /* fw on the card */
1974 const struct fw_hdr *kld_fw; /* fw in the KLD */
1975 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
1978 /* Contact firmware. */
1979 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1980 if (rc < 0 || state == DEV_STATE_ERR) {
1982 device_printf(sc->dev,
1983 "failed to connect to the firmware: %d, %d.\n", rc, state);
/* Became master (condition line lost in extract). */
1988 sc->flags |= MASTER_PF;
1989 else if (state == DEV_STATE_UNINIT) {
1991 * We didn't get to be the master so we definitely won't be
1992 * configuring the chip. It's a bug if someone else hasn't
1993 * configured it already.
1995 device_printf(sc->dev, "couldn't be master(%d), "
1996 "device not already initialized either(%d).\n", rc, state);
2000 /* This is the firmware whose headers the driver was compiled against */
2001 fw_info = find_fw_info(chip_id(sc));
2002 if (fw_info == NULL) {
2003 device_printf(sc->dev,
2004 "unable to look up firmware information for chip %d.\n",
2008 drv_fw = &fw_info->fw_hdr;
2011 * The firmware KLD contains many modules. The KLD name is also the
2012 * name of the module that contains the default config file.
2014 default_cfg = firmware_get(fw_info->kld_name);
2016 /* Read the header of the firmware on the card */
2017 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2018 rc = -t4_read_flash(sc, FLASH_FW_START,
2019 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2021 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2023 device_printf(sc->dev,
2024 "Unable to read card's firmware header: %d\n", rc);
2028 /* This is the firmware in the KLD */
2029 fw = firmware_get(fw_info->fw_mod_name);
2031 kld_fw = (const void *)fw->data;
2032 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
/* Decide: keep card firmware, or flash the KLD firmware. */
2038 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2039 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2041 * Common case: the firmware on the card is an exact match and
2042 * the KLD is an exact match too, or the KLD is
2043 * absent/incompatible. Note that t4_fw_install = 2 is ignored
2044 * here -- use cxgbetool loadfw if you want to reinstall the
2045 * same firmware as the one on the card.
2047 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2048 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2049 be32toh(card_fw->fw_ver))) {
2051 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2053 device_printf(sc->dev,
2054 "failed to install firmware: %d\n", rc);
2058 /* Installed successfully, update the cached header too. */
2059 memcpy(card_fw, kld_fw, sizeof(*card_fw));
2061 need_fw_reset = 0; /* already reset as part of load_fw */
2064 if (!card_fw_usable) {
/* No usable firmware anywhere: dump all three version numbers and bail. */
2067 d = ntohl(drv_fw->fw_ver);
2068 c = ntohl(card_fw->fw_ver);
2069 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2071 device_printf(sc->dev, "Cannot find a usable firmware: "
2072 "fw_install %d, chip state %d, "
2073 "driver compiled with %d.%d.%d.%d, "
2074 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2075 t4_fw_install, state,
2076 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2077 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2078 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2079 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2080 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2081 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2086 /* We're using whatever's on the card and it's known to be good. */
2087 sc->params.fw_vers = ntohl(card_fw->fw_ver);
2088 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2089 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2090 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2091 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2092 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2093 t4_get_tp_version(sc, &sc->params.tp_vers);
/* Reset the firmware unless the upgrade above already did so. */
2096 if (need_fw_reset &&
2097 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2098 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2099 if (rc != ETIMEDOUT && rc != EIO)
2100 t4_fw_bye(sc, sc->mbox);
2105 rc = get_params__pre_init(sc);
2107 goto done; /* error message displayed already */
2109 /* Partition adapter resources as specified in the config file. */
2110 if (state == DEV_STATE_UNINIT) {
2112 KASSERT(sc->flags & MASTER_PF,
2113 ("%s: trying to change chip settings when not master.",
2116 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2118 goto done; /* error message displayed already */
2120 t4_tweak_chip_settings(sc);
2122 /* get basic stuff going */
2123 rc = -t4_fw_initialize(sc, sc->mbox);
2125 device_printf(sc->dev, "fw init failed: %d.\n", rc);
/* Not master: record which PF's config we are running under. */
2129 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
/* Common cleanup: free header copy, release firmware module refs. */
2134 free(card_fw, M_CXGBE);
2136 firmware_put(fw, FIRMWARE_UNLOAD);
2137 if (default_cfg != NULL)
2138 firmware_put(default_cfg, FIRMWARE_UNLOAD);
/*
 * Shorthand for building firmware query/set parameter ids in the DEV and
 * PFVF mnemonic spaces.
 */
2143 #define FW_PARAM_DEV(param) \
2144 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2145 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2146 #define FW_PARAM_PFVF(param) \
2147 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2148 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2151 * Partition chip resources for use between various PFs, VFs, etc.
/*
 * Select a configuration file (tunable, card-specific override, KLD module,
 * or the copy on the card's flash), upload it to card memory through a
 * memory window if needed, then issue FW_CAPS_CONFIG_CMD to pre-process it
 * and write back the capability limits the driver will actually use.
 */
2154 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2155 const char *name_prefix)
2157 const struct firmware *cfg = NULL;
2159 struct fw_caps_config_cmd caps;
2160 uint32_t mtype, moff, finicsum, cfcsum;
2163 * Figure out what configuration file to use. Pick the default config
2164 * file for the card if the user hasn't specified one explicitly.
2166 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2167 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2168 /* Card specific overrides go here. */
2169 if (pci_get_device(sc->dev) == 0x440a)
2170 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2172 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2176 * We need to load another module if the profile is anything except
2177 * "default" or "flash".
2179 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2180 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2183 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2184 cfg = firmware_get(s);
/* Module missing: fall back to default config, else to flash. */
2186 if (default_cfg != NULL) {
2187 device_printf(sc->dev,
2188 "unable to load module \"%s\" for "
2189 "configuration profile \"%s\", will use "
2190 "the default config file instead.\n",
2192 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2195 device_printf(sc->dev,
2196 "unable to load module \"%s\" for "
2197 "configuration profile \"%s\", will use "
2198 "the config file on the card's flash "
2199 "instead.\n", s, sc->cfg_file);
2200 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2206 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2207 default_cfg == NULL) {
2208 device_printf(sc->dev,
2209 "default config file not available, will use the config "
2210 "file on the card's flash instead.\n");
2211 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
/* Anything other than the flash copy has to be uploaded to the card. */
2214 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2216 const uint32_t *cfdata;
2217 uint32_t param, val, addr, off, mw_base, mw_aperture;
2219 KASSERT(cfg != NULL || default_cfg != NULL,
2220 ("%s: no config to upload", __func__));
2223 * Ask the firmware where it wants us to upload the config file.
2225 param = FW_PARAM_DEV(CF);
2226 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2228 /* No support for config file? Shouldn't happen. */
2229 device_printf(sc->dev,
2230 "failed to query config file location: %d.\n", rc);
2233 mtype = G_FW_PARAMS_PARAM_Y(val);
2234 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2237 * XXX: sheer laziness. We deliberately added 4 bytes of
2238 * useless stuffing/comments at the end of the config file so
2239 * it's ok to simply throw away the last remaining bytes when
2240 * the config file is not an exact multiple of 4. This also
2241 * helps with the validate_mt_off_len check.
2244 cflen = cfg->datasize & ~3;
2247 cflen = default_cfg->datasize & ~3;
2248 cfdata = default_cfg->data;
2251 if (cflen > FLASH_CFG_MAX_SIZE) {
2252 device_printf(sc->dev,
2253 "config file too long (%d, max allowed is %d). "
2254 "Will try to use the config on the card, if any.\n",
2255 cflen, FLASH_CFG_MAX_SIZE);
2256 goto use_config_on_flash;
2259 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2261 device_printf(sc->dev,
2262 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
2263 "Will try to use the config on the card, if any.\n",
2264 __func__, mtype, moff, cflen, rc);
2265 goto use_config_on_flash;
/* Copy the config file 4 bytes at a time through memory window 2. */
2268 memwin_info(sc, 2, &mw_base, &mw_aperture);
2270 off = position_memwin(sc, 2, addr);
2271 n = min(cflen, mw_aperture - off);
2272 for (i = 0; i < n; i += 4)
2273 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2278 use_config_on_flash:
2279 mtype = FW_MEMTYPE_CF_FLASH;
2280 moff = t4_flash_cfg_addr(sc);
/* Ask firmware to pre-process (READ) the config at (mtype, moff). */
2283 bzero(&caps, sizeof(caps));
2284 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2285 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2286 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2287 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2288 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2289 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2291 device_printf(sc->dev,
2292 "failed to pre-process config file: %d "
2293 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
/* Sanity check: firmware's checksum of the file vs the file's own. */
2297 finicsum = be32toh(caps.finicsum);
2298 cfcsum = be32toh(caps.cfcsum);
2299 if (finicsum != cfcsum) {
2300 device_printf(sc->dev,
2301 "WARNING: config file checksum mismatch: %08x %08x\n",
2304 sc->cfcsum = cfcsum;
/* Clamp each capability word by its *_allowed tunable and cache it. */
2306 #define LIMIT_CAPS(x) do { \
2307 caps.x &= htobe16(t4_##x##_allowed); \
2308 sc->x = htobe16(caps.x); \
2312 * Let the firmware know what features will (not) be used so it can tune
2313 * things accordingly.
2315 LIMIT_CAPS(linkcaps);
2316 LIMIT_CAPS(niccaps);
2317 LIMIT_CAPS(toecaps);
2318 LIMIT_CAPS(rdmacaps);
2319 LIMIT_CAPS(iscsicaps);
2320 LIMIT_CAPS(fcoecaps);
/* Write the (possibly reduced) capabilities back to the firmware. */
2323 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2324 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2325 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2326 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2328 device_printf(sc->dev,
2329 "failed to process config file: %d.\n", rc);
2333 firmware_put(cfg, FIRMWARE_UNLOAD);
2338 * Retrieve parameters that are needed (or nice to have) very early.
/*
 * Query the handful of parameters needed before initialization: the port
 * vector, core clock, and the device-log location.  A devlog failure is
 * tolerated (logged, zeroed, rc forced to 0).
 */
2341 get_params__pre_init(struct adapter *sc)
2344 uint32_t param[2], val[2];
2345 struct fw_devlog_cmd cmd;
2346 struct devlog_params *dlog = &sc->params.devlog;
2348 param[0] = FW_PARAM_DEV(PORTVEC);
2349 param[1] = FW_PARAM_DEV(CCLK);
2350 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2352 device_printf(sc->dev,
2353 "failed to query parameters (pre_init): %d.\n", rc);
/* Port bitmap -> number of ports; core clock in kHz per VPD convention. */
2357 sc->params.portvec = val[0];
2358 sc->params.nports = bitcount32(val[0]);
2359 sc->params.vpd.cclk = val[1];
2361 /* Read device log parameters. */
2362 bzero(&cmd, sizeof(cmd));
2363 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2364 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2365 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2366 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2368 device_printf(sc->dev,
2369 "failed to get devlog parameters: %d.\n", rc);
2370 bzero(dlog, sizeof (*dlog));
2371 rc = 0; /* devlog isn't critical for device operation */
/* Success path: unpack memtype, start (16B units -> bytes), and size. */
2373 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2374 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2375 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2376 dlog->size = be32toh(cmd.memsize_devlog);
2383 * Retrieve various parameters that are of interest to the driver. The device
2384 * has been initialized by the firmware at this point.
/*
 * After firmware initialization, query queue/TID/L2T ranges, the card's
 * capabilities, and — when the relevant capability is present — the TOE,
 * RDMA, and iSCSI resource ranges.  Finishes with direct chip reads.
 */
2387 get_params__post_init(struct adapter *sc)
2390 uint32_t param[7], val[7];
2391 struct fw_caps_config_cmd caps;
2393 param[0] = FW_PARAM_PFVF(IQFLINT_START);
2394 param[1] = FW_PARAM_PFVF(EQ_START);
2395 param[2] = FW_PARAM_PFVF(FILTER_START);
2396 param[3] = FW_PARAM_PFVF(FILTER_END);
2397 param[4] = FW_PARAM_PFVF(L2T_START);
2398 param[5] = FW_PARAM_PFVF(L2T_END);
2399 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2401 device_printf(sc->dev,
2402 "failed to query parameters (post_init): %d.\n", rc);
/* Ranges are inclusive: size = end - start + 1. */
2406 sc->sge.iq_start = val[0];
2407 sc->sge.eq_start = val[1];
2408 sc->tids.ftid_base = val[2];
2409 sc->tids.nftids = val[3] - val[2] + 1;
2410 sc->vres.l2t.start = val[4];
2411 sc->vres.l2t.size = val[5] - val[4] + 1;
2412 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2413 ("%s: L2 table size (%u) larger than expected (%u)",
2414 __func__, sc->vres.l2t.size, L2T_SIZE));
2416 /* get capabilites */
2417 bzero(&caps, sizeof(caps));
2418 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2419 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2420 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2421 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2423 device_printf(sc->dev,
2424 "failed to get card capabilities: %d.\n", rc);
2429 /* query offload-related parameters */
2430 param[0] = FW_PARAM_DEV(NTID);
2431 param[1] = FW_PARAM_PFVF(SERVER_START);
2432 param[2] = FW_PARAM_PFVF(SERVER_END);
2433 param[3] = FW_PARAM_PFVF(TDDP_START);
2434 param[4] = FW_PARAM_PFVF(TDDP_END);
2435 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2436 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2438 device_printf(sc->dev,
2439 "failed to query TOE parameters: %d.\n", rc);
2442 sc->tids.ntids = val[0];
2443 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2444 sc->tids.stid_base = val[1];
2445 sc->tids.nstids = val[2] - val[1] + 1;
2446 sc->vres.ddp.start = val[3];
2447 sc->vres.ddp.size = val[4] - val[3] + 1;
2448 sc->params.ofldq_wr_cred = val[5];
2449 sc->params.offload = 1;
2451 if (caps.rdmacaps) {
2452 param[0] = FW_PARAM_PFVF(STAG_START);
2453 param[1] = FW_PARAM_PFVF(STAG_END);
2454 param[2] = FW_PARAM_PFVF(RQ_START);
2455 param[3] = FW_PARAM_PFVF(RQ_END);
2456 param[4] = FW_PARAM_PFVF(PBL_START);
2457 param[5] = FW_PARAM_PFVF(PBL_END);
2458 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2460 device_printf(sc->dev,
2461 "failed to query RDMA parameters(1): %d.\n", rc);
2464 sc->vres.stag.start = val[0];
2465 sc->vres.stag.size = val[1] - val[0] + 1;
2466 sc->vres.rq.start = val[2];
2467 sc->vres.rq.size = val[3] - val[2] + 1;
2468 sc->vres.pbl.start = val[4];
2469 sc->vres.pbl.size = val[5] - val[4] + 1;
2471 param[0] = FW_PARAM_PFVF(SQRQ_START);
2472 param[1] = FW_PARAM_PFVF(SQRQ_END);
2473 param[2] = FW_PARAM_PFVF(CQ_START);
2474 param[3] = FW_PARAM_PFVF(CQ_END);
2475 param[4] = FW_PARAM_PFVF(OCQ_START);
2476 param[5] = FW_PARAM_PFVF(OCQ_END);
/* NOTE(review): this call passes 0,0,0 for mbox/pf/vf unlike every other
 * t4_query_params call here (sc->mbox, sc->pf, 0) — confirm intentional. */
2477 rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
2479 device_printf(sc->dev,
2480 "failed to query RDMA parameters(2): %d.\n", rc);
2483 sc->vres.qp.start = val[0];
2484 sc->vres.qp.size = val[1] - val[0] + 1;
2485 sc->vres.cq.start = val[2];
2486 sc->vres.cq.size = val[3] - val[2] + 1;
2487 sc->vres.ocq.start = val[4];
2488 sc->vres.ocq.size = val[5] - val[4] + 1;
2490 if (caps.iscsicaps) {
2491 param[0] = FW_PARAM_PFVF(ISCSI_START);
2492 param[1] = FW_PARAM_PFVF(ISCSI_END);
2493 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2495 device_printf(sc->dev,
2496 "failed to query iSCSI parameters: %d.\n", rc);
2499 sc->vres.iscsi.start = val[0];
2500 sc->vres.iscsi.size = val[1] - val[0] + 1;
2504 * We've got the params we wanted to query via the firmware. Now grab
2505 * some others directly from the chip.
2507 rc = t4_read_chip_settings(sc);
2513 set_params__post_init(struct adapter *sc)
2515 uint32_t param, val;
2517 /* ask for encapsulated CPLs */
2518 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2520 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
2525 #undef FW_PARAM_PFVF
2529 t4_set_desc(struct adapter *sc)
2532 struct adapter_params *p = &sc->params;
2534 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2535 p->vpd.id, is_offload(sc) ? "R" : "", chip_rev(sc), p->vpd.sn,
2538 device_set_desc_copy(sc->dev, buf);
/*
 * Rebuild the ifmedia list for a port from its port_type and the currently
 * plugged-in module type.  The (port_type << 8 | mod_type) word is stashed as
 * the media entry's data so a later media status query can tell whether the
 * module changed.
 *
 * NOTE(review): this excerpt is an elided listing — case 'break's, some
 * braces, and the FW_PORT_MOD_TYPE_NA/ER bodies are not visible here.
 */
2542 build_medialist(struct port_info *pi)
2544 struct ifmedia *media = &pi->media;
2549 ifmedia_removeall(media);
2551 m = IFM_ETHER | IFM_FDX;
2552 data = (pi->port_type << 8) | pi->mod_type;
2554 switch(pi->port_type) {
/* 10GBASE-T ports: a single fixed media entry. */
2555 case FW_PORT_TYPE_BT_XFI:
2556 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2559 case FW_PORT_TYPE_BT_XAUI:
2560 ifmedia_add(media, m | IFM_10G_T, data, NULL);
/* SGMII copper: tri-speed with autonegotiation as the default. */
2563 case FW_PORT_TYPE_BT_SGMII:
2564 ifmedia_add(media, m | IFM_1000_T, data, NULL);
2565 ifmedia_add(media, m | IFM_100_TX, data, NULL);
2566 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2567 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2570 case FW_PORT_TYPE_CX4:
2571 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2572 ifmedia_set(media, m | IFM_10G_CX4);
/* 10G pluggable ports: media depends on the inserted module. */
2575 case FW_PORT_TYPE_SFP:
2576 case FW_PORT_TYPE_FIBER_XFI:
2577 case FW_PORT_TYPE_FIBER_XAUI:
2578 switch (pi->mod_type) {
2580 case FW_PORT_MOD_TYPE_LR:
2581 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2582 ifmedia_set(media, m | IFM_10G_LR);
2585 case FW_PORT_MOD_TYPE_SR:
2586 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2587 ifmedia_set(media, m | IFM_10G_SR);
2590 case FW_PORT_MOD_TYPE_LRM:
2591 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2592 ifmedia_set(media, m | IFM_10G_LRM);
2595 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2596 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2597 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2598 ifmedia_set(media, m | IFM_10G_TWINAX);
/* Empty cage: report IFM_NONE rather than guessing a speed. */
2601 case FW_PORT_MOD_TYPE_NONE:
2603 ifmedia_add(media, m | IFM_NONE, data, NULL);
2604 ifmedia_set(media, m | IFM_NONE);
/* Unrecognized/unsupported module: log and fall back to IFM_UNKNOWN. */
2607 case FW_PORT_MOD_TYPE_NA:
2608 case FW_PORT_MOD_TYPE_ER:
2610 device_printf(pi->dev,
2611 "unknown port_type (%d), mod_type (%d)\n",
2612 pi->port_type, pi->mod_type);
2613 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2614 ifmedia_set(media, m | IFM_UNKNOWN);
/* 40G QSFP ports: same module-driven selection at 40G speeds. */
2619 case FW_PORT_TYPE_QSFP:
2620 switch (pi->mod_type) {
2622 case FW_PORT_MOD_TYPE_LR:
2623 ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2624 ifmedia_set(media, m | IFM_40G_LR4);
2627 case FW_PORT_MOD_TYPE_SR:
2628 ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2629 ifmedia_set(media, m | IFM_40G_SR4);
2632 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2633 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2634 ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2635 ifmedia_set(media, m | IFM_40G_CR4);
2638 case FW_PORT_MOD_TYPE_NONE:
2640 ifmedia_add(media, m | IFM_NONE, data, NULL);
2641 ifmedia_set(media, m | IFM_NONE);
2645 device_printf(pi->dev,
2646 "unknown port_type (%d), mod_type (%d)\n",
2647 pi->port_type, pi->mod_type);
2648 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2649 ifmedia_set(media, m | IFM_UNKNOWN);
/* Unknown port type: log it and expose a single IFM_UNKNOWN entry. */
2655 device_printf(pi->dev,
2656 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2658 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2659 ifmedia_set(media, m | IFM_UNKNOWN);
/* Multicast addresses are pushed to the firmware in chunks of this many. */
2666 #define FW_MAC_EXACT_CHUNK 7
2669 * Program the port's XGMAC based on parameters in ifnet. The caller also
2670 * indicates which parameters should be programmed (the rest are left alone).
2673 update_mac_settings(struct port_info *pi, int flags)
2676 struct ifnet *ifp = pi->ifp;
2677 struct adapter *sc = pi->adapter;
/* -1 means "leave this setting unchanged" in t4_set_rxmode. */
2678 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2680 ASSERT_SYNCHRONIZED_OP(sc);
2681 KASSERT(flags, ("%s: not told what to update.", __func__));
2683 if (flags & XGMAC_MTU)
2686 if (flags & XGMAC_PROMISC)
2687 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2689 if (flags & XGMAC_ALLMULTI)
2690 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2692 if (flags & XGMAC_VLANEX)
2693 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
/* Push MTU/promisc/allmulti/vlanex to the firmware in one rxmode call. */
2695 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2698 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
/* Update the exact-match filter for the interface's unicast address. */
2702 if (flags & XGMAC_UCADDR) {
2703 uint8_t ucaddr[ETHER_ADDR_LEN];
2705 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2706 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2707 ucaddr, true, true);
2710 if_printf(ifp, "change_mac failed: %d\n", rc);
/* On success t4_change_mac returns the filter index that was used. */
2713 pi->xact_addr_filt = rc;
/*
 * Reprogram the multicast filters: walk the ifnet's multicast list and
 * hand the addresses to the firmware FW_MAC_EXACT_CHUNK at a time; the
 * firmware accumulates a hash for addresses that don't fit exact-match.
 */
2718 if (flags & XGMAC_MCADDRS) {
2719 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2722 struct ifmultiaddr *ifma;
2725 if_maddr_rlock(ifp);
2726 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2727 if (ifma->ifma_addr->sa_family != AF_LINK)
2730 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
/* Chunk is full -- flush it to the firmware. */
2732 if (i == FW_MAC_EXACT_CHUNK) {
2733 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2734 del, i, mcaddr, NULL, &hash, 0);
2737 for (j = 0; j < i; j++) {
2739 "failed to add mc address"
2741 "%02x:%02x:%02x rc=%d\n",
2742 mcaddr[j][0], mcaddr[j][1],
2743 mcaddr[j][2], mcaddr[j][3],
2744 mcaddr[j][4], mcaddr[j][5],
/* Flush the final partial chunk, if any. */
2754 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2755 del, i, mcaddr, NULL, &hash, 0);
2758 for (j = 0; j < i; j++) {
2760 "failed to add mc address"
2762 "%02x:%02x:%02x rc=%d\n",
2763 mcaddr[j][0], mcaddr[j][1],
2764 mcaddr[j][2], mcaddr[j][3],
2765 mcaddr[j][4], mcaddr[j][5],
/* Finally install the accumulated hash filter. */
2772 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2774 if_printf(ifp, "failed to set mc address hash: %d", rc);
2776 if_maddr_runlock(ifp);
/*
 * Acquire the adapter's "synchronized operation" state: marks the adapter
 * busy so that configuration operations are serialized.  SLEEP_OK lets the
 * routine sleep waiting for the busy flag; HOLD_LOCK returns with the adapter
 * lock still held.  Pairs with end_synchronized_op().
 *
 * NOTE(review): this excerpt is heavily elided — the lock acquisition,
 * doom/busy checks, and return paths are not visible here.
 */
2783 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
/* With INVARIANTS this forces a sleep to catch callers that can't sleep. */
2789 /* the caller thinks it's ok to sleep, but is it really? */
2790 if (flags & SLEEP_OK)
2791 pause("t4slptst", 1);
/* A doomed port means detach is in progress -- fail the operation. */
2802 if (pi && IS_DOOMED(pi)) {
2812 if (!(flags & SLEEP_OK)) {
/* Sleep until the current owner releases the busy flag (interruptible). */
2817 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2823 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
/* Record who owns the op -- useful when debugging a stuck adapter. */
2826 sc->last_op = wmesg;
2827 sc->last_op_thr = curthread;
2831 if (!(flags & HOLD_LOCK) || rc)
/*
 * Release the synchronized-operation state taken by begin_synchronized_op().
 * LOCK_HELD indicates the caller still holds the adapter lock (i.e. it used
 * HOLD_LOCK when beginning the op).
 */
2838 end_synchronized_op(struct adapter *sc, int flags)
2841 if (flags & LOCK_HELD)
2842 ADAPTER_LOCK_ASSERT_OWNED(sc);
2846 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
/*
 * Bring a port up (ifconfig up): make sure the adapter and the port are
 * fully initialized, program the MAC, start the link, and enable the virtual
 * interface.  Must be called inside a synchronized op.
 */
2853 cxgbe_init_synchronized(struct port_info *pi)
2855 struct adapter *sc = pi->adapter;
2856 struct ifnet *ifp = pi->ifp;
2859 ASSERT_SYNCHRONIZED_OP(sc);
2861 if (isset(&sc->open_device_map, pi->port_id)) {
2862 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2863 ("mismatch between open_device_map and if_drv_flags"));
2864 return (0); /* already running */
/* First port up also triggers one-time adapter-wide initialization. */
2867 if (!(sc->flags & FULL_INIT_DONE) &&
2868 ((rc = adapter_full_init(sc)) != 0))
2869 return (rc); /* error message displayed already */
2871 if (!(pi->flags & PORT_INIT_DONE) &&
2872 ((rc = port_full_init(pi)) != 0))
2873 return (rc); /* error message displayed already */
2875 rc = update_mac_settings(pi, XGMAC_ALL);
2877 goto done; /* error message displayed already */
2879 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2881 if_printf(ifp, "start_link failed: %d\n", rc);
2885 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2887 if_printf(ifp, "enable_vi failed: %d\n", rc);
/* All committed: mark the port open and start the 1Hz tick. */
2892 setbit(&sc->open_device_map, pi->port_id);
2894 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2897 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
/* Error path (elided label): undo whatever was done above. */
2900 cxgbe_uninit_synchronized(pi);
/*
 * Take a port down (ifconfig down).  Only the VI is disabled; queues,
 * interrupts, and the tick survive so late traffic from the TP is handled.
 * Must be called inside a synchronized op.
 */
2909 cxgbe_uninit_synchronized(struct port_info *pi)
2911 struct adapter *sc = pi->adapter;
2912 struct ifnet *ifp = pi->ifp;
2915 ASSERT_SYNCHRONIZED_OP(sc);
2918 * Disable the VI so that all its data in either direction is discarded
2919 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2920 * tick) intact as the TP can deliver negative advice or data that it's
2921 * holding in its RAM (for an offloaded connection) even after the VI is
2924 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2926 if_printf(ifp, "disable_vi failed: %d\n", rc);
2930 clrbit(&sc->open_device_map, pi->port_id);
2932 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/* Report the link as down to the stack immediately. */
2935 pi->link_cfg.link_ok = 0;
2936 pi->link_cfg.speed = 0;
2938 t4_os_link_changed(sc, pi->port_id, 0, -1);
2944 * It is ok for this function to fail midway and return right away. t4_detach
2945 * will walk the entire sc->irq list and clean up whatever is valid.
/*
 * Allocate and wire up interrupt handlers according to the interrupt mode:
 * a single shared handler for INTx/1-vector, otherwise one vector for errors,
 * one for the firmware event queue, and one per directly-interrupting rx
 * queue (NIC or TOE, depending on INTR_DIRECT).
 */
2948 setup_intr_handlers(struct adapter *sc)
2953 struct port_info *pi;
2954 struct sge_rxq *rxq;
2956 struct sge_ofld_rxq *ofld_rxq;
/* rid 0 is the INTx resource; MSI/MSI-X rids start at 1. */
2963 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2964 if (sc->intr_count == 1) {
2965 KASSERT(!(sc->flags & INTR_DIRECT),
2966 ("%s: single interrupt && INTR_DIRECT?", __func__));
2968 rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2972 /* Multiple interrupts. */
2973 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
2974 ("%s: too few intr.", __func__));
2976 /* The first one is always error intr */
2977 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
2983 /* The second one is always the firmware event queue */
2984 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
2992 * Note that if INTR_DIRECT is not set then either the NIC rx
2993 * queues or (exclusive or) the TOE rx queueus will be taking
2994 * direct interrupts.
2996 * There is no need to check for is_offload(sc) as nofldrxq
2997 * will be 0 if offload is disabled.
2999 for_each_port(sc, p) {
3004 * Skip over the NIC queues if they aren't taking direct
3007 if (!(sc->flags & INTR_DIRECT) &&
3008 pi->nofldrxq > pi->nrxq)
3011 rxq = &sc->sge.rxq[pi->first_rxq];
3012 for (q = 0; q < pi->nrxq; q++, rxq++) {
/* Interrupt names are "<port>.<queue>" for NIC rx queues. */
3013 snprintf(s, sizeof(s), "%d.%d", p, q);
3014 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3024 * Skip over the offload queues if they aren't taking
3025 * direct interrupts.
3027 if (!(sc->flags & INTR_DIRECT))
3030 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3031 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
/* "," instead of "." distinguishes TOE queues from NIC queues. */
3032 snprintf(s, sizeof(s), "%d,%d", p, q);
3033 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
/*
 * One-time adapter-wide initialization done when the first port comes up:
 * set up the adapter queues and create the per-channel taskqueues.  On
 * failure everything is unwound via adapter_full_uninit().
 */
3048 adapter_full_init(struct adapter *sc)
3052 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3053 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3054 ("%s: FULL_INIT_DONE already", __func__));
3057 * queues that belong to the adapter (not any particular port).
3059 rc = t4_setup_adapter_queues(sc);
/* One single-threaded taskqueue per slot in sc->tq. */
3063 for (i = 0; i < nitems(sc->tq); i++) {
3064 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3065 taskqueue_thread_enqueue, &sc->tq[i]);
3066 if (sc->tq[i] == NULL) {
3067 device_printf(sc->dev,
3068 "failed to allocate task queue %d\n", i);
3072 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3073 device_get_nameunit(sc->dev), i);
3077 sc->flags |= FULL_INIT_DONE;
/* Error path (elided label): tear down whatever was set up above. */
3080 adapter_full_uninit(sc);
/*
 * Undo adapter_full_init(): tear down the adapter queues and free any
 * taskqueues that were created.  Safe to call on a partially initialized
 * adapter (the loop stops at the first NULL slot).
 */
3086 adapter_full_uninit(struct adapter *sc)
3090 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3092 t4_teardown_adapter_queues(sc);
3094 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3095 taskqueue_free(sc->tq[i]);
3099 sc->flags &= ~FULL_INIT_DONE;
/*
 * Per-port initialization done the first time the port is brought up:
 * create the port's queues and program the RSS indirection table with the
 * absolute IDs of its rx queues.  Must be called inside a synchronized op.
 */
3105 port_full_init(struct port_info *pi)
3107 struct adapter *sc = pi->adapter;
3108 struct ifnet *ifp = pi->ifp;
3110 struct sge_rxq *rxq;
3113 ASSERT_SYNCHRONIZED_OP(sc);
3114 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3115 ("%s: PORT_INIT_DONE already", __func__));
3117 sysctl_ctx_init(&pi->ctx);
3118 pi->flags |= PORT_SYSCTL_CTX;
3121 * Allocate tx/rx/fl queues for this port.
3123 rc = t4_setup_port_queues(pi);
3125 goto done; /* error message displayed already */
3128 * Setup RSS for this port.
3130 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
/* The table simply round-robins over all of the port's rx queues. */
3132 for_each_rxq(pi, i, rxq) {
3133 rss[i] = rxq->iq.abs_id;
3135 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3136 pi->rss_size, rss, pi->nrxq);
3139 if_printf(ifp, "rss_config failed: %d\n", rc);
3143 pi->flags |= PORT_INIT_DONE;
/* Error path (elided label): unwind the port initialization. */
3146 port_full_uninit(pi);
/*
 * Undo port_full_init(): quiesce every tx/rx queue belonging to the port
 * (NIC and TOE alike) and then tear the queues down.
 */
3155 port_full_uninit(struct port_info *pi)
3157 struct adapter *sc = pi->adapter;
3159 struct sge_rxq *rxq;
3160 struct sge_txq *txq;
3162 struct sge_ofld_rxq *ofld_rxq;
3163 struct sge_wrq *ofld_txq;
3166 if (pi->flags & PORT_INIT_DONE) {
3168 /* Need to quiesce queues. XXX: ctrl queues? */
3170 for_each_txq(pi, i, txq) {
3171 quiesce_eq(sc, &txq->eq);
3175 for_each_ofld_txq(pi, i, ofld_txq) {
3176 quiesce_eq(sc, &ofld_txq->eq);
3180 for_each_rxq(pi, i, rxq) {
3181 quiesce_iq(sc, &rxq->iq);
3182 quiesce_fl(sc, &rxq->fl);
3186 for_each_ofld_rxq(pi, i, ofld_rxq) {
3187 quiesce_iq(sc, &ofld_rxq->iq);
3188 quiesce_fl(sc, &ofld_rxq->fl);
3193 t4_teardown_port_queues(pi);
3194 pi->flags &= ~PORT_INIT_DONE;
/*
 * Quiesce an egress queue: doom it, wait out any pending credit flush, drain
 * its tx callout and task.  mtx_sleep on eq->eq_lock implies the eq lock is
 * held around the flag manipulation (the lock/unlock lines are elided from
 * this excerpt).
 */
3200 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3203 eq->flags |= EQ_DOOMED;
3206 * Wait for the response to a credit flush if one's
3209 while (eq->flags & EQ_CRFLUSHED)
3210 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3213 callout_drain(&eq->tx_callout); /* XXX: iffy */
3214 pause("callout", 10); /* Still iffy */
3216 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3220 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3222 (void) sc; /* unused */
3224 /* Synchronize with the interrupt handler */
3225 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
/*
 * Quiesce a freelist: doom it under sfl_lock so the starvation handler will
 * drop it, then drain the starvation callout and assert the fl is no longer
 * on the starving list.  (The FL lock/unlock lines around the flag update
 * are elided from this excerpt.)
 */
3230 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3232 mtx_lock(&sc->sfl_lock);
3234 fl->flags |= FL_DOOMED;
3236 mtx_unlock(&sc->sfl_lock);
3238 callout_drain(&sc->sfl_callout);
3239 KASSERT((fl->flags & FL_STARVING) == 0,
3240 ("%s: still starving", __func__));
/*
 * Allocate the IRQ resource for the given rid, install 'handler' on it, and
 * give the vector a human-readable description.  The result is recorded in
 * *irq so t4_free_irq can undo it.  (Error-return lines are elided from this
 * excerpt.)
 */
3244 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3245 driver_intr_t *handler, void *arg, char *name)
3250 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3251 RF_SHAREABLE | RF_ACTIVE);
3252 if (irq->res == NULL) {
3253 device_printf(sc->dev,
3254 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
/* Filter is NULL: 'handler' runs as an ithread handler. */
3258 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3259 NULL, handler, arg, &irq->tag);
3261 device_printf(sc->dev,
3262 "failed to setup interrupt for rid %d, name %s: %d\n",
/* Name shows up in vmstat -i / systat -vm output. */
3265 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3271 t4_free_irq(struct adapter *sc, struct irq *irq)
3274 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3276 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3278 bzero(irq, sizeof(*irq));
3284 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3287 uint32_t *p = (uint32_t *)(buf + start);
3289 for ( ; start <= end; start += sizeof(uint32_t))
3290 *p++ = t4_read_reg(sc, start);
/*
 * Fill a register dump for the T4_GET_REGS ioctl: pick the register-range
 * table that matches the chip generation (T4 vs T5) and dump each
 * [start, end] pair into the caller's buffer.  The range tables themselves
 * are elided from this excerpt.
 */
3294 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3297 const unsigned int *reg_ranges;
3298 static const unsigned int t4_reg_ranges[] = {
3517 static const unsigned int t5_reg_ranges[] = {
3957 reg_ranges = &t4_reg_ranges[0];
3958 n = nitems(t4_reg_ranges);
3960 reg_ranges = &t5_reg_ranges[0];
3961 n = nitems(t5_reg_ranges);
/* Version encodes chip id + revision so userland can parse the dump. */
3964 regs->version = chip_id(sc) | chip_rev(sc) << 10;
/* Ranges are stored as flat (start, end) pairs, hence the stride of 2. */
3965 for (i = 0; i < n; i += 2)
3966 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
/*
 * 1Hz per-port callout: pull the MPS port statistics from the hardware and
 * refresh the ifnet counters from them, then reschedule itself.  Stops
 * rescheduling once the interface is no longer running.
 */
3970 cxgbe_tick(void *arg)
3972 struct port_info *pi = arg;
3973 struct ifnet *ifp = pi->ifp;
3974 struct sge_txq *txq;
3976 struct port_stats *s = &pi->stats;
3979 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3981 return; /* without scheduling another callout */
3984 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
/* Pause frames are excluded from the packet/byte counts (64B each). */
3986 ifp->if_opackets = s->tx_frames - s->tx_pause;
3987 ifp->if_ipackets = s->rx_frames - s->rx_pause;
3988 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
3989 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
3990 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
3991 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
3992 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3993 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
/* Software drops come from the per-txq buf_ring, not the hardware. */
3997 for_each_txq(pi, i, txq)
3998 drops += txq->br->br_drops;
3999 ifp->if_snd.ifq_drops = drops;
4001 ifp->if_oerrors = s->tx_error_frames;
4002 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4003 s->rx_fcs_err + s->rx_len_err;
4005 callout_schedule(&pi->tick, hz);
4010 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4014 if (arg != ifp || ifp->if_type != IFT_ETHER)
4017 vlan = VLAN_DEVAT(ifp, vid);
4018 VLAN_SETCOOKIE(vlan, ifp);
4022 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4026 panic("%s: opcode 0x%02x on iq %p with payload %p",
4027 __func__, rss->opcode, iq, m);
4029 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4030 __func__, rss->opcode, iq, m);
4037 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4039 uintptr_t *loc, new;
4041 if (opcode >= nitems(sc->cpl_handler))
4044 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4045 loc = (uintptr_t *) &sc->cpl_handler[opcode];
4046 atomic_store_rel_ptr(loc, new);
4052 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4056 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4058 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4059 __func__, iq, ctrl);
4065 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4067 uintptr_t *loc, new;
4069 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4070 loc = (uintptr_t *) &sc->an_handler;
4071 atomic_store_rel_ptr(loc, new);
4077 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4079 const struct cpl_fw6_msg *cpl =
4080 __containerof(rpl, struct cpl_fw6_msg, data[0]);
4083 panic("%s: fw_msg type %d", __func__, cpl->type);
4085 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4091 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4093 uintptr_t *loc, new;
4095 if (type >= nitems(sc->fw_msg_handler))
4099 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4100 * handler dispatch table. Reject any attempt to install a handler for
4103 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4106 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4107 loc = (uintptr_t *) &sc->fw_msg_handler[type];
4108 atomic_store_rel_ptr(loc, new);
/*
 * Create the adapter-wide sysctl tree (dev.t4nex.X.*): basic identification,
 * capability bitfields, holdoff parameter lists, a hidden "misc" subtree of
 * diagnostic/log nodes, and (for offload-capable cards) TOE tunables.
 * The caps[] strings are snprintb-style bitfield formats for sysctl_bitfield.
 */
4114 t4_sysctls(struct adapter *sc)
4116 struct sysctl_ctx_list *ctx;
4117 struct sysctl_oid *oid;
4118 struct sysctl_oid_list *children, *c0;
4119 static char *caps[] = {
4120 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
4121 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
4122 "\20\1TOE", /* caps[2] toecaps */
4123 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
4124 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
4125 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4126 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4127 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
4129 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4131 ctx = device_get_sysctl_ctx(sc->dev);
/* c0 remembers the tree root so subtrees can be added after 'children'
 * is repointed at the misc/toe nodes below. */
4136 oid = device_get_sysctl_tree(sc->dev);
4137 c0 = children = SYSCTL_CHILDREN(oid);
4139 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4140 sc->params.nports, "# of ports");
4142 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4143 NULL, chip_rev(sc), "chip hardware revision");
4145 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4146 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4148 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4149 CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4151 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4152 sc->cfcsum, "config file checksum");
4154 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4155 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4156 sysctl_bitfield, "A", "available doorbells");
4158 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4159 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4160 sysctl_bitfield, "A", "available link capabilities");
4162 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4163 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4164 sysctl_bitfield, "A", "available NIC capabilities");
4166 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4167 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4168 sysctl_bitfield, "A", "available TCP offload capabilities");
4170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4171 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4172 sysctl_bitfield, "A", "available RDMA capabilities");
4174 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4175 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4176 sysctl_bitfield, "A", "available iSCSI capabilities");
4178 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4179 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4180 sysctl_bitfield, "A", "available FCoE capabilities");
4182 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4183 sc->params.vpd.cclk, "core clock frequency (in KHz)");
4185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4186 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4187 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4188 "interrupt holdoff timer values (us)");
4190 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4191 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4192 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4193 "interrupt holdoff packet counter values");
4195 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4196 NULL, sc->tids.nftids, "number of filters");
/* Diagnostic subtree; CTLFLAG_SKIP hides it from plain "sysctl -a". */
4200 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
4202 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4203 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4204 "logs and miscellaneous information");
4205 children = SYSCTL_CHILDREN(oid);
4207 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4208 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4209 sysctl_cctrl, "A", "congestion control");
/* CIM IBQ/OBQ nodes share one handler; arg2 selects the queue, with
 * OBQ indices offset by CIM_NUM_IBQ. */
4211 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4212 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4213 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4215 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4216 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4217 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4219 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4220 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4221 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4223 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4224 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4225 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4227 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4228 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4229 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4231 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4232 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4233 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4235 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4236 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4237 sysctl_cim_la, "A", "CIM logic analyzer");
4239 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4240 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4241 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4243 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4244 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4245 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4247 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4248 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4249 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4251 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4252 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4253 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4255 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4256 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4257 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4259 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4260 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4261 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4263 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4264 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4265 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
/* The two extra OBQs below exist on T5-class chips (guarded by an
 * elided conditional in the original source). */
4268 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4269 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4270 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4272 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4273 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4274 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4277 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4278 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4279 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4281 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4282 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4283 sysctl_cim_qcfg, "A", "CIM queue configuration");
4285 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4286 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4287 sysctl_cpl_stats, "A", "CPL statistics");
4289 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4290 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4291 sysctl_ddp_stats, "A", "DDP statistics");
4293 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4294 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4295 sysctl_devlog, "A", "firmware's device log");
4297 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4298 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4299 sysctl_fcoe_stats, "A", "FCoE statistics");
4301 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4302 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4303 sysctl_hw_sched, "A", "hardware scheduler ");
4305 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4306 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4307 sysctl_l2t, "A", "hardware L2 table");
4309 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4310 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4311 sysctl_lb_stats, "A", "loopback statistics");
4313 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4314 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4315 sysctl_meminfo, "A", "memory regions");
4317 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4318 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4319 sysctl_mps_tcam, "A", "MPS TCAM entries");
4321 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4322 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4323 sysctl_path_mtus, "A", "path MTUs");
4325 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4326 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4327 sysctl_pm_stats, "A", "PM statistics");
4329 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4330 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4331 sysctl_rdma_stats, "A", "RDMA statistics");
4333 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4334 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4335 sysctl_tcp_stats, "A", "TCP statistics");
4337 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4338 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4339 sysctl_tids, "A", "TID information");
4341 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4342 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4343 sysctl_tp_err_stats, "A", "TP error statistics");
4345 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4346 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4347 sysctl_tp_la, "A", "TP logic analyzer");
4349 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4350 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4351 sysctl_tx_rate, "A", "Tx rate");
4353 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4354 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4355 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4358 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4359 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4360 sysctl_wcwr_stats, "A", "write combined work requests");
/* TOE tunables only exist on offload-capable cards. */
4365 if (is_offload(sc)) {
4369 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4370 NULL, "TOE parameters");
4371 children = SYSCTL_CHILDREN(oid);
4373 sc->tt.sndbuf = 256 * 1024;
4374 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4375 &sc->tt.sndbuf, 0, "max hardware send buffer size");
4378 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4379 &sc->tt.ddp, 0, "DDP allowed");
/* Seed the tunables from the chip's current TP configuration. */
4381 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4382 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4383 &sc->tt.indsz, 0, "DDP max indicate size allowed");
4386 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4387 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4388 &sc->tt.ddp_thres, 0, "DDP threshold");
4390 sc->tt.rx_coalesce = 1;
4391 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4392 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4401 cxgbe_sysctls(struct port_info *pi)
4403 struct sysctl_ctx_list *ctx;
4404 struct sysctl_oid *oid;
4405 struct sysctl_oid_list *children;
4407 ctx = device_get_sysctl_ctx(pi->dev);
4412 oid = device_get_sysctl_tree(pi->dev);
4413 children = SYSCTL_CHILDREN(oid);
4415 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "linkdnrc", CTLFLAG_RD,
4416 &pi->linkdnrc, 0, "reason why link is down");
4417 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4418 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4419 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4420 "PHY temperature (in Celsius)");
4421 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4422 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4423 "PHY firmware version");
4425 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4426 &pi->nrxq, 0, "# of rx queues");
4427 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4428 &pi->ntxq, 0, "# of tx queues");
4429 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4430 &pi->first_rxq, 0, "index of first rx queue");
4431 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4432 &pi->first_txq, 0, "index of first tx queue");
4435 if (is_offload(pi->adapter)) {
4436 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4438 "# of rx queues for offloaded TCP connections");
4439 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4441 "# of tx queues for offloaded TCP connections");
4442 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4443 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4444 "index of first TOE rx queue");
4445 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4446 CTLFLAG_RD, &pi->first_ofld_txq, 0,
4447 "index of first TOE tx queue");
4451 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4452 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4453 "holdoff timer index");
4454 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4455 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4456 "holdoff packet counter index");
4458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4459 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4461 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4462 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4466 * dev.cxgbe.X.stats.
4468 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4469 NULL, "port statistics");
4470 children = SYSCTL_CHILDREN(oid);
4472 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4473 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4474 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4475 sysctl_handle_t4_reg64, "QU", desc)
4477 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4478 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4479 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4480 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4481 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4482 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4483 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4484 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4485 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4486 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4487 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4488 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4489 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4490 "# of tx frames in this range",
4491 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4492 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4493 "# of tx frames in this range",
4494 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4495 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4496 "# of tx frames in this range",
4497 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4498 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4499 "# of tx frames in this range",
4500 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4501 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4502 "# of tx frames in this range",
4503 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4504 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4505 "# of tx frames in this range",
4506 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4507 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4508 "# of tx frames in this range",
4509 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4510 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4511 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4512 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4513 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4514 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4515 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4516 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4518 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4520 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4522 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4523 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4524 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4525 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4526 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4527 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4528 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4529 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4531 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4532 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4533 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4534 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4535 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4536 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4537 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4538 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4539 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4540 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4541 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4542 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4543 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4545 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4546 "# of frames received with bad FCS",
4547 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4548 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4549 "# of frames received with length error",
4550 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4551 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4552 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4553 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4555 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4556 "# of rx frames in this range",
4557 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4558 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4559 "# of rx frames in this range",
4560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4561 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4562 "# of rx frames in this range",
4563 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4564 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4565 "# of rx frames in this range",
4566 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4567 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4568 "# of rx frames in this range",
4569 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4570 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4571 "# of rx frames in this range",
4572 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4573 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4574 "# of rx frames in this range",
4575 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4576 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4577 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4578 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4579 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4580 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4581 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4582 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4583 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4584 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4586 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4588 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4590 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4591 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4592 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4593 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4595 #undef SYSCTL_ADD_T4_REG64
4597 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4598 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4599 &pi->stats.name, desc)
4601 /* We get these from port_stats and they may be stale by upto 1s */
4602 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4603 "# drops due to buffer-group 0 overflows");
4604 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4605 "# drops due to buffer-group 1 overflows");
4606 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4607 "# drops due to buffer-group 2 overflows");
4608 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4609 "# drops due to buffer-group 3 overflows");
4610 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4611 "# of buffer-group 0 truncated packets");
4612 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4613 "# of buffer-group 1 truncated packets");
4614 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4615 "# of buffer-group 2 truncated packets");
4616 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4617 "# of buffer-group 3 truncated packets");
4619 #undef SYSCTL_ADD_T4_PORTSTAT
/*
 * Sysctl handler that renders an int array as a space-separated string.
 * arg1 is the address of the first element; arg2 appears to be the total
 * size in bytes (decremented by sizeof(int) per element) — TODO confirm.
 * NOTE(review): this extract is missing lines (declarations, sbuf cleanup,
 * return); treat as read-only reference, not compilable as shown.
 */
4625 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4630 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
/* Walk the array: i starts at arg1, arg2 counts remaining bytes. */
4631 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4632 sbuf_printf(&sb, "%d ", *i);
/* Hand the formatted buffer to the generic string handler. */
4635 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
/*
 * Sysctl handler that prints a bitfield value with %b: arg2 is the integer
 * value, arg1 points to the %b-style bit-description string.
 * NOTE(review): interior lines (sb/rc declarations, early-return checks,
 * sbuf_delete, return) are missing from this extract.
 */
4641 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
/* Wire the user buffer so sbuf_new_for_sysctl can drain without faulting. */
4646 rc = sysctl_wire_old_buffer(req, 0);
4650 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4654 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4655 rc = sbuf_finish(sb);
/*
 * Read a 10GBASE-T PHY register over MDIO and report it as an int sysctl.
 * arg1 is the port_info; the register selected presumably depends on an
 * 'op' value derived from arg2 — TODO confirm against full source.
 * The MDIO read is serialized with begin/end_synchronized_op.
 */
4662 sysctl_btphy(SYSCTL_HANDLER_ARGS)
4664 struct port_info *pi = arg1;
4666 struct adapter *sc = pi->adapter;
4670 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4673 /* XXX: magic numbers */
/* MMD 0x1e; register 0x20 or 0xc820 depending on op. */
4674 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4676 end_synchronized_op(sc, 0);
/* Read-only report of the fetched value. */
4682 rc = sysctl_handle_int(oidp, &v, 0, req);
/*
 * Read/write sysctl for the port's interrupt holdoff timer index.
 * On write: validates 0 <= idx < SGE_NTIMERS, then pushes the new
 * QINTR_TIMER_IDX (and CNT_EN if a packet-count index is in use) into the
 * intr_params of every NIC rx queue and every offload rx queue.
 * Serialized with begin/end_synchronized_op(HOLD_LOCK).
 */
4687 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4689 struct port_info *pi = arg1;
4690 struct adapter *sc = pi->adapter;
4692 struct sge_rxq *rxq;
4694 struct sge_ofld_rxq *ofld_rxq;
4700 rc = sysctl_handle_int(oidp, &idx, 0, req);
/* Read-only access or handler error: nothing to update. */
4701 if (rc != 0 || req->newptr == NULL)
4704 if (idx < 0 || idx >= SGE_NTIMERS)
4707 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
/* CNT_EN tracks whether a packet-counter index is configured (-1 = off). */
4712 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4713 for_each_rxq(pi, i, rxq) {
/* Use an atomic release store when the platform provides one. */
4714 #ifdef atomic_store_rel_8
4715 atomic_store_rel_8(&rxq->iq.intr_params, v);
4717 rxq->iq.intr_params = v;
4721 for_each_ofld_rxq(pi, i, ofld_rxq) {
4722 #ifdef atomic_store_rel_8
4723 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4725 ofld_rxq->iq.intr_params = v;
4731 end_synchronized_op(sc, LOCK_HELD);
/*
 * Read/write sysctl for the port's holdoff packet-counter index.
 * Valid range is -1 (disabled) .. SGE_NCOUNTERS-1.  Unlike the timer
 * index, this cannot be changed after the port's queues are created
 * (EBUSY once PORT_INIT_DONE is set).
 */
4736 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4738 struct port_info *pi = arg1;
4739 struct adapter *sc = pi->adapter;
4744 rc = sysctl_handle_int(oidp, &idx, 0, req);
4745 if (rc != 0 || req->newptr == NULL)
/* -1 means "no packet-count threshold". */
4748 if (idx < -1 || idx >= SGE_NCOUNTERS)
4751 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4756 if (pi->flags & PORT_INIT_DONE)
4757 rc = EBUSY; /* cannot be changed once the queues are created */
4761 end_synchronized_op(sc, LOCK_HELD);
/*
 * Read/write sysctl for the rx queue size.  New value must be >= 128 and
 * a multiple of 8 (hardware descriptor-ring granularity), and cannot be
 * changed once the port's queues exist.
 */
4766 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4768 struct port_info *pi = arg1;
4769 struct adapter *sc = pi->adapter;
4772 qsize = pi->qsize_rxq;
4774 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4775 if (rc != 0 || req->newptr == NULL)
/* (qsize & 7) rejects values that are not multiples of 8. */
4778 if (qsize < 128 || (qsize & 7))
4781 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4786 if (pi->flags & PORT_INIT_DONE)
4787 rc = EBUSY; /* cannot be changed once the queues are created */
4789 pi->qsize_rxq = qsize;
4791 end_synchronized_op(sc, LOCK_HELD);
/*
 * Read/write sysctl for the tx queue size.  New value must be >= 128 and
 * a power of two (the tx bufring requires it), and cannot be changed once
 * the port's queues exist.
 */
4796 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4798 struct port_info *pi = arg1;
4799 struct adapter *sc = pi->adapter;
4802 qsize = pi->qsize_txq;
4804 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4805 if (rc != 0 || req->newptr == NULL)
4808 /* bufring size must be powerof2 */
4809 if (qsize < 128 || !powerof2(qsize))
4812 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4817 if (pi->flags & PORT_INIT_DONE)
4818 rc = EBUSY; /* cannot be changed once the queues are created */
4820 pi->qsize_txq = qsize;
4822 end_synchronized_op(sc, LOCK_HELD);
/*
 * Generic read-only sysctl for a 64-bit adapter register.  arg1 is the
 * adapter, arg2 (assigned to 'reg' in a line missing from this extract)
 * is the register offset.  Used by the SYSCTL_ADD_T4_REG64 macro for the
 * per-port MPS statistics registers.
 */
4827 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4829 struct adapter *sc = arg1;
4833 val = t4_read_reg64(sc, reg);
4835 return (sysctl_handle_64(oidp, &val, 0, req));
/*
 * Dump the TP congestion-control table: one line pair per congestion
 * window, listing the 16 per-MTU increment values plus the additive
 * window (a_wnd) and the decrement factor (b_wnd, printed via the
 * dec_fac lookup table).
 */
4840 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4842 struct adapter *sc = arg1;
/* incr[mtu][win]: increment table indexed by MTU then window. */
4845 uint16_t incr[NMTUS][NCCTRL_WIN];
4846 static const char *dec_fac[] = {
4847 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4851 rc = sysctl_wire_old_buffer(req, 0);
4855 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4859 t4_read_cong_tbl(sc, incr);
4861 for (i = 0; i < NCCTRL_WIN; ++i) {
/* First 8 MTU columns on one line, remaining 8 plus window params next. */
4862 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4863 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4864 incr[5][i], incr[6][i], incr[7][i]);
4865 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4866 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4867 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4868 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4871 rc = sbuf_finish(sb);
/*
 * Display names for the CIM queues, indexed the same way as the
 * ibq/obq sysctls: 6 inbound queues first, then the outbound queues
 * (T5 adds two SGE-RX outbound queues at the end).
 */
4877 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4878 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
4879 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
4880 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
/*
 * Dump the contents of one CIM inbound or outbound queue.  arg2 selects
 * the queue: indices below CIM_NUM_IBQ are inbound, the rest outbound
 * (the obq count differs between T4 and T5).  Output is the queue name
 * followed by its contents, 4 words per line.
 */
4884 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4886 struct adapter *sc = arg1;
4888 int rc, i, n, qid = arg2;
4891 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4893 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4894 ("%s: bad qid %d\n", __func__, qid));
4896 if (qid < CIM_NUM_IBQ) {
/* Inbound queue: fixed-size read. */
4899 n = 4 * CIM_IBQ_SIZE;
4900 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4901 rc = t4_read_cim_ibq(sc, qid, buf, n);
4903 /* outbound queue */
4906 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4907 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4908 rc = t4_read_cim_obq(sc, qid, buf, n);
4915 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
4917 rc = sysctl_wire_old_buffer(req, 0);
4921 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
/* qname is indexed by the raw arg2 (T5 numbering). */
4927 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
4928 for (i = 0, p = buf; i < n; i += 16, p += 4)
4929 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4932 rc = sbuf_finish(sb);
/*
 * Dump the CIM logic-analyzer capture.  The output format depends on
 * whether the LA was configured to capture PC only (F_UPDBGLACAPTPCONLY):
 * in that mode each 8-word record is printed as three lines of packed
 * fields; otherwise a single wide line per record including LS0 state.
 */
4940 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
4942 struct adapter *sc = arg1;
/* Read the LA config first to know which decode to use below. */
4948 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
4952 rc = sysctl_wire_old_buffer(req, 0);
4956 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4960 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
4963 rc = -t4_cim_read_la(sc, buf, NULL);
4967 sbuf_printf(sb, "Status Data PC%s",
4968 cfg & F_UPDBGLACAPTPCONLY ? "" :
4969 " LS0Stat LS0Addr LS0Data");
/* Records are 8 words; the size must divide evenly or p overruns. */
4971 KASSERT((sc->params.cim_la_size & 7) == 0,
4972 ("%s: p will walk off the end of buf", __func__));
4974 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
4975 if (cfg & F_UPDBGLACAPTPCONLY) {
/* PC-only capture: three PC samples per 8-word record. */
4976 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
4978 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
4979 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
4980 p[4] & 0xff, p[5] >> 8);
4981 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
4982 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4983 p[1] & 0xf, p[2] >> 4);
/* Full capture: one line with status, data, PC and LS0 fields. */
4986 "\n %02x %x%07x %x%07x %08x %08x "
4988 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4989 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
4994 rc = sbuf_finish(sb);
/*
 * Dump the CIM MA (memory arbiter) logic analyzer.  The buffer holds two
 * captures of CIM_MALA_SIZE records, 5 words each: the first half is
 * printed raw, the second half decoded into Cnt/ID/Tag/UE/Data/RDY/VLD
 * columns.
 */
5002 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5004 struct adapter *sc = arg1;
5010 rc = sysctl_wire_old_buffer(req, 0);
5014 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
/* 2 captures x CIM_MALA_SIZE records x 5 words. */
5018 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5021 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
/* First capture: raw hex dump, one record per line. */
5024 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5025 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5029 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
/* Second capture: decode the packed bitfields of each record. */
5030 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5031 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
5032 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5033 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5034 (p[1] >> 2) | ((p[2] & 3) << 30),
5035 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5039 rc = sbuf_finish(sb);
/*
 * Dump the CIM PIF (processor interface) logic analyzer.  The buffer
 * holds two captures of CIM_PIFLA_SIZE records, 6 words each: the first
 * is decoded as Cntl/ID/DataBE/Addr/Data, the second as Cntl/ID/Data.
 *
 * Bug fix: the display loops iterated CIM_MALA_SIZE (copy-paste from
 * sysctl_cim_ma_la above) although the buffer is allocated and filled
 * with CIM_PIFLA_SIZE records per capture, reading past the end of buf.
 * Both loops now use CIM_PIFLA_SIZE.
 */
5046 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5048 struct adapter *sc = arg1;
5054 rc = sysctl_wire_old_buffer(req, 0);
5058 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
/* 2 captures x CIM_PIFLA_SIZE records x 6 words. */
5062 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5065 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5068 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
/* First capture; bound must match what t4_cim_read_pif_la filled in. */
5069 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5070 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
5071 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5072 p[4], p[3], p[2], p[1], p[0]);
5075 sbuf_printf(sb, "\n\nCntl ID Data");
/* Second capture, same record count. */
5076 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5077 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
5078 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5081 rc = sbuf_finish(sb);
/*
 * Dump the CIM queue configuration: base/size/threshold plus read/write
 * pointers and SOP/EOP counters for every inbound and outbound queue.
 * T4 and T5 expose the pointer registers at different addresses and have
 * different outbound-queue counts, selected up front.
 */
5088 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5090 struct adapter *sc = arg1;
5093 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5094 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5095 uint16_t thres[CIM_NUM_IBQ];
/* obq_wr holds one rd/wr-pointer pair per outbound queue. */
5096 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5097 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5098 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
/* T4 register layout. */
5101 cim_num_obq = CIM_NUM_OBQ;
5102 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5103 obq_rdaddr = A_UP_OBQ_0_REALADDR;
/* T5 uses shadow registers and has two extra obq's. */
5105 cim_num_obq = CIM_NUM_OBQ_T5;
5106 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5107 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5109 nq = CIM_NUM_IBQ + cim_num_obq;
/* 4 status words per queue, then 2 pointer words per obq. */
5111 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5113 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5117 t4_read_cimq_cfg(sc, base, size, thres);
5119 rc = sysctl_wire_old_buffer(req, 0);
5123 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5127 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
/* Inbound queues: have thresholds and separate rd/wr pointers. */
5129 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5130 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
5131 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5132 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5133 G_QUEREMFLITS(p[2]) * 16);
/* Outbound queues: wr pointer reported relative to the queue base. */
5134 for ( ; i < nq; i++, p += 4, wr += 2)
5135 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
5136 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5137 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5138 G_QUEREMFLITS(p[2]) * 16);
5140 rc = sbuf_finish(sb);
/*
 * Report per-channel CPL request/response counters from the TP block,
 * one column per channel (4 channels).
 */
5147 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5149 struct adapter *sc = arg1;
5152 struct tp_cpl_stats stats;
5154 rc = sysctl_wire_old_buffer(req, 0);
5158 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5162 t4_tp_get_cpl_stats(sc, &stats);
5164 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
5166 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
5167 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5168 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
5169 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5171 rc = sbuf_finish(sb);
/*
 * Report TP non-offload/ULP (USM) statistics: frames, octets, drops.
 * Despite the sysctl name mentioning DDP, the counters come from
 * t4_get_usm_stats().
 */
5178 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5180 struct adapter *sc = arg1;
5183 struct tp_usm_stats stats;
5185 rc = sysctl_wire_old_buffer(req, 0);
5189 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5193 t4_get_usm_stats(sc, &stats);
5195 sbuf_printf(sb, "Frames: %u\n", stats.frames);
5196 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5197 sbuf_printf(sb, "Drops: %u", stats.drops);
5199 rc = sbuf_finish(sb);
/* Human-readable names for firmware devlog severity levels, indexed by
 * the FW_DEVLOG_LEVEL_* value reported in each log entry. */
5205 const char *devlog_level_strings[] = {
5206 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
5207 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
5208 [FW_DEVLOG_LEVEL_ERR] = "ERR",
5209 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
5210 [FW_DEVLOG_LEVEL_INFO] = "INFO",
5211 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
/* Human-readable names for firmware devlog facilities, indexed by the
 * FW_DEVLOG_FACILITY_* value reported in each log entry. */
5214 const char *devlog_facility_strings[] = {
5215 [FW_DEVLOG_FACILITY_CORE] = "CORE",
5216 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
5217 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
5218 [FW_DEVLOG_FACILITY_RES] = "RES",
5219 [FW_DEVLOG_FACILITY_HW] = "HW",
5220 [FW_DEVLOG_FACILITY_FLR] = "FLR",
5221 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
5222 [FW_DEVLOG_FACILITY_PHY] = "PHY",
5223 [FW_DEVLOG_FACILITY_MAC] = "MAC",
5224 [FW_DEVLOG_FACILITY_PORT] = "PORT",
5225 [FW_DEVLOG_FACILITY_VI] = "VI",
5226 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
5227 [FW_DEVLOG_FACILITY_ACL] = "ACL",
5228 [FW_DEVLOG_FACILITY_TM] = "TM",
5229 [FW_DEVLOG_FACILITY_QFC] = "QFC",
5230 [FW_DEVLOG_FACILITY_DCB] = "DCB",
5231 [FW_DEVLOG_FACILITY_ETH] = "ETH",
5232 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
5233 [FW_DEVLOG_FACILITY_RI] = "RI",
5234 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
5235 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
5236 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
5237 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
/*
 * Dump the firmware device log.  The log lives in adapter memory as a
 * circular buffer of fw_devlog_e entries; this handler copies it out,
 * byte-swaps the fields, finds the oldest entry (smallest timestamp),
 * and prints entries in chronological order.
 */
5241 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5243 struct adapter *sc = arg1;
5244 struct devlog_params *dparams = &sc->params.devlog;
5245 struct fw_devlog_e *buf, *e;
5246 int i, j, rc, nentries, first = 0;
5248 uint64_t ftstamp = UINT64_MAX;
5250 if (dparams->start == 0) {
/* Fallback defaults when the firmware didn't report devlog params —
 * these magic values presumably match the default fw layout; TODO
 * confirm against the firmware interface spec. */
5251 dparams->memtype = 0;
5252 dparams->start = 0x84000;
5253 dparams->size = 32768;
5256 nentries = dparams->size / sizeof(struct fw_devlog_e);
5258 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5262 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
/* First pass: byte-swap every populated entry and remember the index
 * of the oldest one (the circular buffer's logical start). */
5267 for (i = 0; i < nentries; i++) {
5270 if (e->timestamp == 0)
5273 e->timestamp = be64toh(e->timestamp);
5274 e->seqno = be32toh(e->seqno);
5275 for (j = 0; j < 8; j++)
5276 e->params[j] = be32toh(e->params[j]);
5278 if (e->timestamp < ftstamp) {
5279 ftstamp = e->timestamp;
5284 if (buf[first].timestamp == 0)
5285 goto done; /* nothing in the log */
5287 rc = sysctl_wire_old_buffer(req, 0);
5291 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5296 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
5297 "Seq#", "Tstamp", "Level", "Facility", "Message");
/* Second pass: walk the ring starting at 'first', skipping holes. */
5302 if (e->timestamp == 0)
5305 sbuf_printf(sb, "%10d %15ju %8s %8s ",
5306 e->seqno, e->timestamp,
5307 (e->level < nitems(devlog_level_strings) ?
5308 devlog_level_strings[e->level] : "UNKNOWN"),
5309 (e->facility < nitems(devlog_facility_strings) ?
5310 devlog_facility_strings[e->facility] : "UNKNOWN"));
/* e->fmt is a firmware-supplied format string with up to 8 params. */
5311 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5312 e->params[2], e->params[3], e->params[4],
5313 e->params[5], e->params[6], e->params[7]);
5315 if (++i == nentries)
5317 } while (i != first);
5319 rc = sbuf_finish(sb);
/*
 * Report per-channel FCoE statistics (DDP octets/frames and drops) for
 * all four channels, one column per channel.
 */
5327 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5329 struct adapter *sc = arg1;
5332 struct tp_fcoe_stats stats[4];
5334 rc = sysctl_wire_old_buffer(req, 0);
5338 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5342 t4_get_fcoe_stats(sc, 0, &stats[0]);
5343 t4_get_fcoe_stats(sc, 1, &stats[1]);
5344 t4_get_fcoe_stats(sc, 2, &stats[2]);
5345 t4_get_fcoe_stats(sc, 3, &stats[3]);
5347 sbuf_printf(sb, " channel 0 channel 1 "
5348 "channel 2 channel 3\n");
5349 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
5350 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5351 stats[3].octetsDDP);
5352 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
5353 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5354 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5355 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5356 stats[3].framesDrop);
5358 rc = sbuf_finish(sb);
/*
 * Report the hardware tx scheduler configuration: for each of the
 * NTX_SCHED schedulers, its mode (flow vs class), bound channel, rate in
 * Kbps, class IPG and flow IPG (from the pace table).  Disabled values
 * print as "disabled" (those branches are elided in this extract).
 */
5365 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5367 struct adapter *sc = arg1;
5370 unsigned int map, kbps, ipg, mode;
5371 unsigned int pace_tab[NTX_SCHED];
5373 rc = sysctl_wire_old_buffer(req, 0);
5377 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5381 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5382 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5383 t4_read_pace_tbl(sc, pace_tab);
5385 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
5386 "Class IPG (0.1 ns) Flow IPG (us)");
/* map holds 2 channel bits per scheduler; shifted out as we iterate. */
5388 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5389 t4_get_tx_sched(sc, i, &kbps, &ipg);
5390 sbuf_printf(sb, "\n %u %-5s %u ", i,
5391 (mode & (1 << i)) ? "flow" : "class", map & 3);
5393 sbuf_printf(sb, "%9u ", kbps);
5395 sbuf_printf(sb, " disabled ");
5398 sbuf_printf(sb, "%13u ", ipg);
5400 sbuf_printf(sb, " disabled ");
5403 sbuf_printf(sb, "%10u", pace_tab[i]);
5405 sbuf_printf(sb, " disabled");
5408 rc = sbuf_finish(sb);
/*
 * Report loopback-port statistics.  The four loopback ports are read in
 * pairs (0/1 and 2/3) and printed side by side, one row per counter
 * named in stat_name[].
 */
5415 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5417 struct adapter *sc = arg1;
5421 struct lb_port_stats s[2];
5422 static const char *stat_name[] = {
5423 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5424 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5425 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
5426 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5427 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5428 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5429 "BG2FramesTrunc:", "BG3FramesTrunc:"
5432 rc = sysctl_wire_old_buffer(req, 0);
5436 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5440 memset(s, 0, sizeof(s));
/* Two loopback ports per iteration. */
5442 for (i = 0; i < 4; i += 2) {
5443 t4_get_lb_stats(sc, i, &s[0]);
5444 t4_get_lb_stats(sc, i + 1, &s[1]);
5448 sbuf_printf(sb, "%s Loopback %u"
5449 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5451 for (j = 0; j < nitems(stat_name); j++)
5452 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5456 rc = sbuf_finish(sb);
/*
 * qsort(3) comparator ordering struct mem_desc entries by base address.
 * NOTE(review): subtracting unsigned bases and returning the difference
 * as int can misorder ranges more than INT_MAX apart — presumably fine
 * for the address ranges involved, but worth confirming.
 */
5469 mem_desc_cmp(const void *a, const void *b)
5471 return ((const struct mem_desc *)a)->base -
5472 ((const struct mem_desc *)b)->base;
/*
 * Print one named memory region as "name from-to [size]" into sb.
 * The 'to' bound is inclusive, hence size = to - from + 1.
 */
5476 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5481 size = to - from + 1;
5485 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5486 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
/*
 * Report the adapter's memory layout: which memories are populated
 * (EDC0/EDC1/MC or MC0/MC1 on T5), how each hardware agent's region is
 * placed within them, any address-space holes, the uP RAM windows, and
 * the MPS rx page allocation per port and loopback channel.
 *
 * Method: collect populated ranges into avail[], collect per-agent base
 * addresses into mem[] (limits filled in afterwards from the next
 * region's base), sort both by base, and print.
 */
5490 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5492 struct adapter *sc = arg1;
5495 uint32_t lo, hi, used, alloc;
/* Index 2 is "MC:" (T4 single controller), 3/4 are T5's MC0/MC1. */
5496 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5497 static const char *region[] = {
5498 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5499 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5500 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5501 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5502 "RQUDP region:", "PBL region:", "TXPBL region:",
5503 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5506 struct mem_desc avail[4];
5507 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
5508 struct mem_desc *md = mem;
5510 rc = sysctl_wire_old_buffer(req, 0);
5514 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5518 for (i = 0; i < nitems(mem); i++) {
5523 /* Find and sort the populated memory ranges */
5525 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5526 if (lo & F_EDRAM0_ENABLE) {
5527 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
/* BAR fields are in MB units, hence the << 20. */
5528 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5529 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5533 if (lo & F_EDRAM1_ENABLE) {
5534 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5535 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5536 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5540 if (lo & F_EXT_MEM_ENABLE) {
5541 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5542 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5543 avail[i].limit = avail[i].base +
5544 (G_EXT_MEM_SIZE(hi) << 20);
5545 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */
5548 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5549 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5550 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5551 avail[i].limit = avail[i].base +
5552 (G_EXT_MEM1_SIZE(hi) << 20);
5556 if (!i) /* no memory available */
5558 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
/* Base addresses of the fixed-order regions; limits derived later. */
5560 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5561 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5562 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5563 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5564 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5565 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5566 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5567 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5568 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5570 /* the next few have explicit upper bounds */
5571 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5572 md->limit = md->base - 1 +
5573 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5574 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5577 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5578 md->limit = md->base - 1 +
5579 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5580 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
/* LE hash region only exists when hashing is enabled. */
5583 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5584 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5585 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5586 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5589 md->idx = nitems(region); /* hide it */
/* ULP regions come as LLIMIT/ULIMIT register pairs. */
5593 #define ulp_region(reg) \
5594 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5595 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5597 ulp_region(RX_ISCSI);
5598 ulp_region(RX_TDDP);
5600 ulp_region(RX_STAG);
5602 ulp_region(RX_RQUDP);
5608 md->idx = nitems(region);
/* DB VFIFO exists on T5 only, and only when enabled. */
5609 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5610 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5611 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5612 A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5616 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5617 md->limit = md->base + sc->tids.ntids - 1;
5619 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5620 md->limit = md->base + sc->tids.ntids - 1;
5623 md->base = sc->vres.ocq.start;
5624 if (sc->vres.ocq.size)
5625 md->limit = md->base + sc->vres.ocq.size - 1;
5627 md->idx = nitems(region); /* hide it */
5630 /* add any address-space holes, there can be up to 3 */
5631 for (n = 0; n < i - 1; n++)
5632 if (avail[n].limit < avail[n + 1].base)
5633 (md++)->base = avail[n].limit;
5635 (md++)->base = avail[n].limit;
5638 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
/* Print the populated memories first... */
5640 for (lo = 0; lo < i; lo++)
5641 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5642 avail[lo].limit - 1);
5644 sbuf_printf(sb, "\n");
/* ...then each visible region, limit inferred from the next base. */
5645 for (i = 0; i < n; i++) {
5646 if (mem[i].idx >= nitems(region))
5647 continue; /* skip holes */
5649 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5650 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5654 sbuf_printf(sb, "\n");
5655 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5656 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5657 mem_region_show(sb, "uP RAM:", lo, hi);
5659 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5660 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5661 mem_region_show(sb, "uP Extmem2:", lo, hi);
5663 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5664 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5666 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5667 (lo & F_PMRXNUMCHN) ? 2 : 1);
5669 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5670 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5671 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5673 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5674 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5675 sbuf_printf(sb, "%u p-structs\n",
5676 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
/* MPS rx page usage per port; T4 and T5 pack the fields differently. */
5678 for (i = 0; i < 4; i++) {
5679 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5682 alloc = G_ALLOC(lo);
5684 used = G_T5_USED(lo);
5685 alloc = G_T5_ALLOC(lo);
5687 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
/* Same again for the four loopback channels. */
5690 for (i = 0; i < 4; i++) {
5691 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5694 alloc = G_ALLOC(lo);
5696 used = G_T5_USED(lo);
5697 alloc = G_T5_ALLOC(lo);
5700 "\nLoopback %d using %u pages out of %u allocated",
5704 rc = sbuf_finish(sb);
/*
 * Convert an MPS TCAM X/Y register pair into an Ethernet address and a
 * match mask.  The mask computation line is missing from this extract;
 * the address is the low 48 bits of the value derived from y (skipping
 * the top 2 bytes of the 64-bit quantity).
 * NOTE(review): copying from "(char *)&y + 2" assumes big-endian byte
 * order within y as stored — confirm against the full source, where y
 * is recomputed before this memcpy.
 */
5711 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5715 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
/*
 * Dump the MPS TCAM: one line per entry with the Ethernet address, match
 * mask, valid bit, port map, PF/VF, replication bitmap (fetched from
 * firmware via FW_LDST when the entry replicates), per-port priorities
 * and multi-listen bits.  Entry count differs between T4 and T5.
 */
5719 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5721 struct adapter *sc = arg1;
5725 rc = sysctl_wire_old_buffer(req, 0);
5729 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5734 "Idx Ethernet address Mask Vld Ports PF"
5735 " VF Replication P0 P1 P2 P3 ML");
5736 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5737 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5738 for (i = 0; i < n; i++) {
5739 uint64_t tcamx, tcamy, mask;
5740 uint32_t cls_lo, cls_hi;
5741 uint8_t addr[ETHER_ADDR_LEN];
5743 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5744 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5745 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5746 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5751 tcamxy2valmask(tcamx, tcamy, addr, &mask);
5752 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5753 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
5754 addr[3], addr[4], addr[5], (uintmax_t)mask,
5755 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5756 G_PORTMAP(cls_hi), G_PF(cls_lo),
/* VF column prints -1 when the entry has no valid VF. */
5757 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5759 if (cls_lo & F_REPLICATE) {
5760 struct fw_ldst_cmd ldst_cmd;
/* Build an FW_LDST read of the MPS replication map for entry i. */
5762 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5763 ldst_cmd.op_to_addrspace =
5764 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5765 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5766 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5767 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5768 ldst_cmd.u.mps.fid_ctl =
5769 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5770 V_FW_LDST_CMD_CTL(i));
/* Mailbox access must be serialized with other driver operations. */
5772 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5776 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5777 sizeof(ldst_cmd), &ldst_cmd);
5778 end_synchronized_op(sc, 0);
/* On mailbox failure, print the error in the replication column. */
5782 " ------------ error %3u ------------", rc);
5785 sbuf_printf(sb, " %08x %08x %08x %08x",
5786 be32toh(ldst_cmd.u.mps.rplc127_96),
5787 be32toh(ldst_cmd.u.mps.rplc95_64),
5788 be32toh(ldst_cmd.u.mps.rplc63_32),
5789 be32toh(ldst_cmd.u.mps.rplc31_0));
/* Non-replicated entries get a blank replication column. */
5792 sbuf_printf(sb, "%36s", "");
5794 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5795 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5796 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
/* Error path drains the sbuf without caring about the result;
 * presumably guarded by "if (rc)" in lines missing here. */
5800 (void) sbuf_finish(sb);
5802 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report the 16-entry hardware path-MTU table as a single
 * space-separated line.
 */
5809 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5811 struct adapter *sc = arg1;
5814 uint16_t mtus[NMTUS];
5816 rc = sysctl_wire_old_buffer(req, 0);
5820 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5824 t4_read_mtu_tbl(sc, mtus, NULL);
5826 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5827 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5828 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5829 mtus[14], mtus[15]);
5831 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report PMTX/PMRX statistics — per-category operation
 * counts and cycle counts for both directions, one row per category.
 */
5838 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5840 struct adapter *sc = arg1;
5843 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5844 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5845 static const char *pm_stats[] = {
5846 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5849 rc = sysctl_wire_old_buffer(req, 0);
5853 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5857 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5858 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5860 sbuf_printf(sb, " Tx count Tx cycles "
5861 "Rx count Rx cycles");
5862 for (i = 0; i < PM_NSTATS; i++)
5863 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
5864 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5866 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report TP RDMA statistics (RQE deferral counts).
 */
5873 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5875 struct adapter *sc = arg1;
5878 struct tp_rdma_stats stats;
5880 rc = sysctl_wire_old_buffer(req, 0);
5884 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5888 t4_tp_get_rdma_stats(sc, &stats);
5889 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5890 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5892 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report TP TCP statistics side by side for IPv4 and
 * IPv6 (resets out, segments in/out, retransmitted segments).
 */
5899 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5901 struct adapter *sc = arg1;
5904 struct tp_tcp_stats v4, v6;
5906 rc = sysctl_wire_old_buffer(req, 0);
5910 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5914 t4_tp_get_tcp_stats(sc, &v4, &v6);
5917 sbuf_printf(sb, "OutRsts: %20u %20u\n",
5918 v4.tcpOutRsts, v6.tcpOutRsts);
5919 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
5920 v4.tcpInSegs, v6.tcpInSegs);
5921 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
5922 v4.tcpOutSegs, v6.tcpOutSegs);
5923 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
5924 v4.tcpRetransSegs, v6.tcpRetransSegs);
5926 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report TID (tunnel/connection id) usage: ATID range,
 * TID range (split around the hash region when the LE hash filter is
 * enabled), server TIDs, filter TIDs, and hardware IPv4/IPv6 user counts.
 */
5933 sysctl_tids(SYSCTL_HANDLER_ARGS)
5935 struct adapter *sc = arg1;
5938 struct tid_info *t = &sc->tids;
5940 rc = sysctl_wire_old_buffer(req, 0);
5944 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5949 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
/* With hashing enabled the TID space is split around the hash base. */
5954 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5955 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
5958 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
5959 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
5962 sbuf_printf(sb, "TID range: %u-%u",
5963 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
5967 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
5968 sbuf_printf(sb, ", in use: %u\n",
5969 atomic_load_acq_int(&t->tids_in_use));
5973 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
5974 t->stid_base + t->nstids - 1, t->stids_in_use);
5978 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
5979 t->ftid_base + t->nftids - 1);
5982 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
5983 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
5984 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
5986 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report TP error statistics per channel (4 channels per
 * row), followed by the two global offload counters.
 */
5993 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
5995 struct adapter *sc = arg1;
5998 struct tp_err_stats stats;
6000 rc = sysctl_wire_old_buffer(req, 0);
6004 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6008 t4_tp_get_err_stats(sc, &stats);
6010 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6012 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
6013 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6014 stats.macInErrs[3]);
6015 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
6016 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6017 stats.hdrInErrs[3]);
6018 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
6019 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6020 stats.tcpInErrs[3]);
6021 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
6022 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6023 stats.tcp6InErrs[3]);
6024 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
6025 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6026 stats.tnlCongDrops[3]);
6027 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
6028 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6029 stats.tnlTxDrops[3]);
6030 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
6031 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6032 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6033 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
6034 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6035 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6036 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
6037 stats.ofldNoNeigh, stats.ofldCongDefer);
6039 rc = sbuf_finish(sb);
/*
 * Pretty-print a 64-bit logic-analyzer word as "name: value" pairs using
 * the given field descriptor table, wrapping output lines at ~79 columns.
 * Fragment: the loop header and line_size initialization are elided here.
 */
6052 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6058 uint64_t mask = (1ULL << f->width) - 1;
6059 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6060 ((uintmax_t)v >> f->start) & mask);
/* Wrap before the column limit; continuation lines are indented. */
6062 if (line_size + len >= 79) {
6064 sbuf_printf(sb, "\n ");
6066 sbuf_printf(sb, "%s ", buf);
6067 line_size += len + 1;
6070 sbuf_printf(sb, "\n");
/*
 * Field layout of a TP logic-analyzer word in mode 0 (and the first word
 * of the 2/3 dual-word modes).  Each entry is { name, start bit, width }.
 */
6073 static struct field_desc tp_la0[] = {
6074 { "RcfOpCodeOut", 60, 4 },
6076 { "WcfState", 52, 4 },
6077 { "RcfOpcSrcOut", 50, 2 },
6078 { "CRxError", 49, 1 },
6079 { "ERxError", 48, 1 },
6080 { "SanityFailed", 47, 1 },
6081 { "SpuriousMsg", 46, 1 },
6082 { "FlushInputMsg", 45, 1 },
6083 { "FlushInputCpl", 44, 1 },
6084 { "RssUpBit", 43, 1 },
6085 { "RssFilterHit", 42, 1 },
6087 { "InitTcb", 31, 1 },
6088 { "LineNumber", 24, 7 },
6090 { "EdataOut", 22, 1 },
6092 { "CdataOut", 20, 1 },
6093 { "EreadPdu", 19, 1 },
6094 { "CreadPdu", 18, 1 },
6095 { "TunnelPkt", 17, 1 },
6096 { "RcfPeerFin", 16, 1 },
6097 { "RcfReasonOut", 12, 4 },
6098 { "TxCchannel", 10, 2 },
6099 { "RcfTxChannel", 8, 2 },
6100 { "RxEchannel", 6, 2 },
6101 { "RcfRxChannel", 5, 1 },
6102 { "RcfDataOutSrdy", 4, 1 },
6104 { "RxOoDvld", 2, 1 },
6105 { "RxCongestion", 1, 1 },
6106 { "TxCongestion", 0, 1 },
/*
 * Field layout of the second TP LA word when bit 17 (TunnelPkt) of the
 * first word is clear (see tp_la_show3).
 */
6110 static struct field_desc tp_la1[] = {
6111 { "CplCmdIn", 56, 8 },
6112 { "CplCmdOut", 48, 8 },
6113 { "ESynOut", 47, 1 },
6114 { "EAckOut", 46, 1 },
6115 { "EFinOut", 45, 1 },
6116 { "ERstOut", 44, 1 },
6121 { "DataIn", 39, 1 },
6122 { "DataInVld", 38, 1 },
6124 { "RxBufEmpty", 36, 1 },
6126 { "RxFbCongestion", 34, 1 },
6127 { "TxFbCongestion", 33, 1 },
6128 { "TxPktSumSrdy", 32, 1 },
6129 { "RcfUlpType", 28, 4 },
6131 { "Ebypass", 26, 1 },
6133 { "Static0", 24, 1 },
6135 { "Cbypass", 22, 1 },
6137 { "CPktOut", 20, 1 },
6138 { "RxPagePoolFull", 18, 2 },
6139 { "RxLpbkPkt", 17, 1 },
6140 { "TxLpbkPkt", 16, 1 },
6141 { "RxVfValid", 15, 1 },
6142 { "SynLearned", 14, 1 },
6143 { "SetDelEntry", 13, 1 },
6144 { "SetInvEntry", 12, 1 },
6145 { "CpcmdDvld", 11, 1 },
6146 { "CpcmdSave", 10, 1 },
6147 { "RxPstructsFull", 8, 2 },
6148 { "EpcmdDvld", 7, 1 },
6149 { "EpcmdFlush", 6, 1 },
6150 { "EpcmdTrimPrefix", 5, 1 },
6151 { "EpcmdTrimPostfix", 4, 1 },
6152 { "ERssIp4Pkt", 3, 1 },
6153 { "ERssIp6Pkt", 2, 1 },
6154 { "ERssTcpUdpPkt", 1, 1 },
6155 { "ERssFceFipPkt", 0, 1 },
/*
 * Field layout of the second TP LA word when bit 17 (TunnelPkt) of the
 * first word is set (see tp_la_show3).  Shares most fields with tp_la1.
 */
6159 static struct field_desc tp_la2[] = {
6160 { "CplCmdIn", 56, 8 },
6161 { "MpsVfVld", 55, 1 },
6168 { "DataIn", 39, 1 },
6169 { "DataInVld", 38, 1 },
6171 { "RxBufEmpty", 36, 1 },
6173 { "RxFbCongestion", 34, 1 },
6174 { "TxFbCongestion", 33, 1 },
6175 { "TxPktSumSrdy", 32, 1 },
6176 { "RcfUlpType", 28, 4 },
6178 { "Ebypass", 26, 1 },
6180 { "Static0", 24, 1 },
6182 { "Cbypass", 22, 1 },
6184 { "CPktOut", 20, 1 },
6185 { "RxPagePoolFull", 18, 2 },
6186 { "RxLpbkPkt", 17, 1 },
6187 { "TxLpbkPkt", 16, 1 },
6188 { "RxVfValid", 15, 1 },
6189 { "SynLearned", 14, 1 },
6190 { "SetDelEntry", 13, 1 },
6191 { "SetInvEntry", 12, 1 },
6192 { "CpcmdDvld", 11, 1 },
6193 { "CpcmdSave", 10, 1 },
6194 { "RxPstructsFull", 8, 2 },
6195 { "EpcmdDvld", 7, 1 },
6196 { "EpcmdFlush", 6, 1 },
6197 { "EpcmdTrimPrefix", 5, 1 },
6198 { "EpcmdTrimPostfix", 4, 1 },
6199 { "ERssIp4Pkt", 3, 1 },
6200 { "ERssIp6Pkt", 2, 1 },
6201 { "ERssTcpUdpPkt", 1, 1 },
6202 { "ERssFceFipPkt", 0, 1 },
/* Display one single-word TP LA entry using the tp_la0 layout. */
6207 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6210 field_desc_show(sb, *p, tp_la0);
/*
 * Display a two-word TP LA entry; both words use the tp_la0 layout.
 * The second word is skipped for the last entry when it is all-ones
 * (unwritten).
 */
6214 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6218 sbuf_printf(sb, "\n");
6219 field_desc_show(sb, p[0], tp_la0);
6220 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6221 field_desc_show(sb, p[1], tp_la0);
/*
 * Display a two-word TP LA entry; the second word's layout depends on
 * bit 17 (TunnelPkt) of the first word: tp_la2 for tunnel packets,
 * tp_la1 otherwise.
 */
6225 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6229 sbuf_printf(sb, "\n");
6230 field_desc_show(sb, p[0], tp_la0);
6231 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6232 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
/*
 * Sysctl handler: read the TP logic analyzer and decode it.  The LA mode
 * (from A_TP_DBG_LA_CONFIG) selects single- or dual-word entries and the
 * matching show function.  Fragment: the 'inc' assignments per case are
 * elided in this view.
 */
6236 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6238 struct adapter *sc = arg1;
6243 void (*show_func)(struct sbuf *, uint64_t *, int);
6245 rc = sysctl_wire_old_buffer(req, 0);
6249 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6253 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6255 t4_tp_read_la(sc, buf, NULL);
6258 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6261 show_func = tp_la_show2;
6265 show_func = tp_la_show3;
6269 show_func = tp_la_show;
6272 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6273 (*show_func)(sb, p, i);
6275 rc = sbuf_finish(sb);
/*
 * Sysctl handler: report per-channel NIC and offload transmit rates in
 * bytes per second.
 */
6282 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6284 struct adapter *sc = arg1;
6287 u64 nrate[NCHAN], orate[NCHAN];
6289 rc = sysctl_wire_old_buffer(req, 0);
6293 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6297 t4_get_chan_txrate(sc, nrate, orate);
6298 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6300 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
6301 nrate[0], nrate[1], nrate[2], nrate[3]);
6302 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
6303 orate[0], orate[1], orate[2], orate[3]);
6305 rc = sbuf_finish(sb);
/*
 * Sysctl handler: dump the ULP-RX logic analyzer.  Each entry is 8
 * 32-bit words, printed with the halves swapped into display order.
 */
6312 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6314 struct adapter *sc = arg1;
6319 rc = sysctl_wire_old_buffer(req, 0);
6323 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6327 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6330 t4_ulprx_read_la(sc, buf);
6333 sbuf_printf(sb, " Pcmd Type Message"
6335 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6336 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
6337 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6340 rc = sbuf_finish(sb);
/*
 * Sysctl handler (T5): report SGE write-combined work-request statistics.
 * Only meaningful when the stat source is 7; the stat mode selects how
 * A_SGE_STAT_MATCH is interpreted (incomplete vs. data overflow).
 */
6347 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6349 struct adapter *sc = arg1;
6353 rc = sysctl_wire_old_buffer(req, 0);
6357 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6361 v = t4_read_reg(sc, A_SGE_STAT_CFG);
6362 if (G_STATSOURCE_T5(v) == 7) {
6363 if (G_STATMODE(v) == 0) {
6364 sbuf_printf(sb, "total %d, incomplete %d",
6365 t4_read_reg(sc, A_SGE_STAT_TOTAL),
6366 t4_read_reg(sc, A_SGE_STAT_MATCH));
6367 } else if (G_STATMODE(v) == 1) {
6368 sbuf_printf(sb, "total %d, data overflow %d",
6369 t4_read_reg(sc, A_SGE_STAT_TOTAL),
6370 t4_read_reg(sc, A_SGE_STAT_MATCH));
6373 rc = sbuf_finish(sb);
/*
 * Resume transmission on a txq: use the partially-sent mbuf held in
 * txq->m if there is one, otherwise dequeue from the buf_ring.  Caller
 * must hold the txq lock.
 */
6381 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6383 struct buf_ring *br;
6386 TXQ_LOCK_ASSERT_OWNED(txq);
6389 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6391 t4_eth_tx(ifp, txq, m);
/*
 * Callout handler for a stalled egress queue.  If the eq still can't
 * resume, reschedule ourselves (unless the eq is being torn down);
 * otherwise hand the actual restart work to the adapter taskqueue.
 */
6395 t4_tx_callout(void *arg)
6397 struct sge_eq *eq = arg;
/* Don't spin on the lock from callout context; try again next tick. */
6400 if (EQ_TRYLOCK(eq) == 0)
6403 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
/*
 * Bug fix: this was "!(eq->flags && EQ_DOOMED)" (logical AND).
 * Since EQ_DOOMED is a nonzero constant, that expression was false
 * whenever any flag was set, so a stalled-but-live eq never got its
 * callout rescheduled and tx could hang.  Use a bitwise test, matching
 * the "(eq->flags & EQ_DOOMED) == 0" check below.
 */
6406 if (__predict_true(!(eq->flags & EQ_DOOMED)))
6407 callout_schedule(&eq->tx_callout, 1);
6411 EQ_LOCK_ASSERT_OWNED(eq);
6413 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
/* Ethernet queues and work-request queues recover differently. */
6415 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6416 struct sge_txq *txq = arg;
6417 struct port_info *pi = txq->ifp->if_softc;
6421 struct sge_wrq *wrq = arg;
6426 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
/*
 * Taskqueue handler that restarts transmission on an egress queue:
 * Ethernet queues go through txq_start(), work-request queues through
 * t4_wrq_tx_locked().
 */
6433 t4_tx_task(void *arg, int count)
6435 struct sge_eq *eq = arg;
6438 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6439 struct sge_txq *txq = arg;
6440 txq_start(txq->ifp, txq);
6442 struct sge_wrq *wrq = arg;
6443 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
/*
 * Translate a hardware filter configuration word (TP ingress config
 * bits) into the driver's T4_FILTER_* mode bitmap.  The base fields
 * (IP versions, addresses, ports) are always available.
 */
6449 fconf_to_mode(uint32_t fconf)
6453 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6454 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6456 if (fconf & F_FRAGMENTATION)
6457 mode |= T4_FILTER_IP_FRAGMENT;
6459 if (fconf & F_MPSHITTYPE)
6460 mode |= T4_FILTER_MPS_HIT_TYPE;
6462 if (fconf & F_MACMATCH)
6463 mode |= T4_FILTER_MAC_IDX;
6465 if (fconf & F_ETHERTYPE)
6466 mode |= T4_FILTER_ETH_TYPE;
6468 if (fconf & F_PROTOCOL)
6469 mode |= T4_FILTER_IP_PROTO;
6472 mode |= T4_FILTER_IP_TOS;
6475 mode |= T4_FILTER_VLAN;
6477 if (fconf & F_VNIC_ID)
6478 mode |= T4_FILTER_VNIC;
6481 mode |= T4_FILTER_PORT;
6484 mode |= T4_FILTER_FCoE;
/*
 * Inverse of fconf_to_mode(): translate a T4_FILTER_* mode bitmap into
 * the hardware filter configuration word.
 */
6490 mode_to_fconf(uint32_t mode)
6494 if (mode & T4_FILTER_IP_FRAGMENT)
6495 fconf |= F_FRAGMENTATION;
6497 if (mode & T4_FILTER_MPS_HIT_TYPE)
6498 fconf |= F_MPSHITTYPE;
6500 if (mode & T4_FILTER_MAC_IDX)
6501 fconf |= F_MACMATCH;
6503 if (mode & T4_FILTER_ETH_TYPE)
6504 fconf |= F_ETHERTYPE;
6506 if (mode & T4_FILTER_IP_PROTO)
6507 fconf |= F_PROTOCOL;
6509 if (mode & T4_FILTER_IP_TOS)
6512 if (mode & T4_FILTER_VLAN)
6515 if (mode & T4_FILTER_VNIC)
6518 if (mode & T4_FILTER_PORT)
6521 if (mode & T4_FILTER_FCoE)
/*
 * Compute the filter configuration bits a given filter specification
 * requires: a field is needed if either its value or its mask is set.
 * Used to validate a filter against the global filter mode.
 */
6528 fspec_to_fconf(struct t4_filter_specification *fs)
6532 if (fs->val.frag || fs->mask.frag)
6533 fconf |= F_FRAGMENTATION;
6535 if (fs->val.matchtype || fs->mask.matchtype)
6536 fconf |= F_MPSHITTYPE;
6538 if (fs->val.macidx || fs->mask.macidx)
6539 fconf |= F_MACMATCH;
6541 if (fs->val.ethtype || fs->mask.ethtype)
6542 fconf |= F_ETHERTYPE;
6544 if (fs->val.proto || fs->mask.proto)
6545 fconf |= F_PROTOCOL;
6547 if (fs->val.tos || fs->mask.tos)
6550 if (fs->val.vlan_vld || fs->mask.vlan_vld)
6553 if (fs->val.vnic_vld || fs->mask.vnic_vld)
6556 if (fs->val.iport || fs->mask.iport)
6559 if (fs->val.fcoe || fs->mask.fcoe)
/*
 * Read the current global filter mode from hardware (TP_VLAN_PRI_MAP via
 * indirect TP PIO access), resync the cached copy if it drifted, and
 * return the mode as a T4_FILTER_* bitmap.
 */
6566 get_filter_mode(struct adapter *sc, uint32_t *mode)
6571 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6576 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
/* The cached value should mirror hardware; warn and resync if not. */
6579 if (sc->params.tp.vlan_pri_map != fconf) {
6580 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6581 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6583 sc->params.tp.vlan_pri_map = fconf;
6586 *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6588 end_synchronized_op(sc, LOCK_HELD);
/*
 * Change the global filter mode.  Refused while any filters are in use
 * or while offload is active (elided branches set rc accordingly), since
 * the mode change would invalidate existing filters/connections.
 */
6593 set_filter_mode(struct adapter *sc, uint32_t mode)
6598 fconf = mode_to_fconf(mode);
6600 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6605 if (sc->tids.ftids_in_use > 0) {
6611 if (sc->offload_map) {
6618 rc = -t4_set_filter_mode(sc, fconf);
6620 sc->filter_mode = fconf;
6626 end_synchronized_op(sc, LOCK_HELD);
/*
 * Read a filter's hit count from its TCB via PCIe memory window 0.
 * The two read paths presumably correspond to different chip/TCB
 * layouts (64-bit vs 32-bit counter at different offsets) — the
 * selecting condition is elided in this view; confirm against the
 * full source.
 */
6630 static inline uint64_t
6631 get_filter_hits(struct adapter *sc, uint32_t fid)
6633 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6636 memwin_info(sc, 0, &mw_base, NULL);
6637 off = position_memwin(sc, 0,
6638 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6640 hits = t4_read_reg64(sc, mw_base + off + 16);
6641 hits = be64toh(hits);
6643 hits = t4_read_reg(sc, mw_base + off + 24);
6644 hits = be32toh(hits);
/*
 * Find the first valid filter at index >= t->idx and fill in the caller's
 * t4_filter (l2t/smt indices, hit count when hit counting is enabled).
 * Sets t->idx to 0xffffffff when there is no such filter.
 */
6651 get_filter(struct adapter *sc, struct t4_filter *t)
6653 int i, rc, nfilters = sc->tids.nftids;
6654 struct filter_entry *f;
6656 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6661 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6662 t->idx >= nfilters) {
6663 t->idx = 0xffffffff;
6667 f = &sc->tids.ftid_tab[t->idx];
6668 for (i = t->idx; i < nfilters; i++, f++) {
6671 t->l2tidx = f->l2t ? f->l2t->idx : 0;
6672 t->smtidx = f->smtidx;
6674 t->hits = get_filter_hits(sc, t->idx);
/* Hit counting disabled for this filter: report "unknown". */
6676 t->hits = UINT64_MAX;
6683 t->idx = 0xffffffff;
6685 end_synchronized_op(sc, LOCK_HELD);
/*
 * Validate and install a hardware filter at t->idx.  Checks the request
 * against the filter count, adapter init state, the global filter mode,
 * and port ranges; allocates the filter table on first use; then sends
 * the filter work request and sleeps until the firmware reply (via
 * t4_filter_rpl) clears f->pending.
 */
6690 set_filter(struct adapter *sc, struct t4_filter *t)
6692 unsigned int nfilters, nports;
6693 struct filter_entry *f;
6696 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6700 nfilters = sc->tids.nftids;
6701 nports = sc->params.nports;
6703 if (nfilters == 0) {
6708 if (!(sc->flags & FULL_INIT_DONE)) {
6713 if (t->idx >= nfilters) {
6718 /* Validate against the global filter mode */
6719 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6720 sc->params.tp.vlan_pri_map) {
6725 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6730 if (t->fs.val.iport >= nports) {
6735 /* Can't specify an iq if not steering to it */
6736 if (!t->fs.dirsteer && t->fs.iq) {
6741 /* IPv6 filter idx must be 4 aligned */
6742 if (t->fs.type == 1 &&
6743 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
/* Lazily allocate the filter table the first time a filter is set. */
6748 if (sc->tids.ftid_tab == NULL) {
6749 KASSERT(sc->tids.ftids_in_use == 0,
6750 ("%s: no memory allocated but filters_in_use > 0",
6753 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6754 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6755 if (sc->tids.ftid_tab == NULL) {
6759 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
/* An IPv6 filter occupies 4 slots; all must be free. */
6762 for (i = 0; i < 4; i++) {
6763 f = &sc->tids.ftid_tab[t->idx + i];
6765 if (f->pending || f->valid) {
6774 if (t->fs.type == 0)
6778 f = &sc->tids.ftid_tab[t->idx];
6781 rc = set_filter_wr(sc, t->idx);
6783 end_synchronized_op(sc, 0);
/* Wait (interruptibly) for the firmware reply to settle the filter. */
6786 mtx_lock(&sc->tids.ftid_lock);
6788 if (f->pending == 0) {
6789 rc = f->valid ? 0 : EIO;
6793 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6794 PCATCH, "t4setfw", 0)) {
6799 mtx_unlock(&sc->tids.ftid_lock);
/*
 * Validate and delete the hardware filter at t->idx: send the delete
 * work request, then sleep until the firmware reply clears f->pending.
 * Returns the filter's spec in t->fs as extra info for the caller.
 */
6805 del_filter(struct adapter *sc, struct t4_filter *t)
6807 unsigned int nfilters;
6808 struct filter_entry *f;
6811 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6815 nfilters = sc->tids.nftids;
6817 if (nfilters == 0) {
6822 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6823 t->idx >= nfilters) {
6828 if (!(sc->flags & FULL_INIT_DONE)) {
6833 f = &sc->tids.ftid_tab[t->idx];
6845 t->fs = f->fs; /* extra info for the caller */
6846 rc = del_filter_wr(sc, t->idx);
6850 end_synchronized_op(sc, 0);
/* Wait (interruptibly) for the firmware delete confirmation. */
6853 mtx_lock(&sc->tids.ftid_lock);
6855 if (f->pending == 0) {
/* After a delete, a still-valid filter means the delete failed. */
6856 rc = f->valid ? EIO : 0;
6860 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6861 PCATCH, "t4delfw", 0)) {
6866 mtx_unlock(&sc->tids.ftid_lock);
/*
 * Release a filter entry's L2T reference (if any) and zero the entry.
 */
6873 clear_filter(struct filter_entry *f)
6876 t4_l2t_release(f->l2t);
6878 bzero(f, sizeof (*f));
/*
 * Build and send the FW_FILTER_WR work request that programs filter
 * 'fidx' into hardware.  Allocates a switching L2T entry first if the
 * filter rewrites the destination MAC or VLAN.  Replies arrive on the
 * firmware event queue and are handled by t4_filter_rpl().
 */
6882 set_filter_wr(struct adapter *sc, int fidx)
6884 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6886 struct fw_filter_wr *fwr;
6889 ASSERT_SYNCHRONIZED_OP(sc);
6891 if (f->fs.newdmac || f->fs.newvlan) {
6892 /* This filter needs an L2T entry; allocate one. */
6893 f->l2t = t4_l2t_alloc_switching(sc->l2t);
6896 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
6898 t4_l2t_release(f->l2t);
/* Hardware filter id = base + index within the filter region. */
6904 ftid = sc->tids.ftid_base + fidx;
6906 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6911 bzero(fwr, sizeof (*fwr));
6913 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
6914 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
6916 htobe32(V_FW_FILTER_WR_TID(ftid) |
6917 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
6918 V_FW_FILTER_WR_NOREPLY(0) |
6919 V_FW_FILTER_WR_IQ(f->fs.iq));
6920 fwr->del_filter_to_l2tix =
6921 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
6922 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
6923 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
6924 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
6925 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
6926 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
6927 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
6928 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
/* VLAN_REWRITE is remove + insert, so it sets both bits. */
6929 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
6930 f->fs.newvlan == VLAN_REWRITE) |
6931 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
6932 f->fs.newvlan == VLAN_REWRITE) |
6933 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
6934 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
6935 V_FW_FILTER_WR_PRIO(f->fs.prio) |
6936 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
6937 fwr->ethtype = htobe16(f->fs.val.ethtype);
6938 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
6939 fwr->frag_to_ovlan_vldm =
6940 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
6941 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
6942 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
6943 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
6944 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
6945 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
6947 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
6948 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
6949 fwr->maci_to_matchtypem =
6950 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
6951 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
6952 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
6953 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
6954 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
6955 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
6956 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
6957 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
6958 fwr->ptcl = f->fs.val.proto;
6959 fwr->ptclm = f->fs.mask.proto;
6960 fwr->ttyp = f->fs.val.tos;
6961 fwr->ttypm = f->fs.mask.tos;
6962 fwr->ivlan = htobe16(f->fs.val.vlan);
6963 fwr->ivlanm = htobe16(f->fs.mask.vlan);
6964 fwr->ovlan = htobe16(f->fs.val.vnic);
6965 fwr->ovlanm = htobe16(f->fs.mask.vnic);
/* Filter "local" = destination, "foreign" = source, in fw terms. */
6966 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
6967 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
6968 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
6969 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
6970 fwr->lp = htobe16(f->fs.val.dport);
6971 fwr->lpm = htobe16(f->fs.mask.dport);
6972 fwr->fp = htobe16(f->fs.val.sport);
6973 fwr->fpm = htobe16(f->fs.mask.sport);
6975 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
6978 sc->tids.ftids_in_use++;
/*
 * Build and send the work request that deletes filter 'fidx'; the reply
 * is directed at the firmware event queue (handled by t4_filter_rpl).
 */
6985 del_filter_wr(struct adapter *sc, int fidx)
6987 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6989 struct fw_filter_wr *fwr;
6992 ftid = sc->tids.ftid_base + fidx;
6994 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6998 bzero(fwr, sizeof (*fwr));
7000 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
/*
 * CPL handler for filter work-request replies.  Matches the TID back to
 * a filter-table entry, marks the filter valid/invalid based on the
 * firmware cookie, and wakes up any thread sleeping in set_filter() or
 * del_filter().
 */
7008 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7010 struct adapter *sc = iq->adapter;
7011 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7012 unsigned int idx = GET_TID(rpl);
7014 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
/* Only handle TIDs that fall within our filter region. */
7017 if (idx >= sc->tids.ftid_base &&
7018 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7019 unsigned int rc = G_COOKIE(rpl->cookie);
7020 struct filter_entry *f = &sc->tids.ftid_tab[idx];
7022 mtx_lock(&sc->tids.ftid_lock);
7023 if (rc == FW_FILTER_WR_FLT_ADDED) {
7024 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7026 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7027 f->pending = 0; /* asynchronous setup completed */
7030 if (rc != FW_FILTER_WR_FLT_DELETED) {
7031 /* Add or delete failed, display an error */
7033 "filter %u setup failed with error %u\n",
7038 sc->tids.ftids_in_use--;
7040 wakeup(&sc->tids.ftid_tab);
7041 mtx_unlock(&sc->tids.ftid_lock);
/*
 * Read an SGE context (egress/ingress/FL/CNM) for the ioctl interface.
 * Prefers the firmware read path when the firmware is healthy; falls
 * back to a direct backdoor register read otherwise.
 */
7048 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7052 if (cntxt->cid > M_CTXTQID)
7055 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7056 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7059 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7063 if (sc->flags & FW_OK) {
7064 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7071 * Read via firmware failed or wasn't even attempted. Read directly via
7074 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7076 end_synchronized_op(sc, 0);
/*
 * Flash a new firmware image supplied from userland.  Refused once the
 * adapter is fully initialized (traffic could be affected).  Copies the
 * image into a kernel buffer before handing it to t4_load_fw().
 */
7081 load_fw(struct adapter *sc, struct t4_data *fw)
7086 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7090 if (sc->flags & FULL_INIT_DONE) {
7095 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7096 if (fw_data == NULL) {
7101 rc = copyin(fw->data, fw_data, fw->len);
7103 rc = -t4_load_fw(sc, fw_data, fw->len);
7105 free(fw_data, M_CXGBE);
7107 end_synchronized_op(sc, 0);
/*
 * Copy a range of adapter memory out to userland through a PCIe memory
 * window: validate the range, then repeatedly position the window, read
 * up to one aperture's worth of 32-bit words into a bounce buffer, and
 * copyout.
 */
7112 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7114 uint32_t addr, off, remaining, i, n;
7116 uint32_t mw_base, mw_aperture;
7120 rc = validate_mem_range(sc, mr->addr, mr->len);
7124 memwin_info(sc, win, &mw_base, &mw_aperture);
/* Bounce buffer: at most one window aperture per iteration. */
7125 buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7127 remaining = mr->len;
7128 dst = (void *)mr->data;
7131 off = position_memwin(sc, win, addr);
7133 /* number of bytes that we'll copy in the inner loop */
7134 n = min(remaining, mw_aperture - off);
7135 for (i = 0; i < n; i += 4)
7136 *b++ = t4_read_reg(sc, mw_base + off + i);
7138 rc = copyout(buf, dst, n);
/*
 * Read one byte from a port's I2C device (e.g. transceiver EEPROM) via
 * firmware.  Only single-byte reads are supported for now.
 */
7153 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7157 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7160 if (i2cd->len > 1) {
7161 /* XXX: need fw support for longer reads in one go */
7165 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7168 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7169 i2cd->offset, &i2cd->data[0]);
7170 end_synchronized_op(sc, 0);
/*
 * OS glue: return the config-space offset of PCI capability 'cap',
 * or 0 if the device doesn't have it.
 */
7176 t4_os_find_pci_capability(struct adapter *sc, int cap)
7180 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
/* OS glue: snapshot the device's PCI config space (pre-reset). */
7184 t4_os_pci_save_state(struct adapter *sc)
7187 struct pci_devinfo *dinfo;
7190 dinfo = device_get_ivars(dev);
7192 pci_cfg_save(dev, dinfo, 0);
/* OS glue: restore the PCI config space saved by t4_os_pci_save_state. */
7197 t4_os_pci_restore_state(struct adapter *sc)
7200 struct pci_devinfo *dinfo;
7203 dinfo = device_get_ivars(dev);
7205 pci_cfg_restore(dev, dinfo);
/*
 * Log a transceiver (module) change event on a port: unplugged, unknown,
 * unsupported, a recognized module type, or a raw type number as the
 * fallback.
 */
7210 t4_os_portmod_changed(const struct adapter *sc, int idx)
7212 struct port_info *pi = sc->port[idx];
7213 static const char *mod_str[] = {
7214 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7217 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7218 if_printf(pi->ifp, "transceiver unplugged.\n");
7219 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7220 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7221 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7222 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7223 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7224 if_printf(pi->ifp, "%s transceiver inserted.\n",
7225 mod_str[pi->mod_type]);
7227 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
/*
 * Propagate a link state change to the network stack: update the ifnet
 * baudrate and link state on link-up; record the link-down reason and
 * report link-down otherwise.
 */
7233 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7235 struct port_info *pi = sc->port[idx];
7236 struct ifnet *ifp = pi->ifp;
7240 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7241 if_link_state_change(ifp, LINK_STATE_UP);
7244 pi->linkdnrc = reason;
7245 if_link_state_change(ifp, LINK_STATE_DOWN);
/*
 * Invoke 'func(sc, arg)' for every adapter on the global t4_list,
 * holding the list lock across the walk.
 */
7250 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7254 mtx_lock(&t4_list_lock);
7255 SLIST_FOREACH(sc, &t4_list, link) {
7257 * func should not make any assumptions about what state sc is
7258 * in - the only guarantee is that sc->sc_lock is a valid lock.
7262 mtx_unlock(&t4_list_lock);
/* Character-device open: no per-open state (body elided in this view). */
7266 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
/* Character-device close: no per-open state (body elided in this view). */
7272 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
/*
 * Character-device ioctl dispatcher for the CHELSIO_T4_* management
 * interface: raw register get/set, register dumps, filter mode/CRUD,
 * SGE context reads, firmware load, card memory and I2C reads, and
 * per-port statistics clearing.  Requires PRIV_DRIVER.
 */
7278 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7282 struct adapter *sc = dev->si_drv1;
7284 rc = priv_check(td, PRIV_DRIVER);
7289 case CHELSIO_T4_GETREG: {
7290 struct t4_reg *edata = (struct t4_reg *)data;
/* Register offsets must be 4-byte aligned and within the BAR. */
7292 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7295 if (edata->size == 4)
7296 edata->val = t4_read_reg(sc, edata->addr);
7297 else if (edata->size == 8)
7298 edata->val = t4_read_reg64(sc, edata->addr);
7304 case CHELSIO_T4_SETREG: {
7305 struct t4_reg *edata = (struct t4_reg *)data;
7307 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7310 if (edata->size == 4) {
/* A 32-bit write must not carry bits in the upper half. */
7311 if (edata->val & 0xffffffff00000000)
7313 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7314 } else if (edata->size == 8)
7315 t4_write_reg64(sc, edata->addr, edata->val);
7320 case CHELSIO_T4_REGDUMP: {
7321 struct t4_regdump *regs = (struct t4_regdump *)data;
7322 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7325 if (regs->len < reglen) {
7326 regs->len = reglen; /* hint to the caller */
7331 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7332 t4_get_regs(sc, regs, buf);
7333 rc = copyout(buf, regs->data, reglen);
7337 case CHELSIO_T4_GET_FILTER_MODE:
7338 rc = get_filter_mode(sc, (uint32_t *)data);
7340 case CHELSIO_T4_SET_FILTER_MODE:
7341 rc = set_filter_mode(sc, *(uint32_t *)data);
7343 case CHELSIO_T4_GET_FILTER:
7344 rc = get_filter(sc, (struct t4_filter *)data);
7346 case CHELSIO_T4_SET_FILTER:
7347 rc = set_filter(sc, (struct t4_filter *)data);
7349 case CHELSIO_T4_DEL_FILTER:
7350 rc = del_filter(sc, (struct t4_filter *)data);
7352 case CHELSIO_T4_GET_SGE_CONTEXT:
7353 rc = get_sge_context(sc, (struct t4_sge_context *)data);
7355 case CHELSIO_T4_LOAD_FW:
7356 rc = load_fw(sc, (struct t4_data *)data);
7358 case CHELSIO_T4_GET_MEM:
7359 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7361 case CHELSIO_T4_GET_I2C:
7362 rc = read_i2c(sc, (struct t4_i2c_data *)data);
7364 case CHELSIO_T4_CLEAR_STATS: {
7366 u_int port_id = *(uint32_t *)data;
7367 struct port_info *pi;
7369 if (port_id >= sc->params.nports)
/* Clear MAC-level stats in hardware, then the software counters. */
7373 t4_clr_port_stats(sc, port_id);
7375 pi = sc->port[port_id];
7376 if (pi->flags & PORT_INIT_DONE) {
7377 struct sge_rxq *rxq;
7378 struct sge_txq *txq;
7379 struct sge_wrq *wrq;
7381 for_each_rxq(pi, i, rxq) {
7382 #if defined(INET) || defined(INET6)
7383 rxq->lro.lro_queued = 0;
7384 rxq->lro.lro_flushed = 0;
7387 rxq->vlan_extraction = 0;
7390 for_each_txq(pi, i, txq) {
7393 txq->vlan_insertion = 0;
7397 txq->txpkts_wrs = 0;
7398 txq->txpkts_pkts = 0;
7399 txq->br->br_drops = 0;
7405 /* nothing to clear for each ofld_rxq */
7407 for_each_ofld_txq(pi, i, wrq) {
7412 wrq = &sc->sge.ctrlq[pi->port_id];
/*
 * Enable or disable TCP offload (TOE) on a port.  Enabling brings the
 * port fully up if needed, activates the TOM upper-layer driver on
 * first use (requires t4_tom.ko), and marks the port in the adapter's
 * offload map; disabling clears the map bit.
 */
7427 toe_capability(struct port_info *pi, int enable)
7430 struct adapter *sc = pi->adapter;
7432 ASSERT_SYNCHRONIZED_OP(sc);
7434 if (!is_offload(sc))
7438 if (!(sc->flags & FULL_INIT_DONE)) {
7439 rc = cxgbe_init_synchronized(pi);
/* Already enabled on this port: nothing to do. */
7444 if (isset(&sc->offload_map, pi->port_id))
7447 if (!(sc->flags & TOM_INIT_DONE)) {
7448 rc = t4_activate_uld(sc, ULD_TOM);
7451 "You must kldload t4_tom.ko before trying "
7452 "to enable TOE on a cxgbe interface.\n");
7456 KASSERT(sc->tom_softc != NULL,
7457 ("%s: TOM activated but softc NULL", __func__));
7458 KASSERT(sc->flags & TOM_INIT_DONE,
7459 ("%s: TOM activated but flag not set", __func__));
7462 setbit(&sc->offload_map, pi->port_id);
7464 if (!isset(&sc->offload_map, pi->port_id))
7467 KASSERT(sc->flags & TOM_INIT_DONE,
7468 ("%s: TOM never initialized?", __func__));
7469 clrbit(&sc->offload_map, pi->port_id);
* Add an upper layer driver to the global list.
* Fails (elided branch) if a driver with the same uld_id is already
* registered.
7479 t4_register_uld(struct uld_info *ui)
7484 mtx_lock(&t4_uld_list_lock);
7485 SLIST_FOREACH(u, &t4_uld_list, link) {
7486 if (u->uld_id == ui->uld_id) {
7492 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7495 mtx_unlock(&t4_uld_list_lock);
/*
 * t4_unregister_uld - remove an upper-layer driver from the global list.
 * Refuses to remove a ULD that is still in use (refcount > 0).
 * NOTE(review): listing is elided — the EBUSY/ENXIO return statements
 * between the visible lines are not shown.
 */
7500 t4_unregister_uld(struct uld_info *ui)
7505 mtx_lock(&t4_uld_list_lock);
/* Locate the entry; only unlink it when no adapter still references it. */
7507 SLIST_FOREACH(u, &t4_uld_list, link) {
7509 if (ui->refcount > 0) {
7514 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7520 mtx_unlock(&t4_uld_list_lock);
/*
 * t4_activate_uld - look up the ULD with the given id and call its
 * activate hook for this adapter.  Returns the hook's status (the
 * not-found return value is elided from this listing).
 */
7525 t4_activate_uld(struct adapter *sc, int id)
7528 struct uld_info *ui;
/* Caller must hold the adapter's synchronized-operation lock. */
7530 ASSERT_SYNCHRONIZED_OP(sc);
7532 mtx_lock(&t4_uld_list_lock);
/* Find the matching ULD and dispatch; refcount bookkeeping is elided. */
7534 SLIST_FOREACH(ui, &t4_uld_list, link) {
7535 if (ui->uld_id == id) {
7536 rc = ui->activate(sc);
7543 mtx_unlock(&t4_uld_list_lock);
/*
 * t4_deactivate_uld - mirror of t4_activate_uld: look up the ULD with
 * the given id and call its deactivate hook for this adapter.
 */
7549 t4_deactivate_uld(struct adapter *sc, int id)
7552 struct uld_info *ui;
/* Caller must hold the adapter's synchronized-operation lock. */
7554 ASSERT_SYNCHRONIZED_OP(sc);
7556 mtx_lock(&t4_uld_list_lock);
/* Find the matching ULD and dispatch; refcount bookkeeping is elided. */
7558 SLIST_FOREACH(ui, &t4_uld_list, link) {
7559 if (ui->uld_id == id) {
7560 rc = ui->deactivate(sc);
7567 mtx_unlock(&t4_uld_list_lock);
7574 * Come up with reasonable defaults for some of the tunables, provided they're
7575 * not set by the user (in which case we'll use the values as is).
/*
 * tweak_tunables - clamp/default the module tunables at load time.
 * Queue counts are capped at the CPU count; interrupt holdoff indices
 * are range-checked; queue sizes get minimums and alignment.
 */
7578 tweak_tunables(void)
7580 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
/* NIC tx/rx queue counts: no more queues than CPUs (guards elided). */
7583 t4_ntxq10g = min(nc, NTXQ_10G);
7586 t4_ntxq1g = min(nc, NTXQ_1G);
7589 t4_nrxq10g = min(nc, NRXQ_10G);
7592 t4_nrxq1g = min(nc, NRXQ_1G);
/* Offload queue counts, same CPU cap, only when still at the sentinel. */
7595 if (t4_nofldtxq10g < 1)
7596 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7598 if (t4_nofldtxq1g < 1)
7599 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7601 if (t4_nofldrxq10g < 1)
7602 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7604 if (t4_nofldrxq1g < 1)
7605 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
/*
 * NOTE(review): these two identical-looking checks are almost certainly
 * the TCP_OFFLOAD and !TCP_OFFLOAD halves of an #ifdef/#else pair whose
 * preprocessor lines are elided from this listing (the second check would
 * otherwise be dead code) — confirm against the full source.
 */
7607 if (t4_toecaps_allowed == -1)
7608 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7610 if (t4_toecaps_allowed == -1)
7611 t4_toecaps_allowed = 0;
/* Interrupt holdoff timer/packet-count indices must be in SGE range;
 * -1 is a valid pktc index (means "disabled"), hence the < -1 test. */
7614 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7615 t4_tmr_idx_10g = TMR_IDX_10G;
7617 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7618 t4_pktc_idx_10g = PKTC_IDX_10G;
7620 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7621 t4_tmr_idx_1g = TMR_IDX_1G;
7623 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7624 t4_pktc_idx_1g = PKTC_IDX_1G;
/* Minimum queue sizes (assignments elided); rxq size forced to a
 * multiple of 8 by the loop below. */
7626 if (t4_qsize_txq < 128)
7629 if (t4_qsize_rxq < 128)
7631 while (t4_qsize_rxq & 7)
/* Mask off any interrupt-type bits other than MSI-X/MSI/INTx. */
7634 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
/*
 * mod_event - module load/unload handler shared by the t4nex/t5nex
 * drivers.  Initializes the global adapter and ULD lists exactly once
 * (first load) and tears them down on the last unload, refusing to
 * unload while either list is non-empty.
 * NOTE(review): the MOD_LOAD/MOD_UNLOAD case labels and EBUSY returns
 * are elided from this listing.
 */
7638 mod_event(module_t mod, int cmd, void *arg)
7641 static int loaded = 0;
/* Load path: only the first loader (fetchadd returns 0) initializes. */
7645 if (atomic_fetchadd_int(&loaded, 1))
7648 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
7649 SLIST_INIT(&t4_list);
7651 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
7652 SLIST_INIT(&t4_uld_list);
/* Unload path: only the last unloader tears the globals down. */
7658 if (atomic_fetchadd_int(&loaded, -1) > 1)
/* Registered ULDs still present → unload must fail (return elided). */
7661 mtx_lock(&t4_uld_list_lock);
7662 if (!SLIST_EMPTY(&t4_uld_list)) {
7664 mtx_unlock(&t4_uld_list_lock);
7667 mtx_unlock(&t4_uld_list_lock);
7668 mtx_destroy(&t4_uld_list_lock);
/* Attached adapters still present → unload must fail (return elided). */
7670 mtx_lock(&t4_list_lock);
7671 if (!SLIST_EMPTY(&t4_list)) {
7673 mtx_unlock(&t4_list_lock);
7676 mtx_unlock(&t4_list_lock);
7677 mtx_destroy(&t4_list_lock);
/*
 * Module glue: devclass storage plus DRIVER_MODULE/MODULE_* registrations.
 * t4nex/t5nex are the PCI nexus drivers (T4 and T5 chips, sharing
 * mod_event above); cxgbe/cxl are the per-port network interface drivers
 * that attach beneath them.  Both nexus drivers depend on firmware(9).
 */
7684 static devclass_t t4_devclass, t5_devclass;
7685 static devclass_t cxgbe_devclass, cxl_devclass;
7687 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7688 MODULE_VERSION(t4nex, 1);
7689 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7691 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7692 MODULE_VERSION(t5nex, 1);
7693 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7695 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7696 MODULE_VERSION(cxgbe, 1);
7698 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7699 MODULE_VERSION(cxl, 1);