2 * Copyright (c) 2011 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
35 #include <sys/param.h>
38 #include <sys/kernel.h>
40 #include <sys/module.h>
41 #include <sys/malloc.h>
42 #include <sys/queue.h>
43 #include <sys/taskqueue.h>
44 #include <sys/pciio.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pci_private.h>
48 #include <sys/firmware.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <net/ethernet.h>
56 #include <net/if_types.h>
57 #include <net/if_dl.h>
58 #include <net/if_vlan_var.h>
60 #include <net/rss_config.h>
62 #if defined(__i386__) || defined(__amd64__)
67 #include "common/common.h"
68 #include "common/t4_msg.h"
69 #include "common/t4_regs.h"
70 #include "common/t4_regs_values.h"
73 #include "t4_mp_ring.h"
75 /* T4 bus driver interface */
76 static int t4_probe(device_t);
77 static int t4_attach(device_t);
78 static int t4_detach(device_t);
79 static device_method_t t4_methods[] = {
80 DEVMETHOD(device_probe, t4_probe),
81 DEVMETHOD(device_attach, t4_attach),
82 DEVMETHOD(device_detach, t4_detach),
86 static driver_t t4_driver = {
89 sizeof(struct adapter)
93 /* T4 port (cxgbe) interface */
94 static int cxgbe_probe(device_t);
95 static int cxgbe_attach(device_t);
96 static int cxgbe_detach(device_t);
97 static device_method_t cxgbe_methods[] = {
98 DEVMETHOD(device_probe, cxgbe_probe),
99 DEVMETHOD(device_attach, cxgbe_attach),
100 DEVMETHOD(device_detach, cxgbe_detach),
103 static driver_t cxgbe_driver = {
106 sizeof(struct port_info)
109 /* T4 VI (vcxgbe) interface */
110 static int vcxgbe_probe(device_t);
111 static int vcxgbe_attach(device_t);
112 static int vcxgbe_detach(device_t);
113 static device_method_t vcxgbe_methods[] = {
114 DEVMETHOD(device_probe, vcxgbe_probe),
115 DEVMETHOD(device_attach, vcxgbe_attach),
116 DEVMETHOD(device_detach, vcxgbe_detach),
119 static driver_t vcxgbe_driver = {
122 sizeof(struct vi_info)
125 static d_ioctl_t t4_ioctl;
126 static d_open_t t4_open;
127 static d_close_t t4_close;
129 static struct cdevsw t4_cdevsw = {
130 .d_version = D_VERSION,
138 /* T5 bus driver interface */
139 static int t5_probe(device_t);
140 static device_method_t t5_methods[] = {
141 DEVMETHOD(device_probe, t5_probe),
142 DEVMETHOD(device_attach, t4_attach),
143 DEVMETHOD(device_detach, t4_detach),
147 static driver_t t5_driver = {
150 sizeof(struct adapter)
154 /* T5 port (cxl) interface */
155 static driver_t cxl_driver = {
158 sizeof(struct port_info)
161 /* T5 VI (vcxl) interface */
162 static driver_t vcxl_driver = {
165 sizeof(struct vi_info)
168 static struct cdevsw t5_cdevsw = {
169 .d_version = D_VERSION,
177 /* ifnet + media interface */
178 static void cxgbe_init(void *);
179 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
180 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
181 static void cxgbe_qflush(struct ifnet *);
182 static int cxgbe_media_change(struct ifnet *);
183 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
185 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
188 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
189 * then ADAPTER_LOCK, then t4_uld_list_lock.
191 static struct sx t4_list_lock;
192 SLIST_HEAD(, adapter) t4_list;
194 static struct sx t4_uld_list_lock;
195 SLIST_HEAD(, uld_info) t4_uld_list;
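/*
 * For example, code that walks t4_list and needs an adapter's lock must nest
 * in that order (a sketch only; ADAPTER_LOCK/ADAPTER_UNLOCK are the adapter
 * mutex macros from adapter.h):
 *
 *	sx_slock(&t4_list_lock);
 *	SLIST_FOREACH(sc, &t4_list, link) {
 *		ADAPTER_LOCK(sc);
 *		...
 *		ADAPTER_UNLOCK(sc);
 *	}
 *	sx_sunlock(&t4_list_lock);
 */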
199 * Tunables. See tweak_tunables() too.
201 * Each tunable is set to a default value here if it's known at compile-time.
202 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
203 * provide a reasonable default when the driver is loaded.
205 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
206 * T5 are under hw.cxl.
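/*
 * For example, any of these tunables can be set from /boot/loader.conf before
 * the module loads (values below are illustrations only, not recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.qsize_txq="1024"
 *	hw.cxl.write_combine="1"
 */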
210 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
213 static int t4_ntxq10g = -1;
214 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
217 static int t4_nrxq10g = -1;
218 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
221 static int t4_ntxq1g = -1;
222 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
225 static int t4_nrxq1g = -1;
226 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
228 static int t4_rsrv_noflowq = 0;
229 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
232 #define NOFLDTXQ_10G 8
233 static int t4_nofldtxq10g = -1;
234 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
236 #define NOFLDRXQ_10G 2
237 static int t4_nofldrxq10g = -1;
238 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
240 #define NOFLDTXQ_1G 2
241 static int t4_nofldtxq1g = -1;
242 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
244 #define NOFLDRXQ_1G 1
245 static int t4_nofldrxq1g = -1;
246 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
251 static int t4_nnmtxq10g = -1;
252 TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
255 static int t4_nnmrxq10g = -1;
256 TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
259 static int t4_nnmtxq1g = -1;
260 TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
263 static int t4_nnmrxq1g = -1;
264 TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
268 * Holdoff parameters for 10G and 1G ports.
270 #define TMR_IDX_10G 1
271 static int t4_tmr_idx_10g = TMR_IDX_10G;
272 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
274 #define PKTC_IDX_10G (-1)
275 static int t4_pktc_idx_10g = PKTC_IDX_10G;
276 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
279 static int t4_tmr_idx_1g = TMR_IDX_1G;
280 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
282 #define PKTC_IDX_1G (-1)
283 static int t4_pktc_idx_1g = PKTC_IDX_1G;
284 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
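/*
 * A sketch of the semantics: hw.cxgbe.holdoff_timer_idx_10G="1" selects an
 * index into the SGE holdoff timer table, while the default of -1 for
 * hw.cxgbe.holdoff_pktc_idx_10G leaves packet-count based holdoff disabled.
 */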
287 * Size (# of entries) of each tx and rx queue.
289 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
290 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
292 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
293 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
296 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
298 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
299 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
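/*
 * For example, hw.cxgbe.interrupt_types="4" restricts the driver to MSI-X,
 * "2" to MSI, and "1" to INTx; the default of 7 allows all three and the
 * driver picks the best type that is actually available.
 */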
302 * Configuration file.
304 #define DEFAULT_CF "default"
305 #define FLASH_CF "flash"
306 #define UWIRE_CF "uwire"
307 #define FPGA_CF "fpga"
308 static char t4_cfg_file[32] = DEFAULT_CF;
309 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
312 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
313 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
314 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
315 * mark or when signalled to do so, 0 to never emit PAUSE.
317 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
318 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
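/*
 * For example, hw.cxgbe.pause_settings="0" disables flow control in both
 * directions, "1" enables rx_pause only, and the default of 3 enables both
 * rx_pause and tx_pause.
 */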
321 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
322 * encouraged respectively).
324 static unsigned int t4_fw_install = 1;
325 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
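/*
 * Roughly: hw.cxgbe.fw_install="0" makes the driver use whatever compatible
 * firmware is already on the card, "1" (the default) lets it install the
 * bundled firmware only when needed, and "2" lets it install the bundled
 * firmware even when the one on the card would have worked.
 */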
328 * ASIC features that will be used. Disable the ones you don't want so that the
329 * chip resources aren't wasted on features that will not be used.
331 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
332 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
334 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
335 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
337 static int t4_toecaps_allowed = -1;
338 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
340 static int t4_rdmacaps_allowed = 0;
341 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
343 static int t4_iscsicaps_allowed = 0;
344 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
346 static int t4_fcoecaps_allowed = 0;
347 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
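/*
 * For example, hw.cxgbe.toecaps_allowed="0" prevents the TOE from ever being
 * enabled, while the default of -1 defers to tweak_tunables() as noted above.
 */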
349 static int t5_write_combine = 0;
350 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
352 static int t4_num_vis = 1;
353 TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
355 /* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
356 static int vi_mac_funcs[] = {
359 FW_VI_FUNC_OPENISCSI,
365 struct intrs_and_queues {
366 uint16_t intr_type; /* INTx, MSI, or MSI-X */
367 uint16_t nirq; /* Total # of vectors */
368 uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
369 uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
370 uint16_t ntxq10g; /* # of NIC txq's for each 10G port */
371 uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */
372 uint16_t ntxq1g; /* # of NIC txq's for each 1G port */
373 uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */
374 uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */
376 uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */
377 uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */
378 uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */
379 uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */
382 uint16_t nnmtxq10g; /* # of netmap txq's for each 10G port */
383 uint16_t nnmrxq10g; /* # of netmap rxq's for each 10G port */
384 uint16_t nnmtxq1g; /* # of netmap txq's for each 1G port */
385 uint16_t nnmrxq1g; /* # of netmap rxq's for each 1G port */
389 struct filter_entry {
390 uint32_t valid:1; /* filter allocated and valid */
391 uint32_t locked:1; /* filter is administratively locked */
392 uint32_t pending:1; /* filter action is pending firmware reply */
393 uint32_t smtidx:8; /* Source MAC Table index for smac */
394 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
396 struct t4_filter_specification fs;
399 static int map_bars_0_and_4(struct adapter *);
400 static int map_bar_2(struct adapter *);
401 static void setup_memwin(struct adapter *);
402 static int validate_mem_range(struct adapter *, uint32_t, int);
403 static int fwmtype_to_hwmtype(int);
404 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
406 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
407 static uint32_t position_memwin(struct adapter *, int, uint32_t);
408 static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
409 struct intrs_and_queues *);
410 static int prep_firmware(struct adapter *);
411 static int partition_resources(struct adapter *, const struct firmware *,
413 static int get_params__pre_init(struct adapter *);
414 static int get_params__post_init(struct adapter *);
415 static int set_params__post_init(struct adapter *);
416 static void t4_set_desc(struct adapter *);
417 static void build_medialist(struct port_info *, struct ifmedia *);
418 static int cxgbe_init_synchronized(struct vi_info *);
419 static int cxgbe_uninit_synchronized(struct vi_info *);
420 static int setup_intr_handlers(struct adapter *);
421 static void quiesce_txq(struct adapter *, struct sge_txq *);
422 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
423 static void quiesce_iq(struct adapter *, struct sge_iq *);
424 static void quiesce_fl(struct adapter *, struct sge_fl *);
425 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
426 driver_intr_t *, void *, char *);
427 static int t4_free_irq(struct adapter *, struct irq *);
428 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
430 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
431 static void vi_refresh_stats(struct adapter *, struct vi_info *);
432 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
433 static void cxgbe_tick(void *);
434 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
435 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
437 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
438 static int fw_msg_not_handled(struct adapter *, const __be64 *);
439 static void t4_sysctls(struct adapter *);
440 static void cxgbe_sysctls(struct port_info *);
441 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
442 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
443 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
444 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
445 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
446 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
447 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
448 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
449 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
450 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
451 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
453 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
454 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
455 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
456 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
457 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
458 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
459 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
460 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
461 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
462 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
463 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
464 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
465 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
466 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
467 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
468 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
469 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
470 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
471 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
472 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
473 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
474 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
475 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
476 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
477 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
479 static uint32_t fconf_to_mode(uint32_t);
480 static uint32_t mode_to_fconf(uint32_t);
481 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
482 static int get_filter_mode(struct adapter *, uint32_t *);
483 static int set_filter_mode(struct adapter *, uint32_t);
484 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
485 static int get_filter(struct adapter *, struct t4_filter *);
486 static int set_filter(struct adapter *, struct t4_filter *);
487 static int del_filter(struct adapter *, struct t4_filter *);
488 static void clear_filter(struct filter_entry *);
489 static int set_filter_wr(struct adapter *, int);
490 static int del_filter_wr(struct adapter *, int);
491 static int get_sge_context(struct adapter *, struct t4_sge_context *);
492 static int load_fw(struct adapter *, struct t4_data *);
493 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
494 static int read_i2c(struct adapter *, struct t4_i2c_data *);
495 static int set_sched_class(struct adapter *, struct t4_sched_params *);
496 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
498 static int toe_capability(struct vi_info *, int);
500 static int mod_event(module_t, int, void *);
506 {0xa000, "Chelsio Terminator 4 FPGA"},
507 {0x4400, "Chelsio T440-dbg"},
508 {0x4401, "Chelsio T420-CR"},
509 {0x4402, "Chelsio T422-CR"},
510 {0x4403, "Chelsio T440-CR"},
511 {0x4404, "Chelsio T420-BCH"},
512 {0x4405, "Chelsio T440-BCH"},
513 {0x4406, "Chelsio T440-CH"},
514 {0x4407, "Chelsio T420-SO"},
515 {0x4408, "Chelsio T420-CX"},
516 {0x4409, "Chelsio T420-BT"},
517 {0x440a, "Chelsio T404-BT"},
518 {0x440e, "Chelsio T440-LP-CR"},
520 {0xb000, "Chelsio Terminator 5 FPGA"},
521 {0x5400, "Chelsio T580-dbg"},
522 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
523 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
524 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
525 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
526 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
527 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
528 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
529 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
530 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
531 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
532 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
533 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
534 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
536 {0x5404, "Chelsio T520-BCH"},
537 {0x5405, "Chelsio T540-BCH"},
538 {0x5406, "Chelsio T540-CH"},
539 {0x5408, "Chelsio T520-CX"},
540 {0x540b, "Chelsio B520-SR"},
541 {0x540c, "Chelsio B504-BT"},
542 {0x540f, "Chelsio Amsterdam"},
543 {0x5413, "Chelsio T580-CHR"},
549 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be
550 * exactly the same for both rxq and ofld_rxq.
552 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
553 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
556 /* No easy way to include t4_msg.h before adapter.h so we check this way */
557 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
558 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
560 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
563 t4_probe(device_t dev)
566 uint16_t v = pci_get_vendor(dev);
567 uint16_t d = pci_get_device(dev);
568 uint8_t f = pci_get_function(dev);
570 if (v != PCI_VENDOR_ID_CHELSIO)
573 /* Attach only to PF0 of the FPGA */
574 if (d == 0xa000 && f != 0)
577 for (i = 0; i < nitems(t4_pciids); i++) {
578 if (d == t4_pciids[i].device) {
579 device_set_desc(dev, t4_pciids[i].desc);
580 return (BUS_PROBE_DEFAULT);
588 t5_probe(device_t dev)
591 uint16_t v = pci_get_vendor(dev);
592 uint16_t d = pci_get_device(dev);
593 uint8_t f = pci_get_function(dev);
595 if (v != PCI_VENDOR_ID_CHELSIO)
598 /* Attach only to PF0 of the FPGA */
599 if (d == 0xb000 && f != 0)
602 for (i = 0; i < nitems(t5_pciids); i++) {
603 if (d == t5_pciids[i].device) {
604 device_set_desc(dev, t5_pciids[i].desc);
605 return (BUS_PROBE_DEFAULT);
613 t5_attribute_workaround(device_t dev)
619 * The T5 chips do not properly echo the No Snoop and Relaxed
620 * Ordering attributes when replying to a TLP from a Root
621 * Port. As a workaround, find the parent Root Port and
622 * disable No Snoop and Relaxed Ordering. Note that this
623 * affects all devices under this root port.
625 root_port = pci_find_pcie_root_port(dev);
626 if (root_port == NULL) {
627 device_printf(dev, "Unable to find parent root port\n");
631 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
632 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
633 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
635 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
636 device_get_nameunit(root_port));
640 t4_attach(device_t dev)
643 int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
644 struct intrs_and_queues iaq;
647 int ofld_rqidx, ofld_tqidx;
650 int nm_rqidx, nm_tqidx;
654 sc = device_get_softc(dev);
656 TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);
658 if ((pci_get_device(dev) & 0xff00) == 0x5400)
659 t5_attribute_workaround(dev);
660 pci_enable_busmaster(dev);
661 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
664 pci_set_max_read_req(dev, 4096);
665 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
666 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
667 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
669 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
673 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
674 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
675 device_get_nameunit(dev));
677 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
678 device_get_nameunit(dev));
679 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
680 sx_xlock(&t4_list_lock);
681 SLIST_INSERT_HEAD(&t4_list, sc, link);
682 sx_xunlock(&t4_list_lock);
684 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
685 TAILQ_INIT(&sc->sfl);
686 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
688 mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
690 rc = map_bars_0_and_4(sc);
692 goto done; /* error message displayed already */
695 * This is the real PF# to which we're attaching. Works from within PCI
696 * passthrough environments too, where pci_get_function() could return a
697 * different PF# depending on the passthrough configuration. We need to
698 * use the real PF# in all our communication with the firmware.
700 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
703 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
704 sc->an_handler = an_not_handled;
705 for (i = 0; i < nitems(sc->cpl_handler); i++)
706 sc->cpl_handler[i] = cpl_not_handled;
707 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
708 sc->fw_msg_handler[i] = fw_msg_not_handled;
709 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
710 t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
711 t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
712 t4_init_sge_cpl_handlers(sc);
714 /* Prepare the adapter for operation */
715 rc = -t4_prep_adapter(sc);
717 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
722 * Do this really early, with the memory windows set up even before the
723 * character device. The userland tool's register i/o and mem read
724 * will work even in "recovery mode".
727 sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
728 device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
729 device_get_nameunit(dev));
730 if (sc->cdev == NULL)
731 device_printf(dev, "failed to create nexus char device.\n");
733 sc->cdev->si_drv1 = sc;
735 /* Go no further if recovery mode has been requested. */
736 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
737 device_printf(dev, "recovery mode.\n");
741 #if defined(__i386__)
742 if ((cpu_feature & CPUID_CX8) == 0) {
743 device_printf(dev, "64 bit atomics not available.\n");
749 /* Prepare the firmware for operation */
750 rc = prep_firmware(sc);
752 goto done; /* error message displayed already */
754 rc = get_params__post_init(sc);
756 goto done; /* error message displayed already */
758 rc = set_params__post_init(sc);
760 goto done; /* error message displayed already */
764 goto done; /* error message displayed already */
766 rc = t4_create_dma_tag(sc);
768 goto done; /* error message displayed already */
771 * Number of VIs to create per-port. The first VI is the
772 * "main" regular VI for the port. The second VI is used for
773 * netmap if present, and any remaining VIs are used for
774 * additional virtual interfaces.
776 * Limit the number of VIs per port to the number of available
777 * MAC addresses per port.
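/*
 * For example, hw.cxgbe.num_vis="2" asks for one extra VI per port on top of
 * the main VI, subject to the vi_mac_funcs limit applied just below.
 */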
780 num_vis = t4_num_vis;
786 if (num_vis > nitems(vi_mac_funcs)) {
787 num_vis = nitems(vi_mac_funcs);
788 device_printf(dev, "Number of VIs limited to %d\n", num_vis);
792 * First pass over all the ports - allocate VIs and initialize some
793 * basic parameters like mac address, port type, etc. We also figure
794 * out whether a port is 10G or 1G and use that information when
795 * calculating how many interrupts to attempt to allocate.
798 for_each_port(sc, i) {
799 struct port_info *pi;
802 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
805 /* These must be set before t4_port_init */
809 pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
813 * Allocate the "main" VI and initialize parameters
816 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
818 device_printf(dev, "unable to initialize port %d: %d\n",
820 free(pi->vi, M_CXGBE);
826 pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
827 pi->link_cfg.requested_fc |= t4_pause_settings;
828 pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
829 pi->link_cfg.fc |= t4_pause_settings;
831 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
833 device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
834 free(pi->vi, M_CXGBE);
840 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
841 device_get_nameunit(dev), i);
842 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
843 sc->chan_map[pi->tx_chan] = i;
845 if (is_10G_port(pi) || is_40G_port(pi)) {
847 for_each_vi(pi, j, vi) {
848 vi->tmr_idx = t4_tmr_idx_10g;
849 vi->pktc_idx = t4_pktc_idx_10g;
853 for_each_vi(pi, j, vi) {
854 vi->tmr_idx = t4_tmr_idx_1g;
855 vi->pktc_idx = t4_pktc_idx_1g;
861 for_each_vi(pi, j, vi) {
862 vi->qsize_rxq = t4_qsize_rxq;
863 vi->qsize_txq = t4_qsize_txq;
867 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
868 if (pi->dev == NULL) {
870 "failed to add device for port %d.\n", i);
874 pi->vi[0].dev = pi->dev;
875 device_set_softc(pi->dev, pi);
879 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
884 rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
886 goto done; /* error message displayed already */
888 sc->intr_type = iaq.intr_type;
889 sc->intr_count = iaq.nirq;
892 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
893 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
895 s->nrxq += (n10g + n1g) * (num_vis - 1);
896 s->ntxq += (n10g + n1g) * (num_vis - 1);
898 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
899 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
900 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
902 if (is_offload(sc)) {
903 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
904 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
906 s->nofldrxq += (n10g + n1g) * (num_vis - 1);
907 s->nofldtxq += (n10g + n1g) * (num_vis - 1);
909 s->neq += s->nofldtxq + s->nofldrxq;
910 s->niq += s->nofldrxq;
912 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
913 M_CXGBE, M_ZERO | M_WAITOK);
914 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
915 M_CXGBE, M_ZERO | M_WAITOK);
919 s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
920 s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
921 s->neq += s->nnmtxq + s->nnmrxq;
924 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
925 M_CXGBE, M_ZERO | M_WAITOK);
926 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
927 M_CXGBE, M_ZERO | M_WAITOK);
930 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
932 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
934 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
936 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
938 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
941 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
944 t4_init_l2t(sc, M_WAITOK);
947 * Second pass over the ports. This time we know the number of rx and
948 * tx queues that each port should get.
952 ofld_rqidx = ofld_tqidx = 0;
955 nm_rqidx = nm_tqidx = 0;
957 for_each_port(sc, i) {
958 struct port_info *pi = sc->port[i];
964 for_each_vi(pi, j, vi) {
967 vi->flags |= VI_NETMAP | INTR_RXQ;
968 vi->first_rxq = nm_rqidx;
969 vi->first_txq = nm_tqidx;
970 if (is_10G_port(pi) || is_40G_port(pi)) {
971 vi->nrxq = iaq.nnmrxq10g;
972 vi->ntxq = iaq.nnmtxq10g;
974 vi->nrxq = iaq.nnmrxq1g;
975 vi->ntxq = iaq.nnmtxq1g;
977 nm_rqidx += vi->nrxq;
978 nm_tqidx += vi->ntxq;
983 vi->first_rxq = rqidx;
984 vi->first_txq = tqidx;
985 if (is_10G_port(pi) || is_40G_port(pi)) {
986 vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
987 vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
988 vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
990 vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
991 vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
992 vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
996 vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
998 vi->rsrv_noflowq = 0;
1004 if (!is_offload(sc))
1006 vi->first_ofld_rxq = ofld_rqidx;
1007 vi->first_ofld_txq = ofld_tqidx;
1008 if (is_10G_port(pi) || is_40G_port(pi)) {
1009 vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
1010 vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1;
1011 vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1;
1013 vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
1014 vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1;
1015 vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1;
1017 ofld_rqidx += vi->nofldrxq;
1018 ofld_tqidx += vi->nofldtxq;
1023 rc = setup_intr_handlers(sc);
1026 "failed to setup interrupt handlers: %d\n", rc);
1030 rc = bus_generic_attach(dev);
1033 "failed to attach all child ports: %d\n", rc);
1038 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1039 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1040 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1041 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1042 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1047 if (rc != 0 && sc->cdev) {
1048 /* cdev was created and so cxgbetool works; recover that way. */
1050 "error during attach, adapter is now in recovery mode.\n");
1066 t4_detach(device_t dev)
1069 struct port_info *pi;
1072 sc = device_get_softc(dev);
1074 if (sc->flags & FULL_INIT_DONE)
1075 t4_intr_disable(sc);
1078 destroy_dev(sc->cdev);
1082 rc = bus_generic_detach(dev);
1085 "failed to detach child devices: %d\n", rc);
1089 for (i = 0; i < sc->intr_count; i++)
1090 t4_free_irq(sc, &sc->irq[i]);
1092 for (i = 0; i < MAX_NPORTS; i++) {
1095 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1097 device_delete_child(dev, pi->dev);
1099 mtx_destroy(&pi->pi_lock);
1100 free(pi->vi, M_CXGBE);
1105 if (sc->flags & FULL_INIT_DONE)
1106 adapter_full_uninit(sc);
1108 if (sc->flags & FW_OK)
1109 t4_fw_bye(sc, sc->mbox);
1111 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1112 pci_release_msi(dev);
1115 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1119 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1123 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1127 t4_free_l2t(sc->l2t);
1130 free(sc->sge.ofld_rxq, M_CXGBE);
1131 free(sc->sge.ofld_txq, M_CXGBE);
1134 free(sc->sge.nm_rxq, M_CXGBE);
1135 free(sc->sge.nm_txq, M_CXGBE);
1137 free(sc->irq, M_CXGBE);
1138 free(sc->sge.rxq, M_CXGBE);
1139 free(sc->sge.txq, M_CXGBE);
1140 free(sc->sge.ctrlq, M_CXGBE);
1141 free(sc->sge.iqmap, M_CXGBE);
1142 free(sc->sge.eqmap, M_CXGBE);
1143 free(sc->tids.ftid_tab, M_CXGBE);
1144 t4_destroy_dma_tag(sc);
1145 if (mtx_initialized(&sc->sc_lock)) {
1146 sx_xlock(&t4_list_lock);
1147 SLIST_REMOVE(&t4_list, sc, adapter, link);
1148 sx_xunlock(&t4_list_lock);
1149 mtx_destroy(&sc->sc_lock);
1152 callout_drain(&sc->sfl_callout);
1153 if (mtx_initialized(&sc->tids.ftid_lock))
1154 mtx_destroy(&sc->tids.ftid_lock);
1155 if (mtx_initialized(&sc->sfl_lock))
1156 mtx_destroy(&sc->sfl_lock);
1157 if (mtx_initialized(&sc->ifp_lock))
1158 mtx_destroy(&sc->ifp_lock);
1159 if (mtx_initialized(&sc->regwin_lock))
1160 mtx_destroy(&sc->regwin_lock);
1162 bzero(sc, sizeof(*sc));
1168 cxgbe_probe(device_t dev)
1171 struct port_info *pi = device_get_softc(dev);
1173 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1174 device_set_desc_copy(dev, buf);
1176 return (BUS_PROBE_DEFAULT);
1179 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1180 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1181 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1182 #define T4_CAP_ENABLE (T4_CAP)
1185 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1190 vi->xact_addr_filt = -1;
1191 callout_init(&vi->tick, 1);
1193 /* Allocate an ifnet and set it up */
1194 ifp = if_alloc(IFT_ETHER);
1196 device_printf(dev, "Cannot allocate ifnet\n");
1202 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1203 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1205 ifp->if_init = cxgbe_init;
1206 ifp->if_ioctl = cxgbe_ioctl;
1207 ifp->if_transmit = cxgbe_transmit;
1208 ifp->if_qflush = cxgbe_qflush;
1209 ifp->if_get_counter = cxgbe_get_counter;
1211 ifp->if_capabilities = T4_CAP;
1213 if (vi->nofldrxq != 0)
1214 ifp->if_capabilities |= IFCAP_TOE;
1216 ifp->if_capenable = T4_CAP_ENABLE;
1217 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1218 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1220 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1221 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1222 ifp->if_hw_tsomaxsegsize = 65536;
1224 /* Initialize ifmedia for this VI */
1225 ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
1226 cxgbe_media_status);
1227 build_medialist(vi->pi, &vi->media);
1229 vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1230 EVENTHANDLER_PRI_ANY);
1232 ether_ifattach(ifp, vi->hw_addr);
1234 sb = sbuf_new_auto();
1235 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1237 if (ifp->if_capabilities & IFCAP_TOE)
1238 sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
1239 vi->nofldtxq, vi->nofldrxq);
1242 device_printf(dev, "%s\n", sbuf_data(sb));
1251 cxgbe_attach(device_t dev)
1253 struct port_info *pi = device_get_softc(dev);
1257 callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1259 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1263 for_each_vi(pi, i, vi) {
1267 if (vi->flags & VI_NETMAP) {
1269 * media handled here to keep
1270 * implementation private to this file
1272 ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
1273 cxgbe_media_status);
1274 build_medialist(pi, &vi->media);
1275 vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
1276 "ncxgbe" : "ncxl", device_get_unit(dev));
1279 vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
1280 "vcxgbe" : "vcxl", -1);
1281 if (vi->dev == NULL) {
1282 device_printf(dev, "failed to add VI %d\n", i);
1285 device_set_softc(vi->dev, vi);
1290 bus_generic_attach(dev);
1296 cxgbe_vi_detach(struct vi_info *vi)
1298 struct ifnet *ifp = vi->ifp;
1300 ether_ifdetach(ifp);
1303 EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
1305 /* Let detach proceed even if these fail. */
1306 cxgbe_uninit_synchronized(vi);
1307 callout_drain(&vi->tick);
1310 ifmedia_removeall(&vi->media);
1316 cxgbe_detach(device_t dev)
1318 struct port_info *pi = device_get_softc(dev);
1319 struct adapter *sc = pi->adapter;
1322 /* Detach the extra VIs first. */
1323 rc = bus_generic_detach(dev);
1326 device_delete_children(dev);
1328 doom_vi(sc, &pi->vi[0]);
1330 if (pi->flags & HAS_TRACEQ) {
1331 sc->traceq = -1; /* cloner should not create ifnet */
1332 t4_tracer_port_detach(sc);
1335 cxgbe_vi_detach(&pi->vi[0]);
1336 callout_drain(&pi->tick);
1338 end_synchronized_op(sc, 0);
1344 cxgbe_init(void *arg)
1346 struct vi_info *vi = arg;
1347 struct adapter *sc = vi->pi->adapter;
1349 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1351 cxgbe_init_synchronized(vi);
1352 end_synchronized_op(sc, 0);
1356 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1358 int rc = 0, mtu, flags, can_sleep;
1359 struct vi_info *vi = ifp->if_softc;
1360 struct adapter *sc = vi->pi->adapter;
1361 struct ifreq *ifr = (struct ifreq *)data;
1367 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1370 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1374 if (vi->flags & VI_INIT_DONE) {
1375 t4_update_fl_bufsize(ifp);
1376 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1377 rc = update_mac_settings(ifp, XGMAC_MTU);
1379 end_synchronized_op(sc, 0);
1385 rc = begin_synchronized_op(sc, vi,
1386 can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1390 if (ifp->if_flags & IFF_UP) {
1391 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1392 flags = vi->if_flags;
1393 if ((ifp->if_flags ^ flags) &
1394 (IFF_PROMISC | IFF_ALLMULTI)) {
1395 if (can_sleep == 1) {
1396 end_synchronized_op(sc, 0);
1400 rc = update_mac_settings(ifp,
1401 XGMAC_PROMISC | XGMAC_ALLMULTI);
1404 if (can_sleep == 0) {
1405 end_synchronized_op(sc, LOCK_HELD);
1409 rc = cxgbe_init_synchronized(vi);
1411 vi->if_flags = ifp->if_flags;
1412 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1413 if (can_sleep == 0) {
1414 end_synchronized_op(sc, LOCK_HELD);
1418 rc = cxgbe_uninit_synchronized(vi);
1420 end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1424 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1425 rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
1428 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1429 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1430 end_synchronized_op(sc, LOCK_HELD);
1434 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1438 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1439 if (mask & IFCAP_TXCSUM) {
1440 ifp->if_capenable ^= IFCAP_TXCSUM;
1441 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1443 if (IFCAP_TSO4 & ifp->if_capenable &&
1444 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1445 ifp->if_capenable &= ~IFCAP_TSO4;
1447 "tso4 disabled due to -txcsum.\n");
1450 if (mask & IFCAP_TXCSUM_IPV6) {
1451 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1452 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1454 if (IFCAP_TSO6 & ifp->if_capenable &&
1455 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1456 ifp->if_capenable &= ~IFCAP_TSO6;
1458 "tso6 disabled due to -txcsum6.\n");
1461 if (mask & IFCAP_RXCSUM)
1462 ifp->if_capenable ^= IFCAP_RXCSUM;
1463 if (mask & IFCAP_RXCSUM_IPV6)
1464 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1467 * Note that we leave CSUM_TSO alone (it is always set). The
1468 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1469 * sending a TSO request our way, so it's sufficient to toggle IFCAP_TSOx only.
1472 if (mask & IFCAP_TSO4) {
1473 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1474 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1475 if_printf(ifp, "enable txcsum first.\n");
1479 ifp->if_capenable ^= IFCAP_TSO4;
1481 if (mask & IFCAP_TSO6) {
1482 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1483 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1484 if_printf(ifp, "enable txcsum6 first.\n");
1488 ifp->if_capenable ^= IFCAP_TSO6;
1490 if (mask & IFCAP_LRO) {
1491 #if defined(INET) || defined(INET6)
1493 struct sge_rxq *rxq;
1495 ifp->if_capenable ^= IFCAP_LRO;
1496 for_each_rxq(vi, i, rxq) {
1497 if (ifp->if_capenable & IFCAP_LRO)
1498 rxq->iq.flags |= IQ_LRO_ENABLED;
1500 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1505 if (mask & IFCAP_TOE) {
1506 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1508 rc = toe_capability(vi, enable);
1512 ifp->if_capenable ^= mask;
1515 if (mask & IFCAP_VLAN_HWTAGGING) {
1516 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1517 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1518 rc = update_mac_settings(ifp, XGMAC_VLANEX);
1520 if (mask & IFCAP_VLAN_MTU) {
1521 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1523 /* Need to find out how to disable auto-mtu-inflation */
1525 if (mask & IFCAP_VLAN_HWTSO)
1526 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1527 if (mask & IFCAP_VLAN_HWCSUM)
1528 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1530 #ifdef VLAN_CAPABILITIES
1531 VLAN_CAPABILITIES(ifp);
1534 end_synchronized_op(sc, 0);
1539 ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
1543 struct ifi2creq i2c;
1545 rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1548 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1552 if (i2c.len > sizeof(i2c.data)) {
1556 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1559 rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
1560 i2c.offset, i2c.len, &i2c.data[0]);
1561 end_synchronized_op(sc, 0);
1563 rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1568 rc = ether_ioctl(ifp, cmd, data);
1575 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1577 struct vi_info *vi = ifp->if_softc;
1578 struct port_info *pi = vi->pi;
1579 struct adapter *sc = pi->adapter;
1580 struct sge_txq *txq;
1585 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
1587 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1593 if (__predict_false(rc != 0)) {
1594 MPASS(m == NULL); /* was freed already */
1595 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
1600 txq = &sc->sge.txq[vi->first_txq];
1601 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1602 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1606 rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1607 if (__predict_false(rc != 0))
1614 cxgbe_qflush(struct ifnet *ifp)
1616 struct vi_info *vi = ifp->if_softc;
1617 struct sge_txq *txq;
1620 /* queues do not exist if !VI_INIT_DONE. */
1621 if (vi->flags & VI_INIT_DONE) {
1622 for_each_txq(vi, i, txq) {
1624 txq->eq.flags &= ~EQ_ENABLED;
1626 while (!mp_ring_is_idle(txq->r)) {
1627 mp_ring_check_drainage(txq->r, 0);
1636 vi_get_counter(struct ifnet *ifp, ift_counter c)
1638 struct vi_info *vi = ifp->if_softc;
1639 struct fw_vi_stats_vf *s = &vi->stats;
1641 vi_refresh_stats(vi->pi->adapter, vi);
1644 case IFCOUNTER_IPACKETS:
1645 return (s->rx_bcast_frames + s->rx_mcast_frames +
1646 s->rx_ucast_frames);
1647 case IFCOUNTER_IERRORS:
1648 return (s->rx_err_frames);
1649 case IFCOUNTER_OPACKETS:
1650 return (s->tx_bcast_frames + s->tx_mcast_frames +
1651 s->tx_ucast_frames + s->tx_offload_frames);
1652 case IFCOUNTER_OERRORS:
1653 return (s->tx_drop_frames);
1654 case IFCOUNTER_IBYTES:
1655 return (s->rx_bcast_bytes + s->rx_mcast_bytes +
1657 case IFCOUNTER_OBYTES:
1658 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
1659 s->tx_ucast_bytes + s->tx_offload_bytes);
1660 case IFCOUNTER_IMCASTS:
1661 return (s->rx_mcast_frames);
1662 case IFCOUNTER_OMCASTS:
1663 return (s->tx_mcast_frames);
1664 case IFCOUNTER_OQDROPS: {
1668 if ((vi->flags & (VI_INIT_DONE | VI_NETMAP)) == VI_INIT_DONE) {
1670 struct sge_txq *txq;
1672 for_each_txq(vi, i, txq)
1673 drops += counter_u64_fetch(txq->r->drops);
1681 return (if_get_counter_default(ifp, c));
1686 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1688 struct vi_info *vi = ifp->if_softc;
1689 struct port_info *pi = vi->pi;
1690 struct adapter *sc = pi->adapter;
1691 struct port_stats *s = &pi->stats;
1694 return (vi_get_counter(ifp, c));
1696 cxgbe_refresh_stats(sc, pi);
1699 case IFCOUNTER_IPACKETS:
1700 return (s->rx_frames - s->rx_pause);
1702 case IFCOUNTER_IERRORS:
1703 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
1704 s->rx_fcs_err + s->rx_len_err);
1706 case IFCOUNTER_OPACKETS:
1707 return (s->tx_frames - s->tx_pause);
1709 case IFCOUNTER_OERRORS:
1710 return (s->tx_error_frames);
1712 case IFCOUNTER_IBYTES:
1713 return (s->rx_octets - s->rx_pause * 64);
1715 case IFCOUNTER_OBYTES:
1716 return (s->tx_octets - s->tx_pause * 64);
1718 case IFCOUNTER_IMCASTS:
1719 return (s->rx_mcast_frames - s->rx_pause);
1721 case IFCOUNTER_OMCASTS:
1722 return (s->tx_mcast_frames - s->tx_pause);
1724 case IFCOUNTER_IQDROPS:
1725 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
1726 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
1727 s->rx_trunc3 + pi->tnl_cong_drops);
1729 case IFCOUNTER_OQDROPS: {
1733 if (vi->flags & VI_INIT_DONE) {
1735 struct sge_txq *txq;
1737 for_each_txq(vi, i, txq)
1738 drops += counter_u64_fetch(txq->r->drops);
1746 return (if_get_counter_default(ifp, c));
1751 cxgbe_media_change(struct ifnet *ifp)
1753 struct vi_info *vi = ifp->if_softc;
1755 device_printf(vi->dev, "%s unimplemented.\n", __func__);
1757 return (EOPNOTSUPP);
1761 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1763 struct vi_info *vi = ifp->if_softc;
1764 struct port_info *pi = vi->pi;
1765 struct ifmedia_entry *cur;
1766 int speed = pi->link_cfg.speed;
1768 cur = vi->media.ifm_cur;
1770 ifmr->ifm_status = IFM_AVALID;
1771 if (!pi->link_cfg.link_ok)
1774 ifmr->ifm_status |= IFM_ACTIVE;
1776 /* active and current will differ iff current media is autoselect. */
1777 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1780 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1781 if (speed == SPEED_10000)
1782 ifmr->ifm_active |= IFM_10G_T;
1783 else if (speed == SPEED_1000)
1784 ifmr->ifm_active |= IFM_1000_T;
1785 else if (speed == SPEED_100)
1786 ifmr->ifm_active |= IFM_100_TX;
1787 else if (speed == SPEED_10)
1788 ifmr->ifm_active |= IFM_10_T;
1790 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1795 vcxgbe_probe(device_t dev)
1798 struct vi_info *vi = device_get_softc(dev);
1800 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
1802 device_set_desc_copy(dev, buf);
1804 return (BUS_PROBE_DEFAULT);
1808 vcxgbe_attach(device_t dev)
1811 struct port_info *pi;
1813 int func, index, rc;
1816 vi = device_get_softc(dev);
1820 index = vi - pi->vi;
1821 KASSERT(index < nitems(vi_mac_funcs),
1822 ("%s: VI %s doesn't have a MAC func", __func__,
1823 device_get_nameunit(dev)));
1824 func = vi_mac_funcs[index];
1825 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
1826 vi->hw_addr, &vi->rss_size, func, 0);
1828 device_printf(dev, "Failed to allocate virtual interface "
1829 "for port %d: %d\n", pi->port_id, -rc);
1834 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
1835 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
1836 V_FW_PARAMS_PARAM_YZ(vi->viid);
1837 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
1839 vi->rss_base = 0xffff;
1841 /* MPASS((val >> 16) == rss_size); */
1842 vi->rss_base = val & 0xffff;
1845 rc = cxgbe_vi_attach(dev, vi);
1847 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
1854 vcxgbe_detach(device_t dev)
1859 vi = device_get_softc(dev);
1860 sc = vi->pi->adapter;
1864 cxgbe_vi_detach(vi);
1865 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
1867 end_synchronized_op(sc, 0);
1873 t4_fatal_err(struct adapter *sc)
1875 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1876 t4_intr_disable(sc);
1877 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1878 device_get_nameunit(sc->dev));
1882 map_bars_0_and_4(struct adapter *sc)
1884 sc->regs_rid = PCIR_BAR(0);
1885 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1886 &sc->regs_rid, RF_ACTIVE);
1887 if (sc->regs_res == NULL) {
1888 device_printf(sc->dev, "cannot map registers.\n");
1891 sc->bt = rman_get_bustag(sc->regs_res);
1892 sc->bh = rman_get_bushandle(sc->regs_res);
1893 sc->mmio_len = rman_get_size(sc->regs_res);
1894 setbit(&sc->doorbells, DOORBELL_KDB);
1896 sc->msix_rid = PCIR_BAR(4);
1897 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1898 &sc->msix_rid, RF_ACTIVE);
1899 if (sc->msix_res == NULL) {
1900 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1908 map_bar_2(struct adapter *sc)
1912 * T4: only iWARP driver uses the userspace doorbells. There is no need
1913 * to map it if RDMA is disabled.
1915 if (is_t4(sc) && sc->rdmacaps == 0)
1918 sc->udbs_rid = PCIR_BAR(2);
1919 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1920 &sc->udbs_rid, RF_ACTIVE);
1921 if (sc->udbs_res == NULL) {
1922 device_printf(sc->dev, "cannot map doorbell BAR.\n");
1925 sc->udbs_base = rman_get_virtual(sc->udbs_res);
1928 setbit(&sc->doorbells, DOORBELL_UDB);
1929 #if defined(__i386__) || defined(__amd64__)
1930 if (t5_write_combine) {
1934 * Enable write combining on BAR2. This is the
1935 * userspace doorbell BAR and is split into 128B
1936 * (UDBS_SEG_SIZE) doorbell regions, each associated
1937 * with an egress queue. The first 64B has the doorbell
1938 * and the second 64B can be used to submit a tx work
1939 * request with an implicit doorbell.
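/*
 * Illustrative sketch (not code taken from this driver; udb_qid is a
 * hypothetical doorbell qid): the region for one egress queue starts at
 *
 *	udb = sc->udbs_base + udb_qid * UDBS_SEG_SIZE;
 *
 * with the regular doorbell going to the first 64B and a write-combined
 * work request going to the second 64B at udb + 64.
 */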
1942 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1943 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1945 clrbit(&sc->doorbells, DOORBELL_UDB);
1946 setbit(&sc->doorbells, DOORBELL_WCWR);
1947 setbit(&sc->doorbells, DOORBELL_UDBWC);
1949 device_printf(sc->dev,
1950 "couldn't enable write combining: %d\n",
1954 t4_write_reg(sc, A_SGE_STAT_CFG,
1955 V_STATSOURCE_T5(7) | V_STATMODE(0));
1963 static const struct memwin t4_memwin[] = {
1964 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1965 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1966 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1969 static const struct memwin t5_memwin[] = {
1970 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1971 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1972 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1976 setup_memwin(struct adapter *sc)
1978 const struct memwin *mw;
1984 * Read low 32b of bar0 indirectly via the hardware backdoor
1985 * mechanism. Works from within PCI passthrough environments
1986 * too, where rman_get_start() can return a different value. We
1987 * need to program the T4 memory window decoders with the actual
1988 * addresses that will be coming across the PCIe link.
1990 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1991 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1994 n = nitems(t4_memwin);
1996 /* T5 uses the relative offset inside the PCIe BAR */
2000 n = nitems(t5_memwin);
2003 for (i = 0; i < n; i++, mw++) {
2005 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2006 (mw->base + bar0) | V_BIR(0) |
2007 V_WINDOW(ilog2(mw->aperture) - 10));
2011 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2015 * Verify that the memory range specified by the addr/len pair is valid and lies
2016 * entirely within a single region (EDCx or MCx).
2019 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2021 uint32_t em, addr_len, maddr, mlen;
2023 /* Memory can only be accessed in naturally aligned 4 byte units */
2024 if (addr & 3 || len & 3 || len == 0)
2027 /* Enabled memories */
2028 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2029 if (em & F_EDRAM0_ENABLE) {
2030 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2031 maddr = G_EDRAM0_BASE(addr_len) << 20;
2032 mlen = G_EDRAM0_SIZE(addr_len) << 20;
2033 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2034 addr + len <= maddr + mlen)
2037 if (em & F_EDRAM1_ENABLE) {
2038 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2039 maddr = G_EDRAM1_BASE(addr_len) << 20;
2040 mlen = G_EDRAM1_SIZE(addr_len) << 20;
2041 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2042 addr + len <= maddr + mlen)
2045 if (em & F_EXT_MEM_ENABLE) {
2046 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2047 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2048 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
2049 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2050 addr + len <= maddr + mlen)
2053 if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
2054 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2055 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2056 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
2057 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
2058 addr + len <= maddr + mlen)
2066 fwmtype_to_hwmtype(int mtype)
2070 case FW_MEMTYPE_EDC0:
2072 case FW_MEMTYPE_EDC1:
2074 case FW_MEMTYPE_EXTMEM:
2076 case FW_MEMTYPE_EXTMEM1:
2079 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2084 * Verify that the memory range specified by the memtype/offset/len pair is
2085 * valid and lies entirely within the memtype specified. The global address of
2086 * the start of the range is returned in addr.
2089 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2092 uint32_t em, addr_len, maddr, mlen;
2094 /* Memory can only be accessed in naturally aligned 4 byte units */
2095 if (off & 3 || len & 3 || len == 0)
2098 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2099 switch (fwmtype_to_hwmtype(mtype)) {
2101 if (!(em & F_EDRAM0_ENABLE))
2103 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2104 maddr = G_EDRAM0_BASE(addr_len) << 20;
2105 mlen = G_EDRAM0_SIZE(addr_len) << 20;
2108 if (!(em & F_EDRAM1_ENABLE))
2110 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2111 maddr = G_EDRAM1_BASE(addr_len) << 20;
2112 mlen = G_EDRAM1_SIZE(addr_len) << 20;
2115 if (!(em & F_EXT_MEM_ENABLE))
2117 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2118 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2119 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
2122 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
2124 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2125 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2126 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
2132 if (mlen > 0 && off < mlen && off + len <= mlen) {
2133 *addr = maddr + off; /* global address */
2141 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
2143 const struct memwin *mw;
2146 KASSERT(win >= 0 && win < nitems(t4_memwin),
2147 ("%s: incorrect memwin# (%d)", __func__, win));
2148 mw = &t4_memwin[win];
2150 KASSERT(win >= 0 && win < nitems(t5_memwin),
2151 ("%s: incorrect memwin# (%d)", __func__, win));
2152 mw = &t5_memwin[win];
2157 if (aperture != NULL)
2158 *aperture = mw->aperture;
2162 * Positions the memory window such that it can be used to access the specified
2163 * address in the chip's address space. The return value is the offset of addr
2164 * from the start of the window.
2167 position_memwin(struct adapter *sc, int n, uint32_t addr)
2172 KASSERT(n >= 0 && n <= 3,
2173 ("%s: invalid window %d.", __func__, n));
2174 KASSERT((addr & 3) == 0,
2175 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
2179 start = addr & ~0xf; /* start must be 16B aligned */
2181 pf = V_PFNUM(sc->pf);
2182 start = addr & ~0x7f; /* start must be 128B aligned */
2184 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
2186 t4_write_reg(sc, reg, start | pf);
2187 t4_read_reg(sc, reg);
2189 return (addr - start);
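/*
 * Typical usage is sketched below (helpers are the ones declared earlier in
 * this file; error handling and locking omitted):
 *
 *	uint32_t base, aperture, off, val;
 *
 *	memwin_info(sc, 2, &base, &aperture);
 *	off = position_memwin(sc, 2, addr);
 *	val = t4_read_reg(sc, base + off);
 */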
2193 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
2194 struct intrs_and_queues *iaq)
2196 int rc, itype, navail, nrxq10g, nrxq1g, n;
2197 int nofldrxq10g = 0, nofldrxq1g = 0;
2198 int nnmrxq10g = 0, nnmrxq1g = 0;
2200 bzero(iaq, sizeof(*iaq));
2202 iaq->ntxq10g = t4_ntxq10g;
2203 iaq->ntxq1g = t4_ntxq1g;
2204 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
2205 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
2206 iaq->rsrv_noflowq = t4_rsrv_noflowq;
2208 if (is_offload(sc)) {
2209 iaq->nofldtxq10g = t4_nofldtxq10g;
2210 iaq->nofldtxq1g = t4_nofldtxq1g;
2211 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
2212 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
2216 iaq->nnmtxq10g = t4_nnmtxq10g;
2217 iaq->nnmtxq1g = t4_nnmtxq1g;
2218 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
2219 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
2222 for (itype = INTR_MSIX; itype; itype >>= 1) {
2224 if ((itype & t4_intr_types) == 0)
2225 continue; /* not allowed */
2227 if (itype == INTR_MSIX)
2228 navail = pci_msix_count(sc->dev);
2229 else if (itype == INTR_MSI)
2230 navail = pci_msi_count(sc->dev);
2237 iaq->intr_type = itype;
2238 iaq->intr_flags_10g = 0;
2239 iaq->intr_flags_1g = 0;
2242 * Best option: an interrupt vector for errors, one for the
2243 * firmware event queue, and one for every rxq (NIC, TOE, and netmap).
2246 iaq->nirq = T4_EXTRA_INTR;
2247 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
2248 iaq->nirq += n10g * 2 * (num_vis - 1);
2249 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
2250 iaq->nirq += n1g * 2 * (num_vis - 1);
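/*
 * Worked example for this option (hypothetical numbers, assuming
 * T4_EXTRA_INTR covers the errors + firmware event queue pair named
 * above): a 2-port 10G adapter with num_vis=1, nrxq10g=8, nofldrxq10g=2
 * and nnmrxq10g=2 needs 2 + 2 * (8 + 2 + 2) = 26 vectors here.
 */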
2251 if (iaq->nirq <= navail &&
2252 (itype != INTR_MSI || powerof2(iaq->nirq))) {
2253 iaq->intr_flags_10g = INTR_ALL;
2254 iaq->intr_flags_1g = INTR_ALL;
2259 * Second best option: a vector for errors, one for the firmware
2260 * event queue, and vectors for either all the NIC rx queues or
2261 * all the TOE rx queues. The queues that don't get vectors
2262 * will forward their interrupts to those that do.
2264 * Note: netmap rx queues cannot be created early and so they
2265 * can't be set up to receive forwarded interrupts for other queues.
2267 iaq->nirq = T4_EXTRA_INTR;
2268 if (nrxq10g >= nofldrxq10g) {
2269 iaq->intr_flags_10g = INTR_RXQ;
2270 iaq->nirq += n10g * nrxq10g;
2271 iaq->nirq += n10g * (num_vis - 1);
2273 iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
2276 iaq->intr_flags_10g = INTR_OFLD_RXQ;
2277 iaq->nirq += n10g * nofldrxq10g;
2279 iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
2282 if (nrxq1g >= nofldrxq1g) {
2283 iaq->intr_flags_1g = INTR_RXQ;
2284 iaq->nirq += n1g * nrxq1g;
2285 iaq->nirq += n1g * (num_vis - 1);
2287 iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
2290 iaq->intr_flags_1g = INTR_OFLD_RXQ;
2291 iaq->nirq += n1g * nofldrxq1g;
2293 iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
2296 if (iaq->nirq <= navail &&
2297 (itype != INTR_MSI || powerof2(iaq->nirq)))
2301 * Next best option: an interrupt vector for errors, one for the
2302 * firmware event queue, and at least one per VI. At this
2303 * point we know we'll have to downsize nrxq and/or nofldrxq
2304 * and/or nnmrxq to fit what's available to us.
2306 iaq->nirq = T4_EXTRA_INTR;
2307 iaq->nirq += (n10g + n1g) * num_vis;
2308 if (iaq->nirq <= navail) {
2309 int leftover = navail - iaq->nirq;
2312 int target = max(nrxq10g, nofldrxq10g);
2314 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2315 INTR_RXQ : INTR_OFLD_RXQ;
2318 while (n < target && leftover >= n10g) {
2323 iaq->nrxq10g = min(n, nrxq10g);
2325 iaq->nofldrxq10g = min(n, nofldrxq10g);
2328 iaq->nnmrxq10g = min(n, nnmrxq10g);
2333 int target = max(nrxq1g, nofldrxq1g);
2335 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2336 INTR_RXQ : INTR_OFLD_RXQ;
2339 while (n < target && leftover >= n1g) {
2344 iaq->nrxq1g = min(n, nrxq1g);
2346 iaq->nofldrxq1g = min(n, nofldrxq1g);
2349 iaq->nnmrxq1g = min(n, nnmrxq1g);
2353 if (itype != INTR_MSI || powerof2(iaq->nirq))
2358 * Least desirable option: one interrupt vector for everything.
2360 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2361 iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2364 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2367 iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
2373 if (itype == INTR_MSIX)
2374 rc = pci_alloc_msix(sc->dev, &navail);
2375 else if (itype == INTR_MSI)
2376 rc = pci_alloc_msi(sc->dev, &navail);
2379 if (navail == iaq->nirq)
2383 * Didn't get the number requested. Use whatever number
2384 * the kernel is willing to allocate (it's in navail).
2386 device_printf(sc->dev, "fewer vectors than requested, "
2387 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2388 itype, iaq->nirq, navail);
2389 pci_release_msi(sc->dev);
2393 device_printf(sc->dev,
2394 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2395 itype, rc, iaq->nirq, navail);
2398 device_printf(sc->dev,
2399 "failed to find a usable interrupt type. "
2400 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2401 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2406 #define FW_VERSION(chip) ( \
2407 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2408 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2409 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2410 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2411 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
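/*
 * FW_VERSION() packs the major/minor/micro/build components into the single
 * 32-bit word format used by struct fw_hdr; the G_FW_HDR_FW_VER_* macros used
 * later in this file unpack the same fields for display.
 */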
2417 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
2421 .kld_name = "t4fw_cfg",
2422 .fw_mod_name = "t4fw",
2424 .chip = FW_HDR_CHIP_T4,
2425 .fw_ver = htobe32_const(FW_VERSION(T4)),
2426 .intfver_nic = FW_INTFVER(T4, NIC),
2427 .intfver_vnic = FW_INTFVER(T4, VNIC),
2428 .intfver_ofld = FW_INTFVER(T4, OFLD),
2429 .intfver_ri = FW_INTFVER(T4, RI),
2430 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2431 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
2432 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2433 .intfver_fcoe = FW_INTFVER(T4, FCOE),
2437 .kld_name = "t5fw_cfg",
2438 .fw_mod_name = "t5fw",
2440 .chip = FW_HDR_CHIP_T5,
2441 .fw_ver = htobe32_const(FW_VERSION(T5)),
2442 .intfver_nic = FW_INTFVER(T5, NIC),
2443 .intfver_vnic = FW_INTFVER(T5, VNIC),
2444 .intfver_ofld = FW_INTFVER(T5, OFLD),
2445 .intfver_ri = FW_INTFVER(T5, RI),
2446 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2447 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
2448 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2449 .intfver_fcoe = FW_INTFVER(T5, FCOE),
2454 static struct fw_info *
2455 find_fw_info(int chip)
2459 for (i = 0; i < nitems(fw_info); i++) {
2460 if (fw_info[i].chip == chip)
2461 return (&fw_info[i]);
2467 * Is the given firmware API compatible with the one the driver was compiled
2471 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2474 /* short circuit if it's the exact same firmware version */
2475 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2479 * XXX: Is this too conservative? Perhaps I should limit this to the
2480 * features that are supported in the driver.
2482 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2483 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2484 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2485 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2493 * The firmware in the KLD is usable, but should it be installed? This routine
2494 * explains itself in detail if it indicates the KLD firmware should be
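 * installed.  (Judging from the checks below: t4_fw_install == 0 forbids any
 * install, t4_fw_install == 2 asks for an install whenever the KLD version
 * differs from the card's, and otherwise the KLD firmware is installed only
 * when the card's firmware is unusable or older.)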
2498 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2502 if (!card_fw_usable) {
2503 reason = "incompatible or unusable";
2508 reason = "older than the version bundled with this driver";
2512 if (t4_fw_install == 2 && k != c) {
2513 reason = "different than the version bundled with this driver";
2520 if (t4_fw_install == 0) {
2521 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2522 "but the driver is prohibited from installing a different "
2523 "firmware on the card.\n",
2524 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2525 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2530 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2531 "installing firmware %u.%u.%u.%u on card.\n",
2532 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2533 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2534 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2535 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2540 * Establish contact with the firmware and determine if we are the master driver
2541 * or not, and whether we are responsible for chip initialization.
2544 prep_firmware(struct adapter *sc)
2546 const struct firmware *fw = NULL, *default_cfg;
2547 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2548 enum dev_state state;
2549 struct fw_info *fw_info;
2550 struct fw_hdr *card_fw; /* fw on the card */
2551 const struct fw_hdr *kld_fw; /* fw in the KLD */
2552 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
2555 /* Contact firmware. */
2556 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2557 if (rc < 0 || state == DEV_STATE_ERR) {
2559 device_printf(sc->dev,
2560 "failed to connect to the firmware: %d, %d.\n", rc, state);
2565 sc->flags |= MASTER_PF;
2566 else if (state == DEV_STATE_UNINIT) {
2568 * We didn't get to be the master so we definitely won't be
2569 * configuring the chip. It's a bug if someone else hasn't
2570 * configured it already.
2572 device_printf(sc->dev, "couldn't be master(%d), "
2573 "device not already initialized either(%d).\n", rc, state);
2577 /* This is the firmware whose headers the driver was compiled against */
2578 fw_info = find_fw_info(chip_id(sc));
2579 if (fw_info == NULL) {
2580 device_printf(sc->dev,
2581 "unable to look up firmware information for chip %d.\n",
2585 drv_fw = &fw_info->fw_hdr;
2588 * The firmware KLD contains many modules. The KLD name is also the
2589 * name of the module that contains the default config file.
2591 default_cfg = firmware_get(fw_info->kld_name);
2593 /* Read the header of the firmware on the card */
2594 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2595 rc = -t4_read_flash(sc, FLASH_FW_START,
2596 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2598 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2600 device_printf(sc->dev,
2601 "Unable to read card's firmware header: %d\n", rc);
2605 /* This is the firmware in the KLD */
2606 fw = firmware_get(fw_info->fw_mod_name);
2608 kld_fw = (const void *)fw->data;
2609 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2615 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2616 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2618 * Common case: the firmware on the card is an exact match and
2619 * the KLD is an exact match too, or the KLD is
2620 * absent/incompatible. Note that t4_fw_install = 2 is ignored
2621 * here -- use cxgbetool loadfw if you want to reinstall the
2622 * same firmware as the one on the card.
2624 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2625 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2626 be32toh(card_fw->fw_ver))) {
2628 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2630 device_printf(sc->dev,
2631 "failed to install firmware: %d\n", rc);
2635 /* Installed successfully, update the cached header too. */
2636 memcpy(card_fw, kld_fw, sizeof(*card_fw));
2638 need_fw_reset = 0; /* already reset as part of load_fw */
2641 if (!card_fw_usable) {
2644 d = ntohl(drv_fw->fw_ver);
2645 c = ntohl(card_fw->fw_ver);
2646 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2648 device_printf(sc->dev, "Cannot find a usable firmware: "
2649 "fw_install %d, chip state %d, "
2650 "driver compiled with %d.%d.%d.%d, "
2651 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2652 t4_fw_install, state,
2653 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2654 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2655 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2656 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2657 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2658 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2663 /* We're using whatever's on the card and it's known to be good. */
2664 sc->params.fw_vers = ntohl(card_fw->fw_ver);
2665 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2666 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2667 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2668 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2669 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2670 t4_get_tp_version(sc, &sc->params.tp_vers);
2673 if (need_fw_reset &&
2674 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2675 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2676 if (rc != ETIMEDOUT && rc != EIO)
2677 t4_fw_bye(sc, sc->mbox);
2682 rc = get_params__pre_init(sc);
2684 goto done; /* error message displayed already */
2686 /* Partition adapter resources as specified in the config file. */
2687 if (state == DEV_STATE_UNINIT) {
2689 KASSERT(sc->flags & MASTER_PF,
2690 ("%s: trying to change chip settings when not master.",
2693 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2695 goto done; /* error message displayed already */
2697 t4_tweak_chip_settings(sc);
2699 /* get basic stuff going */
2700 rc = -t4_fw_initialize(sc, sc->mbox);
2702 device_printf(sc->dev, "fw init failed: %d.\n", rc);
2706 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2711 free(card_fw, M_CXGBE);
2713 firmware_put(fw, FIRMWARE_UNLOAD);
2714 if (default_cfg != NULL)
2715 firmware_put(default_cfg, FIRMWARE_UNLOAD);
2720 #define FW_PARAM_DEV(param) \
2721 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2722 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2723 #define FW_PARAM_PFVF(param) \
2724 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2725 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
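/*
 * These macros build the 32-bit parameter identifiers (mnemonic class plus
 * parameter index) that are passed to t4_query_params()/t4_set_params() when
 * firmware parameters are read or written below.
 */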
2728 * Partition chip resources for use between various PFs, VFs, etc.
2731 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2732 const char *name_prefix)
2734 const struct firmware *cfg = NULL;
2736 struct fw_caps_config_cmd caps;
2737 uint32_t mtype, moff, finicsum, cfcsum;
2740 * Figure out what configuration file to use. Pick the default config
2741 * file for the card if the user hasn't specified one explicitly.
2743 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2744 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2745 /* Card specific overrides go here. */
2746 if (pci_get_device(sc->dev) == 0x440a)
2747 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2749 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2753 * We need to load another module if the profile is anything except
2754 * "default" or "flash".
2756 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2757 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2760 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2761 cfg = firmware_get(s);
2763 if (default_cfg != NULL) {
2764 device_printf(sc->dev,
2765 "unable to load module \"%s\" for "
2766 "configuration profile \"%s\", will use "
2767 "the default config file instead.\n",
2769 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2772 device_printf(sc->dev,
2773 "unable to load module \"%s\" for "
2774 "configuration profile \"%s\", will use "
2775 "the config file on the card's flash "
2776 "instead.\n", s, sc->cfg_file);
2777 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2783 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2784 default_cfg == NULL) {
2785 device_printf(sc->dev,
2786 "default config file not available, will use the config "
2787 "file on the card's flash instead.\n");
2788 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2791 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2793 const uint32_t *cfdata;
2794 uint32_t param, val, addr, off, mw_base, mw_aperture;
2796 KASSERT(cfg != NULL || default_cfg != NULL,
2797 ("%s: no config to upload", __func__));
2800 * Ask the firmware where it wants us to upload the config file.
2802 param = FW_PARAM_DEV(CF);
2803 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2805 /* No support for config file? Shouldn't happen. */
2806 device_printf(sc->dev,
2807 "failed to query config file location: %d.\n", rc);
2810 mtype = G_FW_PARAMS_PARAM_Y(val);
2811 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
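/*
 * The firmware reports the upload location as a memory type (the Y field)
 * and an offset in 64KB units (the Z field); the shift above converts the
 * latter to a byte offset.
 */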
2814 * XXX: sheer laziness. We deliberately added 4 bytes of
2815 * useless stuffing/comments at the end of the config file so
2816 * it's ok to simply throw away the last remaining bytes when
2817 * the config file is not an exact multiple of 4. This also
2818 * helps with the validate_mt_off_len check.
2821 cflen = cfg->datasize & ~3;
2824 cflen = default_cfg->datasize & ~3;
2825 cfdata = default_cfg->data;
2828 if (cflen > FLASH_CFG_MAX_SIZE) {
2829 device_printf(sc->dev,
2830 "config file too long (%d, max allowed is %d). "
2831 "Will try to use the config on the card, if any.\n",
2832 cflen, FLASH_CFG_MAX_SIZE);
2833 goto use_config_on_flash;
2836 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2838 device_printf(sc->dev,
2839 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
2840 "Will try to use the config on the card, if any.\n",
2841 __func__, mtype, moff, cflen, rc);
2842 goto use_config_on_flash;
2845 memwin_info(sc, 2, &mw_base, &mw_aperture);
2847 off = position_memwin(sc, 2, addr);
2848 n = min(cflen, mw_aperture - off);
2849 for (i = 0; i < n; i += 4)
2850 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2855 use_config_on_flash:
2856 mtype = FW_MEMTYPE_FLASH;
2857 moff = t4_flash_cfg_addr(sc);
2860 bzero(&caps, sizeof(caps));
2861 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2862 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2863 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2864 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2865 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2866 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2868 device_printf(sc->dev,
2869 "failed to pre-process config file: %d "
2870 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2874 finicsum = be32toh(caps.finicsum);
2875 cfcsum = be32toh(caps.cfcsum);
2876 if (finicsum != cfcsum) {
2877 device_printf(sc->dev,
2878 "WARNING: config file checksum mismatch: %08x %08x\n",
2881 sc->cfcsum = cfcsum;
2883 #define LIMIT_CAPS(x) do { \
2884 caps.x &= htobe16(t4_##x##_allowed); \
2888 * Let the firmware know what features will (not) be used so it can tune
2889 * things accordingly.
2891 LIMIT_CAPS(linkcaps);
2892 LIMIT_CAPS(niccaps);
2893 LIMIT_CAPS(toecaps);
2894 LIMIT_CAPS(rdmacaps);
2895 LIMIT_CAPS(iscsicaps);
2896 LIMIT_CAPS(fcoecaps);
2899 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2900 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2901 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2902 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2904 device_printf(sc->dev,
2905 "failed to process config file: %d.\n", rc);
2909 firmware_put(cfg, FIRMWARE_UNLOAD);
2914 * Retrieve parameters that are needed (or nice to have) very early.
2917 get_params__pre_init(struct adapter *sc)
2920 uint32_t param[2], val[2];
2921 struct fw_devlog_cmd cmd;
2922 struct devlog_params *dlog = &sc->params.devlog;
2924 param[0] = FW_PARAM_DEV(PORTVEC);
2925 param[1] = FW_PARAM_DEV(CCLK);
2926 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2928 device_printf(sc->dev,
2929 "failed to query parameters (pre_init): %d.\n", rc);
2933 sc->params.portvec = val[0];
2934 sc->params.nports = bitcount32(val[0]);
2935 sc->params.vpd.cclk = val[1];
2937 /* Read device log parameters. */
2938 bzero(&cmd, sizeof(cmd));
2939 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2940 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2941 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2942 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2944 device_printf(sc->dev,
2945 "failed to get devlog parameters: %d.\n", rc);
2946 bzero(dlog, sizeof (*dlog));
2947 rc = 0; /* devlog isn't critical for device operation */
2949 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2950 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2951 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2952 dlog->size = be32toh(cmd.memsize_devlog);
2959 * Retrieve various parameters that are of interest to the driver. The device
2960 * has been initialized by the firmware at this point.
2963 get_params__post_init(struct adapter *sc)
2966 uint32_t param[7], val[7];
2967 struct fw_caps_config_cmd caps;
2969 param[0] = FW_PARAM_PFVF(IQFLINT_START);
2970 param[1] = FW_PARAM_PFVF(EQ_START);
2971 param[2] = FW_PARAM_PFVF(FILTER_START);
2972 param[3] = FW_PARAM_PFVF(FILTER_END);
2973 param[4] = FW_PARAM_PFVF(L2T_START);
2974 param[5] = FW_PARAM_PFVF(L2T_END);
2975 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2977 device_printf(sc->dev,
2978 "failed to query parameters (post_init): %d.\n", rc);
2982 sc->sge.iq_start = val[0];
2983 sc->sge.eq_start = val[1];
2984 sc->tids.ftid_base = val[2];
2985 sc->tids.nftids = val[3] - val[2] + 1;
2986 sc->params.ftid_min = val[2];
2987 sc->params.ftid_max = val[3];
2988 sc->vres.l2t.start = val[4];
2989 sc->vres.l2t.size = val[5] - val[4] + 1;
2990 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2991 ("%s: L2 table size (%u) larger than expected (%u)",
2992 __func__, sc->vres.l2t.size, L2T_SIZE));
2994 /* get capabilities */
2995 bzero(&caps, sizeof(caps));
2996 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2997 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2998 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2999 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3001 device_printf(sc->dev,
3002 "failed to get card capabilities: %d.\n", rc);
3006 #define READ_CAPS(x) do { \
3007 sc->x = htobe16(caps.x); \
3009 READ_CAPS(linkcaps);
3012 READ_CAPS(rdmacaps);
3013 READ_CAPS(iscsicaps);
3014 READ_CAPS(fcoecaps);
3016 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3017 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3018 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3019 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3020 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3022 device_printf(sc->dev,
3023 "failed to query NIC parameters: %d.\n", rc);
3026 sc->tids.etid_base = val[0];
3027 sc->params.etid_min = val[0];
3028 sc->tids.netids = val[1] - val[0] + 1;
3029 sc->params.netids = sc->tids.netids;
3030 sc->params.eo_wr_cred = val[2];
3031 sc->params.ethoffload = 1;
3035 /* query offload-related parameters */
3036 param[0] = FW_PARAM_DEV(NTID);
3037 param[1] = FW_PARAM_PFVF(SERVER_START);
3038 param[2] = FW_PARAM_PFVF(SERVER_END);
3039 param[3] = FW_PARAM_PFVF(TDDP_START);
3040 param[4] = FW_PARAM_PFVF(TDDP_END);
3041 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3042 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3044 device_printf(sc->dev,
3045 "failed to query TOE parameters: %d.\n", rc);
3048 sc->tids.ntids = val[0];
3049 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3050 sc->tids.stid_base = val[1];
3051 sc->tids.nstids = val[2] - val[1] + 1;
3052 sc->vres.ddp.start = val[3];
3053 sc->vres.ddp.size = val[4] - val[3] + 1;
3054 sc->params.ofldq_wr_cred = val[5];
3055 sc->params.offload = 1;
3058 param[0] = FW_PARAM_PFVF(STAG_START);
3059 param[1] = FW_PARAM_PFVF(STAG_END);
3060 param[2] = FW_PARAM_PFVF(RQ_START);
3061 param[3] = FW_PARAM_PFVF(RQ_END);
3062 param[4] = FW_PARAM_PFVF(PBL_START);
3063 param[5] = FW_PARAM_PFVF(PBL_END);
3064 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3066 device_printf(sc->dev,
3067 "failed to query RDMA parameters(1): %d.\n", rc);
3070 sc->vres.stag.start = val[0];
3071 sc->vres.stag.size = val[1] - val[0] + 1;
3072 sc->vres.rq.start = val[2];
3073 sc->vres.rq.size = val[3] - val[2] + 1;
3074 sc->vres.pbl.start = val[4];
3075 sc->vres.pbl.size = val[5] - val[4] + 1;
3077 param[0] = FW_PARAM_PFVF(SQRQ_START);
3078 param[1] = FW_PARAM_PFVF(SQRQ_END);
3079 param[2] = FW_PARAM_PFVF(CQ_START);
3080 param[3] = FW_PARAM_PFVF(CQ_END);
3081 param[4] = FW_PARAM_PFVF(OCQ_START);
3082 param[5] = FW_PARAM_PFVF(OCQ_END);
3083 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3085 device_printf(sc->dev,
3086 "failed to query RDMA parameters(2): %d.\n", rc);
3089 sc->vres.qp.start = val[0];
3090 sc->vres.qp.size = val[1] - val[0] + 1;
3091 sc->vres.cq.start = val[2];
3092 sc->vres.cq.size = val[3] - val[2] + 1;
3093 sc->vres.ocq.start = val[4];
3094 sc->vres.ocq.size = val[5] - val[4] + 1;
3096 if (sc->iscsicaps) {
3097 param[0] = FW_PARAM_PFVF(ISCSI_START);
3098 param[1] = FW_PARAM_PFVF(ISCSI_END);
3099 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3101 device_printf(sc->dev,
3102 "failed to query iSCSI parameters: %d.\n", rc);
3105 sc->vres.iscsi.start = val[0];
3106 sc->vres.iscsi.size = val[1] - val[0] + 1;
3110 * We've got the params we wanted to query via the firmware. Now grab
3111 * some others directly from the chip.
3113 rc = t4_read_chip_settings(sc);
3119 set_params__post_init(struct adapter *sc)
3121 uint32_t param, val;
3123 /* ask for encapsulated CPLs */
3124 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3126 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3131 #undef FW_PARAM_PFVF
3135 t4_set_desc(struct adapter *sc)
3138 struct adapter_params *p = &sc->params;
3140 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
3141 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
3142 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
3144 device_set_desc_copy(sc->dev, buf);
3148 build_medialist(struct port_info *pi, struct ifmedia *media)
3154 ifmedia_removeall(media);
3156 m = IFM_ETHER | IFM_FDX;
3158 switch(pi->port_type) {
3159 case FW_PORT_TYPE_BT_XFI:
3160 case FW_PORT_TYPE_BT_XAUI:
3161 ifmedia_add(media, m | IFM_10G_T, 0, NULL);
3164 case FW_PORT_TYPE_BT_SGMII:
3165 ifmedia_add(media, m | IFM_1000_T, 0, NULL);
3166 ifmedia_add(media, m | IFM_100_TX, 0, NULL);
3167 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
3168 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
3171 case FW_PORT_TYPE_CX4:
3172 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
3173 ifmedia_set(media, m | IFM_10G_CX4);
3176 case FW_PORT_TYPE_QSFP_10G:
3177 case FW_PORT_TYPE_SFP:
3178 case FW_PORT_TYPE_FIBER_XFI:
3179 case FW_PORT_TYPE_FIBER_XAUI:
3180 switch (pi->mod_type) {
3182 case FW_PORT_MOD_TYPE_LR:
3183 ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
3184 ifmedia_set(media, m | IFM_10G_LR);
3187 case FW_PORT_MOD_TYPE_SR:
3188 ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
3189 ifmedia_set(media, m | IFM_10G_SR);
3192 case FW_PORT_MOD_TYPE_LRM:
3193 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
3194 ifmedia_set(media, m | IFM_10G_LRM);
3197 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3198 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3199 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
3200 ifmedia_set(media, m | IFM_10G_TWINAX);
3203 case FW_PORT_MOD_TYPE_NONE:
3205 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3206 ifmedia_set(media, m | IFM_NONE);
3209 case FW_PORT_MOD_TYPE_NA:
3210 case FW_PORT_MOD_TYPE_ER:
3212 device_printf(pi->dev,
3213 "unknown port_type (%d), mod_type (%d)\n",
3214 pi->port_type, pi->mod_type);
3215 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3216 ifmedia_set(media, m | IFM_UNKNOWN);
3221 case FW_PORT_TYPE_QSFP:
3222 switch (pi->mod_type) {
3224 case FW_PORT_MOD_TYPE_LR:
3225 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
3226 ifmedia_set(media, m | IFM_40G_LR4);
3229 case FW_PORT_MOD_TYPE_SR:
3230 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
3231 ifmedia_set(media, m | IFM_40G_SR4);
3234 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3235 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3236 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
3237 ifmedia_set(media, m | IFM_40G_CR4);
3240 case FW_PORT_MOD_TYPE_NONE:
3242 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3243 ifmedia_set(media, m | IFM_NONE);
3247 device_printf(pi->dev,
3248 "unknown port_type (%d), mod_type (%d)\n",
3249 pi->port_type, pi->mod_type);
3250 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3251 ifmedia_set(media, m | IFM_UNKNOWN);
3257 device_printf(pi->dev,
3258 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
3260 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3261 ifmedia_set(media, m | IFM_UNKNOWN);
3268 #define FW_MAC_EXACT_CHUNK 7
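/*
 * Multicast addresses are handed to the firmware in chunks of this many
 * exact-match entries at a time; judging from update_mac_settings() below,
 * anything the exact-match filter cannot hold is folded into the multicast
 * hash that is programmed with t4_set_addr_hash() at the end.
 */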
3271 * Program the port's XGMAC based on parameters in ifnet. The caller also
3272 * indicates which parameters should be programmed (the rest are left alone).
3275 update_mac_settings(struct ifnet *ifp, int flags)
3278 struct vi_info *vi = ifp->if_softc;
3279 struct port_info *pi = vi->pi;
3280 struct adapter *sc = pi->adapter;
3281 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
3283 ASSERT_SYNCHRONIZED_OP(sc);
3284 KASSERT(flags, ("%s: not told what to update.", __func__));
3286 if (flags & XGMAC_MTU)
3289 if (flags & XGMAC_PROMISC)
3290 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
3292 if (flags & XGMAC_ALLMULTI)
3293 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
3295 if (flags & XGMAC_VLANEX)
3296 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
3298 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3299 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
3300 allmulti, 1, vlanex, false);
3302 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3308 if (flags & XGMAC_UCADDR) {
3309 uint8_t ucaddr[ETHER_ADDR_LEN];
3311 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3312 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
3313 ucaddr, true, true);
3316 if_printf(ifp, "change_mac failed: %d\n", rc);
3319 vi->xact_addr_filt = rc;
3324 if (flags & XGMAC_MCADDRS) {
3325 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3328 struct ifmultiaddr *ifma;
3331 if_maddr_rlock(ifp);
3332 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3333 if (ifma->ifma_addr->sa_family != AF_LINK)
3336 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3337 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3340 if (i == FW_MAC_EXACT_CHUNK) {
3341 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
3342 del, i, mcaddr, NULL, &hash, 0);
3345 for (j = 0; j < i; j++) {
3347 "failed to add mc address"
3349 "%02x:%02x:%02x rc=%d\n",
3350 mcaddr[j][0], mcaddr[j][1],
3351 mcaddr[j][2], mcaddr[j][3],
3352 mcaddr[j][4], mcaddr[j][5],
3362 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
3363 mcaddr, NULL, &hash, 0);
3366 for (j = 0; j < i; j++) {
3368 "failed to add mc address"
3370 "%02x:%02x:%02x rc=%d\n",
3371 mcaddr[j][0], mcaddr[j][1],
3372 mcaddr[j][2], mcaddr[j][3],
3373 mcaddr[j][4], mcaddr[j][5],
3380 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
3382 if_printf(ifp, "failed to set mc address hash: %d", rc);
3384 if_maddr_runlock(ifp);
3391 * {begin|end}_synchronized_op must be called from the same thread.
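 *
 * Judging from the code below, SLEEP_OK callers may block (via mtx_sleep)
 * until the adapter stops being busy, while callers without SLEEP_OK fail
 * immediately, and HOLD_LOCK appears to leave the adapter lock held on a
 * successful return.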
3394 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
3400 /* the caller thinks it's ok to sleep, but is it really? */
3401 if (flags & SLEEP_OK)
3402 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3403 "begin_synchronized_op");
3414 if (vi && IS_DOOMED(vi)) {
3424 if (!(flags & SLEEP_OK)) {
3429 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3435 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3438 sc->last_op = wmesg;
3439 sc->last_op_thr = curthread;
3440 sc->last_op_flags = flags;
3444 if (!(flags & HOLD_LOCK) || rc)
3451 * Tell if_ioctl and if_init that the VI is going away. This is
3452 * a special variant of begin_synchronized_op and must be paired with a
3453 * call to end_synchronized_op.
3456 doom_vi(struct adapter *sc, struct vi_info *vi)
3463 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
3466 sc->last_op = "t4detach";
3467 sc->last_op_thr = curthread;
3468 sc->last_op_flags = 0;
3474 * {begin|end}_synchronized_op must be called from the same thread.
3477 end_synchronized_op(struct adapter *sc, int flags)
3480 if (flags & LOCK_HELD)
3481 ADAPTER_LOCK_ASSERT_OWNED(sc);
3485 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3492 cxgbe_init_synchronized(struct vi_info *vi)
3494 struct port_info *pi = vi->pi;
3495 struct adapter *sc = pi->adapter;
3496 struct ifnet *ifp = vi->ifp;
3498 struct sge_txq *txq;
3500 ASSERT_SYNCHRONIZED_OP(sc);
3502 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3503 return (0); /* already running */
3505 if (!(sc->flags & FULL_INIT_DONE) &&
3506 ((rc = adapter_full_init(sc)) != 0))
3507 return (rc); /* error message displayed already */
3509 if (!(vi->flags & VI_INIT_DONE) &&
3510 ((rc = vi_full_init(vi)) != 0))
3511 return (rc); /* error message displayed already */
3513 rc = update_mac_settings(ifp, XGMAC_ALL);
3515 goto done; /* error message displayed already */
3517 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
3519 if_printf(ifp, "enable_vi failed: %d\n", rc);
3524 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
3528 for_each_txq(vi, i, txq) {
3530 txq->eq.flags |= EQ_ENABLED;
3535 * The first iq of the first port to come up is used for tracing.
3537 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
3538 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
3539 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
3540 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
3541 V_QUEUENUMBER(sc->traceq));
3542 pi->flags |= HAS_TRACEQ;
3547 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3551 callout_reset(&vi->tick, hz, vi_tick, vi);
3553 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
3557 cxgbe_uninit_synchronized(vi);
3566 cxgbe_uninit_synchronized(struct vi_info *vi)
3568 struct port_info *pi = vi->pi;
3569 struct adapter *sc = pi->adapter;
3570 struct ifnet *ifp = vi->ifp;
3572 struct sge_txq *txq;
3574 ASSERT_SYNCHRONIZED_OP(sc);
3576 if (!(vi->flags & VI_INIT_DONE)) {
3577 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
3578 ("uninited VI is running"));
3583 * Disable the VI so that all its data in either direction is discarded
3584 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
3585 * tick) intact as the TP can deliver negative advice or data that it's
3586 * holding in its RAM (for an offloaded connection) even after the VI is
3589 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
3591 if_printf(ifp, "disable_vi failed: %d\n", rc);
3595 for_each_txq(vi, i, txq) {
3597 txq->eq.flags &= ~EQ_ENABLED;
3603 callout_stop(&pi->tick);
3605 callout_stop(&vi->tick);
3606 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3610 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3612 if (pi->up_vis > 0) {
3618 pi->link_cfg.link_ok = 0;
3619 pi->link_cfg.speed = 0;
3621 t4_os_link_changed(sc, pi->port_id, 0, -1);
3627 * It is ok for this function to fail midway and return right away. t4_detach
3628 * will walk the entire sc->irq list and clean up whatever is valid.
3631 setup_intr_handlers(struct adapter *sc)
3633 int rc, rid, p, q, v;
3636 struct port_info *pi;
3638 struct sge_rxq *rxq;
3640 struct sge_ofld_rxq *ofld_rxq;
3643 struct sge_nm_rxq *nm_rxq;
3650 rid = sc->intr_type == INTR_INTX ? 0 : 1;
3651 if (sc->intr_count == 1)
3652 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
3654 /* Multiple interrupts. */
3655 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3656 ("%s: too few intr.", __func__));
3658 /* The first one is always error intr */
3659 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3665 /* The second one is always the firmware event queue */
3666 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
3672 for_each_port(sc, p) {
3674 for_each_vi(pi, v, vi) {
3675 vi->first_intr = rid - 1;
3677 if (vi->flags & VI_NETMAP) {
3678 for_each_nm_rxq(vi, q, nm_rxq) {
3679 snprintf(s, sizeof(s), "%d-%d", p, q);
3680 rc = t4_alloc_irq(sc, irq, rid,
3681 t4_nm_intr, nm_rxq, s);
3691 if (vi->flags & INTR_RXQ) {
3692 for_each_rxq(vi, q, rxq) {
3694 snprintf(s, sizeof(s), "%d.%d",
3697 snprintf(s, sizeof(s),
3698 "%d(%d).%d", p, v, q);
3699 rc = t4_alloc_irq(sc, irq, rid,
3709 if (vi->flags & INTR_OFLD_RXQ) {
3710 for_each_ofld_rxq(vi, q, ofld_rxq) {
3711 snprintf(s, sizeof(s), "%d,%d", p, q);
3712 rc = t4_alloc_irq(sc, irq, rid,
3713 t4_intr, ofld_rxq, s);
3724 MPASS(irq == &sc->irq[sc->intr_count]);
3730 adapter_full_init(struct adapter *sc)
3734 ASSERT_SYNCHRONIZED_OP(sc);
3735 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3736 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3737 ("%s: FULL_INIT_DONE already", __func__));
3740 * queues that belong to the adapter (not any particular port).
3742 rc = t4_setup_adapter_queues(sc);
3746 for (i = 0; i < nitems(sc->tq); i++) {
3747 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3748 taskqueue_thread_enqueue, &sc->tq[i]);
3749 if (sc->tq[i] == NULL) {
3750 device_printf(sc->dev,
3751 "failed to allocate task queue %d\n", i);
3755 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3756 device_get_nameunit(sc->dev), i);
3760 sc->flags |= FULL_INIT_DONE;
3763 adapter_full_uninit(sc);
3769 adapter_full_uninit(struct adapter *sc)
3773 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3775 t4_teardown_adapter_queues(sc);
3777 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3778 taskqueue_free(sc->tq[i]);
3782 sc->flags &= ~FULL_INIT_DONE;
3788 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
3789 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
3790 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
3791 RSS_HASHTYPE_RSS_UDP_IPV6)
3793 /* Translates kernel hash types to hardware. */
3795 hashconfig_to_hashen(int hashconfig)
3799 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
3800 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
3801 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
3802 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
3803 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
3804 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3805 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3807 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
3808 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3809 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3811 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
3812 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3813 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
3814 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
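/*
 * Note that the hardware's 4-tuple enable bits are shared between TCP and
 * UDP for a given IP version, so requesting UDP hashing implicitly turns on
 * TCP 4-tuple hashing as well; vi_full_init() reports any hashes that end up
 * "forced on" this way.
 */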
3819 /* Translates hardware hash types to kernel. */
3821 hashen_to_hashconfig(int hashen)
3825 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
3827 * If UDP hashing was enabled it must have been enabled for
3828 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
3829 * enabling any 4-tuple hash is a nonsensical configuration.
3831 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3832 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
3834 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3835 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
3836 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3837 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
3839 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3840 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
3841 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3842 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
3843 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3844 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
3845 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3846 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
3848 return (hashconfig);
3853 vi_full_init(struct vi_info *vi)
3855 struct adapter *sc = vi->pi->adapter;
3856 struct ifnet *ifp = vi->ifp;
3858 struct sge_rxq *rxq;
3859 int rc, i, j, hashen;
3861 int nbuckets = rss_getnumbuckets();
3862 int hashconfig = rss_gethashconfig();
3864 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3865 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3868 ASSERT_SYNCHRONIZED_OP(sc);
3869 KASSERT((vi->flags & VI_INIT_DONE) == 0,
3870 ("%s: VI_INIT_DONE already", __func__));
3872 sysctl_ctx_init(&vi->ctx);
3873 vi->flags |= VI_SYSCTL_CTX;
3876 * Allocate tx/rx/fl queues for this VI.
3878 rc = t4_setup_vi_queues(vi);
3880 goto done; /* error message displayed already */
3883 /* Netmap VIs configure RSS when netmap is enabled. */
3884 if (vi->flags & VI_NETMAP) {
3885 vi->flags |= VI_INIT_DONE;
3891 * Set up RSS for this VI. Save a copy of the RSS table for later use.
3893 if (vi->nrxq > vi->rss_size) {
3894 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
3895 "some queues will never receive traffic.\n", vi->nrxq,
3897 } else if (vi->rss_size % vi->nrxq) {
3898 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
3899 "expect uneven traffic distribution.\n", vi->nrxq,
3903 MPASS(RSS_KEYSIZE == 40);
3904 if (vi->nrxq != nbuckets) {
3905 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);"
3906 "performance will be impacted.\n", vi->nrxq, nbuckets);
3909 rss_getkey((void *)&raw_rss_key[0]);
3910 for (i = 0; i < nitems(rss_key); i++) {
3911 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
3913 t4_write_rss_key(sc, (void *)&rss_key[0], -1);
3915 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
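/*
 * Fill the indirection table: with kernel RSS support each slot is mapped
 * through rss_get_indirection_to_bucket() to the rxq that owns that bucket;
 * without it the rxqs are assigned to the slots round-robin until the table
 * is full.
 */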
3916 for (i = 0; i < vi->rss_size;) {
3918 j = rss_get_indirection_to_bucket(i);
3920 rxq = &sc->sge.rxq[vi->first_rxq + j];
3921 rss[i++] = rxq->iq.abs_id;
3923 for_each_rxq(vi, j, rxq) {
3924 rss[i++] = rxq->iq.abs_id;
3925 if (i == vi->rss_size)
3931 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
3934 if_printf(ifp, "rss_config failed: %d\n", rc);
3939 hashen = hashconfig_to_hashen(hashconfig);
3942 * We may have had to enable some hashes even though the global config
3943 * wants them disabled. This is a potential problem that must be
3944 * reported to the user.
3946 extra = hashen_to_hashconfig(hashen) ^ hashconfig;
3949 * If we consider only the supported hash types, then the enabled hashes
3950 * are a superset of the requested hashes. In other words, there cannot
3951 * be any supported hash that was requested but not enabled, but there
3952 * can be hashes that were not requested but had to be enabled.
3954 extra &= SUPPORTED_RSS_HASHTYPES;
3955 MPASS((extra & hashconfig) == 0);
3959 "global RSS config (0x%x) cannot be accomodated.\n",
3962 if (extra & RSS_HASHTYPE_RSS_IPV4)
3963 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
3964 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
3965 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
3966 if (extra & RSS_HASHTYPE_RSS_IPV6)
3967 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
3968 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
3969 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
3970 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
3971 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
3972 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
3973 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
3975 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
3976 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
3977 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3978 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
3980 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]);
3982 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
3987 vi->flags |= VI_INIT_DONE;
3999 vi_full_uninit(struct vi_info *vi)
4001 struct port_info *pi = vi->pi;
4002 struct adapter *sc = pi->adapter;
4004 struct sge_rxq *rxq;
4005 struct sge_txq *txq;
4007 struct sge_ofld_rxq *ofld_rxq;
4008 struct sge_wrq *ofld_txq;
4011 if (vi->flags & VI_INIT_DONE) {
4013 /* Need to quiesce queues. */
4015 if (vi->flags & VI_NETMAP)
4019 /* XXX: Only for the first VI? */
4021 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
4023 for_each_txq(vi, i, txq) {
4024 quiesce_txq(sc, txq);
4028 for_each_ofld_txq(vi, i, ofld_txq) {
4029 quiesce_wrq(sc, ofld_txq);
4033 for_each_rxq(vi, i, rxq) {
4034 quiesce_iq(sc, &rxq->iq);
4035 quiesce_fl(sc, &rxq->fl);
4039 for_each_ofld_rxq(vi, i, ofld_rxq) {
4040 quiesce_iq(sc, &ofld_rxq->iq);
4041 quiesce_fl(sc, &ofld_rxq->fl);
4044 free(vi->rss, M_CXGBE);
4050 t4_teardown_vi_queues(vi);
4051 vi->flags &= ~VI_INIT_DONE;
4057 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
4059 struct sge_eq *eq = &txq->eq;
4060 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
4062 (void) sc; /* unused */
4066 MPASS((eq->flags & EQ_ENABLED) == 0);
4070 /* Wait for the mp_ring to empty. */
4071 while (!mp_ring_is_idle(txq->r)) {
4072 mp_ring_check_drainage(txq->r, 0);
4073 pause("rquiesce", 1);
4076 /* Then wait for the hardware to finish. */
4077 while (spg->cidx != htobe16(eq->pidx))
4078 pause("equiesce", 1);
4080 /* Finally, wait for the driver to reclaim all descriptors. */
4081 while (eq->cidx != eq->pidx)
4082 pause("dquiesce", 1);
4086 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
4093 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
4095 (void) sc; /* unused */
4097 /* Synchronize with the interrupt handler */
4098 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
4103 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
4105 mtx_lock(&sc->sfl_lock);
4107 fl->flags |= FL_DOOMED;
4109 callout_stop(&sc->sfl_callout);
4110 mtx_unlock(&sc->sfl_lock);
4112 KASSERT((fl->flags & FL_STARVING) == 0,
4113 ("%s: still starving", __func__));
4117 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
4118 driver_intr_t *handler, void *arg, char *name)
4123 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
4124 RF_SHAREABLE | RF_ACTIVE);
4125 if (irq->res == NULL) {
4126 device_printf(sc->dev,
4127 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
4131 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
4132 NULL, handler, arg, &irq->tag);
4134 device_printf(sc->dev,
4135 "failed to setup interrupt for rid %d, name %s: %d\n",
4138 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
4144 t4_free_irq(struct adapter *sc, struct irq *irq)
4147 bus_teardown_intr(sc->dev, irq->res, irq->tag);
4149 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
4151 bzero(irq, sizeof(*irq));
4157 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
4160 uint32_t *p = (uint32_t *)(buf + start);
4162 for ( ; start <= end; start += sizeof(uint32_t))
4163 *p++ = t4_read_reg(sc, start);
4167 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
4170 const unsigned int *reg_ranges;
4171 static const unsigned int t4_reg_ranges[] = {
4391 static const unsigned int t5_reg_ranges[] = {
4832 reg_ranges = &t4_reg_ranges[0];
4833 n = nitems(t4_reg_ranges);
4835 reg_ranges = &t5_reg_ranges[0];
4836 n = nitems(t5_reg_ranges);
4839 regs->version = chip_id(sc) | chip_rev(sc) << 10;
4840 for (i = 0; i < n; i += 2)
4841 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4844 #define A_PL_INDIR_CMD 0x1f8
4846 #define S_PL_AUTOINC 31
4847 #define M_PL_AUTOINC 0x1U
4848 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
4849 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
4851 #define S_PL_VFID 20
4852 #define M_PL_VFID 0xffU
4853 #define V_PL_VFID(x) ((x) << S_PL_VFID)
4854 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
4857 #define M_PL_ADDR 0xfffffU
4858 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
4859 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
4861 #define A_PL_INDIR_DATA 0x1fc
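/*
 * VF MPS statistics are read through the PL indirect window: a command word
 * written to A_PL_INDIR_CMD selects the VF and the starting register (with
 * auto-increment enabled), and back-to-back reads of A_PL_INDIR_DATA return
 * the low and high 32-bit halves of each 64-bit counter, as read_vf_stat()
 * does below.
 */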
4864 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
4868 mtx_assert(&sc->regwin_lock, MA_OWNED);
4869 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4870 V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg)));
4871 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
4872 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
4873 return (((uint64_t)stats[1]) << 32 | stats[0]);
4877 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
4878 struct fw_vi_stats_vf *stats)
4881 #define GET_STAT(name) \
4882 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
4884 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
4885 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
4886 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
4887 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
4888 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
4889 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
4890 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
4891 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
4892 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
4893 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
4894 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
4895 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
4896 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
4897 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
4898 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
4899 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
4905 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
4909 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4910 V_PL_VFID(G_FW_VIID_VIN(viid)) |
4911 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
4912 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
4913 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
4914 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
4918 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
4921 const struct timeval interval = {0, 250000}; /* 250ms */
4923 if (!(vi->flags & VI_INIT_DONE))
4927 timevalsub(&tv, &interval);
4928 if (timevalcmp(&tv, &vi->last_refreshed, <))
4931 mtx_lock(&sc->regwin_lock);
4932 t4_get_vi_stats(sc, vi->viid, &vi->stats);
4933 getmicrotime(&vi->last_refreshed);
4934 mtx_unlock(&sc->regwin_lock);
4938 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4941 u_int v, tnl_cong_drops;
4943 const struct timeval interval = {0, 250000}; /* 250ms */
4946 timevalsub(&tv, &interval);
4947 if (timevalcmp(&tv, &pi->last_refreshed, <))
4951 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
4952 for (i = 0; i < NCHAN; i++) {
4953 if (pi->rx_chan_map & (1 << i)) {
4954 mtx_lock(&sc->regwin_lock);
4955 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4956 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4957 mtx_unlock(&sc->regwin_lock);
4958 tnl_cong_drops += v;
4961 pi->tnl_cong_drops = tnl_cong_drops;
4962 getmicrotime(&pi->last_refreshed);
4966 cxgbe_tick(void *arg)
4968 struct port_info *pi = arg;
4969 struct adapter *sc = pi->adapter;
4971 PORT_LOCK_ASSERT_OWNED(pi);
4972 cxgbe_refresh_stats(sc, pi);
4974 callout_schedule(&pi->tick, hz);
4980 struct vi_info *vi = arg;
4981 struct adapter *sc = vi->pi->adapter;
4983 vi_refresh_stats(sc, vi);
4985 callout_schedule(&vi->tick, hz);
4989 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4993 if (arg != ifp || ifp->if_type != IFT_ETHER)
4996 vlan = VLAN_DEVAT(ifp, vid);
4997 VLAN_SETCOOKIE(vlan, ifp);
5001 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
5005 panic("%s: opcode 0x%02x on iq %p with payload %p",
5006 __func__, rss->opcode, iq, m);
5008 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
5009 __func__, rss->opcode, iq, m);
5016 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
5018 uintptr_t *loc, new;
5020 if (opcode >= nitems(sc->cpl_handler))
5023 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
5024 loc = (uintptr_t *) &sc->cpl_handler[opcode];
5025 atomic_store_rel_ptr(loc, new);
5031 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
5035 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
5037 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
5038 __func__, iq, ctrl);
5044 t4_register_an_handler(struct adapter *sc, an_handler_t h)
5046 uintptr_t *loc, new;
5048 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
5049 loc = (uintptr_t *) &sc->an_handler;
5050 atomic_store_rel_ptr(loc, new);
5056 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
5058 const struct cpl_fw6_msg *cpl =
5059 __containerof(rpl, struct cpl_fw6_msg, data[0]);
5062 panic("%s: fw_msg type %d", __func__, cpl->type);
5064 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
5070 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
5072 uintptr_t *loc, new;
5074 if (type >= nitems(sc->fw_msg_handler))
5078 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
5079 * handler dispatch table. Reject any attempt to install a handler for
5082 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
5085 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
5086 loc = (uintptr_t *) &sc->fw_msg_handler[type];
5087 atomic_store_rel_ptr(loc, new);
5093 t4_sysctls(struct adapter *sc)
5095 struct sysctl_ctx_list *ctx;
5096 struct sysctl_oid *oid;
5097 struct sysctl_oid_list *children, *c0;
5098 static char *caps[] = {
5099 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
5100 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL" /* caps[1] niccaps */
5101 "\6HASHFILTER\7ETHOFLD",
5102 "\20\1TOE", /* caps[2] toecaps */
5103 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
5104 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
5105 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
5106 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
5107 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
5108 "\4PO_INITIAOR\5PO_TARGET"
5110 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
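/*
 * The strings above follow the kernel's "%b" bit-field notation that
 * sysctl_bitfield() appears to expect: the leading \20 selects hexadecimal
 * output and each subsequent \<n>NAME labels bit n (1-based) of the value.
 */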
5112 ctx = device_get_sysctl_ctx(sc->dev);
5117 oid = device_get_sysctl_tree(sc->dev);
5118 c0 = children = SYSCTL_CHILDREN(oid);
5120 sc->sc_do_rxcopy = 1;
5121 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5122 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5124 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5125 sc->params.nports, "# of ports");
5127 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5128 NULL, chip_rev(sc), "chip hardware revision");
5130 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5131 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5133 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5134 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5136 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5137 sc->cfcsum, "config file checksum");
5139 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5140 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
5141 sysctl_bitfield, "A", "available doorbells");
5143 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
5144 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
5145 sysctl_bitfield, "A", "available link capabilities");
5147 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
5148 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
5149 sysctl_bitfield, "A", "available NIC capabilities");
5151 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
5152 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
5153 sysctl_bitfield, "A", "available TCP offload capabilities");
5155 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
5156 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
5157 sysctl_bitfield, "A", "available RDMA capabilities");
5159 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
5160 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
5161 sysctl_bitfield, "A", "available iSCSI capabilities");
5163 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
5164 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
5165 sysctl_bitfield, "A", "available FCoE capabilities");
5167 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
5168 sc->params.vpd.cclk, "core clock frequency (in kHz)");
5170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5171 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
5172 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
5173 "interrupt holdoff timer values (us)");
5175 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5176 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
5177 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
5178 "interrupt holdoff packet counter values");
5180 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
5181 NULL, sc->tids.nftids, "number of filters");
5183 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
5184 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
5185 "chip temperature (in Celsius)");
5187 t4_sge_sysctls(sc, ctx, children);
5189 sc->lro_timeout = 100;
5190 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5191 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5193 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW,
5194 &sc->debug_flags, 0, "flags to enable runtime debugging");
5198 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
5200 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
5201 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
5202 "logs and miscellaneous information");
5203 children = SYSCTL_CHILDREN(oid);
5205 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
5206 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5207 sysctl_cctrl, "A", "congestion control");
5209 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
5210 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5211 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
5213 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
5214 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
5215 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
5217 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
5218 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
5219 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
5221 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
5222 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
5223 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
5225 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
5226 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
5227 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
5229 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
5230 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
5231 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
5233 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
5234 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5235 sysctl_cim_la, "A", "CIM logic analyzer");
5237 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5238 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5239 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5241 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5242 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5243 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5245 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5246 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5247 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5249 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5250 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5251 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5253 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5254 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5255 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5257 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5258 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5259 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5261 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5262 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5263 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5266 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5267 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5268 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5270 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5271 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5272 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5275 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5276 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5277 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5279 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5280 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5281 sysctl_cim_qcfg, "A", "CIM queue configuration");
5283 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5284 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5285 sysctl_cpl_stats, "A", "CPL statistics");
5287 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5288 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5289 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5291 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5292 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5293 sysctl_devlog, "A", "firmware's device log");
5295 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5296 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5297 sysctl_fcoe_stats, "A", "FCoE statistics");
5299 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5300 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5301 sysctl_hw_sched, "A", "hardware scheduler");
5303 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5304 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5305 sysctl_l2t, "A", "hardware L2 table");
5307 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5308 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5309 sysctl_lb_stats, "A", "loopback statistics");
5311 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5312 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5313 sysctl_meminfo, "A", "memory regions");
5315 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5316 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5317 sysctl_mps_tcam, "A", "MPS TCAM entries");
5319 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5320 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5321 sysctl_path_mtus, "A", "path MTUs");
5323 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5324 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5325 sysctl_pm_stats, "A", "PM statistics");
5327 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5328 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5329 sysctl_rdma_stats, "A", "RDMA statistics");
5331 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5332 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5333 sysctl_tcp_stats, "A", "TCP statistics");
5335 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5336 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5337 sysctl_tids, "A", "TID information");
5339 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5340 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5341 sysctl_tp_err_stats, "A", "TP error statistics");
5343 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5344 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5345 sysctl_tp_la, "A", "TP logic analyzer");
5347 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5348 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5349 sysctl_tx_rate, "A", "Tx rate");
5351 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5352 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5353 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5356 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5357 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5358 sysctl_wcwr_stats, "A", "write combined work requests");
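/*
 * dev.t4nex.X.toe: TOE tunables, created only when the adapter is offload
 * capable.  These are plain read/write integers; for example (instance
 * number hypothetical) something like "sysctl dev.t4nex.0.toe.rx_coalesce=0"
 * turns off receive coalescing for offloaded connections.
 */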
5363 if (is_offload(sc)) {
5367 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5368 NULL, "TOE parameters");
5369 children = SYSCTL_CHILDREN(oid);
5371 sc->tt.sndbuf = 256 * 1024;
5372 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5373 &sc->tt.sndbuf, 0, "max hardware send buffer size");
5376 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5377 &sc->tt.ddp, 0, "DDP allowed");
5379 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
5380 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
5381 &sc->tt.indsz, 0, "DDP max indicate size allowed");
5384 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
5385 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
5386 &sc->tt.ddp_thres, 0, "DDP threshold");
5388 sc->tt.rx_coalesce = 1;
5389 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5390 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5392 sc->tt.tx_align = 1;
5393 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5394 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
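/*
 * Per-VI sysctls.  These hang off each virtual interface's own device node
 * (dev.[nv](cxgbe|cxl).X, see below) and expose the queue counts/indices
 * along with the runtime-adjustable holdoff and queue-size knobs.
 */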
5400 vi_sysctls(struct vi_info *vi)
5402 struct sysctl_ctx_list *ctx;
5403 struct sysctl_oid *oid;
5404 struct sysctl_oid_list *children;
5406 ctx = device_get_sysctl_ctx(vi->dev);
5409 * dev.[nv](cxgbe|cxl).X.
5411 oid = device_get_sysctl_tree(vi->dev);
5412 children = SYSCTL_CHILDREN(oid);
5414 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
5415 vi->viid, "VI identifier");
5416 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5417 &vi->nrxq, 0, "# of rx queues");
5418 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5419 &vi->ntxq, 0, "# of tx queues");
5420 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5421 &vi->first_rxq, 0, "index of first rx queue");
5422 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5423 &vi->first_txq, 0, "index of first tx queue");
5425 if (vi->flags & VI_NETMAP)
5428 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
5429 CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5430 "Reserve queue 0 for non-flowid packets");
5433 if (vi->nofldrxq != 0) {
5434 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5436 "# of rx queues for offloaded TCP connections");
5437 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5439 "# of tx queues for offloaded TCP connections");
5440 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5441 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5442 "index of first TOE rx queue");
5443 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5444 CTLFLAG_RD, &vi->first_ofld_txq, 0,
5445 "index of first TOE tx queue");
5449 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5450 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5451 "holdoff timer index");
5452 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5453 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5454 "holdoff packet counter index");
5456 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5457 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5459 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5460 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
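/*
 * Per-port sysctls (dev.cxgbe.X / dev.cxl.X).  The MAC counters registered
 * under the "stats" node below are read straight from the MPS port
 * statistics registers on every access; only the buffer-group
 * overflow/truncation counters at the end come from the cached port_stats
 * structure.
 */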
5465 cxgbe_sysctls(struct port_info *pi)
5467 struct sysctl_ctx_list *ctx;
5468 struct sysctl_oid *oid;
5469 struct sysctl_oid_list *children;
5470 struct adapter *sc = pi->adapter;
5472 ctx = device_get_sysctl_ctx(pi->dev);
5477 oid = device_get_sysctl_tree(pi->dev);
5478 children = SYSCTL_CHILDREN(oid);
5480 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5481 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5482 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5483 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5484 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5485 "PHY temperature (in Celsius)");
5486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5487 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5488 "PHY firmware version");
5491 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5492 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
5493 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5496 * dev.cxgbe.X.stats.
5498 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5499 NULL, "port statistics");
5500 children = SYSCTL_CHILDREN(oid);
5501 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5502 &pi->tx_parse_error, 0,
5503 "# of tx packets with invalid length or # of segments");
5505 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5506 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5507 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5508 sysctl_handle_t4_reg64, "QU", desc)
5510 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5511 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5512 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5513 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5514 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5515 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5516 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5518 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5520 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5522 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5523 "# of tx frames in this range",
5524 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5525 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5526 "# of tx frames in this range",
5527 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5528 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5529 "# of tx frames in this range",
5530 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5531 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5532 "# of tx frames in this range",
5533 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5534 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5535 "# of tx frames in this range",
5536 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5537 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5538 "# of tx frames in this range",
5539 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5540 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5541 "# of tx frames in this range",
5542 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5543 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5545 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5546 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5547 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5549 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5550 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5551 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5552 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5553 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5555 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5556 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5557 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5558 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5559 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5561 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5562 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5564 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5565 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5566 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5567 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5568 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5569 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5570 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5571 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5572 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5573 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5574 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5575 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5576 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5577 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5578 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5579 "# of frames received with bad FCS",
5580 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5581 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5582 "# of frames received with length error",
5583 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5584 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5586 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5588 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5589 "# of rx frames in this range",
5590 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5591 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5592 "# of rx frames in this range",
5593 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5594 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5595 "# of rx frames in this range",
5596 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5597 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5598 "# of rx frames in this range",
5599 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5600 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5601 "# of rx frames in this range",
5602 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5603 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5604 "# of rx frames in this range",
5605 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5606 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5607 "# of rx frames in this range",
5608 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5609 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5610 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5611 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5612 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5613 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5614 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5615 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5616 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5617 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5618 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5619 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5620 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5621 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5622 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5623 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5624 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5625 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5626 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5628 #undef SYSCTL_ADD_T4_REG64
5630 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5631 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5632 &pi->stats.name, desc)
5634 /* We get these from port_stats and they may be stale by up to 1s */
5635 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5636 "# drops due to buffer-group 0 overflows");
5637 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5638 "# drops due to buffer-group 1 overflows");
5639 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5640 "# drops due to buffer-group 2 overflows");
5641 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5642 "# drops due to buffer-group 3 overflows");
5643 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5644 "# of buffer-group 0 truncated packets");
5645 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5646 "# of buffer-group 1 truncated packets");
5647 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5648 "# of buffer-group 2 truncated packets");
5649 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5650 "# of buffer-group 3 truncated packets");
5652 #undef SYSCTL_ADD_T4_PORTSTAT
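/*
 * Prints a read-only array of ints: arg1 points at the array, arg2 is its
 * size in bytes, and the values are emitted as one space-separated line.
 */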
5656 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5658 int rc, *i, space = 0;
5661 sbuf_new_for_sysctl(&sb, NULL, 64, req);
5662 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5664 sbuf_printf(&sb, " ");
5665 sbuf_printf(&sb, "%d", *i);
5668 rc = sbuf_finish(&sb);
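/*
 * Formats arg2 using the kernel's "%b" conversion with the bit-description
 * string in arg1.  For example, a value of 3 with the PAUSE bits string used
 * by sysctl_pause_settings below renders as "3<PAUSE_RX,PAUSE_TX>".
 */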
5674 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5679 rc = sysctl_wire_old_buffer(req, 0);
5683 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5687 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5688 rc = sbuf_finish(sb);
5695 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5697 struct port_info *pi = arg1;
5699 struct adapter *sc = pi->adapter;
5703 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
5706 /* XXX: magic numbers */
5707 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5709 end_synchronized_op(sc, 0);
5715 rc = sysctl_handle_int(oidp, &v, 0, req);
5720 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5722 struct vi_info *vi = arg1;
5725 val = vi->rsrv_noflowq;
5726 rc = sysctl_handle_int(oidp, &val, 0, req);
5727 if (rc != 0 || req->newptr == NULL)
5730 if ((val >= 1) && (vi->ntxq > 1))
5731 vi->rsrv_noflowq = 1;
5733 vi->rsrv_noflowq = 0;
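/*
 * Changing holdoff_tmr_idx validates the new index against SGE_NTIMERS and
 * then pushes the updated interrupt-holdoff parameters into the iq of every
 * rx (and offload rx) queue of the VI, using an atomic 8-bit store where the
 * platform provides one and a plain store otherwise.
 */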
5739 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5741 struct vi_info *vi = arg1;
5742 struct adapter *sc = vi->pi->adapter;
5744 struct sge_rxq *rxq;
5746 struct sge_ofld_rxq *ofld_rxq;
5752 rc = sysctl_handle_int(oidp, &idx, 0, req);
5753 if (rc != 0 || req->newptr == NULL)
5756 if (idx < 0 || idx >= SGE_NTIMERS)
5759 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5764 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
5765 for_each_rxq(vi, i, rxq) {
5766 #ifdef atomic_store_rel_8
5767 atomic_store_rel_8(&rxq->iq.intr_params, v);
5769 rxq->iq.intr_params = v;
5773 for_each_ofld_rxq(vi, i, ofld_rxq) {
5774 #ifdef atomic_store_rel_8
5775 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
5777 ofld_rxq->iq.intr_params = v;
5783 end_synchronized_op(sc, LOCK_HELD);
5788 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5790 struct vi_info *vi = arg1;
5791 struct adapter *sc = vi->pi->adapter;
5796 rc = sysctl_handle_int(oidp, &idx, 0, req);
5797 if (rc != 0 || req->newptr == NULL)
5800 if (idx < -1 || idx >= SGE_NCOUNTERS)
5803 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5808 if (vi->flags & VI_INIT_DONE)
5809 rc = EBUSY; /* cannot be changed once the queues are created */
5813 end_synchronized_op(sc, LOCK_HELD);
5818 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5820 struct vi_info *vi = arg1;
5821 struct adapter *sc = vi->pi->adapter;
5824 qsize = vi->qsize_rxq;
5826 rc = sysctl_handle_int(oidp, &qsize, 0, req);
5827 if (rc != 0 || req->newptr == NULL)
5830 if (qsize < 128 || (qsize & 7))
5833 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5838 if (vi->flags & VI_INIT_DONE)
5839 rc = EBUSY; /* cannot be changed once the queues are created */
5841 vi->qsize_rxq = qsize;
5843 end_synchronized_op(sc, LOCK_HELD);
5848 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5850 struct vi_info *vi = arg1;
5851 struct adapter *sc = vi->pi->adapter;
5854 qsize = vi->qsize_txq;
5856 rc = sysctl_handle_int(oidp, &qsize, 0, req);
5857 if (rc != 0 || req->newptr == NULL)
5860 if (qsize < 128 || qsize > 65536)
5863 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5868 if (vi->flags & VI_INIT_DONE)
5869 rc = EBUSY; /* cannot be changed once the queues are created */
5871 vi->qsize_txq = qsize;
5873 end_synchronized_op(sc, LOCK_HELD);
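/*
 * pause_settings is a string node: reads show the current flow-control state
 * via "%b"; writes take a single digit 0-3 encoding the requested
 * PAUSE_RX/PAUSE_TX bits and, if that differs from the current request, the
 * link is restarted with the new flow-control settings.
 */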
5878 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
5880 struct port_info *pi = arg1;
5881 struct adapter *sc = pi->adapter;
5882 struct link_config *lc = &pi->link_cfg;
5885 if (req->newptr == NULL) {
5887 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
5889 rc = sysctl_wire_old_buffer(req, 0);
5893 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5897 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
5898 rc = sbuf_finish(sb);
5904 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
5907 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5913 if (s[0] < '0' || s[0] > '9')
5914 return (EINVAL); /* not a number */
5916 if (n & ~(PAUSE_TX | PAUSE_RX))
5917 return (EINVAL); /* some other bit is set too */
5919 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
5923 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
5924 int link_ok = lc->link_ok;
5926 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
5927 lc->requested_fc |= n;
5928 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
5929 lc->link_ok = link_ok; /* restore */
5931 end_synchronized_op(sc, 0);
5938 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5940 struct adapter *sc = arg1;
5944 val = t4_read_reg64(sc, reg);
5946 return (sysctl_handle_64(oidp, &val, 0, req));
5950 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5952 struct adapter *sc = arg1;
5954 uint32_t param, val;
5956 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5959 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5960 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5961 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5962 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5963 end_synchronized_op(sc, 0);
5967 /* unknown is returned as 0 but we display -1 in that case */
5968 t = val == 0 ? -1 : val;
5970 rc = sysctl_handle_int(oidp, &t, 0, req);
5976 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
5978 struct adapter *sc = arg1;
5981 uint16_t incr[NMTUS][NCCTRL_WIN];
5982 static const char *dec_fac[] = {
5983 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
5987 rc = sysctl_wire_old_buffer(req, 0);
5991 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5995 t4_read_cong_tbl(sc, incr);
5997 for (i = 0; i < NCCTRL_WIN; ++i) {
5998 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
5999 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6000 incr[5][i], incr[6][i], incr[7][i]);
6001 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6002 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6003 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6004 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6007 rc = sbuf_finish(sb);
6013 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6014 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
6015 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
6016 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
6020 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6022 struct adapter *sc = arg1;
6024 int rc, i, n, qid = arg2;
6027 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
6029 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6030 ("%s: bad qid %d\n", __func__, qid));
6032 if (qid < CIM_NUM_IBQ) {
6035 n = 4 * CIM_IBQ_SIZE;
6036 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6037 rc = t4_read_cim_ibq(sc, qid, buf, n);
6039 /* outbound queue */
6042 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6043 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6044 rc = t4_read_cim_obq(sc, qid, buf, n);
6051 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
6053 rc = sysctl_wire_old_buffer(req, 0);
6057 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6063 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
6064 for (i = 0, p = buf; i < n; i += 16, p += 4)
6065 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6068 rc = sbuf_finish(sb);
6076 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6078 struct adapter *sc = arg1;
6084 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6088 rc = sysctl_wire_old_buffer(req, 0);
6092 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6096 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6099 rc = -t4_cim_read_la(sc, buf, NULL);
6103 sbuf_printf(sb, "Status Data PC%s",
6104 cfg & F_UPDBGLACAPTPCONLY ? "" :
6105 " LS0Stat LS0Addr LS0Data");
6107 KASSERT((sc->params.cim_la_size & 7) == 0,
6108 ("%s: p will walk off the end of buf", __func__));
6110 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
6111 if (cfg & F_UPDBGLACAPTPCONLY) {
6112 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
6114 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
6115 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6116 p[4] & 0xff, p[5] >> 8);
6117 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
6118 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6119 p[1] & 0xf, p[2] >> 4);
6122 "\n %02x %x%07x %x%07x %08x %08x "
6124 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6125 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6130 rc = sbuf_finish(sb);
6138 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6140 struct adapter *sc = arg1;
6146 rc = sysctl_wire_old_buffer(req, 0);
6150 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6154 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6157 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6160 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6161 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6165 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
6166 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6167 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
6168 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6169 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6170 (p[1] >> 2) | ((p[2] & 3) << 30),
6171 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6175 rc = sbuf_finish(sb);
6182 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6184 struct adapter *sc = arg1;
6190 rc = sysctl_wire_old_buffer(req, 0);
6194 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6198 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6201 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6204 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
6205 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6206 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
6207 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6208 p[4], p[3], p[2], p[1], p[0]);
6211 sbuf_printf(sb, "\n\nCntl ID Data");
6212 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6213 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
6214 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6217 rc = sbuf_finish(sb);
6224 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
6226 struct adapter *sc = arg1;
6229 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6230 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6231 uint16_t thres[CIM_NUM_IBQ];
6232 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
6233 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
6234 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
6237 cim_num_obq = CIM_NUM_OBQ;
6238 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
6239 obq_rdaddr = A_UP_OBQ_0_REALADDR;
6241 cim_num_obq = CIM_NUM_OBQ_T5;
6242 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
6243 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
6245 nq = CIM_NUM_IBQ + cim_num_obq;
6247 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
6249 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
6253 t4_read_cimq_cfg(sc, base, size, thres);
6255 rc = sysctl_wire_old_buffer(req, 0);
6259 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6263 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
6265 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
6266 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
6267 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
6268 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6269 G_QUEREMFLITS(p[2]) * 16);
6270 for ( ; i < nq; i++, p += 4, wr += 2)
6271 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
6272 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
6273 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6274 G_QUEREMFLITS(p[2]) * 16);
6276 rc = sbuf_finish(sb);
6283 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
6285 struct adapter *sc = arg1;
6288 struct tp_cpl_stats stats;
6290 rc = sysctl_wire_old_buffer(req, 0);
6294 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6298 t4_tp_get_cpl_stats(sc, &stats);
6300 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6302 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
6303 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
6304 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
6305 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
6307 rc = sbuf_finish(sb);
6314 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
6316 struct adapter *sc = arg1;
6319 struct tp_usm_stats stats;
6321 rc = sysctl_wire_old_buffer(req, 0);
6325 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6329 t4_get_usm_stats(sc, &stats);
6331 sbuf_printf(sb, "Frames: %u\n", stats.frames);
6332 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
6333 sbuf_printf(sb, "Drops: %u", stats.drops);
6335 rc = sbuf_finish(sb);
6341 const char *devlog_level_strings[] = {
6342 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
6343 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
6344 [FW_DEVLOG_LEVEL_ERR] = "ERR",
6345 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
6346 [FW_DEVLOG_LEVEL_INFO] = "INFO",
6347 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
6350 const char *devlog_facility_strings[] = {
6351 [FW_DEVLOG_FACILITY_CORE] = "CORE",
6352 [FW_DEVLOG_FACILITY_CF] = "CF",
6353 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
6354 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
6355 [FW_DEVLOG_FACILITY_RES] = "RES",
6356 [FW_DEVLOG_FACILITY_HW] = "HW",
6357 [FW_DEVLOG_FACILITY_FLR] = "FLR",
6358 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
6359 [FW_DEVLOG_FACILITY_PHY] = "PHY",
6360 [FW_DEVLOG_FACILITY_MAC] = "MAC",
6361 [FW_DEVLOG_FACILITY_PORT] = "PORT",
6362 [FW_DEVLOG_FACILITY_VI] = "VI",
6363 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
6364 [FW_DEVLOG_FACILITY_ACL] = "ACL",
6365 [FW_DEVLOG_FACILITY_TM] = "TM",
6366 [FW_DEVLOG_FACILITY_QFC] = "QFC",
6367 [FW_DEVLOG_FACILITY_DCB] = "DCB",
6368 [FW_DEVLOG_FACILITY_ETH] = "ETH",
6369 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
6370 [FW_DEVLOG_FACILITY_RI] = "RI",
6371 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
6372 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
6373 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
6374 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
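/*
 * Dumps the firmware's circular device log.  The whole log is read out of
 * adapter memory, entries are byte-swapped in place, the entry with the
 * lowest timestamp is taken as the start of the log (that is where it
 * wraps), and the entries are printed in order with level and facility
 * decoded through the string tables above.
 */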
6378 sysctl_devlog(SYSCTL_HANDLER_ARGS)
6380 struct adapter *sc = arg1;
6381 struct devlog_params *dparams = &sc->params.devlog;
6382 struct fw_devlog_e *buf, *e;
6383 int i, j, rc, nentries, first = 0, m;
6385 uint64_t ftstamp = UINT64_MAX;
6387 if (dparams->start == 0) {
6388 dparams->memtype = FW_MEMTYPE_EDC0;
6389 dparams->start = 0x84000;
6390 dparams->size = 32768;
6393 nentries = dparams->size / sizeof(struct fw_devlog_e);
6395 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
6399 m = fwmtype_to_hwmtype(dparams->memtype);
6400 rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
6404 for (i = 0; i < nentries; i++) {
6407 if (e->timestamp == 0)
6410 e->timestamp = be64toh(e->timestamp);
6411 e->seqno = be32toh(e->seqno);
6412 for (j = 0; j < 8; j++)
6413 e->params[j] = be32toh(e->params[j]);
6415 if (e->timestamp < ftstamp) {
6416 ftstamp = e->timestamp;
6421 if (buf[first].timestamp == 0)
6422 goto done; /* nothing in the log */
6424 rc = sysctl_wire_old_buffer(req, 0);
6428 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6433 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
6434 "Seq#", "Tstamp", "Level", "Facility", "Message");
6439 if (e->timestamp == 0)
6442 sbuf_printf(sb, "%10d %15ju %8s %8s ",
6443 e->seqno, e->timestamp,
6444 (e->level < nitems(devlog_level_strings) ?
6445 devlog_level_strings[e->level] : "UNKNOWN"),
6446 (e->facility < nitems(devlog_facility_strings) ?
6447 devlog_facility_strings[e->facility] : "UNKNOWN"));
6448 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6449 e->params[2], e->params[3], e->params[4],
6450 e->params[5], e->params[6], e->params[7]);
6452 if (++i == nentries)
6454 } while (i != first);
6456 rc = sbuf_finish(sb);
6464 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6466 struct adapter *sc = arg1;
6469 struct tp_fcoe_stats stats[4];
6471 rc = sysctl_wire_old_buffer(req, 0);
6475 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6479 t4_get_fcoe_stats(sc, 0, &stats[0]);
6480 t4_get_fcoe_stats(sc, 1, &stats[1]);
6481 t4_get_fcoe_stats(sc, 2, &stats[2]);
6482 t4_get_fcoe_stats(sc, 3, &stats[3]);
6484 sbuf_printf(sb, " channel 0 channel 1 "
6485 "channel 2 channel 3\n");
6486 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
6487 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
6488 stats[3].octetsDDP);
6489 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
6490 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
6491 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
6492 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
6493 stats[3].framesDrop);
6495 rc = sbuf_finish(sb);
6502 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6504 struct adapter *sc = arg1;
6507 unsigned int map, kbps, ipg, mode;
6508 unsigned int pace_tab[NTX_SCHED];
6510 rc = sysctl_wire_old_buffer(req, 0);
6514 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6518 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6519 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6520 t4_read_pace_tbl(sc, pace_tab);
6522 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
6523 "Class IPG (0.1 ns) Flow IPG (us)");
6525 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6526 t4_get_tx_sched(sc, i, &kbps, &ipg);
6527 sbuf_printf(sb, "\n %u %-5s %u ", i,
6528 (mode & (1 << i)) ? "flow" : "class", map & 3);
6530 sbuf_printf(sb, "%9u ", kbps);
6532 sbuf_printf(sb, " disabled ");
6535 sbuf_printf(sb, "%13u ", ipg);
6537 sbuf_printf(sb, " disabled ");
6540 sbuf_printf(sb, "%10u", pace_tab[i]);
6542 sbuf_printf(sb, " disabled");
6545 rc = sbuf_finish(sb);
6552 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6554 struct adapter *sc = arg1;
6558 struct lb_port_stats s[2];
6559 static const char *stat_name[] = {
6560 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6561 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6562 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
6563 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6564 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6565 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6566 "BG2FramesTrunc:", "BG3FramesTrunc:"
6569 rc = sysctl_wire_old_buffer(req, 0);
6573 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6577 memset(s, 0, sizeof(s));
6579 for (i = 0; i < 4; i += 2) {
6580 t4_get_lb_stats(sc, i, &s[0]);
6581 t4_get_lb_stats(sc, i + 1, &s[1]);
6585 sbuf_printf(sb, "%s Loopback %u"
6586 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6588 for (j = 0; j < nitems(stat_name); j++)
6589 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6593 rc = sbuf_finish(sb);
6600 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6603 struct port_info *pi = arg1;
6605 static const char *linkdnreasons[] = {
6606 "non-specific", "remote fault", "autoneg failed", "reserved3",
6607 "PHY overheated", "unknown", "rx los", "reserved7"
6610 rc = sysctl_wire_old_buffer(req, 0);
6613 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6617 if (pi->linkdnrc < 0)
6618 sbuf_printf(sb, "n/a");
6619 else if (pi->linkdnrc < nitems(linkdnreasons))
6620 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
6622 sbuf_printf(sb, "%d", pi->linkdnrc);
6624 rc = sbuf_finish(sb);
6637 mem_desc_cmp(const void *a, const void *b)
6639 return ((const struct mem_desc *)a)->base -
6640 ((const struct mem_desc *)b)->base;
6644 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
6649 size = to - from + 1;
6653 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
6654 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
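/*
 * Produces a map of the adapter's memory: the enabled memories (EDC0, EDC1,
 * MC/MC0, MC1) are collected and sorted, the hardware region base registers
 * are snapshotted into mem[], holes between memories are added as hidden
 * entries, and everything is printed with mem_region_show() followed by the
 * page-pool and p-struct usage summaries.
 */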
6658 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6660 struct adapter *sc = arg1;
6663 uint32_t lo, hi, used, alloc;
6664 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6665 static const char *region[] = {
6666 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6667 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6668 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6669 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6670 "RQUDP region:", "PBL region:", "TXPBL region:",
6671 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6674 struct mem_desc avail[4];
6675 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
6676 struct mem_desc *md = mem;
6678 rc = sysctl_wire_old_buffer(req, 0);
6682 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6686 for (i = 0; i < nitems(mem); i++) {
6691 /* Find and sort the populated memory ranges */
6693 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6694 if (lo & F_EDRAM0_ENABLE) {
6695 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6696 avail[i].base = G_EDRAM0_BASE(hi) << 20;
6697 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6701 if (lo & F_EDRAM1_ENABLE) {
6702 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6703 avail[i].base = G_EDRAM1_BASE(hi) << 20;
6704 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6708 if (lo & F_EXT_MEM_ENABLE) {
6709 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6710 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6711 avail[i].limit = avail[i].base +
6712 (G_EXT_MEM_SIZE(hi) << 20);
6713 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */
6716 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
6717 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6718 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6719 avail[i].limit = avail[i].base +
6720 (G_EXT_MEM1_SIZE(hi) << 20);
6724 if (!i) /* no memory available */
6726 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6728 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6729 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6730 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6731 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6732 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6733 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6734 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6735 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6736 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6738 /* the next few have explicit upper bounds */
6739 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6740 md->limit = md->base - 1 +
6741 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6742 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6745 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6746 md->limit = md->base - 1 +
6747 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6748 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6751 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6752 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
6753 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6754 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
6757 md->idx = nitems(region); /* hide it */
6761 #define ulp_region(reg) \
6762 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6763 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6765 ulp_region(RX_ISCSI);
6766 ulp_region(RX_TDDP);
6768 ulp_region(RX_STAG);
6770 ulp_region(RX_RQUDP);
6776 md->idx = nitems(region);
6777 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
6778 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
6779 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
6780 A_SGE_DBVFIFO_SIZE))) << 2) - 1;
6784 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6785 md->limit = md->base + sc->tids.ntids - 1;
6787 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6788 md->limit = md->base + sc->tids.ntids - 1;
6791 md->base = sc->vres.ocq.start;
6792 if (sc->vres.ocq.size)
6793 md->limit = md->base + sc->vres.ocq.size - 1;
6795 md->idx = nitems(region); /* hide it */
6798 /* add any address-space holes, there can be up to 3 */
6799 for (n = 0; n < i - 1; n++)
6800 if (avail[n].limit < avail[n + 1].base)
6801 (md++)->base = avail[n].limit;
6803 (md++)->base = avail[n].limit;
6806 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6808 for (lo = 0; lo < i; lo++)
6809 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6810 avail[lo].limit - 1);
6812 sbuf_printf(sb, "\n");
6813 for (i = 0; i < n; i++) {
6814 if (mem[i].idx >= nitems(region))
6815 continue; /* skip holes */
6817 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6818 mem_region_show(sb, region[mem[i].idx], mem[i].base,
6822 sbuf_printf(sb, "\n");
6823 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6824 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6825 mem_region_show(sb, "uP RAM:", lo, hi);
6827 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6828 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6829 mem_region_show(sb, "uP Extmem2:", lo, hi);
6831 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6832 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6834 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6835 (lo & F_PMRXNUMCHN) ? 2 : 1);
6837 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6838 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6839 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6841 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6842 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6843 sbuf_printf(sb, "%u p-structs\n",
6844 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6846 for (i = 0; i < 4; i++) {
6847 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6850 alloc = G_ALLOC(lo);
6852 used = G_T5_USED(lo);
6853 alloc = G_T5_ALLOC(lo);
6855 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6858 for (i = 0; i < 4; i++) {
6859 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6862 alloc = G_ALLOC(lo);
6864 used = G_T5_USED(lo);
6865 alloc = G_T5_ALLOC(lo);
6868 "\nLoopback %d using %u pages out of %u allocated",
6872 rc = sbuf_finish(sb);
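/*
 * The MPS TCAM stores each entry as an X/Y pair; tcamxy2valmask() converts
 * that pair back into an Ethernet address and match mask for display.
 * sysctl_mps_tcam walks every TCAM entry and, when the replicate bit is set,
 * asks the firmware (FW_LDST_CMD) for the entry's replication map.
 */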
6879 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
6883 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
6887 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
6889 struct adapter *sc = arg1;
6893 rc = sysctl_wire_old_buffer(req, 0);
6897 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6902 "Idx Ethernet address Mask Vld Ports PF"
6903 " VF Replication P0 P1 P2 P3 ML");
6904 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
6905 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6906 for (i = 0; i < n; i++) {
6907 uint64_t tcamx, tcamy, mask;
6908 uint32_t cls_lo, cls_hi;
6909 uint8_t addr[ETHER_ADDR_LEN];
6911 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
6912 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
6913 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6914 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6919 tcamxy2valmask(tcamx, tcamy, addr, &mask);
6920 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
6921 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
6922 addr[3], addr[4], addr[5], (uintmax_t)mask,
6923 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
6924 G_PORTMAP(cls_hi), G_PF(cls_lo),
6925 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
6927 if (cls_lo & F_REPLICATE) {
6928 struct fw_ldst_cmd ldst_cmd;
6930 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6931 ldst_cmd.op_to_addrspace =
6932 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6933 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6934 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6935 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6936 ldst_cmd.u.mps.rplc.fid_idx =
6937 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6938 V_FW_LDST_CMD_IDX(i));
6940 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6944 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
6945 sizeof(ldst_cmd), &ldst_cmd);
6946 end_synchronized_op(sc, 0);
6950 " ------------ error %3u ------------", rc);
6953 sbuf_printf(sb, " %08x %08x %08x %08x",
6954 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
6955 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
6956 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
6957 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
6960 sbuf_printf(sb, "%36s", "");
6962 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
6963 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
6964 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
6968 (void) sbuf_finish(sb);
6970 rc = sbuf_finish(sb);
6977 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6979 struct adapter *sc = arg1;
6982 uint16_t mtus[NMTUS];
6984 rc = sysctl_wire_old_buffer(req, 0);
6988 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6992 t4_read_mtu_tbl(sc, mtus, NULL);
6994 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6995 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6996 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6997 mtus[14], mtus[15]);
6999 rc = sbuf_finish(sb);
7006 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7008 struct adapter *sc = arg1;
7011 uint32_t cnt[PM_NSTATS];
7012 uint64_t cyc[PM_NSTATS];
7013 static const char *rx_stats[] = {
7014 "Read:", "Write bypass:", "Write mem:", "Flush:"
7016 static const char *tx_stats[] = {
7017 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
7020 rc = sysctl_wire_old_buffer(req, 0);
7024 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7028 t4_pmtx_get_stats(sc, cnt, cyc);
7029 sbuf_printf(sb, " Tx pcmds Tx bytes");
7030 for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
7031 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
7034 t4_pmrx_get_stats(sc, cnt, cyc);
7035 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
7036 for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
7037 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
7040 rc = sbuf_finish(sb);
7047 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
7049 struct adapter *sc = arg1;
7052 struct tp_rdma_stats stats;
7054 rc = sysctl_wire_old_buffer(req, 0);
7058 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7062 t4_tp_get_rdma_stats(sc, &stats);
7063 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
7064 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
7066 rc = sbuf_finish(sb);
7073 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
7075 struct adapter *sc = arg1;
7078 struct tp_tcp_stats v4, v6;
7080 rc = sysctl_wire_old_buffer(req, 0);
7084 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7088 t4_tp_get_tcp_stats(sc, &v4, &v6);
7091 sbuf_printf(sb, "OutRsts: %20u %20u\n",
7092 v4.tcpOutRsts, v6.tcpOutRsts);
7093 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
7094 v4.tcpInSegs, v6.tcpInSegs);
7095 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
7096 v4.tcpOutSegs, v6.tcpOutSegs);
7097 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
7098 v4.tcpRetransSegs, v6.tcpRetransSegs);
7100 rc = sbuf_finish(sb);
7107 sysctl_tids(SYSCTL_HANDLER_ARGS)
7109 struct adapter *sc = arg1;
7112 struct tid_info *t = &sc->tids;
7114 rc = sysctl_wire_old_buffer(req, 0);
7118 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7123 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
7128 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7129 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
7132 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
7133 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
7136 sbuf_printf(sb, "TID range: %u-%u",
7137 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
7141 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
7142 sbuf_printf(sb, ", in use: %u\n",
7143 atomic_load_acq_int(&t->tids_in_use));
7147 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
7148 t->stid_base + t->nstids - 1, t->stids_in_use);
7152 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
7153 t->ftid_base + t->nftids - 1);
7157 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
7158 t->etid_base + t->netids - 1);
7161 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
7162 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
7163 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
7165 rc = sbuf_finish(sb);
7172 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
7174 struct adapter *sc = arg1;
7177 struct tp_err_stats stats;
7179 rc = sysctl_wire_old_buffer(req, 0);
7183 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7187 t4_tp_get_err_stats(sc, &stats);
7189 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
7191 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
7192 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
7193 stats.macInErrs[3]);
7194 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
7195 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
7196 stats.hdrInErrs[3]);
7197 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
7198 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
7199 stats.tcpInErrs[3]);
7200 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
7201 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
7202 stats.tcp6InErrs[3]);
7203 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
7204 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
7205 stats.tnlCongDrops[3]);
7206 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
7207 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
7208 stats.tnlTxDrops[3]);
7209 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
7210 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
7211 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
7212 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
7213 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
7214 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
7215 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
7216 stats.ofldNoNeigh, stats.ofldCongDefer);
7218 rc = sbuf_finish(sb);
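/*
 * Support for the tp_la node: field_desc_show() prints one 64-bit TP debug
 * LA word as a list of named bit fields, wrapping the output near 80
 * columns, and tp_la0/tp_la1/tp_la2 describe the word layouts used by the
 * different DBGLAMODE capture modes handled in sysctl_tp_la().
 */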
7231 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
7237 uint64_t mask = (1ULL << f->width) - 1;
7238 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
7239 ((uintmax_t)v >> f->start) & mask);
7241 if (line_size + len >= 79) {
7243 sbuf_printf(sb, "\n ");
7245 sbuf_printf(sb, "%s ", buf);
7246 line_size += len + 1;
7249 sbuf_printf(sb, "\n");
7252 static struct field_desc tp_la0[] = {
7253 { "RcfOpCodeOut", 60, 4 },
7255 { "WcfState", 52, 4 },
7256 { "RcfOpcSrcOut", 50, 2 },
7257 { "CRxError", 49, 1 },
7258 { "ERxError", 48, 1 },
7259 { "SanityFailed", 47, 1 },
7260 { "SpuriousMsg", 46, 1 },
7261 { "FlushInputMsg", 45, 1 },
7262 { "FlushInputCpl", 44, 1 },
7263 { "RssUpBit", 43, 1 },
7264 { "RssFilterHit", 42, 1 },
7266 { "InitTcb", 31, 1 },
7267 { "LineNumber", 24, 7 },
7269 { "EdataOut", 22, 1 },
7271 { "CdataOut", 20, 1 },
7272 { "EreadPdu", 19, 1 },
7273 { "CreadPdu", 18, 1 },
7274 { "TunnelPkt", 17, 1 },
7275 { "RcfPeerFin", 16, 1 },
7276 { "RcfReasonOut", 12, 4 },
7277 { "TxCchannel", 10, 2 },
7278 { "RcfTxChannel", 8, 2 },
7279 { "RxEchannel", 6, 2 },
7280 { "RcfRxChannel", 5, 1 },
7281 { "RcfDataOutSrdy", 4, 1 },
7283 { "RxOoDvld", 2, 1 },
7284 { "RxCongestion", 1, 1 },
7285 { "TxCongestion", 0, 1 },
7289 static struct field_desc tp_la1[] = {
7290 { "CplCmdIn", 56, 8 },
7291 { "CplCmdOut", 48, 8 },
7292 { "ESynOut", 47, 1 },
7293 { "EAckOut", 46, 1 },
7294 { "EFinOut", 45, 1 },
7295 { "ERstOut", 44, 1 },
7300 { "DataIn", 39, 1 },
7301 { "DataInVld", 38, 1 },
7303 { "RxBufEmpty", 36, 1 },
7305 { "RxFbCongestion", 34, 1 },
7306 { "TxFbCongestion", 33, 1 },
7307 { "TxPktSumSrdy", 32, 1 },
7308 { "RcfUlpType", 28, 4 },
7310 { "Ebypass", 26, 1 },
7312 { "Static0", 24, 1 },
7314 { "Cbypass", 22, 1 },
7316 { "CPktOut", 20, 1 },
7317 { "RxPagePoolFull", 18, 2 },
7318 { "RxLpbkPkt", 17, 1 },
7319 { "TxLpbkPkt", 16, 1 },
7320 { "RxVfValid", 15, 1 },
7321 { "SynLearned", 14, 1 },
7322 { "SetDelEntry", 13, 1 },
7323 { "SetInvEntry", 12, 1 },
7324 { "CpcmdDvld", 11, 1 },
7325 { "CpcmdSave", 10, 1 },
7326 { "RxPstructsFull", 8, 2 },
7327 { "EpcmdDvld", 7, 1 },
7328 { "EpcmdFlush", 6, 1 },
7329 { "EpcmdTrimPrefix", 5, 1 },
7330 { "EpcmdTrimPostfix", 4, 1 },
7331 { "ERssIp4Pkt", 3, 1 },
7332 { "ERssIp6Pkt", 2, 1 },
7333 { "ERssTcpUdpPkt", 1, 1 },
7334 { "ERssFceFipPkt", 0, 1 },
7338 static struct field_desc tp_la2[] = {
7339 { "CplCmdIn", 56, 8 },
7340 { "MpsVfVld", 55, 1 },
7347 { "DataIn", 39, 1 },
7348 { "DataInVld", 38, 1 },
7350 { "RxBufEmpty", 36, 1 },
7352 { "RxFbCongestion", 34, 1 },
7353 { "TxFbCongestion", 33, 1 },
7354 { "TxPktSumSrdy", 32, 1 },
7355 { "RcfUlpType", 28, 4 },
7357 { "Ebypass", 26, 1 },
7359 { "Static0", 24, 1 },
7361 { "Cbypass", 22, 1 },
7363 { "CPktOut", 20, 1 },
7364 { "RxPagePoolFull", 18, 2 },
7365 { "RxLpbkPkt", 17, 1 },
7366 { "TxLpbkPkt", 16, 1 },
7367 { "RxVfValid", 15, 1 },
7368 { "SynLearned", 14, 1 },
7369 { "SetDelEntry", 13, 1 },
7370 { "SetInvEntry", 12, 1 },
7371 { "CpcmdDvld", 11, 1 },
7372 { "CpcmdSave", 10, 1 },
7373 { "RxPstructsFull", 8, 2 },
7374 { "EpcmdDvld", 7, 1 },
7375 { "EpcmdFlush", 6, 1 },
7376 { "EpcmdTrimPrefix", 5, 1 },
7377 { "EpcmdTrimPostfix", 4, 1 },
7378 { "ERssIp4Pkt", 3, 1 },
7379 { "ERssIp6Pkt", 2, 1 },
7380 { "ERssTcpUdpPkt", 1, 1 },
7381 { "ERssFceFipPkt", 0, 1 },
7386 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
7389 field_desc_show(sb, *p, tp_la0);
7393 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
7397 sbuf_printf(sb, "\n");
7398 field_desc_show(sb, p[0], tp_la0);
7399 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7400 field_desc_show(sb, p[1], tp_la0);
7404 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
7408 sbuf_printf(sb, "\n");
7409 field_desc_show(sb, p[0], tp_la0);
7410 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7411 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
7415 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
7417 struct adapter *sc = arg1;
7422 void (*show_func)(struct sbuf *, uint64_t *, int);
7424 rc = sysctl_wire_old_buffer(req, 0);
7428 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7432 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
7434 t4_tp_read_la(sc, buf, NULL);
7437 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
7440 show_func = tp_la_show2;
7444 show_func = tp_la_show3;
7448 show_func = tp_la_show;
7451 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
7452 (*show_func)(sb, p, i);
7454 rc = sbuf_finish(sb);
7461 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
7463 struct adapter *sc = arg1;
7466 u64 nrate[NCHAN], orate[NCHAN];
7468 rc = sysctl_wire_old_buffer(req, 0);
7472 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7476 t4_get_chan_txrate(sc, nrate, orate);
7477 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
7479 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
7480 nrate[0], nrate[1], nrate[2], nrate[3]);
7481 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
7482 orate[0], orate[1], orate[2], orate[3]);
7484 rc = sbuf_finish(sb);
7491 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7493 struct adapter *sc = arg1;
7498 rc = sysctl_wire_old_buffer(req, 0);
7502 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7506 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7509 t4_ulprx_read_la(sc, buf);
7512 sbuf_printf(sb, " Pcmd Type Message"
7514 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7515 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
7516 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7519 rc = sbuf_finish(sb);
7526 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
7528 struct adapter *sc = arg1;
7532 rc = sysctl_wire_old_buffer(req, 0);
7536 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7540 v = t4_read_reg(sc, A_SGE_STAT_CFG);
7541 if (G_STATSOURCE_T5(v) == 7) {
7542 if (G_STATMODE(v) == 0) {
7543 sbuf_printf(sb, "total %d, incomplete %d",
7544 t4_read_reg(sc, A_SGE_STAT_TOTAL),
7545 t4_read_reg(sc, A_SGE_STAT_MATCH));
7546 } else if (G_STATMODE(v) == 1) {
7547 sbuf_printf(sb, "total %d, data overflow %d",
7548 t4_read_reg(sc, A_SGE_STAT_TOTAL),
7549 t4_read_reg(sc, A_SGE_STAT_MATCH));
7552 rc = sbuf_finish(sb);
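/*
 * Filter mode handling.  fconf is the hardware's ingress filter field
 * selection (the TP_VLAN_PRI_MAP register contents) and mode is the
 * driver/ioctl view of the same information as T4_FILTER_* bits.
 * fconf_to_mode() and mode_to_fconf() convert between the two, and
 * fspec_to_fconf() computes the selectors a particular filter specification
 * requires so it can be checked against the active mode.
 */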
7560 fconf_to_mode(uint32_t fconf)
7564 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7565 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7567 if (fconf & F_FRAGMENTATION)
7568 mode |= T4_FILTER_IP_FRAGMENT;
7570 if (fconf & F_MPSHITTYPE)
7571 mode |= T4_FILTER_MPS_HIT_TYPE;
7573 if (fconf & F_MACMATCH)
7574 mode |= T4_FILTER_MAC_IDX;
7576 if (fconf & F_ETHERTYPE)
7577 mode |= T4_FILTER_ETH_TYPE;
7579 if (fconf & F_PROTOCOL)
7580 mode |= T4_FILTER_IP_PROTO;
7583 mode |= T4_FILTER_IP_TOS;
7586 mode |= T4_FILTER_VLAN;
7588 if (fconf & F_VNIC_ID)
7589 mode |= T4_FILTER_VNIC;
7592 mode |= T4_FILTER_PORT;
7595 mode |= T4_FILTER_FCoE;
7601 mode_to_fconf(uint32_t mode)
7605 if (mode & T4_FILTER_IP_FRAGMENT)
7606 fconf |= F_FRAGMENTATION;
7608 if (mode & T4_FILTER_MPS_HIT_TYPE)
7609 fconf |= F_MPSHITTYPE;
7611 if (mode & T4_FILTER_MAC_IDX)
7612 fconf |= F_MACMATCH;
7614 if (mode & T4_FILTER_ETH_TYPE)
7615 fconf |= F_ETHERTYPE;
7617 if (mode & T4_FILTER_IP_PROTO)
7618 fconf |= F_PROTOCOL;
7620 if (mode & T4_FILTER_IP_TOS)
7623 if (mode & T4_FILTER_VLAN)
7626 if (mode & T4_FILTER_VNIC)
7629 if (mode & T4_FILTER_PORT)
7632 if (mode & T4_FILTER_FCoE)
7639 fspec_to_fconf(struct t4_filter_specification *fs)
7643 if (fs->val.frag || fs->mask.frag)
7644 fconf |= F_FRAGMENTATION;
7646 if (fs->val.matchtype || fs->mask.matchtype)
7647 fconf |= F_MPSHITTYPE;
7649 if (fs->val.macidx || fs->mask.macidx)
7650 fconf |= F_MACMATCH;
7652 if (fs->val.ethtype || fs->mask.ethtype)
7653 fconf |= F_ETHERTYPE;
7655 if (fs->val.proto || fs->mask.proto)
7656 fconf |= F_PROTOCOL;
7658 if (fs->val.tos || fs->mask.tos)
7661 if (fs->val.vlan_vld || fs->mask.vlan_vld)
7664 if (fs->val.vnic_vld || fs->mask.vnic_vld)
7667 if (fs->val.iport || fs->mask.iport)
7670 if (fs->val.fcoe || fs->mask.fcoe)
7677 get_filter_mode(struct adapter *sc, uint32_t *mode)
7682 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7687 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
7690 if (sc->params.tp.vlan_pri_map != fconf) {
7691 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
7692 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
7696 *mode = fconf_to_mode(fconf);
7698 end_synchronized_op(sc, LOCK_HELD);
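/*
 * The global filter mode is not changed while filters are already installed
 * or the TOE is active, hence the checks below: both the existing filters
 * and offloaded connections were set up against the current tuple layout.
 */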
7703 set_filter_mode(struct adapter *sc, uint32_t mode)
7708 fconf = mode_to_fconf(mode);
7710 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7715 if (sc->tids.ftids_in_use > 0) {
7721 if (uld_active(sc, ULD_TOM)) {
7727 rc = -t4_set_filter_mode(sc, fconf);
7729 end_synchronized_op(sc, LOCK_HELD);
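/*
 * Reads a filter's hit count directly out of its TCB: memory window 0 is
 * positioned over the TCB for this filter's tid and the counter is read
 * through the window.  The elided branch apparently selects between a
 * 64-bit read at offset 16 and a 32-bit read at offset 24, presumably a
 * chip-generation difference.
 */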
7733 static inline uint64_t
7734 get_filter_hits(struct adapter *sc, uint32_t fid)
7736 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7739 memwin_info(sc, 0, &mw_base, NULL);
7740 off = position_memwin(sc, 0,
7741 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
7743 hits = t4_read_reg64(sc, mw_base + off + 16);
7744 hits = be64toh(hits);
7746 hits = t4_read_reg(sc, mw_base + off + 24);
7747 hits = be32toh(hits);
7754 get_filter(struct adapter *sc, struct t4_filter *t)
7756 int i, rc, nfilters = sc->tids.nftids;
7757 struct filter_entry *f;
7759 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7764 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
7765 t->idx >= nfilters) {
7766 t->idx = 0xffffffff;
7770 f = &sc->tids.ftid_tab[t->idx];
7771 for (i = t->idx; i < nfilters; i++, f++) {
7774 t->l2tidx = f->l2t ? f->l2t->idx : 0;
7775 t->smtidx = f->smtidx;
7777 t->hits = get_filter_hits(sc, t->idx);
7779 t->hits = UINT64_MAX;
7786 t->idx = 0xffffffff;
7788 end_synchronized_op(sc, LOCK_HELD);
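/*
 * set_filter validates the request against the adapter limits and the
 * global filter mode (IPv6 filters consume four consecutive, 4-aligned
 * slots), lazily allocates the ftid table on first use, and hands the
 * filter to the firmware via set_filter_wr.  The caller then sleeps on
 * ftid_tab until the firmware's reply (handled in t4_filter_rpl) clears
 * f->pending and reports whether the filter was installed.
 */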
7793 set_filter(struct adapter *sc, struct t4_filter *t)
7795 unsigned int nfilters, nports;
7796 struct filter_entry *f;
7799 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
7803 nfilters = sc->tids.nftids;
7804 nports = sc->params.nports;
7806 if (nfilters == 0) {
7811 if (!(sc->flags & FULL_INIT_DONE)) {
7816 if (t->idx >= nfilters) {
7821 /* Validate against the global filter mode */
7822 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
7823 sc->params.tp.vlan_pri_map) {
7828 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
7833 if (t->fs.val.iport >= nports) {
7838 /* Can't specify an iq if not steering to it */
7839 if (!t->fs.dirsteer && t->fs.iq) {
7844 /* IPv6 filter idx must be 4 aligned */
7845 if (t->fs.type == 1 &&
7846 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
7851 if (sc->tids.ftid_tab == NULL) {
7852 KASSERT(sc->tids.ftids_in_use == 0,
7853 ("%s: no memory allocated but filters_in_use > 0",
7856 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
7857 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
7858 if (sc->tids.ftid_tab == NULL) {
7862 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
7865 for (i = 0; i < 4; i++) {
7866 f = &sc->tids.ftid_tab[t->idx + i];
7868 if (f->pending || f->valid) {
7877 if (t->fs.type == 0)
7881 f = &sc->tids.ftid_tab[t->idx];
7884 rc = set_filter_wr(sc, t->idx);
7886 end_synchronized_op(sc, 0);
7889 mtx_lock(&sc->tids.ftid_lock);
7891 if (f->pending == 0) {
7892 rc = f->valid ? 0 : EIO;
7896 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7897 PCATCH, "t4setfw", 0)) {
7902 mtx_unlock(&sc->tids.ftid_lock);
7908 del_filter(struct adapter *sc, struct t4_filter *t)
7910 unsigned int nfilters;
7911 struct filter_entry *f;
7914 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
7918 nfilters = sc->tids.nftids;
7920 if (nfilters == 0) {
7925 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
7926 t->idx >= nfilters) {
7931 if (!(sc->flags & FULL_INIT_DONE)) {
7936 f = &sc->tids.ftid_tab[t->idx];
7948 t->fs = f->fs; /* extra info for the caller */
7949 rc = del_filter_wr(sc, t->idx);
7953 end_synchronized_op(sc, 0);
7956 mtx_lock(&sc->tids.ftid_lock);
7958 if (f->pending == 0) {
7959 rc = f->valid ? EIO : 0;
7963 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7964 PCATCH, "t4delfw", 0)) {
7969 mtx_unlock(&sc->tids.ftid_lock);
7976 clear_filter(struct filter_entry *f)
7979 t4_l2t_release(f->l2t);
7981 bzero(f, sizeof (*f));
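/*
 * Builds and sends the FW_FILTER_WR for a filter.  Filters that rewrite
 * the destination MAC or VLAN first get an L2T switching entry; the work
 * request itself is written into the management queue, carries every
 * value/mask pair from the filter specification, and directs the reply at
 * the firmware event queue (fwq.abs_id).
 */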
7985 set_filter_wr(struct adapter *sc, int fidx)
7987 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7988 struct fw_filter_wr *fwr;
7990 struct wrq_cookie cookie;
7992 ASSERT_SYNCHRONIZED_OP(sc);
7994 if (f->fs.newdmac || f->fs.newvlan) {
7995 /* This filter needs an L2T entry; allocate one. */
7996 f->l2t = t4_l2t_alloc_switching(sc->l2t);
7999 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
8001 t4_l2t_release(f->l2t);
8007 ftid = sc->tids.ftid_base + fidx;
8009 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8012 bzero(fwr, sizeof(*fwr));
8014 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
8015 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
8017 htobe32(V_FW_FILTER_WR_TID(ftid) |
8018 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
8019 V_FW_FILTER_WR_NOREPLY(0) |
8020 V_FW_FILTER_WR_IQ(f->fs.iq));
8021 fwr->del_filter_to_l2tix =
8022 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
8023 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
8024 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
8025 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
8026 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
8027 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
8028 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
8029 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
8030 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
8031 f->fs.newvlan == VLAN_REWRITE) |
8032 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
8033 f->fs.newvlan == VLAN_REWRITE) |
8034 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
8035 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
8036 V_FW_FILTER_WR_PRIO(f->fs.prio) |
8037 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
8038 fwr->ethtype = htobe16(f->fs.val.ethtype);
8039 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
8040 fwr->frag_to_ovlan_vldm =
8041 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
8042 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
8043 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
8044 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
8045 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
8046 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
8048 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
8049 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
8050 fwr->maci_to_matchtypem =
8051 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
8052 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
8053 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
8054 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
8055 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
8056 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
8057 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
8058 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
8059 fwr->ptcl = f->fs.val.proto;
8060 fwr->ptclm = f->fs.mask.proto;
8061 fwr->ttyp = f->fs.val.tos;
8062 fwr->ttypm = f->fs.mask.tos;
8063 fwr->ivlan = htobe16(f->fs.val.vlan);
8064 fwr->ivlanm = htobe16(f->fs.mask.vlan);
8065 fwr->ovlan = htobe16(f->fs.val.vnic);
8066 fwr->ovlanm = htobe16(f->fs.mask.vnic);
8067 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
8068 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
8069 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
8070 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
8071 fwr->lp = htobe16(f->fs.val.dport);
8072 fwr->lpm = htobe16(f->fs.mask.dport);
8073 fwr->fp = htobe16(f->fs.val.sport);
8074 fwr->fpm = htobe16(f->fs.mask.sport);
8076 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
8079 sc->tids.ftids_in_use++;
8081 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8086 del_filter_wr(struct adapter *sc, int fidx)
8088 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8089 struct fw_filter_wr *fwr;
8091 struct wrq_cookie cookie;
8093 ftid = sc->tids.ftid_base + fidx;
8095 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8098 bzero(fwr, sizeof (*fwr));
8100 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
8103 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
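/*
 * Handles the CPL_SET_TCB_RPL that the firmware sends in response to a
 * filter work request.  The cookie says how the request fared: a
 * successful add records the SMT index and clears the pending bit, any
 * outcome other than a clean delete is logged as an error, and the
 * delete/error path drops ftids_in_use.  Either way, threads sleeping in
 * set_filter or del_filter are woken via ftid_tab.
 */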
8108 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8110 struct adapter *sc = iq->adapter;
8111 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
8112 unsigned int idx = GET_TID(rpl);
8114 struct filter_entry *f;
8116 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
8119 if (is_ftid(sc, idx)) {
8121 idx -= sc->tids.ftid_base;
8122 f = &sc->tids.ftid_tab[idx];
8123 rc = G_COOKIE(rpl->cookie);
8125 mtx_lock(&sc->tids.ftid_lock);
8126 if (rc == FW_FILTER_WR_FLT_ADDED) {
8127 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
8129 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
8130 f->pending = 0; /* asynchronous setup completed */
8133 if (rc != FW_FILTER_WR_FLT_DELETED) {
8134 /* Add or delete failed, display an error */
8136 "filter %u setup failed with error %u\n",
8141 sc->tids.ftids_in_use--;
8143 wakeup(&sc->tids.ftid_tab);
8144 mtx_unlock(&sc->tids.ftid_lock);
8151 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8155 if (cntxt->cid > M_CTXTQID)
8158 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8159 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8162 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8166 if (sc->flags & FW_OK) {
8167 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8174 * Read via firmware failed or wasn't even attempted. Read directly via
8177 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
8179 end_synchronized_op(sc, 0);
8184 load_fw(struct adapter *sc, struct t4_data *fw)
8189 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
8193 if (sc->flags & FULL_INIT_DONE) {
8198 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
8199 if (fw_data == NULL) {
8204 rc = copyin(fw->data, fw_data, fw->len);
8206 rc = -t4_load_fw(sc, fw_data, fw->len);
8208 free(fw_data, M_CXGBE);
8210 end_synchronized_op(sc, 0);
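/*
 * Copies a range of adapter memory out to the caller by stepping a PCIe
 * memory window across it: each pass positions the window, reads up to one
 * aperture's worth of 32-bit words into a bounce buffer, and copyout()s
 * that chunk before moving on to the next.
 */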
8215 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
8217 uint32_t addr, off, remaining, i, n;
8219 uint32_t mw_base, mw_aperture;
8223 rc = validate_mem_range(sc, mr->addr, mr->len);
8227 memwin_info(sc, win, &mw_base, &mw_aperture);
8228 buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
8230 remaining = mr->len;
8231 dst = (void *)mr->data;
8234 off = position_memwin(sc, win, addr);
8236 /* number of bytes that we'll copy in the inner loop */
8237 n = min(remaining, mw_aperture - off);
8238 for (i = 0; i < n; i += 4)
8239 *b++ = t4_read_reg(sc, mw_base + off + i);
8241 rc = copyout(buf, dst, n);
8256 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
8260 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
8263 if (i2cd->len > sizeof(i2cd->data))
8266 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
8269 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
8270 i2cd->offset, i2cd->len, &i2cd->data[0]);
8271 end_synchronized_op(sc, 0);
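/*
 * Scheduler parameters use a negative value to mean "not specified", so
 * in_range accepts a negative value as well as anything within [lo, hi].
 */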
8277 in_range(int val, int lo, int hi)
8280 return (val < 0 || (val <= hi && val >= lo));
8284 set_sched_class(struct adapter *sc, struct t4_sched_params *p)
8286 int fw_subcmd, fw_type, rc;
8288 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
8292 if (!(sc->flags & FULL_INIT_DONE)) {
8298 * Translate the cxgbetool parameters into T4 firmware parameters. (The
8299 * sub-command and type are in common locations.)
8301 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
8302 fw_subcmd = FW_SCHED_SC_CONFIG;
8303 else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
8304 fw_subcmd = FW_SCHED_SC_PARAMS;
8309 if (p->type == SCHED_CLASS_TYPE_PACKET)
8310 fw_type = FW_SCHED_TYPE_PKTSCHED;
8316 if (fw_subcmd == FW_SCHED_SC_CONFIG) {
8317 /* Vet our parameters ... */
8318 if (p->u.config.minmax < 0) {
8323 /* And pass the request to the firmware ... */
8324 rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
8328 if (fw_subcmd == FW_SCHED_SC_PARAMS) {
8334 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
8335 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
8336 else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
8337 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
8338 else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
8339 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
8345 if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
8346 fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
8347 else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
8348 fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
8354 if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
8355 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
8356 else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
8357 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
8363 if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
8364 fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
8365 else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
8366 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
8372 /* Vet our parameters ... */
8373 if (!in_range(p->u.params.channel, 0, 3) ||
8374 !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
8375 !in_range(p->u.params.minrate, 0, 10000000) ||
8376 !in_range(p->u.params.maxrate, 0, 10000000) ||
8377 !in_range(p->u.params.weight, 0, 100)) {
8383 * Translate any unset parameters into the firmware's
8384 * nomenclature and/or fail the call if the parameters
8387 if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
8388 p->u.params.channel < 0 || p->u.params.cl < 0) {
8392 if (p->u.params.minrate < 0)
8393 p->u.params.minrate = 0;
8394 if (p->u.params.maxrate < 0) {
8395 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
8396 p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
8400 p->u.params.maxrate = 0;
8402 if (p->u.params.weight < 0) {
8403 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
8407 p->u.params.weight = 0;
8409 if (p->u.params.pktsize < 0) {
8410 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
8411 p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
8415 p->u.params.pktsize = 0;
8418 /* See what the firmware thinks of the request ... */
8419 rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
8420 fw_rateunit, fw_ratemode, p->u.params.channel,
8421 p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
8422 p->u.params.weight, p->u.params.pktsize, 1);
8428 end_synchronized_op(sc, 0);
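/*
 * Binds TX queues to a scheduling class by writing the
 * FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH parameter for each affected EQ:
 * either the single queue named in the request or, if no queue is given,
 * every TX queue of the VI.  A negative class is passed to the firmware as
 * 0xffffffff, presumably meaning "detach from any class".
 */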
8433 set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
8435 struct port_info *pi = NULL;
8437 struct sge_txq *txq;
8438 uint32_t fw_mnem, fw_queue, fw_class;
8441 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
8445 if (!(sc->flags & FULL_INIT_DONE)) {
8450 if (p->port >= sc->params.nports) {
8455 /* XXX: Only supported for the main VI. */
8456 pi = sc->port[p->port];
8458 if (!in_range(p->queue, 0, vi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
8464 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
8465 * Scheduling Class in this case).
8467 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
8468 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
8469 fw_class = p->cl < 0 ? 0xffffffff : p->cl;
8472 * If p->queue is non-negative, then we're only changing the scheduling
8473 * on a single specified TX queue.
8475 if (p->queue >= 0) {
8476 txq = &sc->sge.txq[vi->first_txq + p->queue];
8477 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8478 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8484 * Change the scheduling on all the TX queues for the
8487 for_each_txq(vi, i, txq) {
8488 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
8489 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
8497 end_synchronized_op(sc, 0);
8502 t4_os_find_pci_capability(struct adapter *sc, int cap)
8506 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
8510 t4_os_pci_save_state(struct adapter *sc)
8513 struct pci_devinfo *dinfo;
8516 dinfo = device_get_ivars(dev);
8518 pci_cfg_save(dev, dinfo, 0);
8523 t4_os_pci_restore_state(struct adapter *sc)
8526 struct pci_devinfo *dinfo;
8529 dinfo = device_get_ivars(dev);
8531 pci_cfg_restore(dev, dinfo);
8536 t4_os_portmod_changed(const struct adapter *sc, int idx)
8538 struct port_info *pi = sc->port[idx];
8542 static const char *mod_str[] = {
8543 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
8546 for_each_vi(pi, v, vi) {
8547 build_medialist(pi, &vi->media);
8550 ifp = pi->vi[0].ifp;
8551 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
8552 if_printf(ifp, "transceiver unplugged.\n");
8553 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
8554 if_printf(ifp, "unknown transceiver inserted.\n");
8555 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
8556 if_printf(ifp, "unsupported transceiver inserted.\n");
8557 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
8558 if_printf(ifp, "%s transceiver inserted.\n",
8559 mod_str[pi->mod_type]);
8561 if_printf(ifp, "transceiver (type %d) inserted.\n",
8567 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
8569 struct port_info *pi = sc->port[idx];
8578 pi->linkdnrc = reason;
8580 for_each_vi(pi, v, vi) {
8586 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
8587 if_link_state_change(ifp, LINK_STATE_UP);
8589 if_link_state_change(ifp, LINK_STATE_DOWN);
8595 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
8599 sx_slock(&t4_list_lock);
8600 SLIST_FOREACH(sc, &t4_list, link) {
8602 * func should not make any assumptions about what state sc is
8603 * in - the only guarantee is that sc->sc_lock is a valid lock.
8607 sx_sunlock(&t4_list_lock);
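/*
 * The character device below is what cxgbetool drives.  A rough,
 * hypothetical userland sketch (the device node name and register offset
 * are assumptions, not taken from this file; headers and error handling
 * omitted):
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };	// illustrative offset only
 *	int fd = open("/dev/t4nex0", O_RDWR);		// assumed nexus node name
 *	if (fd != -1 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 *
 * CHELSIO_T4_GETREG and struct t4_reg (addr/size/val) are the same
 * interface handled by t4_ioctl below.
 */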
8611 t4_open(struct cdev *dev, int flags, int type, struct thread *td)
8617 t4_close(struct cdev *dev, int flags, int type, struct thread *td)
8623 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
8627 struct adapter *sc = dev->si_drv1;
8629 rc = priv_check(td, PRIV_DRIVER);
8634 case CHELSIO_T4_GETREG: {
8635 struct t4_reg *edata = (struct t4_reg *)data;
8637 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
8640 if (edata->size == 4)
8641 edata->val = t4_read_reg(sc, edata->addr);
8642 else if (edata->size == 8)
8643 edata->val = t4_read_reg64(sc, edata->addr);
8649 case CHELSIO_T4_SETREG: {
8650 struct t4_reg *edata = (struct t4_reg *)data;
8652 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
8655 if (edata->size == 4) {
8656 if (edata->val & 0xffffffff00000000)
8658 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
8659 } else if (edata->size == 8)
8660 t4_write_reg64(sc, edata->addr, edata->val);
8665 case CHELSIO_T4_REGDUMP: {
8666 struct t4_regdump *regs = (struct t4_regdump *)data;
8667 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
8670 if (regs->len < reglen) {
8671 regs->len = reglen; /* hint to the caller */
8676 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
8677 t4_get_regs(sc, regs, buf);
8678 rc = copyout(buf, regs->data, reglen);
8682 case CHELSIO_T4_GET_FILTER_MODE:
8683 rc = get_filter_mode(sc, (uint32_t *)data);
8685 case CHELSIO_T4_SET_FILTER_MODE:
8686 rc = set_filter_mode(sc, *(uint32_t *)data);
8688 case CHELSIO_T4_GET_FILTER:
8689 rc = get_filter(sc, (struct t4_filter *)data);
8691 case CHELSIO_T4_SET_FILTER:
8692 rc = set_filter(sc, (struct t4_filter *)data);
8694 case CHELSIO_T4_DEL_FILTER:
8695 rc = del_filter(sc, (struct t4_filter *)data);
8697 case CHELSIO_T4_GET_SGE_CONTEXT:
8698 rc = get_sge_context(sc, (struct t4_sge_context *)data);
8700 case CHELSIO_T4_LOAD_FW:
8701 rc = load_fw(sc, (struct t4_data *)data);
8703 case CHELSIO_T4_GET_MEM:
8704 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
8706 case CHELSIO_T4_GET_I2C:
8707 rc = read_i2c(sc, (struct t4_i2c_data *)data);
8709 case CHELSIO_T4_CLEAR_STATS: {
8711 u_int port_id = *(uint32_t *)data;
8712 struct port_info *pi;
8715 if (port_id >= sc->params.nports)
8717 pi = sc->port[port_id];
8720 t4_clr_port_stats(sc, pi->tx_chan);
8721 pi->tx_parse_error = 0;
8722 mtx_lock(&sc->regwin_lock);
8723 for_each_vi(pi, v, vi) {
8724 if (vi->flags & VI_INIT_DONE)
8725 t4_clr_vi_stats(sc, vi->viid);
8727 mtx_unlock(&sc->regwin_lock);
8730 * Since this command accepts a port, clear stats for
8731 * all VIs on this port.
8733 for_each_vi(pi, v, vi) {
8734 if (vi->flags & VI_INIT_DONE) {
8735 struct sge_rxq *rxq;
8736 struct sge_txq *txq;
8737 struct sge_wrq *wrq;
8739 if (vi->flags & VI_NETMAP)
8742 for_each_rxq(vi, i, rxq) {
8743 #if defined(INET) || defined(INET6)
8744 rxq->lro.lro_queued = 0;
8745 rxq->lro.lro_flushed = 0;
8748 rxq->vlan_extraction = 0;
8751 for_each_txq(vi, i, txq) {
8754 txq->vlan_insertion = 0;
8758 txq->txpkts0_wrs = 0;
8759 txq->txpkts1_wrs = 0;
8760 txq->txpkts0_pkts = 0;
8761 txq->txpkts1_pkts = 0;
8762 mp_ring_reset_stats(txq->r);
8766 /* nothing to clear for each ofld_rxq */
8768 for_each_ofld_txq(vi, i, wrq) {
8769 wrq->tx_wrs_direct = 0;
8770 wrq->tx_wrs_copied = 0;
8774 if (IS_MAIN_VI(vi)) {
8775 wrq = &sc->sge.ctrlq[pi->port_id];
8776 wrq->tx_wrs_direct = 0;
8777 wrq->tx_wrs_copied = 0;
8783 case CHELSIO_T4_SCHED_CLASS:
8784 rc = set_sched_class(sc, (struct t4_sched_params *)data);
8786 case CHELSIO_T4_SCHED_QUEUE:
8787 rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
8789 case CHELSIO_T4_GET_TRACER:
8790 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
8792 case CHELSIO_T4_SET_TRACER:
8793 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
8804 t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
8805 const unsigned int *pgsz_order)
8807 struct vi_info *vi = ifp->if_softc;
8808 struct adapter *sc = vi->pi->adapter;
8810 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
8811 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
8812 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
8813 V_HPZ3(pgsz_order[3]));
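/*
 * Enables or disables TOE on a VI.  Enabling makes sure the VI's (and the
 * main VI's) queues are up, activates the TOM upper-layer driver if it
 * isn't already (and opportunistically iWARP and iSCSI), and records the
 * port in offload_map; disabling clears the port's bit once no VI on the
 * port is still using the TOE.
 */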
8817 toe_capability(struct vi_info *vi, int enable)
8820 struct port_info *pi = vi->pi;
8821 struct adapter *sc = pi->adapter;
8823 ASSERT_SYNCHRONIZED_OP(sc);
8825 if (!is_offload(sc))
8829 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
8830 /* TOE is already enabled. */
8835 * We need the port's queues around so that we're able to send
8836 * and receive CPLs to/from the TOE even if the ifnet for this
8837 * port has never been UP'd administratively.
8839 if (!(vi->flags & VI_INIT_DONE)) {
8840 rc = cxgbe_init_synchronized(vi);
8844 if (!(pi->vi[0].flags & VI_INIT_DONE)) {
8845 rc = cxgbe_init_synchronized(&pi->vi[0]);
8850 if (isset(&sc->offload_map, pi->port_id)) {
8851 /* TOE is enabled on another VI of this port. */
8856 if (!uld_active(sc, ULD_TOM)) {
8857 rc = t4_activate_uld(sc, ULD_TOM);
8860 "You must kldload t4_tom.ko before trying "
8861 "to enable TOE on a cxgbe interface.\n");
8865 KASSERT(sc->tom_softc != NULL,
8866 ("%s: TOM activated but softc NULL", __func__));
8867 KASSERT(uld_active(sc, ULD_TOM),
8868 ("%s: TOM activated but flag not set", __func__));
8871 /* Activate iWARP and iSCSI too, if the modules are loaded. */
8872 if (!uld_active(sc, ULD_IWARP))
8873 (void) t4_activate_uld(sc, ULD_IWARP);
8874 if (!uld_active(sc, ULD_ISCSI))
8875 (void) t4_activate_uld(sc, ULD_ISCSI);
8878 setbit(&sc->offload_map, pi->port_id);
8882 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
8885 KASSERT(uld_active(sc, ULD_TOM),
8886 ("%s: TOM never initialized?", __func__));
8887 clrbit(&sc->offload_map, pi->port_id);
8894 * Add an upper layer driver to the global list.
8897 t4_register_uld(struct uld_info *ui)
8902 sx_xlock(&t4_uld_list_lock);
8903 SLIST_FOREACH(u, &t4_uld_list, link) {
8904 if (u->uld_id == ui->uld_id) {
8910 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
8913 sx_xunlock(&t4_uld_list_lock);
8918 t4_unregister_uld(struct uld_info *ui)
8923 sx_xlock(&t4_uld_list_lock);
8925 SLIST_FOREACH(u, &t4_uld_list, link) {
8927 if (ui->refcount > 0) {
8932 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
8938 sx_xunlock(&t4_uld_list_lock);
8943 t4_activate_uld(struct adapter *sc, int id)
8946 struct uld_info *ui;
8948 ASSERT_SYNCHRONIZED_OP(sc);
8950 if (id < 0 || id > ULD_MAX)
8952 rc = EAGAIN; /* kldload the module with this ULD and try again. */
8954 sx_slock(&t4_uld_list_lock);
8956 SLIST_FOREACH(ui, &t4_uld_list, link) {
8957 if (ui->uld_id == id) {
8958 if (!(sc->flags & FULL_INIT_DONE)) {
8959 rc = adapter_full_init(sc);
8964 rc = ui->activate(sc);
8966 setbit(&sc->active_ulds, id);
8973 sx_sunlock(&t4_uld_list_lock);
8979 t4_deactivate_uld(struct adapter *sc, int id)
8982 struct uld_info *ui;
8984 ASSERT_SYNCHRONIZED_OP(sc);
8986 if (id < 0 || id > ULD_MAX)
8990 sx_slock(&t4_uld_list_lock);
8992 SLIST_FOREACH(ui, &t4_uld_list, link) {
8993 if (ui->uld_id == id) {
8994 rc = ui->deactivate(sc);
8996 clrbit(&sc->active_ulds, id);
9003 sx_sunlock(&t4_uld_list_lock);
9009 uld_active(struct adapter *sc, int uld_id)
9012 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9014 return (isset(&sc->active_ulds, uld_id));
9019 * Come up with reasonable defaults for some of the tunables, provided they're
9020 * not set by the user (in which case we'll use the values as is).
9023 tweak_tunables(void)
9025 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
9027 if (t4_ntxq10g < 1) {
9029 t4_ntxq10g = rss_getnumbuckets();
9031 t4_ntxq10g = min(nc, NTXQ_10G);
9035 if (t4_ntxq1g < 1) {
9037 /* XXX: way too many for 1GbE? */
9038 t4_ntxq1g = rss_getnumbuckets();
9040 t4_ntxq1g = min(nc, NTXQ_1G);
9044 if (t4_nrxq10g < 1) {
9046 t4_nrxq10g = rss_getnumbuckets();
9048 t4_nrxq10g = min(nc, NRXQ_10G);
9052 if (t4_nrxq1g < 1) {
9054 /* XXX: way too many for 1GbE? */
9055 t4_nrxq1g = rss_getnumbuckets();
9057 t4_nrxq1g = min(nc, NRXQ_1G);
9062 if (t4_nofldtxq10g < 1)
9063 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
9065 if (t4_nofldtxq1g < 1)
9066 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
9068 if (t4_nofldrxq10g < 1)
9069 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
9071 if (t4_nofldrxq1g < 1)
9072 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
9074 if (t4_toecaps_allowed == -1)
9075 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
9077 if (t4_toecaps_allowed == -1)
9078 t4_toecaps_allowed = 0;
9082 if (t4_nnmtxq10g < 1)
9083 t4_nnmtxq10g = min(nc, NNMTXQ_10G);
9085 if (t4_nnmtxq1g < 1)
9086 t4_nnmtxq1g = min(nc, NNMTXQ_1G);
9088 if (t4_nnmrxq10g < 1)
9089 t4_nnmrxq10g = min(nc, NNMRXQ_10G);
9091 if (t4_nnmrxq1g < 1)
9092 t4_nnmrxq1g = min(nc, NNMRXQ_1G);
9095 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
9096 t4_tmr_idx_10g = TMR_IDX_10G;
9098 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
9099 t4_pktc_idx_10g = PKTC_IDX_10G;
9101 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
9102 t4_tmr_idx_1g = TMR_IDX_1G;
9104 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
9105 t4_pktc_idx_1g = PKTC_IDX_1G;
9107 if (t4_qsize_txq < 128)
9110 if (t4_qsize_rxq < 128)
9112 while (t4_qsize_rxq & 7)
9115 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
9118 static struct sx mlu; /* mod load unload */
9119 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
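/*
 * Module load/unload handler.  Load initializes the adapter and ULD lists
 * and the tracing facility the first time through; unload is refused while
 * any adapter or ULD is still registered, and otherwise waits up to a few
 * seconds for mbuf clusters that still reference the driver's external
 * free routine to drain before tearing the lists back down.
 */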
9122 mod_event(module_t mod, int cmd, void *arg)
9125 static int loaded = 0;
9130 if (loaded++ == 0) {
9132 sx_init(&t4_list_lock, "T4/T5 adapters");
9133 SLIST_INIT(&t4_list);
9135 sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
9136 SLIST_INIT(&t4_uld_list);
9138 t4_tracer_modload();
9146 if (--loaded == 0) {
9149 sx_slock(&t4_list_lock);
9150 if (!SLIST_EMPTY(&t4_list)) {
9152 sx_sunlock(&t4_list_lock);
9156 sx_slock(&t4_uld_list_lock);
9157 if (!SLIST_EMPTY(&t4_uld_list)) {
9159 sx_sunlock(&t4_uld_list_lock);
9160 sx_sunlock(&t4_list_lock);
9165 while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
9166 uprintf("%ju clusters with custom free routine "
9167 "still in use.\n", t4_sge_extfree_refs());
9168 pause("t4unload", 2 * hz);
9171 sx_sunlock(&t4_uld_list_lock);
9173 sx_sunlock(&t4_list_lock);
9175 if (t4_sge_extfree_refs() == 0) {
9176 t4_tracer_modunload();
9178 sx_destroy(&t4_uld_list_lock);
9180 sx_destroy(&t4_list_lock);
9185 loaded++; /* undo earlier decrement */
9196 static devclass_t t4_devclass, t5_devclass;
9197 static devclass_t cxgbe_devclass, cxl_devclass;
9198 static devclass_t vcxgbe_devclass, vcxl_devclass;
9200 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
9201 MODULE_VERSION(t4nex, 1);
9202 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
9204 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
9205 #endif /* DEV_NETMAP */
9208 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
9209 MODULE_VERSION(t5nex, 1);
9210 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
9212 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
9213 #endif /* DEV_NETMAP */
9215 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
9216 MODULE_VERSION(cxgbe, 1);
9218 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
9219 MODULE_VERSION(cxl, 1);
9221 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
9222 MODULE_VERSION(vcxgbe, 1);
9224 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
9225 MODULE_VERSION(vcxl, 1);