2 * Copyright (c) 2011 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
36 #include <sys/param.h>
39 #include <sys/kernel.h>
41 #include <sys/module.h>
42 #include <sys/malloc.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
45 #include <sys/pciio.h>
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pci_private.h>
49 #include <sys/firmware.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <net/ethernet.h>
57 #include <net/if_types.h>
58 #include <net/if_dl.h>
59 #include <net/if_vlan_var.h>
61 #include <net/rss_config.h>
63 #if defined(__i386__) || defined(__amd64__)
69 #include <ddb/db_lex.h>
72 #include "common/common.h"
73 #include "common/t4_msg.h"
74 #include "common/t4_regs.h"
75 #include "common/t4_regs_values.h"
78 #include "t4_mp_ring.h"
81 /* T4 bus driver interface */
82 static int t4_probe(device_t);
83 static int t4_attach(device_t);
84 static int t4_detach(device_t);
85 static int t4_ready(device_t);
86 static int t4_read_port_device(device_t, int, device_t *);
87 static device_method_t t4_methods[] = {
88 DEVMETHOD(device_probe, t4_probe),
89 DEVMETHOD(device_attach, t4_attach),
90 DEVMETHOD(device_detach, t4_detach),
92 DEVMETHOD(t4_is_main_ready, t4_ready),
93 DEVMETHOD(t4_read_port_device, t4_read_port_device),
97 static driver_t t4_driver = {
100 sizeof(struct adapter)
104 /* T4 port (cxgbe) interface */
105 static int cxgbe_probe(device_t);
106 static int cxgbe_attach(device_t);
107 static int cxgbe_detach(device_t);
108 device_method_t cxgbe_methods[] = {
109 DEVMETHOD(device_probe, cxgbe_probe),
110 DEVMETHOD(device_attach, cxgbe_attach),
111 DEVMETHOD(device_detach, cxgbe_detach),
114 static driver_t cxgbe_driver = {
117 sizeof(struct port_info)
120 /* T4 VI (vcxgbe) interface */
121 static int vcxgbe_probe(device_t);
122 static int vcxgbe_attach(device_t);
123 static int vcxgbe_detach(device_t);
124 static device_method_t vcxgbe_methods[] = {
125 DEVMETHOD(device_probe, vcxgbe_probe),
126 DEVMETHOD(device_attach, vcxgbe_attach),
127 DEVMETHOD(device_detach, vcxgbe_detach),
130 static driver_t vcxgbe_driver = {
133 sizeof(struct vi_info)
136 static d_ioctl_t t4_ioctl;
138 static struct cdevsw t4_cdevsw = {
139 .d_version = D_VERSION,
144 /* T5 bus driver interface */
145 static int t5_probe(device_t);
146 static device_method_t t5_methods[] = {
147 DEVMETHOD(device_probe, t5_probe),
148 DEVMETHOD(device_attach, t4_attach),
149 DEVMETHOD(device_detach, t4_detach),
151 DEVMETHOD(t4_is_main_ready, t4_ready),
152 DEVMETHOD(t4_read_port_device, t4_read_port_device),
156 static driver_t t5_driver = {
159 sizeof(struct adapter)
163 /* T5 port (cxl) interface */
164 static driver_t cxl_driver = {
167 sizeof(struct port_info)
170 /* T5 VI (vcxl) interface */
171 static driver_t vcxl_driver = {
174 sizeof(struct vi_info)
177 /* T6 bus driver interface */
178 static int t6_probe(device_t);
179 static device_method_t t6_methods[] = {
180 DEVMETHOD(device_probe, t6_probe),
181 DEVMETHOD(device_attach, t4_attach),
182 DEVMETHOD(device_detach, t4_detach),
184 DEVMETHOD(t4_is_main_ready, t4_ready),
185 DEVMETHOD(t4_read_port_device, t4_read_port_device),
189 static driver_t t6_driver = {
192 sizeof(struct adapter)
196 /* T6 port (cc) interface */
197 static driver_t cc_driver = {
200 sizeof(struct port_info)
203 /* T6 VI (vcc) interface */
204 static driver_t vcc_driver = {
207 sizeof(struct vi_info)
210 /* ifnet + media interface */
211 static void cxgbe_init(void *);
212 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
213 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
214 static void cxgbe_qflush(struct ifnet *);
215 static int cxgbe_media_change(struct ifnet *);
216 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
218 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
221 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
222 * then ADAPTER_LOCK, then t4_uld_list_lock.
224 static struct sx t4_list_lock;
225 SLIST_HEAD(, adapter) t4_list;
227 static struct sx t4_uld_list_lock;
228 SLIST_HEAD(, uld_info) t4_uld_list;
232 * Tunables. See tweak_tunables() too.
234 * Each tunable is set to a default value here if it's known at compile-time.
235 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
236 * provide a reasonable default (up to n) when the driver is loaded.
238 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
239 * T5 are under hw.cxl.
243 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
246 int t4_ntxq10g = -NTXQ_10G;
247 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
250 int t4_nrxq10g = -NRXQ_10G;
251 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
254 int t4_ntxq1g = -NTXQ_1G;
255 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
258 int t4_nrxq1g = -NRXQ_1G;
259 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
262 static int t4_ntxq_vi = -NTXQ_VI;
263 TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
266 static int t4_nrxq_vi = -NRXQ_VI;
267 TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
269 static int t4_rsrv_noflowq = 0;
270 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
273 #define NOFLDTXQ_10G 8
274 static int t4_nofldtxq10g = -NOFLDTXQ_10G;
275 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
277 #define NOFLDRXQ_10G 2
278 static int t4_nofldrxq10g = -NOFLDRXQ_10G;
279 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
281 #define NOFLDTXQ_1G 2
282 static int t4_nofldtxq1g = -NOFLDTXQ_1G;
283 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
285 #define NOFLDRXQ_1G 1
286 static int t4_nofldrxq1g = -NOFLDRXQ_1G;
287 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
289 #define NOFLDTXQ_VI 1
290 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
291 TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
293 #define NOFLDRXQ_VI 1
294 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
295 TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
300 static int t4_nnmtxq_vi = -NNMTXQ_VI;
301 TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
304 static int t4_nnmrxq_vi = -NNMRXQ_VI;
305 TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
309 * Holdoff parameters for 10G and 1G ports.
311 #define TMR_IDX_10G 1
312 int t4_tmr_idx_10g = TMR_IDX_10G;
313 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
315 #define PKTC_IDX_10G (-1)
316 int t4_pktc_idx_10g = PKTC_IDX_10G;
317 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
320 int t4_tmr_idx_1g = TMR_IDX_1G;
321 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
323 #define PKTC_IDX_1G (-1)
324 int t4_pktc_idx_1g = PKTC_IDX_1G;
325 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
328 * Size (# of entries) of each tx and rx queue.
330 unsigned int t4_qsize_txq = TX_EQ_QSIZE;
331 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
333 unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
334 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
337 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
339 int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
340 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
343 * Configuration file.
345 #define DEFAULT_CF "default"
346 #define FLASH_CF "flash"
347 #define UWIRE_CF "uwire"
348 #define FPGA_CF "fpga"
349 static char t4_cfg_file[32] = DEFAULT_CF;
350 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
353 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
354 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
355 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
356 * mark or when signalled to do so, 0 to never emit PAUSE.
358 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
359 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
362 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
363 * FEC_RESERVED respectively).
364 * -1 to run with the firmware default.
367 static int t4_fec = -1;
368 TUNABLE_INT("hw.cxgbe.fec", &t4_fec);
371 * Link autonegotiation.
372 * -1 to run with the firmware default.
376 static int t4_autoneg = -1;
377 TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);
380 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
381 * encouraged respectively).
383 static unsigned int t4_fw_install = 1;
384 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
387 * ASIC features that will be used. Disable the ones you don't want so that the
388 * chip resources aren't wasted on features that will not be used.
390 static int t4_nbmcaps_allowed = 0;
391 TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);
393 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
394 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
396 static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
397 FW_CAPS_CONFIG_SWITCH_EGRESS;
398 TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);
400 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
401 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
403 static int t4_toecaps_allowed = -1;
404 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
406 static int t4_rdmacaps_allowed = -1;
407 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
409 static int t4_cryptocaps_allowed = 0;
410 TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);
412 static int t4_iscsicaps_allowed = -1;
413 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
415 static int t4_fcoecaps_allowed = 0;
416 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
418 static int t5_write_combine = 0;
419 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
421 static int t4_num_vis = 1;
422 TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
424 /* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
425 static int vi_mac_funcs[] = {
428 FW_VI_FUNC_OPENISCSI,
434 struct intrs_and_queues {
435 uint16_t intr_type; /* INTx, MSI, or MSI-X */
436 uint16_t nirq; /* Total # of vectors */
437 uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
438 uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
439 uint16_t ntxq10g; /* # of NIC txq's for each 10G port */
440 uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */
441 uint16_t ntxq1g; /* # of NIC txq's for each 1G port */
442 uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */
443 uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */
444 uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */
445 uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */
446 uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */
447 uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */
449 /* The vcxgbe/vcxl interfaces use these and not the ones above. */
450 uint16_t ntxq_vi; /* # of NIC txq's */
451 uint16_t nrxq_vi; /* # of NIC rxq's */
452 uint16_t nofldtxq_vi; /* # of TOE txq's */
453 uint16_t nofldrxq_vi; /* # of TOE rxq's */
454 uint16_t nnmtxq_vi; /* # of netmap txq's */
455 uint16_t nnmrxq_vi; /* # of netmap rxq's */
458 struct filter_entry {
459 uint32_t valid:1; /* filter allocated and valid */
460 uint32_t locked:1; /* filter is administratively locked */
461 uint32_t pending:1; /* filter action is pending firmware reply */
462 uint32_t smtidx:8; /* Source MAC Table index for smac */
463 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
465 struct t4_filter_specification fs;
468 static void setup_memwin(struct adapter *);
469 static void position_memwin(struct adapter *, int, uint32_t);
470 static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
471 static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
473 static inline int write_via_memwin(struct adapter *, int, uint32_t,
474 const uint32_t *, int);
475 static int validate_mem_range(struct adapter *, uint32_t, int);
476 static int fwmtype_to_hwmtype(int);
477 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
479 static int fixup_devlog_params(struct adapter *);
480 static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
481 struct intrs_and_queues *);
482 static int prep_firmware(struct adapter *);
483 static int partition_resources(struct adapter *, const struct firmware *,
485 static int get_params__pre_init(struct adapter *);
486 static int get_params__post_init(struct adapter *);
487 static int set_params__post_init(struct adapter *);
488 static void t4_set_desc(struct adapter *);
489 static void build_medialist(struct port_info *, struct ifmedia *);
490 static int cxgbe_init_synchronized(struct vi_info *);
491 static int cxgbe_uninit_synchronized(struct vi_info *);
492 static void quiesce_txq(struct adapter *, struct sge_txq *);
493 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
494 static void quiesce_iq(struct adapter *, struct sge_iq *);
495 static void quiesce_fl(struct adapter *, struct sge_fl *);
496 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
497 driver_intr_t *, void *, char *);
498 static int t4_free_irq(struct adapter *, struct irq *);
499 static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
500 static void vi_refresh_stats(struct adapter *, struct vi_info *);
501 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
502 static void cxgbe_tick(void *);
503 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
504 static void cxgbe_sysctls(struct port_info *);
505 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
506 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
507 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
508 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
509 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
510 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
511 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
512 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
513 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
514 static int sysctl_fec(SYSCTL_HANDLER_ARGS);
515 static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
516 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
517 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
519 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
520 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
521 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
522 static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
523 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
524 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
525 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
526 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
527 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
528 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
529 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
530 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
531 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
532 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
533 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
534 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
535 static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
536 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
537 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
538 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
539 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
540 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
541 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
542 static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
543 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
544 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
545 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
546 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
547 static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
550 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
551 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
552 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
554 static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
555 static uint32_t mode_to_fconf(uint32_t);
556 static uint32_t mode_to_iconf(uint32_t);
557 static int check_fspec_against_fconf_iconf(struct adapter *,
558 struct t4_filter_specification *);
559 static int get_filter_mode(struct adapter *, uint32_t *);
560 static int set_filter_mode(struct adapter *, uint32_t);
561 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
562 static int get_filter(struct adapter *, struct t4_filter *);
563 static int set_filter(struct adapter *, struct t4_filter *);
564 static int del_filter(struct adapter *, struct t4_filter *);
565 static void clear_filter(struct filter_entry *);
566 static int set_filter_wr(struct adapter *, int);
567 static int del_filter_wr(struct adapter *, int);
568 static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
570 static int get_sge_context(struct adapter *, struct t4_sge_context *);
571 static int load_fw(struct adapter *, struct t4_data *);
572 static int load_cfg(struct adapter *, struct t4_data *);
573 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
574 static int read_i2c(struct adapter *, struct t4_i2c_data *);
576 static int toe_capability(struct vi_info *, int);
578 static int mod_event(module_t, int, void *);
579 static int notify_siblings(device_t, int);
585 {0xa000, "Chelsio Terminator 4 FPGA"},
586 {0x4400, "Chelsio T440-dbg"},
587 {0x4401, "Chelsio T420-CR"},
588 {0x4402, "Chelsio T422-CR"},
589 {0x4403, "Chelsio T440-CR"},
590 {0x4404, "Chelsio T420-BCH"},
591 {0x4405, "Chelsio T440-BCH"},
592 {0x4406, "Chelsio T440-CH"},
593 {0x4407, "Chelsio T420-SO"},
594 {0x4408, "Chelsio T420-CX"},
595 {0x4409, "Chelsio T420-BT"},
596 {0x440a, "Chelsio T404-BT"},
597 {0x440e, "Chelsio T440-LP-CR"},
599 {0xb000, "Chelsio Terminator 5 FPGA"},
600 {0x5400, "Chelsio T580-dbg"},
601 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
602 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
603 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
604 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
605 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
606 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
607 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
608 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
609 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
610 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
611 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
612 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
613 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
615 {0x5404, "Chelsio T520-BCH"},
616 {0x5405, "Chelsio T540-BCH"},
617 {0x5406, "Chelsio T540-CH"},
618 {0x5408, "Chelsio T520-CX"},
619 {0x540b, "Chelsio B520-SR"},
620 {0x540c, "Chelsio B504-BT"},
621 {0x540f, "Chelsio Amsterdam"},
622 {0x5413, "Chelsio T580-CHR"},
625 {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
626 {0x6400, "Chelsio T6225-DBG"}, /* 2 x 10/25G, debug */
627 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
628 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
629 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
630 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
631 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
632 {0x6410, "Chelsio T62100-DBG"}, /* 2 x 40/50/100G, debug */
637 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be
638 * exactly the same for both rxq and ofld_rxq.
640 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
641 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
643 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
646 t4_probe(device_t dev)
649 uint16_t v = pci_get_vendor(dev);
650 uint16_t d = pci_get_device(dev);
651 uint8_t f = pci_get_function(dev);
653 if (v != PCI_VENDOR_ID_CHELSIO)
656 /* Attach only to PF0 of the FPGA */
657 if (d == 0xa000 && f != 0)
660 for (i = 0; i < nitems(t4_pciids); i++) {
661 if (d == t4_pciids[i].device) {
662 device_set_desc(dev, t4_pciids[i].desc);
663 return (BUS_PROBE_DEFAULT);
671 t5_probe(device_t dev)
674 uint16_t v = pci_get_vendor(dev);
675 uint16_t d = pci_get_device(dev);
676 uint8_t f = pci_get_function(dev);
678 if (v != PCI_VENDOR_ID_CHELSIO)
681 /* Attach only to PF0 of the FPGA */
682 if (d == 0xb000 && f != 0)
685 for (i = 0; i < nitems(t5_pciids); i++) {
686 if (d == t5_pciids[i].device) {
687 device_set_desc(dev, t5_pciids[i].desc);
688 return (BUS_PROBE_DEFAULT);
696 t6_probe(device_t dev)
699 uint16_t v = pci_get_vendor(dev);
700 uint16_t d = pci_get_device(dev);
702 if (v != PCI_VENDOR_ID_CHELSIO)
705 for (i = 0; i < nitems(t6_pciids); i++) {
706 if (d == t6_pciids[i].device) {
707 device_set_desc(dev, t6_pciids[i].desc);
708 return (BUS_PROBE_DEFAULT);
716 t5_attribute_workaround(device_t dev)
722 * The T5 chips do not properly echo the No Snoop and Relaxed
723 * Ordering attributes when replying to a TLP from a Root
724 * Port. As a workaround, find the parent Root Port and
725 * disable No Snoop and Relaxed Ordering. Note that this
726 * affects all devices under this root port.
728 root_port = pci_find_pcie_root_port(dev);
729 if (root_port == NULL) {
730 device_printf(dev, "Unable to find parent root port\n");
734 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
735 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
736 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
738 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
739 device_get_nameunit(root_port));
742 static const struct devnames devnames[] = {
744 .nexus_name = "t4nex",
745 .ifnet_name = "cxgbe",
746 .vi_ifnet_name = "vcxgbe",
747 .pf03_drv_name = "t4iov",
748 .vf_nexus_name = "t4vf",
749 .vf_ifnet_name = "cxgbev"
751 .nexus_name = "t5nex",
753 .vi_ifnet_name = "vcxl",
754 .pf03_drv_name = "t5iov",
755 .vf_nexus_name = "t5vf",
756 .vf_ifnet_name = "cxlv"
758 .nexus_name = "t6nex",
760 .vi_ifnet_name = "vcc",
761 .pf03_drv_name = "t6iov",
762 .vf_nexus_name = "t6vf",
763 .vf_ifnet_name = "ccv"
768 t4_init_devnames(struct adapter *sc)
773 if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
774 sc->names = &devnames[id - CHELSIO_T4];
776 device_printf(sc->dev, "chip id %d is not supported.\n", id);
782 t4_attach(device_t dev)
785 int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
786 struct make_dev_args mda;
787 struct intrs_and_queues iaq;
791 int ofld_rqidx, ofld_tqidx;
794 int nm_rqidx, nm_tqidx;
798 sc = device_get_softc(dev);
800 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
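	/* Device IDs 0x54xx are T5 parts that need the Root Port workaround. */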
802 if ((pci_get_device(dev) & 0xff00) == 0x5400)
803 t5_attribute_workaround(dev);
804 pci_enable_busmaster(dev);
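	/*
	 * On PCIe devices: raise the max read request size to 4KB and enable
	 * relaxed ordering in the device control register.
	 */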
805 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
808 pci_set_max_read_req(dev, 4096);
809 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
810 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
811 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
813 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
816 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
817 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
819 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
820 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
821 device_get_nameunit(dev));
823 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
824 device_get_nameunit(dev));
825 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
828 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
829 TAILQ_INIT(&sc->sfl);
830 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
832 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
834 rc = t4_map_bars_0_and_4(sc);
836 goto done; /* error message displayed already */
838 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
840 /* Prepare the adapter for operation. */
841 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
842 rc = -t4_prep_adapter(sc, buf);
845 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
850 * This is the real PF# to which we're attaching. Works from within PCI
851 * passthrough environments too, where pci_get_function() could return a
852 * different PF# depending on the passthrough configuration. We need to
853 * use the real PF# in all our communication with the firmware.
855 j = t4_read_reg(sc, A_PL_WHOAMI);
856 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
859 t4_init_devnames(sc);
860 if (sc->names == NULL) {
862 goto done; /* error message displayed already */
866 * Do this really early, with the memory windows set up even before the
867 * character device. The userland tool's register i/o and mem read
868 * will work even in "recovery mode".
871 if (t4_init_devlog_params(sc, 0) == 0)
872 fixup_devlog_params(sc);
873 make_dev_args_init(&mda);
874 mda.mda_devsw = &t4_cdevsw;
875 mda.mda_uid = UID_ROOT;
876 mda.mda_gid = GID_WHEEL;
878 mda.mda_si_drv1 = sc;
879 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
881 device_printf(dev, "failed to create nexus char device: %d.\n",
884 /* Go no further if recovery mode has been requested. */
885 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
886 device_printf(dev, "recovery mode.\n");
890 #if defined(__i386__)
891 if ((cpu_feature & CPUID_CX8) == 0) {
892 device_printf(dev, "64 bit atomics not available.\n");
898 /* Prepare the firmware for operation */
899 rc = prep_firmware(sc);
901 goto done; /* error message displayed already */
903 rc = get_params__post_init(sc);
905 goto done; /* error message displayed already */
907 rc = set_params__post_init(sc);
909 goto done; /* error message displayed already */
911 rc = t4_map_bar_2(sc);
913 goto done; /* error message displayed already */
915 rc = t4_create_dma_tag(sc);
917 goto done; /* error message displayed already */
920 * Number of VIs to create per port. The first VI is the "main" regular
921 * VI for the port. The rest are additional virtual interfaces on the
922 * same physical port. Note that the main VI does not have native
923 * netmap support but the extra VIs do.
925 * Limit the number of VIs per port to the number of available
926 * MAC addresses per port.
929 num_vis = t4_num_vis;
932 if (num_vis > nitems(vi_mac_funcs)) {
933 num_vis = nitems(vi_mac_funcs);
934 device_printf(dev, "Number of VIs limited to %d\n", num_vis);
938 * First pass over all the ports - allocate VIs and initialize some
939 * basic parameters like mac address, port type, etc. We also figure
940 * out whether a port is 10G or 1G and use that information when
941 * calculating how many interrupts to attempt to allocate.
944 for_each_port(sc, i) {
945 struct port_info *pi;
946 struct link_config *lc;
948 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
951 /* These must be set before t4_port_init */
955 * XXX: vi[0] is special so we can't delay this allocation until
956 * pi->nvi's final value is known.
958 pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
962 * Allocate the "main" VI and initialize parameters
965 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
967 device_printf(dev, "unable to initialize port %d: %d\n",
969 free(pi->vi, M_CXGBE);
976 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
977 lc->requested_fc |= t4_pause_settings;
979 lc->requested_fec = t4_fec &
980 G_FW_PORT_CAP_FEC(lc->supported);
982 if (lc->supported & FW_PORT_CAP_ANEG && t4_autoneg != -1) {
983 lc->autoneg = t4_autoneg ? AUTONEG_ENABLE :
987 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
989 device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
990 free(pi->vi, M_CXGBE);
996 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
997 device_get_nameunit(dev), i);
998 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
999 sc->chan_map[pi->tx_chan] = i;
1001 pi->tc = malloc(sizeof(struct tx_sched_class) *
1002 sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK);
1004 if (port_top_speed(pi) >= 10) {
1010 pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
1011 if (pi->dev == NULL) {
1013 "failed to add device for port %d.\n", i);
1017 pi->vi[0].dev = pi->dev;
1018 device_set_softc(pi->dev, pi);
1022 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
1024 rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
1026 goto done; /* error message displayed already */
1027 if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
1030 sc->intr_type = iaq.intr_type;
1031 sc->intr_count = iaq.nirq;
1034 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
1035 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
1037 s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
1038 s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
1040 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
1041 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
1042 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
1044 if (is_offload(sc)) {
1045 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
1046 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
1048 s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
1050 s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
1053 s->neq += s->nofldtxq + s->nofldrxq;
1054 s->niq += s->nofldrxq;
1056 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1057 M_CXGBE, M_ZERO | M_WAITOK);
1058 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
1059 M_CXGBE, M_ZERO | M_WAITOK);
1064 s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
1065 s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
1067 s->neq += s->nnmtxq + s->nnmrxq;
1068 s->niq += s->nnmrxq;
1070 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1071 M_CXGBE, M_ZERO | M_WAITOK);
1072 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1073 M_CXGBE, M_ZERO | M_WAITOK);
1076 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
1078 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1080 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1082 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
1084 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
1087 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1090 t4_init_l2t(sc, M_WAITOK);
1093 * Second pass over the ports. This time we know the number of rx and
1094 * tx queues that each port should get.
1098 ofld_rqidx = ofld_tqidx = 0;
1101 nm_rqidx = nm_tqidx = 0;
1103 for_each_port(sc, i) {
1104 struct port_info *pi = sc->port[i];
1111 for_each_vi(pi, j, vi) {
1113 vi->qsize_rxq = t4_qsize_rxq;
1114 vi->qsize_txq = t4_qsize_txq;
1116 vi->first_rxq = rqidx;
1117 vi->first_txq = tqidx;
1118 if (port_top_speed(pi) >= 10) {
1119 vi->tmr_idx = t4_tmr_idx_10g;
1120 vi->pktc_idx = t4_pktc_idx_10g;
1121 vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
1122 vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
1123 vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
1125 vi->tmr_idx = t4_tmr_idx_1g;
1126 vi->pktc_idx = t4_pktc_idx_1g;
1127 vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
1128 vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
1129 vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
1134 if (j == 0 && vi->ntxq > 1)
1135 vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
1137 vi->rsrv_noflowq = 0;
1140 vi->first_ofld_rxq = ofld_rqidx;
1141 vi->first_ofld_txq = ofld_tqidx;
1142 if (port_top_speed(pi) >= 10) {
1143 vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
1144 vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
1146 vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
1149 vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
1150 vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
1152 vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
1155 ofld_rqidx += vi->nofldrxq;
1156 ofld_tqidx += vi->nofldtxq;
1160 vi->first_nm_rxq = nm_rqidx;
1161 vi->first_nm_txq = nm_tqidx;
1162 vi->nnmrxq = iaq.nnmrxq_vi;
1163 vi->nnmtxq = iaq.nnmtxq_vi;
1164 nm_rqidx += vi->nnmrxq;
1165 nm_tqidx += vi->nnmtxq;
1171 rc = t4_setup_intr_handlers(sc);
1174 "failed to setup interrupt handlers: %d\n", rc);
1178 rc = bus_generic_attach(dev);
1181 "failed to attach all child ports: %d\n", rc);
1186 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1187 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1188 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1189 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1190 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1194 notify_siblings(dev, 0);
1197 if (rc != 0 && sc->cdev) {
1198 /* cdev was created and so cxgbetool works; recover that way. */
1200 "error during attach, adapter is now in recovery mode.\n");
1205 t4_detach_common(dev);
1213 t4_ready(device_t dev)
1217 sc = device_get_softc(dev);
1218 if (sc->flags & FW_OK)
1224 t4_read_port_device(device_t dev, int port, device_t *child)
1227 struct port_info *pi;
1229 sc = device_get_softc(dev);
1230 if (port < 0 || port >= MAX_NPORTS)
1232 pi = sc->port[port];
1233 if (pi == NULL || pi->dev == NULL)
1240 notify_siblings(device_t dev, int detaching)
1246 for (i = 0; i < PCI_FUNCMAX; i++) {
1247 if (i == pci_get_function(dev))
1249 sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
1250 pci_get_slot(dev), i);
1251 if (sibling == NULL || !device_is_attached(sibling))
1254 error = T4_DETACH_CHILD(sibling);
1256 (void)T4_ATTACH_CHILD(sibling);
1267 t4_detach(device_t dev)
1272 sc = device_get_softc(dev);
1274 rc = notify_siblings(dev, 1);
1277 "failed to detach sibling devices: %d\n", rc);
1281 return (t4_detach_common(dev));
1285 t4_detach_common(device_t dev)
1288 struct port_info *pi;
1291 sc = device_get_softc(dev);
1293 if (sc->flags & FULL_INIT_DONE) {
1294 if (!(sc->flags & IS_VF))
1295 t4_intr_disable(sc);
1299 destroy_dev(sc->cdev);
1303 if (device_is_attached(dev)) {
1304 rc = bus_generic_detach(dev);
1307 "failed to detach child devices: %d\n", rc);
1312 for (i = 0; i < sc->intr_count; i++)
1313 t4_free_irq(sc, &sc->irq[i]);
1315 for (i = 0; i < MAX_NPORTS; i++) {
1318 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1320 device_delete_child(dev, pi->dev);
1322 mtx_destroy(&pi->pi_lock);
1323 free(pi->vi, M_CXGBE);
1324 free(pi->tc, M_CXGBE);
1329 if (sc->flags & FULL_INIT_DONE)
1330 adapter_full_uninit(sc);
1332 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1333 t4_fw_bye(sc, sc->mbox);
1335 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1336 pci_release_msi(dev);
1339 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1343 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1347 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1351 t4_free_l2t(sc->l2t);
1354 free(sc->sge.ofld_rxq, M_CXGBE);
1355 free(sc->sge.ofld_txq, M_CXGBE);
1358 free(sc->sge.nm_rxq, M_CXGBE);
1359 free(sc->sge.nm_txq, M_CXGBE);
1361 free(sc->irq, M_CXGBE);
1362 free(sc->sge.rxq, M_CXGBE);
1363 free(sc->sge.txq, M_CXGBE);
1364 free(sc->sge.ctrlq, M_CXGBE);
1365 free(sc->sge.iqmap, M_CXGBE);
1366 free(sc->sge.eqmap, M_CXGBE);
1367 free(sc->tids.ftid_tab, M_CXGBE);
1368 t4_destroy_dma_tag(sc);
1369 if (mtx_initialized(&sc->sc_lock)) {
1370 sx_xlock(&t4_list_lock);
1371 SLIST_REMOVE(&t4_list, sc, adapter, link);
1372 sx_xunlock(&t4_list_lock);
1373 mtx_destroy(&sc->sc_lock);
1376 callout_drain(&sc->sfl_callout);
1377 if (mtx_initialized(&sc->tids.ftid_lock))
1378 mtx_destroy(&sc->tids.ftid_lock);
1379 if (mtx_initialized(&sc->sfl_lock))
1380 mtx_destroy(&sc->sfl_lock);
1381 if (mtx_initialized(&sc->ifp_lock))
1382 mtx_destroy(&sc->ifp_lock);
1383 if (mtx_initialized(&sc->reg_lock))
1384 mtx_destroy(&sc->reg_lock);
1386 for (i = 0; i < NUM_MEMWIN; i++) {
1387 struct memwin *mw = &sc->memwin[i];
1389 if (rw_initialized(&mw->mw_lock))
1390 rw_destroy(&mw->mw_lock);
1393 bzero(sc, sizeof(*sc));
1399 cxgbe_probe(device_t dev)
1402 struct port_info *pi = device_get_softc(dev);
1404 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1405 device_set_desc_copy(dev, buf);
1407 return (BUS_PROBE_DEFAULT);
1410 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1411 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1412 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1413 #define T4_CAP_ENABLE (T4_CAP)
1416 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1421 vi->xact_addr_filt = -1;
1422 callout_init(&vi->tick, 1);
1424 /* Allocate an ifnet and set it up */
1425 ifp = if_alloc(IFT_ETHER);
1427 device_printf(dev, "Cannot allocate ifnet\n");
1433 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1434 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1436 ifp->if_init = cxgbe_init;
1437 ifp->if_ioctl = cxgbe_ioctl;
1438 ifp->if_transmit = cxgbe_transmit;
1439 ifp->if_qflush = cxgbe_qflush;
1440 ifp->if_get_counter = cxgbe_get_counter;
1442 ifp->if_capabilities = T4_CAP;
1444 if (vi->nofldrxq != 0)
1445 ifp->if_capabilities |= IFCAP_TOE;
1448 if (vi->nnmrxq != 0)
1449 ifp->if_capabilities |= IFCAP_NETMAP;
1451 ifp->if_capenable = T4_CAP_ENABLE;
1452 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1453 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
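	/* TSO limits advertised to the stack (payload size and SGL segments). */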
1455 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1456 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1457 ifp->if_hw_tsomaxsegsize = 65536;
1459 /* Initialize ifmedia for this VI */
1460 ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
1461 cxgbe_media_status);
1462 build_medialist(vi->pi, &vi->media);
1464 vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1465 EVENTHANDLER_PRI_ANY);
1467 ether_ifattach(ifp, vi->hw_addr);
1469 if (ifp->if_capabilities & IFCAP_NETMAP)
1470 cxgbe_nm_attach(vi);
1472 sb = sbuf_new_auto();
1473 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1475 if (ifp->if_capabilities & IFCAP_TOE)
1476 sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
1477 vi->nofldtxq, vi->nofldrxq);
1480 if (ifp->if_capabilities & IFCAP_NETMAP)
1481 sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
1482 vi->nnmtxq, vi->nnmrxq);
1485 device_printf(dev, "%s\n", sbuf_data(sb));
1494 cxgbe_attach(device_t dev)
1496 struct port_info *pi = device_get_softc(dev);
1497 struct adapter *sc = pi->adapter;
1501 callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1503 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1507 for_each_vi(pi, i, vi) {
1510 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
1511 if (vi->dev == NULL) {
1512 device_printf(dev, "failed to add VI %d\n", i);
1515 device_set_softc(vi->dev, vi);
1520 bus_generic_attach(dev);
1526 cxgbe_vi_detach(struct vi_info *vi)
1528 struct ifnet *ifp = vi->ifp;
1530 ether_ifdetach(ifp);
1533 EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
1535 /* Let detach proceed even if these fail. */
1537 if (ifp->if_capabilities & IFCAP_NETMAP)
1538 cxgbe_nm_detach(vi);
1540 cxgbe_uninit_synchronized(vi);
1541 callout_drain(&vi->tick);
1544 ifmedia_removeall(&vi->media);
1550 cxgbe_detach(device_t dev)
1552 struct port_info *pi = device_get_softc(dev);
1553 struct adapter *sc = pi->adapter;
1556 /* Detach the extra VIs first. */
1557 rc = bus_generic_detach(dev);
1560 device_delete_children(dev);
1562 doom_vi(sc, &pi->vi[0]);
1564 if (pi->flags & HAS_TRACEQ) {
1565 sc->traceq = -1; /* cloner should not create ifnet */
1566 t4_tracer_port_detach(sc);
1569 cxgbe_vi_detach(&pi->vi[0]);
1570 callout_drain(&pi->tick);
1572 end_synchronized_op(sc, 0);
1578 cxgbe_init(void *arg)
1580 struct vi_info *vi = arg;
1581 struct adapter *sc = vi->pi->adapter;
1583 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1585 cxgbe_init_synchronized(vi);
1586 end_synchronized_op(sc, 0);
1590 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1592 int rc = 0, mtu, flags, can_sleep;
1593 struct vi_info *vi = ifp->if_softc;
1594 struct adapter *sc = vi->pi->adapter;
1595 struct ifreq *ifr = (struct ifreq *)data;
1601 if (mtu < ETHERMIN || mtu > MAX_MTU)
1604 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1608 if (vi->flags & VI_INIT_DONE) {
1609 t4_update_fl_bufsize(ifp);
1610 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1611 rc = update_mac_settings(ifp, XGMAC_MTU);
1613 end_synchronized_op(sc, 0);
1619 rc = begin_synchronized_op(sc, vi,
1620 can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1624 if (ifp->if_flags & IFF_UP) {
1625 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1626 flags = vi->if_flags;
1627 if ((ifp->if_flags ^ flags) &
1628 (IFF_PROMISC | IFF_ALLMULTI)) {
1629 if (can_sleep == 1) {
1630 end_synchronized_op(sc, 0);
1634 rc = update_mac_settings(ifp,
1635 XGMAC_PROMISC | XGMAC_ALLMULTI);
1638 if (can_sleep == 0) {
1639 end_synchronized_op(sc, LOCK_HELD);
1643 rc = cxgbe_init_synchronized(vi);
1645 vi->if_flags = ifp->if_flags;
1646 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1647 if (can_sleep == 0) {
1648 end_synchronized_op(sc, LOCK_HELD);
1652 rc = cxgbe_uninit_synchronized(vi);
1654 end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1658 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1659 rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
1662 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1663 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1664 end_synchronized_op(sc, LOCK_HELD);
1668 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1672 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1673 if (mask & IFCAP_TXCSUM) {
1674 ifp->if_capenable ^= IFCAP_TXCSUM;
1675 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1677 if (IFCAP_TSO4 & ifp->if_capenable &&
1678 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1679 ifp->if_capenable &= ~IFCAP_TSO4;
1681 "tso4 disabled due to -txcsum.\n");
1684 if (mask & IFCAP_TXCSUM_IPV6) {
1685 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1686 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1688 if (IFCAP_TSO6 & ifp->if_capenable &&
1689 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1690 ifp->if_capenable &= ~IFCAP_TSO6;
1692 "tso6 disabled due to -txcsum6.\n");
1695 if (mask & IFCAP_RXCSUM)
1696 ifp->if_capenable ^= IFCAP_RXCSUM;
1697 if (mask & IFCAP_RXCSUM_IPV6)
1698 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1701 * Note that we leave CSUM_TSO alone (it is always set). The
1702 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1703 * sending a TSO request our way, so it's sufficient to toggle
1706 if (mask & IFCAP_TSO4) {
1707 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1708 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1709 if_printf(ifp, "enable txcsum first.\n");
1713 ifp->if_capenable ^= IFCAP_TSO4;
1715 if (mask & IFCAP_TSO6) {
1716 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1717 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1718 if_printf(ifp, "enable txcsum6 first.\n");
1722 ifp->if_capenable ^= IFCAP_TSO6;
1724 if (mask & IFCAP_LRO) {
1725 #if defined(INET) || defined(INET6)
1727 struct sge_rxq *rxq;
1729 ifp->if_capenable ^= IFCAP_LRO;
1730 for_each_rxq(vi, i, rxq) {
1731 if (ifp->if_capenable & IFCAP_LRO)
1732 rxq->iq.flags |= IQ_LRO_ENABLED;
1734 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1739 if (mask & IFCAP_TOE) {
1740 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1742 rc = toe_capability(vi, enable);
1746 ifp->if_capenable ^= mask;
1749 if (mask & IFCAP_VLAN_HWTAGGING) {
1750 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1751 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1752 rc = update_mac_settings(ifp, XGMAC_VLANEX);
1754 if (mask & IFCAP_VLAN_MTU) {
1755 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1757 /* Need to find out how to disable auto-mtu-inflation */
1759 if (mask & IFCAP_VLAN_HWTSO)
1760 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1761 if (mask & IFCAP_VLAN_HWCSUM)
1762 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1764 #ifdef VLAN_CAPABILITIES
1765 VLAN_CAPABILITIES(ifp);
1768 end_synchronized_op(sc, 0);
1774 ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
1778 struct ifi2creq i2c;
1780 rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1783 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1787 if (i2c.len > sizeof(i2c.data)) {
1791 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1794 rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
1795 i2c.offset, i2c.len, &i2c.data[0]);
1796 end_synchronized_op(sc, 0);
1798 rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1803 rc = ether_ioctl(ifp, cmd, data);
1810 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1812 struct vi_info *vi = ifp->if_softc;
1813 struct port_info *pi = vi->pi;
1814 struct adapter *sc = pi->adapter;
1815 struct sge_txq *txq;
1820 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
1822 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1827 rc = parse_pkt(sc, &m);
1828 if (__predict_false(rc != 0)) {
1829 MPASS(m == NULL); /* was freed already */
1830 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
1835 txq = &sc->sge.txq[vi->first_txq];
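	/*
	 * Hashed flows are spread across the queues after the first
	 * rsrv_noflowq queues; unhashed traffic stays on the first queue.
	 */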
1836 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1837 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1841 rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1842 if (__predict_false(rc != 0))
1849 cxgbe_qflush(struct ifnet *ifp)
1851 struct vi_info *vi = ifp->if_softc;
1852 struct sge_txq *txq;
1855 /* queues do not exist if !VI_INIT_DONE. */
1856 if (vi->flags & VI_INIT_DONE) {
1857 for_each_txq(vi, i, txq) {
1859 txq->eq.flags &= ~EQ_ENABLED;
1861 while (!mp_ring_is_idle(txq->r)) {
1862 mp_ring_check_drainage(txq->r, 0);
1871 vi_get_counter(struct ifnet *ifp, ift_counter c)
1873 struct vi_info *vi = ifp->if_softc;
1874 struct fw_vi_stats_vf *s = &vi->stats;
1876 vi_refresh_stats(vi->pi->adapter, vi);
1879 case IFCOUNTER_IPACKETS:
1880 return (s->rx_bcast_frames + s->rx_mcast_frames +
1881 s->rx_ucast_frames);
1882 case IFCOUNTER_IERRORS:
1883 return (s->rx_err_frames);
1884 case IFCOUNTER_OPACKETS:
1885 return (s->tx_bcast_frames + s->tx_mcast_frames +
1886 s->tx_ucast_frames + s->tx_offload_frames);
1887 case IFCOUNTER_OERRORS:
1888 return (s->tx_drop_frames);
1889 case IFCOUNTER_IBYTES:
1890 return (s->rx_bcast_bytes + s->rx_mcast_bytes +
1892 case IFCOUNTER_OBYTES:
1893 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
1894 s->tx_ucast_bytes + s->tx_offload_bytes);
1895 case IFCOUNTER_IMCASTS:
1896 return (s->rx_mcast_frames);
1897 case IFCOUNTER_OMCASTS:
1898 return (s->tx_mcast_frames);
1899 case IFCOUNTER_OQDROPS: {
1903 if (vi->flags & VI_INIT_DONE) {
1905 struct sge_txq *txq;
1907 for_each_txq(vi, i, txq)
1908 drops += counter_u64_fetch(txq->r->drops);
1916 return (if_get_counter_default(ifp, c));
1921 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1923 struct vi_info *vi = ifp->if_softc;
1924 struct port_info *pi = vi->pi;
1925 struct adapter *sc = pi->adapter;
1926 struct port_stats *s = &pi->stats;
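	/*
	 * Fall back to the firmware's per-VI counters when the port has extra
	 * VIs or when running as a VF; the MAC-level port statistics do not
	 * describe this ifnet alone in those cases.
	 */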
1928 if (pi->nvi > 1 || sc->flags & IS_VF)
1929 return (vi_get_counter(ifp, c));
1931 cxgbe_refresh_stats(sc, pi);
1934 case IFCOUNTER_IPACKETS:
1935 return (s->rx_frames);
1937 case IFCOUNTER_IERRORS:
1938 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
1939 s->rx_fcs_err + s->rx_len_err);
1941 case IFCOUNTER_OPACKETS:
1942 return (s->tx_frames);
1944 case IFCOUNTER_OERRORS:
1945 return (s->tx_error_frames);
1947 case IFCOUNTER_IBYTES:
1948 return (s->rx_octets);
1950 case IFCOUNTER_OBYTES:
1951 return (s->tx_octets);
1953 case IFCOUNTER_IMCASTS:
1954 return (s->rx_mcast_frames);
1956 case IFCOUNTER_OMCASTS:
1957 return (s->tx_mcast_frames);
1959 case IFCOUNTER_IQDROPS:
1960 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
1961 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
1962 s->rx_trunc3 + pi->tnl_cong_drops);
1964 case IFCOUNTER_OQDROPS: {
1968 if (vi->flags & VI_INIT_DONE) {
1970 struct sge_txq *txq;
1972 for_each_txq(vi, i, txq)
1973 drops += counter_u64_fetch(txq->r->drops);
1981 return (if_get_counter_default(ifp, c));
1986 cxgbe_media_change(struct ifnet *ifp)
1988 struct vi_info *vi = ifp->if_softc;
1990 device_printf(vi->dev, "%s unimplemented.\n", __func__);
1992 return (EOPNOTSUPP);
1996 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1998 struct vi_info *vi = ifp->if_softc;
1999 struct port_info *pi = vi->pi;
2000 struct ifmedia_entry *cur;
2001 int speed = pi->link_cfg.speed;
2003 cur = vi->media.ifm_cur;
2005 ifmr->ifm_status = IFM_AVALID;
2006 if (!pi->link_cfg.link_ok)
2009 ifmr->ifm_status |= IFM_ACTIVE;
2011 /* active and current will differ iff current media is autoselect. */
2012 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
2015 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2017 ifmr->ifm_active |= IFM_10G_T;
2018 else if (speed == 1000)
2019 ifmr->ifm_active |= IFM_1000_T;
2020 else if (speed == 100)
2021 ifmr->ifm_active |= IFM_100_TX;
2022 else if (speed == 10)
2023 ifmr->ifm_active |= IFM_10_T;
2025 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
2030 vcxgbe_probe(device_t dev)
2033 struct vi_info *vi = device_get_softc(dev);
2035 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2037 device_set_desc_copy(dev, buf);
2039 return (BUS_PROBE_DEFAULT);
2043 vcxgbe_attach(device_t dev)
2046 struct port_info *pi;
2048 int func, index, rc;
2051 vi = device_get_softc(dev);
2055 index = vi - pi->vi;
2056 KASSERT(index < nitems(vi_mac_funcs),
2057 ("%s: VI %s doesn't have a MAC func", __func__,
2058 device_get_nameunit(dev)));
2059 func = vi_mac_funcs[index];
2060 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2061 vi->hw_addr, &vi->rss_size, func, 0);
2063 device_printf(dev, "Failed to allocate virtual interface "
2064 "for port %d: %d\n", pi->port_id, -rc);
2068 if (chip_id(sc) <= CHELSIO_T5)
2069 vi->smt_idx = (rc & 0x7f) << 1;
2071 vi->smt_idx = (rc & 0x7f);
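	/* Ask the firmware where this VI's slice of the RSS table begins. */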
2073 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2074 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2075 V_FW_PARAMS_PARAM_YZ(vi->viid);
2076 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
2078 vi->rss_base = 0xffff;
2080 /* MPASS((val >> 16) == rss_size); */
2081 vi->rss_base = val & 0xffff;
2084 rc = cxgbe_vi_attach(dev, vi);
2086 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2093 vcxgbe_detach(device_t dev)
2098 vi = device_get_softc(dev);
2099 sc = vi->pi->adapter;
2103 cxgbe_vi_detach(vi);
2104 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2106 end_synchronized_op(sc, 0);
2112 t4_fatal_err(struct adapter *sc)
2114 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2115 t4_intr_disable(sc);
2116 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2117 device_get_nameunit(sc->dev));
2121 t4_add_adapter(struct adapter *sc)
2123 sx_xlock(&t4_list_lock);
2124 SLIST_INSERT_HEAD(&t4_list, sc, link);
2125 sx_xunlock(&t4_list_lock);
2129 t4_map_bars_0_and_4(struct adapter *sc)
2131 sc->regs_rid = PCIR_BAR(0);
2132 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2133 &sc->regs_rid, RF_ACTIVE);
2134 if (sc->regs_res == NULL) {
2135 device_printf(sc->dev, "cannot map registers.\n");
2138 sc->bt = rman_get_bustag(sc->regs_res);
2139 sc->bh = rman_get_bushandle(sc->regs_res);
2140 sc->mmio_len = rman_get_size(sc->regs_res);
2141 setbit(&sc->doorbells, DOORBELL_KDB);
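	/* BAR4 holds the MSI-X table for this function. */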
2143 sc->msix_rid = PCIR_BAR(4);
2144 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2145 &sc->msix_rid, RF_ACTIVE);
2146 if (sc->msix_res == NULL) {
2147 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2155 t4_map_bar_2(struct adapter *sc)
2159 * T4: only the iWARP driver uses the userspace doorbells. There is no need
2160 * to map it if RDMA is disabled.
2162 if (is_t4(sc) && sc->rdmacaps == 0)
2165 sc->udbs_rid = PCIR_BAR(2);
2166 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2167 &sc->udbs_rid, RF_ACTIVE);
2168 if (sc->udbs_res == NULL) {
2169 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2172 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2174 if (chip_id(sc) >= CHELSIO_T5) {
2175 setbit(&sc->doorbells, DOORBELL_UDB);
2176 #if defined(__i386__) || defined(__amd64__)
2177 if (t5_write_combine) {
2181 * Enable write combining on BAR2. This is the
2182 * userspace doorbell BAR and is split into 128B
2183 * (UDBS_SEG_SIZE) doorbell regions, each associated
2184 * with an egress queue. The first 64B has the doorbell
2185 * and the second 64B can be used to submit a tx work
2186 * request with an implicit doorbell.
2189 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2190 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2192 clrbit(&sc->doorbells, DOORBELL_UDB);
2193 setbit(&sc->doorbells, DOORBELL_WCWR);
2194 setbit(&sc->doorbells, DOORBELL_UDBWC);
2196 device_printf(sc->dev,
2197 "couldn't enable write combining: %d\n",
2201 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2202 t4_write_reg(sc, A_SGE_STAT_CFG,
2203 V_STATSOURCE_T5(7) | mode);
2211 struct memwin_init {
2216 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2217 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2218 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2219 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2222 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2223 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2224 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2225 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2229 setup_memwin(struct adapter *sc)
2231 const struct memwin_init *mw_init;
2238 * Read low 32b of bar0 indirectly via the hardware backdoor
2239 * mechanism. Works from within PCI passthrough environments
2240 * too, where rman_get_start() can return a different value. We
2241 * need to program the T4 memory window decoders with the actual
2242 * addresses that will be coming across the PCIe link.
2244 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2245 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2247 mw_init = &t4_memwin[0];
2249 /* T5+ use the relative offset inside the PCIe BAR */
2252 mw_init = &t5_memwin[0];
2255 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2256 rw_init(&mw->mw_lock, "memory window access");
2257 mw->mw_base = mw_init->base;
2258 mw->mw_aperture = mw_init->aperture;
2261 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2262 (mw->mw_base + bar0) | V_BIR(0) |
2263 V_WINDOW(ilog2(mw->mw_aperture) - 10));
2264 rw_wlock(&mw->mw_lock);
2265 position_memwin(sc, i, 0);
2266 rw_wunlock(&mw->mw_lock);
2270 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2274 * Positions the memory window at the given address in the card's address space.
2275 * There are some alignment requirements and the actual position may be at an
2276 * address prior to the requested address. mw->mw_curpos always has the actual
2277 * position of the window.
2280 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2286 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2287 mw = &sc->memwin[idx];
2288 rw_assert(&mw->mw_lock, RA_WLOCKED);
2292 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
2294 pf = V_PFNUM(sc->pf);
2295 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
2297 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2298 t4_write_reg(sc, reg, mw->mw_curpos | pf);
2299 t4_read_reg(sc, reg); /* flush */
2303 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2309 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2311 /* Memory can only be accessed in naturally aligned 4 byte units */
2312 if (addr & 3 || len & 3 || len <= 0)
2315 mw = &sc->memwin[idx];
2317 rw_rlock(&mw->mw_lock);
2318 mw_end = mw->mw_curpos + mw->mw_aperture;
2319 if (addr >= mw_end || addr < mw->mw_curpos) {
2320 /* Will need to reposition the window */
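		/* Upgrade to the write lock (or drop and relock) to move it. */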
2321 if (!rw_try_upgrade(&mw->mw_lock)) {
2322 rw_runlock(&mw->mw_lock);
2323 rw_wlock(&mw->mw_lock);
2325 rw_assert(&mw->mw_lock, RA_WLOCKED);
2326 position_memwin(sc, idx, addr);
2327 rw_downgrade(&mw->mw_lock);
2328 mw_end = mw->mw_curpos + mw->mw_aperture;
2330 rw_assert(&mw->mw_lock, RA_RLOCKED);
2331 while (addr < mw_end && len > 0) {
2333 v = t4_read_reg(sc, mw->mw_base + addr -
2335 *val++ = le32toh(v);
2338 t4_write_reg(sc, mw->mw_base + addr -
2339 mw->mw_curpos, htole32(v));
2344 rw_runlock(&mw->mw_lock);
2351 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2355 return (rw_via_memwin(sc, idx, addr, val, len, 0));
2359 write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
2360 const uint32_t *val, int len)
2363 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
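/* qsort(3) comparator: order t4_range entries by ascending start address. */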
2367 t4_range_cmp(const void *a, const void *b)
2369 return ((const struct t4_range *)a)->start -
2370 ((const struct t4_range *)b)->start;
2374 * Verify that the memory range specified by the addr/len pair is valid within
2375 * the card's address space.
2378 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2380 struct t4_range mem_ranges[4], *r, *next;
2381 uint32_t em, addr_len;
2382 int i, n, remaining;
2384 /* Memory can only be accessed in naturally aligned 4 byte units */
2385 if (addr & 3 || len & 3 || len <= 0)
2388 /* Enabled memories */
2389 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2393 bzero(r, sizeof(mem_ranges));
2394 if (em & F_EDRAM0_ENABLE) {
2395 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2396 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2398 r->start = G_EDRAM0_BASE(addr_len) << 20;
2399 if (addr >= r->start &&
2400 addr + len <= r->start + r->size)
2406 if (em & F_EDRAM1_ENABLE) {
2407 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2408 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2410 r->start = G_EDRAM1_BASE(addr_len) << 20;
2411 if (addr >= r->start &&
2412 addr + len <= r->start + r->size)
2418 if (em & F_EXT_MEM_ENABLE) {
2419 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2420 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2422 r->start = G_EXT_MEM_BASE(addr_len) << 20;
2423 if (addr >= r->start &&
2424 addr + len <= r->start + r->size)
2430 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2431 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2432 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2434 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2435 if (addr >= r->start &&
2436 addr + len <= r->start + r->size)
2442 MPASS(n <= nitems(mem_ranges));
2445 /* Sort and merge the ranges. */
2446 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2448 /* Start from index 0 and examine the next n - 1 entries. */
2450 for (remaining = n - 1; remaining > 0; remaining--, r++) {
2452 MPASS(r->size > 0); /* r is a valid entry. */
2454 MPASS(next->size > 0); /* and so is the next one. */
2456 while (r->start + r->size >= next->start) {
2457 /* Merge the next one into the current entry. */
2458 r->size = max(r->start + r->size,
2459 next->start + next->size) - r->start;
2460 n--; /* One fewer entry in total. */
2461 if (--remaining == 0)
2462 goto done; /* short circuit */
2465 if (next != r + 1) {
2467 * Some entries were merged into r and next
2468 * points to the first valid entry that couldn't be merged.
2471 MPASS(next->size > 0); /* must be valid */
2472 memcpy(r + 1, next, remaining * sizeof(*r));
2475 * This is so that the foo->size assertion in the
2476 * next iteration of the loop does the right
2477 * thing for entries that were pulled up and are no longer valid.
2480 MPASS(n < nitems(mem_ranges));
2481 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2482 sizeof(struct t4_range));
2487 /* Done merging the ranges. */
2490 for (i = 0; i < n; i++, r++) {
2491 if (addr >= r->start &&
2492 addr + len <= r->start + r->size)
2501 fwmtype_to_hwmtype(int mtype)
2505 case FW_MEMTYPE_EDC0:
2507 case FW_MEMTYPE_EDC1:
2509 case FW_MEMTYPE_EXTMEM:
2511 case FW_MEMTYPE_EXTMEM1:
2514 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2519 * Verify that the memory range specified by the memtype/offset/len pair is
2520 * valid and lies entirely within the memtype specified. The global address of
2521 * the start of the range is returned in addr.
2524 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2527 uint32_t em, addr_len, maddr;
2529 /* Memory can only be accessed in naturally aligned 4 byte units */
2530 if (off & 3 || len & 3 || len == 0)
2533 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2534 switch (fwmtype_to_hwmtype(mtype)) {
2536 if (!(em & F_EDRAM0_ENABLE))
2538 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2539 maddr = G_EDRAM0_BASE(addr_len) << 20;
2542 if (!(em & F_EDRAM1_ENABLE))
2544 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2545 maddr = G_EDRAM1_BASE(addr_len) << 20;
2548 if (!(em & F_EXT_MEM_ENABLE))
2550 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2551 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2554 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2556 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2557 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2563 *addr = maddr + off; /* global address */
2564 return (validate_mem_range(sc, *addr, len));
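/*
 * Convert the firmware's devlog memtype/start/size into a validated global
 * address in dparams->addr.
 */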
2568 fixup_devlog_params(struct adapter *sc)
2570 struct devlog_params *dparams = &sc->params.devlog;
2573 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2574 dparams->size, &dparams->addr);
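/*
 * Decide on an interrupt type (MSI-X, then MSI, then INTx, subject to the
 * t4_intr_types allow-list) and on the number of queues of each kind, trying
 * progressively less desirable layouts until the vector count fits what the
 * kernel is actually willing to allocate.
 */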
2580 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
2581 struct intrs_and_queues *iaq)
2583 int rc, itype, navail, nrxq10g, nrxq1g, n;
2584 int nofldrxq10g = 0, nofldrxq1g = 0;
2586 bzero(iaq, sizeof(*iaq));
2588 iaq->ntxq10g = t4_ntxq10g;
2589 iaq->ntxq1g = t4_ntxq1g;
2590 iaq->ntxq_vi = t4_ntxq_vi;
2591 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
2592 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
2593 iaq->nrxq_vi = t4_nrxq_vi;
2594 iaq->rsrv_noflowq = t4_rsrv_noflowq;
2596 if (is_offload(sc)) {
2597 iaq->nofldtxq10g = t4_nofldtxq10g;
2598 iaq->nofldtxq1g = t4_nofldtxq1g;
2599 iaq->nofldtxq_vi = t4_nofldtxq_vi;
2600 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
2601 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
2602 iaq->nofldrxq_vi = t4_nofldrxq_vi;
2606 iaq->nnmtxq_vi = t4_nnmtxq_vi;
2607 iaq->nnmrxq_vi = t4_nnmrxq_vi;
2610 for (itype = INTR_MSIX; itype; itype >>= 1) {
2612 if ((itype & t4_intr_types) == 0)
2613 continue; /* not allowed */
2615 if (itype == INTR_MSIX)
2616 navail = pci_msix_count(sc->dev);
2617 else if (itype == INTR_MSI)
2618 navail = pci_msi_count(sc->dev);
2625 iaq->intr_type = itype;
2626 iaq->intr_flags_10g = 0;
2627 iaq->intr_flags_1g = 0;
2630 * Best option: an interrupt vector for errors, one for the
2631 * firmware event queue, and one for every rxq (NIC and TOE) of
2632 * every VI. The VIs that support netmap use the same
2633 * interrupts for the NIC rx queues and the netmap rx queues
2634 * because only one set of queues is active at a time.
2636 iaq->nirq = T4_EXTRA_INTR;
2637 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2638 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2639 iaq->nirq += (n10g + n1g) * (num_vis - 1) *
2640 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */
2641 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
2642 if (iaq->nirq <= navail &&
2643 (itype != INTR_MSI || powerof2(iaq->nirq))) {
2644 iaq->intr_flags_10g = INTR_ALL;
2645 iaq->intr_flags_1g = INTR_ALL;
2649 /* Disable the VIs (and netmap) if there aren't enough intrs */
2651 device_printf(sc->dev, "virtual interfaces disabled "
2652 "because num_vis=%u with current settings "
2653 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
2654 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
2655 "nnmrxq_vi=%u) would need %u interrupts but "
2656 "only %u are available.\n", num_vis, nrxq10g,
2657 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
2658 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
2661 iaq->ntxq_vi = iaq->nrxq_vi = 0;
2662 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
2663 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
2668 * Second best option: a vector for errors, one for the firmware
2669 * event queue, and vectors for either all the NIC rx queues or
2670 * all the TOE rx queues. The queues that don't get vectors
2671 * will forward their interrupts to those that do.
2673 iaq->nirq = T4_EXTRA_INTR;
2674 if (nrxq10g >= nofldrxq10g) {
2675 iaq->intr_flags_10g = INTR_RXQ;
2676 iaq->nirq += n10g * nrxq10g;
2678 iaq->intr_flags_10g = INTR_OFLD_RXQ;
2679 iaq->nirq += n10g * nofldrxq10g;
2681 if (nrxq1g >= nofldrxq1g) {
2682 iaq->intr_flags_1g = INTR_RXQ;
2683 iaq->nirq += n1g * nrxq1g;
2685 iaq->intr_flags_1g = INTR_OFLD_RXQ;
2686 iaq->nirq += n1g * nofldrxq1g;
2688 if (iaq->nirq <= navail &&
2689 (itype != INTR_MSI || powerof2(iaq->nirq)))
2693 * Next best option: an interrupt vector for errors, one for the
2694 * firmware event queue, and at least one per main-VI. At this
2695 * point we know we'll have to downsize nrxq and/or nofldrxq to
2696 * fit what's available to us.
2698 iaq->nirq = T4_EXTRA_INTR;
2699 iaq->nirq += n10g + n1g;
2700 if (iaq->nirq <= navail) {
2701 int leftover = navail - iaq->nirq;
2704 int target = max(nrxq10g, nofldrxq10g);
2706 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2707 INTR_RXQ : INTR_OFLD_RXQ;
2710 while (n < target && leftover >= n10g) {
2715 iaq->nrxq10g = min(n, nrxq10g);
2717 iaq->nofldrxq10g = min(n, nofldrxq10g);
2722 int target = max(nrxq1g, nofldrxq1g);
2724 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2725 INTR_RXQ : INTR_OFLD_RXQ;
2728 while (n < target && leftover >= n1g) {
2733 iaq->nrxq1g = min(n, nrxq1g);
2735 iaq->nofldrxq1g = min(n, nofldrxq1g);
2739 if (itype != INTR_MSI || powerof2(iaq->nirq))
2744 * Least desirable option: one interrupt vector for everything.
2746 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2747 iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2750 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2755 if (itype == INTR_MSIX)
2756 rc = pci_alloc_msix(sc->dev, &navail);
2757 else if (itype == INTR_MSI)
2758 rc = pci_alloc_msi(sc->dev, &navail);
2761 if (navail == iaq->nirq)
2765 * Didn't get the number requested. Use whatever number
2766 * the kernel is willing to allocate (it's in navail).
2768 device_printf(sc->dev, "fewer vectors than requested, "
2769 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2770 itype, iaq->nirq, navail);
2771 pci_release_msi(sc->dev);
2775 device_printf(sc->dev,
2776 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2777 itype, rc, iaq->nirq, navail);
2780 device_printf(sc->dev,
2781 "failed to find a usable interrupt type. "
2782 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2783 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
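/*
 * Pack the per-chip FW_VERSION_{MAJOR,MINOR,MICRO,BUILD} constants into the
 * 32-bit firmware version word, and name the interface-version constants the
 * driver was built against.
 */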
2788 #define FW_VERSION(chip) ( \
2789 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2790 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2791 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2792 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2793 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
2799 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
2803 .kld_name = "t4fw_cfg",
2804 .fw_mod_name = "t4fw",
2806 .chip = FW_HDR_CHIP_T4,
2807 .fw_ver = htobe32_const(FW_VERSION(T4)),
2808 .intfver_nic = FW_INTFVER(T4, NIC),
2809 .intfver_vnic = FW_INTFVER(T4, VNIC),
2810 .intfver_ofld = FW_INTFVER(T4, OFLD),
2811 .intfver_ri = FW_INTFVER(T4, RI),
2812 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2813 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
2814 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2815 .intfver_fcoe = FW_INTFVER(T4, FCOE),
2819 .kld_name = "t5fw_cfg",
2820 .fw_mod_name = "t5fw",
2822 .chip = FW_HDR_CHIP_T5,
2823 .fw_ver = htobe32_const(FW_VERSION(T5)),
2824 .intfver_nic = FW_INTFVER(T5, NIC),
2825 .intfver_vnic = FW_INTFVER(T5, VNIC),
2826 .intfver_ofld = FW_INTFVER(T5, OFLD),
2827 .intfver_ri = FW_INTFVER(T5, RI),
2828 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2829 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
2830 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2831 .intfver_fcoe = FW_INTFVER(T5, FCOE),
2835 .kld_name = "t6fw_cfg",
2836 .fw_mod_name = "t6fw",
2838 .chip = FW_HDR_CHIP_T6,
2839 .fw_ver = htobe32_const(FW_VERSION(T6)),
2840 .intfver_nic = FW_INTFVER(T6, NIC),
2841 .intfver_vnic = FW_INTFVER(T6, VNIC),
2842 .intfver_ofld = FW_INTFVER(T6, OFLD),
2843 .intfver_ri = FW_INTFVER(T6, RI),
2844 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
2845 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
2846 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
2847 .intfver_fcoe = FW_INTFVER(T6, FCOE),
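/* Look up the compiled-in firmware information for the given chip id. */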
2852 static struct fw_info *
2853 find_fw_info(int chip)
2857 for (i = 0; i < nitems(fw_info); i++) {
2858 if (fw_info[i].chip == chip)
2859 return (&fw_info[i]);
2865 * Is the given firmware API compatible with the one the driver was compiled against?
2869 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2872 /* short circuit if it's the exact same firmware version */
2873 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2877 * XXX: Is this too conservative? Perhaps I should limit this to the
2878 * features that are supported in the driver.
2880 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2881 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2882 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2883 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2891 * The firmware in the KLD is usable, but should it be installed? This routine
2892 * explains itself in detail if it indicates the KLD firmware should be installed.
2896 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2900 if (!card_fw_usable) {
2901 reason = "incompatible or unusable";
2906 reason = "older than the version bundled with this driver";
2910 if (t4_fw_install == 2 && k != c) {
2911 reason = "different than the version bundled with this driver";
2918 if (t4_fw_install == 0) {
2919 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2920 "but the driver is prohibited from installing a different "
2921 "firmware on the card.\n",
2922 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2923 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2928 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2929 "installing firmware %u.%u.%u.%u on card.\n",
2930 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2931 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2932 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2933 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2938 * Establish contact with the firmware and determine whether we are the master
2939 * driver, and whether we are responsible for chip initialization.
2942 prep_firmware(struct adapter *sc)
2944 const struct firmware *fw = NULL, *default_cfg;
2945 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2946 enum dev_state state;
2947 struct fw_info *fw_info;
2948 struct fw_hdr *card_fw; /* fw on the card */
2949 const struct fw_hdr *kld_fw; /* fw in the KLD */
2950 const struct fw_hdr *drv_fw; /* fw header the driver was compiled against */
2953 /* Contact firmware. */
2954 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2955 if (rc < 0 || state == DEV_STATE_ERR) {
2957 device_printf(sc->dev,
2958 "failed to connect to the firmware: %d, %d.\n", rc, state);
2963 sc->flags |= MASTER_PF;
2964 else if (state == DEV_STATE_UNINIT) {
2966 * We didn't get to be the master so we definitely won't be
2967 * configuring the chip. It's a bug if someone else hasn't
2968 * configured it already.
2970 device_printf(sc->dev, "couldn't be master(%d), "
2971 "device not already initialized either(%d).\n", rc, state);
2975 /* This is the firmware whose headers the driver was compiled against */
2976 fw_info = find_fw_info(chip_id(sc));
2977 if (fw_info == NULL) {
2978 device_printf(sc->dev,
2979 "unable to look up firmware information for chip %d.\n",
2983 drv_fw = &fw_info->fw_hdr;
2986 * The firmware KLD contains many modules. The KLD name is also the
2987 * name of the module that contains the default config file.
2989 default_cfg = firmware_get(fw_info->kld_name);
2991 /* Read the header of the firmware on the card */
2992 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2993 rc = -t4_read_flash(sc, FLASH_FW_START,
2994 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2996 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2998 device_printf(sc->dev,
2999 "Unable to read card's firmware header: %d\n", rc);
3003 /* This is the firmware in the KLD */
3004 fw = firmware_get(fw_info->fw_mod_name);
3006 kld_fw = (const void *)fw->data;
3007 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3013 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3014 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3016 * Common case: the firmware on the card is an exact match and
3017 * the KLD is an exact match too, or the KLD is
3018 * absent/incompatible. Note that t4_fw_install = 2 is ignored
3019 * here -- use cxgbetool loadfw if you want to reinstall the
3020 * same firmware as the one on the card.
3022 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3023 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3024 be32toh(card_fw->fw_ver))) {
3026 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3028 device_printf(sc->dev,
3029 "failed to install firmware: %d\n", rc);
3033 /* Installed successfully, update the cached header too. */
3034 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3036 need_fw_reset = 0; /* already reset as part of load_fw */
3039 if (!card_fw_usable) {
3042 d = ntohl(drv_fw->fw_ver);
3043 c = ntohl(card_fw->fw_ver);
3044 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3046 device_printf(sc->dev, "Cannot find a usable firmware: "
3047 "fw_install %d, chip state %d, "
3048 "driver compiled with %d.%d.%d.%d, "
3049 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3050 t4_fw_install, state,
3051 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3052 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3053 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3054 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3055 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3056 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3062 if (need_fw_reset &&
3063 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3064 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3065 if (rc != ETIMEDOUT && rc != EIO)
3066 t4_fw_bye(sc, sc->mbox);
3071 rc = get_params__pre_init(sc);
3073 goto done; /* error message displayed already */
3075 /* Partition adapter resources as specified in the config file. */
3076 if (state == DEV_STATE_UNINIT) {
3078 KASSERT(sc->flags & MASTER_PF,
3079 ("%s: trying to change chip settings when not master.",
3082 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3084 goto done; /* error message displayed already */
3086 t4_tweak_chip_settings(sc);
3088 /* get basic stuff going */
3089 rc = -t4_fw_initialize(sc, sc->mbox);
3091 device_printf(sc->dev, "fw init failed: %d.\n", rc);
3095 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3100 free(card_fw, M_CXGBE);
3102 firmware_put(fw, FIRMWARE_UNLOAD);
3103 if (default_cfg != NULL)
3104 firmware_put(default_cfg, FIRMWARE_UNLOAD);
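/*
 * Shorthand for building firmware parameter identifiers: FW_PARAM_DEV for
 * device-wide parameters and FW_PARAM_PFVF for per-PF/VF ones.
 */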
3109 #define FW_PARAM_DEV(param) \
3110 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3111 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3112 #define FW_PARAM_PFVF(param) \
3113 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3114 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
3117 * Partition chip resources for use between various PFs, VFs, etc.
3120 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3121 const char *name_prefix)
3123 const struct firmware *cfg = NULL;
3125 struct fw_caps_config_cmd caps;
3126 uint32_t mtype, moff, finicsum, cfcsum;
3129 * Figure out what configuration file to use. Pick the default config
3130 * file for the card if the user hasn't specified one explicitly.
3132 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3133 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3134 /* Card specific overrides go here. */
3135 if (pci_get_device(sc->dev) == 0x440a)
3136 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3138 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3142 * We need to load another module if the profile is anything except
3143 * "default" or "flash".
3145 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3146 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3149 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3150 cfg = firmware_get(s);
3152 if (default_cfg != NULL) {
3153 device_printf(sc->dev,
3154 "unable to load module \"%s\" for "
3155 "configuration profile \"%s\", will use "
3156 "the default config file instead.\n",
3158 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3161 device_printf(sc->dev,
3162 "unable to load module \"%s\" for "
3163 "configuration profile \"%s\", will use "
3164 "the config file on the card's flash "
3165 "instead.\n", s, sc->cfg_file);
3166 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3172 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3173 default_cfg == NULL) {
3174 device_printf(sc->dev,
3175 "default config file not available, will use the config "
3176 "file on the card's flash instead.\n");
3177 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3180 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3182 const uint32_t *cfdata;
3183 uint32_t param, val, addr;
3185 KASSERT(cfg != NULL || default_cfg != NULL,
3186 ("%s: no config to upload", __func__));
3189 * Ask the firmware where it wants us to upload the config file.
3191 param = FW_PARAM_DEV(CF);
3192 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3194 /* No support for config file? Shouldn't happen. */
3195 device_printf(sc->dev,
3196 "failed to query config file location: %d.\n", rc);
3199 mtype = G_FW_PARAMS_PARAM_Y(val);
3200 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3203 * XXX: sheer laziness. We deliberately added 4 bytes of
3204 * useless stuffing/comments at the end of the config file so
3205 * it's ok to simply throw away the last remaining bytes when
3206 * the config file is not an exact multiple of 4. This also
3207 * helps with the validate_mt_off_len check.
3210 cflen = cfg->datasize & ~3;
3213 cflen = default_cfg->datasize & ~3;
3214 cfdata = default_cfg->data;
3217 if (cflen > FLASH_CFG_MAX_SIZE) {
3218 device_printf(sc->dev,
3219 "config file too long (%d, max allowed is %d). "
3220 "Will try to use the config on the card, if any.\n",
3221 cflen, FLASH_CFG_MAX_SIZE);
3222 goto use_config_on_flash;
3225 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3227 device_printf(sc->dev,
3228 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
3229 "Will try to use the config on the card, if any.\n",
3230 __func__, mtype, moff, cflen, rc);
3231 goto use_config_on_flash;
3233 write_via_memwin(sc, 2, addr, cfdata, cflen);
3235 use_config_on_flash:
3236 mtype = FW_MEMTYPE_FLASH;
3237 moff = t4_flash_cfg_addr(sc);
3240 bzero(&caps, sizeof(caps));
3241 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3242 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3243 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3244 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3245 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3246 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3248 device_printf(sc->dev,
3249 "failed to pre-process config file: %d "
3250 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
3254 finicsum = be32toh(caps.finicsum);
3255 cfcsum = be32toh(caps.cfcsum);
3256 if (finicsum != cfcsum) {
3257 device_printf(sc->dev,
3258 "WARNING: config file checksum mismatch: %08x %08x\n",
3261 sc->cfcsum = cfcsum;
3263 #define LIMIT_CAPS(x) do { \
3264 caps.x &= htobe16(t4_##x##_allowed); \
3268 * Let the firmware know what features will (not) be used so it can tune
3269 * things accordingly.
3271 LIMIT_CAPS(nbmcaps);
3272 LIMIT_CAPS(linkcaps);
3273 LIMIT_CAPS(switchcaps);
3274 LIMIT_CAPS(niccaps);
3275 LIMIT_CAPS(toecaps);
3276 LIMIT_CAPS(rdmacaps);
3277 LIMIT_CAPS(cryptocaps);
3278 LIMIT_CAPS(iscsicaps);
3279 LIMIT_CAPS(fcoecaps);
3282 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3283 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3284 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3285 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3287 device_printf(sc->dev,
3288 "failed to process config file: %d.\n", rc);
3292 firmware_put(cfg, FIRMWARE_UNLOAD);
3297 * Retrieve parameters that are needed (or nice to have) very early.
3300 get_params__pre_init(struct adapter *sc)
3303 uint32_t param[2], val[2];
3305 t4_get_version_info(sc);
3307 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3308 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3309 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3310 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3311 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3313 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3314 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3315 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3316 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3317 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3319 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3320 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3321 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3322 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3323 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3325 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3326 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3327 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3328 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3329 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3331 param[0] = FW_PARAM_DEV(PORTVEC);
3332 param[1] = FW_PARAM_DEV(CCLK);
3333 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3335 device_printf(sc->dev,
3336 "failed to query parameters (pre_init): %d.\n", rc);
3340 sc->params.portvec = val[0];
3341 sc->params.nports = bitcount32(val[0]);
3342 sc->params.vpd.cclk = val[1];
3344 /* Read device log parameters. */
3345 rc = -t4_init_devlog_params(sc, 1);
3347 fixup_devlog_params(sc);
3349 device_printf(sc->dev,
3350 "failed to get devlog parameters: %d.\n", rc);
3351 rc = 0; /* devlog isn't critical for device operation */
3358 * Retrieve various parameters that are of interest to the driver. The device
3359 * has been initialized by the firmware at this point.
3362 get_params__post_init(struct adapter *sc)
3365 uint32_t param[7], val[7];
3366 struct fw_caps_config_cmd caps;
3368 param[0] = FW_PARAM_PFVF(IQFLINT_START);
3369 param[1] = FW_PARAM_PFVF(EQ_START);
3370 param[2] = FW_PARAM_PFVF(FILTER_START);
3371 param[3] = FW_PARAM_PFVF(FILTER_END);
3372 param[4] = FW_PARAM_PFVF(L2T_START);
3373 param[5] = FW_PARAM_PFVF(L2T_END);
3374 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3376 device_printf(sc->dev,
3377 "failed to query parameters (post_init): %d.\n", rc);
3381 sc->sge.iq_start = val[0];
3382 sc->sge.eq_start = val[1];
3383 sc->tids.ftid_base = val[2];
3384 sc->tids.nftids = val[3] - val[2] + 1;
3385 sc->params.ftid_min = val[2];
3386 sc->params.ftid_max = val[3];
3387 sc->vres.l2t.start = val[4];
3388 sc->vres.l2t.size = val[5] - val[4] + 1;
3389 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3390 ("%s: L2 table size (%u) larger than expected (%u)",
3391 __func__, sc->vres.l2t.size, L2T_SIZE));
3393 /* get capabilities */
3394 bzero(&caps, sizeof(caps));
3395 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3396 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3397 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3398 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3400 device_printf(sc->dev,
3401 "failed to get card capabilities: %d.\n", rc);
3405 #define READ_CAPS(x) do { \
3406 sc->x = htobe16(caps.x); \
3409 READ_CAPS(linkcaps);
3410 READ_CAPS(switchcaps);
3413 READ_CAPS(rdmacaps);
3414 READ_CAPS(cryptocaps);
3415 READ_CAPS(iscsicaps);
3416 READ_CAPS(fcoecaps);
3418 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3419 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3420 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3421 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3422 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3424 device_printf(sc->dev,
3425 "failed to query NIC parameters: %d.\n", rc);
3428 sc->tids.etid_base = val[0];
3429 sc->params.etid_min = val[0];
3430 sc->tids.netids = val[1] - val[0] + 1;
3431 sc->params.netids = sc->tids.netids;
3432 sc->params.eo_wr_cred = val[2];
3433 sc->params.ethoffload = 1;
3437 /* query offload-related parameters */
3438 param[0] = FW_PARAM_DEV(NTID);
3439 param[1] = FW_PARAM_PFVF(SERVER_START);
3440 param[2] = FW_PARAM_PFVF(SERVER_END);
3441 param[3] = FW_PARAM_PFVF(TDDP_START);
3442 param[4] = FW_PARAM_PFVF(TDDP_END);
3443 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3444 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3446 device_printf(sc->dev,
3447 "failed to query TOE parameters: %d.\n", rc);
3450 sc->tids.ntids = val[0];
3451 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3452 sc->tids.stid_base = val[1];
3453 sc->tids.nstids = val[2] - val[1] + 1;
3454 sc->vres.ddp.start = val[3];
3455 sc->vres.ddp.size = val[4] - val[3] + 1;
3456 sc->params.ofldq_wr_cred = val[5];
3457 sc->params.offload = 1;
3460 param[0] = FW_PARAM_PFVF(STAG_START);
3461 param[1] = FW_PARAM_PFVF(STAG_END);
3462 param[2] = FW_PARAM_PFVF(RQ_START);
3463 param[3] = FW_PARAM_PFVF(RQ_END);
3464 param[4] = FW_PARAM_PFVF(PBL_START);
3465 param[5] = FW_PARAM_PFVF(PBL_END);
3466 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3468 device_printf(sc->dev,
3469 "failed to query RDMA parameters(1): %d.\n", rc);
3472 sc->vres.stag.start = val[0];
3473 sc->vres.stag.size = val[1] - val[0] + 1;
3474 sc->vres.rq.start = val[2];
3475 sc->vres.rq.size = val[3] - val[2] + 1;
3476 sc->vres.pbl.start = val[4];
3477 sc->vres.pbl.size = val[5] - val[4] + 1;
3479 param[0] = FW_PARAM_PFVF(SQRQ_START);
3480 param[1] = FW_PARAM_PFVF(SQRQ_END);
3481 param[2] = FW_PARAM_PFVF(CQ_START);
3482 param[3] = FW_PARAM_PFVF(CQ_END);
3483 param[4] = FW_PARAM_PFVF(OCQ_START);
3484 param[5] = FW_PARAM_PFVF(OCQ_END);
3485 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3487 device_printf(sc->dev,
3488 "failed to query RDMA parameters(2): %d.\n", rc);
3491 sc->vres.qp.start = val[0];
3492 sc->vres.qp.size = val[1] - val[0] + 1;
3493 sc->vres.cq.start = val[2];
3494 sc->vres.cq.size = val[3] - val[2] + 1;
3495 sc->vres.ocq.start = val[4];
3496 sc->vres.ocq.size = val[5] - val[4] + 1;
3498 param[0] = FW_PARAM_PFVF(SRQ_START);
3499 param[1] = FW_PARAM_PFVF(SRQ_END);
3500 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
3501 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
3502 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
3504 device_printf(sc->dev,
3505 "failed to query RDMA parameters(3): %d.\n", rc);
3508 sc->vres.srq.start = val[0];
3509 sc->vres.srq.size = val[1] - val[0] + 1;
3510 sc->params.max_ordird_qp = val[2];
3511 sc->params.max_ird_adapter = val[3];
3513 if (sc->iscsicaps) {
3514 param[0] = FW_PARAM_PFVF(ISCSI_START);
3515 param[1] = FW_PARAM_PFVF(ISCSI_END);
3516 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3518 device_printf(sc->dev,
3519 "failed to query iSCSI parameters: %d.\n", rc);
3522 sc->vres.iscsi.start = val[0];
3523 sc->vres.iscsi.size = val[1] - val[0] + 1;
3526 t4_init_sge_params(sc);
3529 * We've got the params we wanted to query via the firmware. Now grab
3530 * some others directly from the chip.
3532 rc = t4_read_chip_settings(sc);
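/*
 * Push a few driver preferences to the firmware after initialization.  The
 * request for encapsulated CPL messages is best effort; its result is
 * deliberately ignored.
 */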
3538 set_params__post_init(struct adapter *sc)
3540 uint32_t param, val;
3542 /* ask for encapsulated CPLs */
3543 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3545 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3550 #undef FW_PARAM_PFVF
3554 t4_set_desc(struct adapter *sc)
3557 struct adapter_params *p = &sc->params;
3559 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
3561 device_set_desc_copy(sc->dev, buf);
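/*
 * Rebuild the ifmedia list for a port from its port_type and, for pluggable
 * ports, the module type currently detected, falling back to IFM_UNKNOWN for
 * combinations the driver does not recognize.
 */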
3565 build_medialist(struct port_info *pi, struct ifmedia *media)
3571 ifmedia_removeall(media);
3573 m = IFM_ETHER | IFM_FDX;
3575 switch(pi->port_type) {
3576 case FW_PORT_TYPE_BT_XFI:
3577 case FW_PORT_TYPE_BT_XAUI:
3578 ifmedia_add(media, m | IFM_10G_T, 0, NULL);
3581 case FW_PORT_TYPE_BT_SGMII:
3582 ifmedia_add(media, m | IFM_1000_T, 0, NULL);
3583 ifmedia_add(media, m | IFM_100_TX, 0, NULL);
3584 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
3585 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
3588 case FW_PORT_TYPE_CX4:
3589 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
3590 ifmedia_set(media, m | IFM_10G_CX4);
3593 case FW_PORT_TYPE_QSFP_10G:
3594 case FW_PORT_TYPE_SFP:
3595 case FW_PORT_TYPE_FIBER_XFI:
3596 case FW_PORT_TYPE_FIBER_XAUI:
3597 switch (pi->mod_type) {
3599 case FW_PORT_MOD_TYPE_LR:
3600 ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
3601 ifmedia_set(media, m | IFM_10G_LR);
3604 case FW_PORT_MOD_TYPE_SR:
3605 ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
3606 ifmedia_set(media, m | IFM_10G_SR);
3609 case FW_PORT_MOD_TYPE_LRM:
3610 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
3611 ifmedia_set(media, m | IFM_10G_LRM);
3614 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3615 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3616 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
3617 ifmedia_set(media, m | IFM_10G_TWINAX);
3620 case FW_PORT_MOD_TYPE_NONE:
3622 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3623 ifmedia_set(media, m | IFM_NONE);
3626 case FW_PORT_MOD_TYPE_NA:
3627 case FW_PORT_MOD_TYPE_ER:
3629 device_printf(pi->dev,
3630 "unknown port_type (%d), mod_type (%d)\n",
3631 pi->port_type, pi->mod_type);
3632 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3633 ifmedia_set(media, m | IFM_UNKNOWN);
3638 case FW_PORT_TYPE_CR_QSFP:
3639 case FW_PORT_TYPE_SFP28:
3640 case FW_PORT_TYPE_KR_SFP28:
3641 switch (pi->mod_type) {
3643 case FW_PORT_MOD_TYPE_SR:
3644 ifmedia_add(media, m | IFM_25G_SR, 0, NULL);
3645 ifmedia_set(media, m | IFM_25G_SR);
3648 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3649 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3650 ifmedia_add(media, m | IFM_25G_CR, 0, NULL);
3651 ifmedia_set(media, m | IFM_25G_CR);
3654 case FW_PORT_MOD_TYPE_NONE:
3656 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3657 ifmedia_set(media, m | IFM_NONE);
3661 device_printf(pi->dev,
3662 "unknown port_type (%d), mod_type (%d)\n",
3663 pi->port_type, pi->mod_type);
3664 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3665 ifmedia_set(media, m | IFM_UNKNOWN);
3670 case FW_PORT_TYPE_QSFP:
3671 switch (pi->mod_type) {
3673 case FW_PORT_MOD_TYPE_LR:
3674 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
3675 ifmedia_set(media, m | IFM_40G_LR4);
3678 case FW_PORT_MOD_TYPE_SR:
3679 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
3680 ifmedia_set(media, m | IFM_40G_SR4);
3683 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3684 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3685 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
3686 ifmedia_set(media, m | IFM_40G_CR4);
3689 case FW_PORT_MOD_TYPE_NONE:
3691 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3692 ifmedia_set(media, m | IFM_NONE);
3696 device_printf(pi->dev,
3697 "unknown port_type (%d), mod_type (%d)\n",
3698 pi->port_type, pi->mod_type);
3699 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3700 ifmedia_set(media, m | IFM_UNKNOWN);
3705 case FW_PORT_TYPE_KR4_100G:
3706 case FW_PORT_TYPE_CR4_QSFP:
3707 switch (pi->mod_type) {
3709 case FW_PORT_MOD_TYPE_LR:
3710 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL);
3711 ifmedia_set(media, m | IFM_100G_LR4);
3714 case FW_PORT_MOD_TYPE_SR:
3715 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL);
3716 ifmedia_set(media, m | IFM_100G_SR4);
3719 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3720 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3721 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL);
3722 ifmedia_set(media, m | IFM_100G_CR4);
3725 case FW_PORT_MOD_TYPE_NONE:
3727 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3728 ifmedia_set(media, m | IFM_NONE);
3732 device_printf(pi->dev,
3733 "unknown port_type (%d), mod_type (%d)\n",
3734 pi->port_type, pi->mod_type);
3735 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3736 ifmedia_set(media, m | IFM_UNKNOWN);
3742 device_printf(pi->dev,
3743 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
3745 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3746 ifmedia_set(media, m | IFM_UNKNOWN);
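/*
 * Multicast addresses are programmed into the exact-match MAC filter this
 * many at a time; see the XGMAC_MCADDRS handling in update_mac_settings().
 */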
3753 #define FW_MAC_EXACT_CHUNK 7
3756 * Program the port's XGMAC based on parameters in ifnet. The caller also
3757 * indicates which parameters should be programmed (the rest are left alone).
3760 update_mac_settings(struct ifnet *ifp, int flags)
3763 struct vi_info *vi = ifp->if_softc;
3764 struct port_info *pi = vi->pi;
3765 struct adapter *sc = pi->adapter;
3766 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
3768 ASSERT_SYNCHRONIZED_OP(sc);
3769 KASSERT(flags, ("%s: not told what to update.", __func__));
3771 if (flags & XGMAC_MTU)
3774 if (flags & XGMAC_PROMISC)
3775 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
3777 if (flags & XGMAC_ALLMULTI)
3778 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
3780 if (flags & XGMAC_VLANEX)
3781 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
3783 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3784 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
3785 allmulti, 1, vlanex, false);
3787 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3793 if (flags & XGMAC_UCADDR) {
3794 uint8_t ucaddr[ETHER_ADDR_LEN];
3796 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3797 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
3798 ucaddr, true, true);
3801 if_printf(ifp, "change_mac failed: %d\n", rc);
3804 vi->xact_addr_filt = rc;
3809 if (flags & XGMAC_MCADDRS) {
3810 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3813 struct ifmultiaddr *ifma;
3816 if_maddr_rlock(ifp);
3817 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3818 if (ifma->ifma_addr->sa_family != AF_LINK)
3821 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3822 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3825 if (i == FW_MAC_EXACT_CHUNK) {
3826 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
3827 del, i, mcaddr, NULL, &hash, 0);
3830 for (j = 0; j < i; j++) {
3832 "failed to add mc address"
3834 "%02x:%02x:%02x rc=%d\n",
3835 mcaddr[j][0], mcaddr[j][1],
3836 mcaddr[j][2], mcaddr[j][3],
3837 mcaddr[j][4], mcaddr[j][5],
3847 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
3848 mcaddr, NULL, &hash, 0);
3851 for (j = 0; j < i; j++) {
3853 "failed to add mc address"
3855 "%02x:%02x:%02x rc=%d\n",
3856 mcaddr[j][0], mcaddr[j][1],
3857 mcaddr[j][2], mcaddr[j][3],
3858 mcaddr[j][4], mcaddr[j][5],
3865 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
3867 if_printf(ifp, "failed to set mc address hash: %d", rc);
3869 if_maddr_runlock(ifp);
3876 * {begin|end}_synchronized_op must be called from the same thread.
3879 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
3885 /* the caller thinks it's ok to sleep, but is it really? */
3886 if (flags & SLEEP_OK)
3887 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3888 "begin_synchronized_op");
3899 if (vi && IS_DOOMED(vi)) {
3909 if (!(flags & SLEEP_OK)) {
3914 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3920 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3923 sc->last_op = wmesg;
3924 sc->last_op_thr = curthread;
3925 sc->last_op_flags = flags;
3929 if (!(flags & HOLD_LOCK) || rc)
3936 * Tell if_ioctl and if_init that the VI is going away. This is
3937 * a special variant of begin_synchronized_op and must be paired with a
3938 * call to end_synchronized_op.
3941 doom_vi(struct adapter *sc, struct vi_info *vi)
3948 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
3951 sc->last_op = "t4detach";
3952 sc->last_op_thr = curthread;
3953 sc->last_op_flags = 0;
3959 * {begin|end}_synchronized_op must be called from the same thread.
3962 end_synchronized_op(struct adapter *sc, int flags)
3965 if (flags & LOCK_HELD)
3966 ADAPTER_LOCK_ASSERT_OWNED(sc);
3970 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3977 cxgbe_init_synchronized(struct vi_info *vi)
3979 struct port_info *pi = vi->pi;
3980 struct adapter *sc = pi->adapter;
3981 struct ifnet *ifp = vi->ifp;
3983 struct sge_txq *txq;
3985 ASSERT_SYNCHRONIZED_OP(sc);
3987 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3988 return (0); /* already running */
3990 if (!(sc->flags & FULL_INIT_DONE) &&
3991 ((rc = adapter_full_init(sc)) != 0))
3992 return (rc); /* error message displayed already */
3994 if (!(vi->flags & VI_INIT_DONE) &&
3995 ((rc = vi_full_init(vi)) != 0))
3996 return (rc); /* error message displayed already */
3998 rc = update_mac_settings(ifp, XGMAC_ALL);
4000 goto done; /* error message displayed already */
4002 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
4004 if_printf(ifp, "enable_vi failed: %d\n", rc);
4009 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized if this changes.
4013 for_each_txq(vi, i, txq) {
4015 txq->eq.flags |= EQ_ENABLED;
4020 * The first iq of the first port to come up is used for tracing.
4022 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
4023 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
4024 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
4025 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
4026 V_QUEUENUMBER(sc->traceq));
4027 pi->flags |= HAS_TRACEQ;
4032 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4035 if (pi->nvi > 1 || sc->flags & IS_VF)
4036 callout_reset(&vi->tick, hz, vi_tick, vi);
4038 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
4042 cxgbe_uninit_synchronized(vi);
4051 cxgbe_uninit_synchronized(struct vi_info *vi)
4053 struct port_info *pi = vi->pi;
4054 struct adapter *sc = pi->adapter;
4055 struct ifnet *ifp = vi->ifp;
4057 struct sge_txq *txq;
4059 ASSERT_SYNCHRONIZED_OP(sc);
4061 if (!(vi->flags & VI_INIT_DONE)) {
4062 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
4063 ("uninited VI is running"));
4068 * Disable the VI so that all its data in either direction is discarded
4069 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
4070 * tick) intact as the TP can deliver negative advice or data that it's
4071 * holding in its RAM (for an offloaded connection) even after the VI is disabled.
4074 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
4076 if_printf(ifp, "disable_vi failed: %d\n", rc);
4080 for_each_txq(vi, i, txq) {
4082 txq->eq.flags &= ~EQ_ENABLED;
4087 if (pi->nvi > 1 || sc->flags & IS_VF)
4088 callout_stop(&vi->tick);
4090 callout_stop(&pi->tick);
4091 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4095 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4097 if (pi->up_vis > 0) {
4103 pi->link_cfg.link_ok = 0;
4104 pi->link_cfg.speed = 0;
4105 pi->link_cfg.link_down_rc = 255;
4106 t4_os_link_changed(sc, pi->port_id, 0);
4112 * It is ok for this function to fail midway and return right away. t4_detach
4113 * will walk the entire sc->irq list and clean up whatever is valid.
4116 t4_setup_intr_handlers(struct adapter *sc)
4118 int rc, rid, p, q, v;
4121 struct port_info *pi;
4123 struct sge *sge = &sc->sge;
4124 struct sge_rxq *rxq;
4126 struct sge_ofld_rxq *ofld_rxq;
4129 struct sge_nm_rxq *nm_rxq;
4132 int nbuckets = rss_getnumbuckets();
4139 rid = sc->intr_type == INTR_INTX ? 0 : 1;
4140 if (sc->intr_count == 1)
4141 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
4143 /* Multiple interrupts. */
4144 if (sc->flags & IS_VF)
4145 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
4146 ("%s: too few intr.", __func__));
4148 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
4149 ("%s: too few intr.", __func__));
4151 /* The first one is always error intr on PFs */
4152 if (!(sc->flags & IS_VF)) {
4153 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
4160 /* The second one is always the firmware event queue (first on VFs) */
4161 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
4167 for_each_port(sc, p) {
4169 for_each_vi(pi, v, vi) {
4170 vi->first_intr = rid - 1;
4172 if (vi->nnmrxq > 0) {
4173 int n = max(vi->nrxq, vi->nnmrxq);
4175 MPASS(vi->flags & INTR_RXQ);
4177 rxq = &sge->rxq[vi->first_rxq];
4179 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
4181 for (q = 0; q < n; q++) {
4182 snprintf(s, sizeof(s), "%x%c%x", p,
4188 irq->nm_rxq = nm_rxq++;
4190 rc = t4_alloc_irq(sc, irq, rid,
4191 t4_vi_intr, irq, s);
4198 } else if (vi->flags & INTR_RXQ) {
4199 for_each_rxq(vi, q, rxq) {
4200 snprintf(s, sizeof(s), "%x%c%x", p,
4202 rc = t4_alloc_irq(sc, irq, rid,
4207 bus_bind_intr(sc->dev, irq->res,
4208 rss_getcpu(q % nbuckets));
4216 if (vi->flags & INTR_OFLD_RXQ) {
4217 for_each_ofld_rxq(vi, q, ofld_rxq) {
4218 snprintf(s, sizeof(s), "%x%c%x", p,
4220 rc = t4_alloc_irq(sc, irq, rid,
4221 t4_intr, ofld_rxq, s);
4232 MPASS(irq == &sc->irq[sc->intr_count]);
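/*
 * One-time adapter-wide initialization: set up the queues that belong to the
 * adapter itself, create the driver taskqueues, and (when the kernel's RSS
 * support is in use) write the kernel-provided RSS key to the chip,
 * byte-reversed into the order the hardware expects.
 */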
4238 adapter_full_init(struct adapter *sc)
4242 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4243 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4246 ASSERT_SYNCHRONIZED_OP(sc);
4247 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4248 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
4249 ("%s: FULL_INIT_DONE already", __func__));
4252 * queues that belong to the adapter (not any particular port).
4254 rc = t4_setup_adapter_queues(sc);
4258 for (i = 0; i < nitems(sc->tq); i++) {
4259 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
4260 taskqueue_thread_enqueue, &sc->tq[i]);
4261 if (sc->tq[i] == NULL) {
4262 device_printf(sc->dev,
4263 "failed to allocate task queue %d\n", i);
4267 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
4268 device_get_nameunit(sc->dev), i);
4271 MPASS(RSS_KEYSIZE == 40);
4272 rss_getkey((void *)&raw_rss_key[0]);
4273 for (i = 0; i < nitems(rss_key); i++) {
4274 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
4276 t4_write_rss_key(sc, &rss_key[0], -1);
4279 if (!(sc->flags & IS_VF))
4281 sc->flags |= FULL_INIT_DONE;
4284 adapter_full_uninit(sc);
4290 adapter_full_uninit(struct adapter *sc)
4294 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4296 t4_teardown_adapter_queues(sc);
4298 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
4299 taskqueue_free(sc->tq[i]);
4303 sc->flags &= ~FULL_INIT_DONE;
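/* Kernel RSS hash types that the VI RSS configuration below can express. */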
4309 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
4310 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
4311 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
4312 RSS_HASHTYPE_RSS_UDP_IPV6)
4314 /* Translates kernel hash types to hardware. */
4316 hashconfig_to_hashen(int hashconfig)
4320 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
4321 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
4322 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
4323 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
4324 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
4325 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4326 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4328 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
4329 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4330 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4332 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
4333 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4334 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
4335 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4340 /* Translates hardware hash types to kernel. */
4342 hashen_to_hashconfig(int hashen)
4346 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
4348 * If UDP hashing was enabled it must have been enabled for
4349 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
4350 * enabling any 4-tuple hash is a nonsensical configuration.
4352 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4353 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
4355 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4356 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
4357 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4358 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
4360 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4361 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
4362 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4363 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
4364 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
4365 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
4366 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
4367 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
4369 return (hashconfig);
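/*
 * Per-VI initialization: allocate the VI's queues, fill and program its RSS
 * indirection table (mapping kernel RSS buckets to rx queue absolute ids when
 * kernel RSS is in use), and enable the hash types, warning when the global
 * RSS configuration cannot be honored exactly.
 */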
4374 vi_full_init(struct vi_info *vi)
4376 struct adapter *sc = vi->pi->adapter;
4377 struct ifnet *ifp = vi->ifp;
4379 struct sge_rxq *rxq;
4380 int rc, i, j, hashen;
4382 int nbuckets = rss_getnumbuckets();
4383 int hashconfig = rss_gethashconfig();
4387 ASSERT_SYNCHRONIZED_OP(sc);
4388 KASSERT((vi->flags & VI_INIT_DONE) == 0,
4389 ("%s: VI_INIT_DONE already", __func__));
4391 sysctl_ctx_init(&vi->ctx);
4392 vi->flags |= VI_SYSCTL_CTX;
4395 * Allocate tx/rx/fl queues for this VI.
4397 rc = t4_setup_vi_queues(vi);
4399 goto done; /* error message displayed already */
4402 * Set up RSS for this VI. Save a copy of the RSS table for later use.
4404 if (vi->nrxq > vi->rss_size) {
4405 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
4406 "some queues will never receive traffic.\n", vi->nrxq,
4408 } else if (vi->rss_size % vi->nrxq) {
4409 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
4410 "expect uneven traffic distribution.\n", vi->nrxq,
4414 if (vi->nrxq != nbuckets) {
4415 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
4416 "performance will be impacted.\n", vi->nrxq, nbuckets);
4419 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
4420 for (i = 0; i < vi->rss_size;) {
4422 j = rss_get_indirection_to_bucket(i);
4424 rxq = &sc->sge.rxq[vi->first_rxq + j];
4425 rss[i++] = rxq->iq.abs_id;
4427 for_each_rxq(vi, j, rxq) {
4428 rss[i++] = rxq->iq.abs_id;
4429 if (i == vi->rss_size)
4435 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
4438 if_printf(ifp, "rss_config failed: %d\n", rc);
4443 hashen = hashconfig_to_hashen(hashconfig);
4446 * We may have had to enable some hashes even though the global config
4447 * wants them disabled. This is a potential problem that must be
4448 * reported to the user.
4450 extra = hashen_to_hashconfig(hashen) ^ hashconfig;
4453 * If we consider only the supported hash types, then the enabled hashes
4454 * are a superset of the requested hashes. In other words, there cannot
4455 * be any supported hash that was requested but not enabled, but there
4456 * can be hashes that were not requested but had to be enabled.
4458 extra &= SUPPORTED_RSS_HASHTYPES;
4459 MPASS((extra & hashconfig) == 0);
4463 "global RSS config (0x%x) cannot be accommodated.\n",
4466 if (extra & RSS_HASHTYPE_RSS_IPV4)
4467 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
4468 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
4469 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
4470 if (extra & RSS_HASHTYPE_RSS_IPV6)
4471 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
4472 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
4473 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
4474 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
4475 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
4476 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
4477 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
4479 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
4480 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
4481 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4482 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
4484 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
4486 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
4491 vi->flags |= VI_INIT_DONE;
4503 vi_full_uninit(struct vi_info *vi)
4505 struct port_info *pi = vi->pi;
4506 struct adapter *sc = pi->adapter;
4508 struct sge_rxq *rxq;
4509 struct sge_txq *txq;
4511 struct sge_ofld_rxq *ofld_rxq;
4512 struct sge_wrq *ofld_txq;
4515 if (vi->flags & VI_INIT_DONE) {
4517 /* Need to quiesce queues. */
4519 /* XXX: Only for the first VI? */
4520 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
4521 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
4523 for_each_txq(vi, i, txq) {
4524 quiesce_txq(sc, txq);
4528 for_each_ofld_txq(vi, i, ofld_txq) {
4529 quiesce_wrq(sc, ofld_txq);
4533 for_each_rxq(vi, i, rxq) {
4534 quiesce_iq(sc, &rxq->iq);
4535 quiesce_fl(sc, &rxq->fl);
4539 for_each_ofld_rxq(vi, i, ofld_rxq) {
4540 quiesce_iq(sc, &ofld_rxq->iq);
4541 quiesce_fl(sc, &ofld_rxq->fl);
4544 free(vi->rss, M_CXGBE);
4545 free(vi->nm_rss, M_CXGBE);
4548 t4_teardown_vi_queues(vi);
4549 vi->flags &= ~VI_INIT_DONE;
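/*
 * Quiesce a tx queue: wait for the software mp_ring to drain, then for the
 * hardware to consume every posted descriptor (the status page cidx catches
 * up with pidx), and finally for the driver to reclaim them all.
 */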
4555 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
4557 struct sge_eq *eq = &txq->eq;
4558 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
4560 (void) sc; /* unused */
4564 MPASS((eq->flags & EQ_ENABLED) == 0);
4568 /* Wait for the mp_ring to empty. */
4569 while (!mp_ring_is_idle(txq->r)) {
4570 mp_ring_check_drainage(txq->r, 0);
4571 pause("rquiesce", 1);
4574 /* Then wait for the hardware to finish. */
4575 while (spg->cidx != htobe16(eq->pidx))
4576 pause("equiesce", 1);
4578 /* Finally, wait for the driver to reclaim all descriptors. */
4579 while (eq->cidx != eq->pidx)
4580 pause("dquiesce", 1);
4584 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
4591 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
4593 (void) sc; /* unused */
4595 /* Synchronize with the interrupt handler */
4596 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
4601 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
4603 mtx_lock(&sc->sfl_lock);
4605 fl->flags |= FL_DOOMED;
4607 callout_stop(&sc->sfl_callout);
4608 mtx_unlock(&sc->sfl_lock);
4610 KASSERT((fl->flags & FL_STARVING) == 0,
4611 ("%s: still starving", __func__));
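/*
 * Allocate the bus IRQ resource for 'rid', hook up 'handler', and record the
 * resource, tag, and rid in 'irq' so that t4_free_irq() can undo all of it.
 */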
4615 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
4616 driver_intr_t *handler, void *arg, char *name)
4621 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
4622 RF_SHAREABLE | RF_ACTIVE);
4623 if (irq->res == NULL) {
4624 device_printf(sc->dev,
4625 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
4629 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
4630 NULL, handler, arg, &irq->tag);
4632 device_printf(sc->dev,
4633 "failed to setup interrupt for rid %d, name %s: %d\n",
4636 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
4642 t4_free_irq(struct adapter *sc, struct irq *irq)
4645 bus_teardown_intr(sc->dev, irq->res, irq->tag);
4647 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
4649 bzero(irq, sizeof(*irq));
4655 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
4658 regs->version = chip_id(sc) | chip_rev(sc) << 10;
4659 t4_get_regs(sc, buf, regs->len);
4662 #define A_PL_INDIR_CMD 0x1f8
4664 #define S_PL_AUTOINC 31
4665 #define M_PL_AUTOINC 0x1U
4666 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
4667 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
4669 #define S_PL_VFID 20
4670 #define M_PL_VFID 0xffU
4671 #define V_PL_VFID(x) ((x) << S_PL_VFID)
4672 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
4675 #define M_PL_ADDR 0xfffffU
4676 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
4677 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
4679 #define A_PL_INDIR_DATA 0x1fc
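/*
 * Read one 64-bit VF MPS statistic as two 32-bit halves.  A VF reads its own
 * registers directly; a PF reaches the VF's copy through the PL indirect
 * window defined above, with auto-increment supplying the second half.
 */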
4682 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
4686 mtx_assert(&sc->reg_lock, MA_OWNED);
4687 if (sc->flags & IS_VF) {
4688 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
4689 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
4691 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4692 V_PL_VFID(G_FW_VIID_VIN(viid)) |
4693 V_PL_ADDR(VF_MPS_REG(reg)));
4694 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
4695 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
4697 return (((uint64_t)stats[1]) << 32 | stats[0]);
4701 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
4702 struct fw_vi_stats_vf *stats)
4705 #define GET_STAT(name) \
4706 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
4708 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
4709 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
4710 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
4711 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
4712 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
4713 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
4714 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
4715 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
4716 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
4717 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
4718 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
4719 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
4720 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
4721 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
4722 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
4723 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
4729 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
4733 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
4734 V_PL_VFID(G_FW_VIID_VIN(viid)) |
4735 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
4736 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
4737 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
4738 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
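/*
 * Refresh the cached VI statistics, at most once every 250ms.  The reads
 * go through the MPS VF stat registers and are serialized by reg_lock.
 */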
4742 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
4745 const struct timeval interval = {0, 250000}; /* 250ms */
4747 if (!(vi->flags & VI_INIT_DONE))
4751 timevalsub(&tv, &interval);
4752 if (timevalcmp(&tv, &vi->last_refreshed, <))
4755 mtx_lock(&sc->reg_lock);
4756 t4_get_vi_stats(sc, vi->viid, &vi->stats);
4757 getmicrotime(&vi->last_refreshed);
4758 mtx_unlock(&sc->reg_lock);
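/*
 * Refresh the cached port statistics, at most once every 250ms.  The MPS
 * port counters come from t4_get_port_stats(); tunnel congestion drops are
 * summed from the per-channel TP MIB counters for the channels in this
 * port's rx channel map.
 */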
4762 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4765 u_int v, tnl_cong_drops;
4767 const struct timeval interval = {0, 250000}; /* 250ms */
4770 timevalsub(&tv, &interval);
4771 if (timevalcmp(&tv, &pi->last_refreshed, <))
4775 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
4776 for (i = 0; i < sc->chip_params->nchan; i++) {
4777 if (pi->rx_chan_map & (1 << i)) {
4778 mtx_lock(&sc->reg_lock);
4779 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4780 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4781 mtx_unlock(&sc->reg_lock);
4782 tnl_cong_drops += v;
4785 pi->tnl_cong_drops = tnl_cong_drops;
4786 getmicrotime(&pi->last_refreshed);
4790 cxgbe_tick(void *arg)
4792 struct port_info *pi = arg;
4793 struct adapter *sc = pi->adapter;
4795 PORT_LOCK_ASSERT_OWNED(pi);
4796 cxgbe_refresh_stats(sc, pi);
4798 callout_schedule(&pi->tick, hz);
4804 struct vi_info *vi = arg;
4805 struct adapter *sc = vi->pi->adapter;
4807 vi_refresh_stats(sc, vi);
4809 callout_schedule(&vi->tick, hz);
4813 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4817 if (arg != ifp || ifp->if_type != IFT_ETHER)
4820 vlan = VLAN_DEVAT(ifp, vid);
4821 VLAN_SETCOOKIE(vlan, ifp);
4825 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
4827 static char *caps_decoder[] = {
4828 "\20\001IPMI\002NCSI", /* 0: NBM */
4829 "\20\001PPP\002QFC\003DCBX", /* 1: link */
4830 "\20\001INGRESS\002EGRESS", /* 2: switch */
4831 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
4832 "\006HASHFILTER\007ETHOFLD",
4833 "\20\001TOE", /* 4: TOE */
4834 "\20\001RDDP\002RDMAC", /* 5: RDMA */
4835 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
4836 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
4837 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
4839 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
4840 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */
4841 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
4842 "\004PO_INITIATOR\005PO_TARGET",
4846 t4_sysctls(struct adapter *sc)
4848 struct sysctl_ctx_list *ctx;
4849 struct sysctl_oid *oid;
4850 struct sysctl_oid_list *children, *c0;
4851 static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4853 ctx = device_get_sysctl_ctx(sc->dev);
4858 oid = device_get_sysctl_tree(sc->dev);
4859 c0 = children = SYSCTL_CHILDREN(oid);
4861 sc->sc_do_rxcopy = 1;
4862 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4863 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4865 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4866 sc->params.nports, "# of ports");
4868 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4869 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4870 sysctl_bitfield, "A", "available doorbells");
4872 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4873 sc->params.vpd.cclk, "core clock frequency (in kHz)");
4875 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4876 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
4877 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
4878 "interrupt holdoff timer values (us)");
4880 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4881 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
4882 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
4883 "interrupt holdoff packet counter values");
4885 t4_sge_sysctls(sc, ctx, children);
4887 sc->lro_timeout = 100;
4888 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4889 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4891 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
4892 &sc->debug_flags, 0, "flags to enable runtime debugging");
4894 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
4895 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
4897 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4898 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
4900 if (sc->flags & IS_VF)
4903 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4904 NULL, chip_rev(sc), "chip hardware revision");
4906 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
4907 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
4909 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
4910 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
4912 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
4913 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
4915 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
4916 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
4918 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
4919 sc->er_version, 0, "expansion ROM version");
4921 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
4922 sc->bs_version, 0, "bootstrap firmware version");
4924 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
4925 NULL, sc->params.scfg_vers, "serial config version");
4927 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
4928 NULL, sc->params.vpd_vers, "VPD version");
4930 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4931 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
4933 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4934 sc->cfcsum, "config file checksum");
4936 #define SYSCTL_CAP(name, n, text) \
4937 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
4938 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
4939 sysctl_bitfield, "A", "available " text " capabilities")
4941 SYSCTL_CAP(nbmcaps, 0, "NBM");
4942 SYSCTL_CAP(linkcaps, 1, "link");
4943 SYSCTL_CAP(switchcaps, 2, "switch");
4944 SYSCTL_CAP(niccaps, 3, "NIC");
4945 SYSCTL_CAP(toecaps, 4, "TCP offload");
4946 SYSCTL_CAP(rdmacaps, 5, "RDMA");
4947 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
4948 SYSCTL_CAP(cryptocaps, 7, "crypto");
4949 SYSCTL_CAP(fcoecaps, 8, "FCoE");
4952 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4953 NULL, sc->tids.nftids, "number of filters");
4955 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4956 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4957 "chip temperature (in Celsius)");
4961 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
4963 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4964 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4965 "logs and miscellaneous information");
4966 children = SYSCTL_CHILDREN(oid);
4968 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4969 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4970 sysctl_cctrl, "A", "congestion control");
4972 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4973 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4974 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4976 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4977 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4978 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4980 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4981 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4982 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4984 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4985 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4986 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4988 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4989 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4990 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4992 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4993 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4994 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4996 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4997 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4998 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
4999 "A", "CIM logic analyzer");
5001 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5002 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5003 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5005 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5006 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5007 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5009 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5010 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5011 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5013 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5014 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5015 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5017 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5018 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5019 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5021 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5022 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5023 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5025 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5026 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5027 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5029 if (chip_id(sc) > CHELSIO_T4) {
5030 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5031 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5032 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5034 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5035 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5036 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5039 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5040 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5041 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5043 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5044 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5045 sysctl_cim_qcfg, "A", "CIM queue configuration");
5047 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5048 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5049 sysctl_cpl_stats, "A", "CPL statistics");
5051 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5052 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5053 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5055 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5056 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5057 sysctl_devlog, "A", "firmware's device log");
5059 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5060 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5061 sysctl_fcoe_stats, "A", "FCoE statistics");
5063 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5064 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5065 sysctl_hw_sched, "A", "hardware scheduler ");
5067 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5068 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5069 sysctl_l2t, "A", "hardware L2 table");
5071 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5072 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5073 sysctl_lb_stats, "A", "loopback statistics");
5075 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5076 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5077 sysctl_meminfo, "A", "memory regions");
5079 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5080 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5081 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
5082 "A", "MPS TCAM entries");
5084 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5085 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5086 sysctl_path_mtus, "A", "path MTUs");
5088 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5089 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5090 sysctl_pm_stats, "A", "PM statistics");
5092 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5093 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5094 sysctl_rdma_stats, "A", "RDMA statistics");
5096 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5097 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5098 sysctl_tcp_stats, "A", "TCP statistics");
5100 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5101 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5102 sysctl_tids, "A", "TID information");
5104 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5105 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5106 sysctl_tp_err_stats, "A", "TP error statistics");
5108 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
5109 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
5110 "TP logic analyzer event capture mask");
5112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5113 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5114 sysctl_tp_la, "A", "TP logic analyzer");
5116 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5117 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5118 sysctl_tx_rate, "A", "Tx rate");
5120 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5121 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5122 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5124 if (chip_id(sc) >= CHELSIO_T5) {
5125 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5126 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5127 sysctl_wcwr_stats, "A", "write combined work requests");
5132 if (is_offload(sc)) {
5136 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5137 NULL, "TOE parameters");
5138 children = SYSCTL_CHILDREN(oid);
5140 sc->tt.sndbuf = 256 * 1024;
5141 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5142 &sc->tt.sndbuf, 0, "max hardware send buffer size");
5145 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5146 &sc->tt.ddp, 0, "DDP allowed");
5148 sc->tt.rx_coalesce = 1;
5149 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5150 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5152 sc->tt.tx_align = 1;
5153 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5154 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5156 sc->tt.tx_zcopy = 0;
5157 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
5158 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
5159 "Enable zero-copy aio_write(2)");
5161 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
5162 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
5163 "TP timer tick (us)");
5165 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
5166 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
5167 "TCP timestamp tick (us)");
5169 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
5170 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
5173 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
5174 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
5175 "IU", "DACK timer (us)");
5177 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
5178 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
5179 sysctl_tp_timer, "LU", "Retransmit min (us)");
5181 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
5182 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
5183 sysctl_tp_timer, "LU", "Retransmit max (us)");
5185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
5186 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
5187 sysctl_tp_timer, "LU", "Persist timer min (us)");
5189 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
5190 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
5191 sysctl_tp_timer, "LU", "Persist timer max (us)");
5193 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
5194 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
5195 sysctl_tp_timer, "LU", "Keepidle idle timer (us)");
5197 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl",
5198 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
5199 sysctl_tp_timer, "LU", "Keepidle interval (us)");
5201 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
5202 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
5203 sysctl_tp_timer, "LU", "Initial SRTT (us)");
5205 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
5206 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
5207 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
5213 vi_sysctls(struct vi_info *vi)
5215 struct sysctl_ctx_list *ctx;
5216 struct sysctl_oid *oid;
5217 struct sysctl_oid_list *children;
5219 ctx = device_get_sysctl_ctx(vi->dev);
5222 * dev.v?(cxgbe|cxl).X.
5224 oid = device_get_sysctl_tree(vi->dev);
5225 children = SYSCTL_CHILDREN(oid);
5227 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
5228 vi->viid, "VI identifer");
5229 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5230 &vi->nrxq, 0, "# of rx queues");
5231 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5232 &vi->ntxq, 0, "# of tx queues");
5233 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5234 &vi->first_rxq, 0, "index of first rx queue");
5235 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5236 &vi->first_txq, 0, "index of first tx queue");
5237 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
5238 vi->rss_size, "size of RSS indirection table");
5240 if (IS_MAIN_VI(vi)) {
5241 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
5242 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5243 "Reserve queue 0 for non-flowid packets");
5247 if (vi->nofldrxq != 0) {
5248 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5250 "# of rx queues for offloaded TCP connections");
5251 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5253 "# of tx queues for offloaded TCP connections");
5254 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5255 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5256 "index of first TOE rx queue");
5257 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5258 CTLFLAG_RD, &vi->first_ofld_txq, 0,
5259 "index of first TOE tx queue");
5263 if (vi->nnmrxq != 0) {
5264 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5265 &vi->nnmrxq, 0, "# of netmap rx queues");
5266 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5267 &vi->nnmtxq, 0, "# of netmap tx queues");
5268 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5269 CTLFLAG_RD, &vi->first_nm_rxq, 0,
5270 "index of first netmap rx queue");
5271 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5272 CTLFLAG_RD, &vi->first_nm_txq, 0,
5273 "index of first netmap tx queue");
5277 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5278 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5279 "holdoff timer index");
5280 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5281 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5282 "holdoff packet counter index");
5284 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5285 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5287 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5288 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
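/*
 * Per-port sysctls under dev.(cxgbe|cxl).X: link settings (pause, FEC,
 * autoneg), Tx scheduler traffic classes, and a "stats" node whose
 * counters are read straight from the MPS port statistics registers.
 */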
5293 cxgbe_sysctls(struct port_info *pi)
5295 struct sysctl_ctx_list *ctx;
5296 struct sysctl_oid *oid;
5297 struct sysctl_oid_list *children, *children2;
5298 struct adapter *sc = pi->adapter;
5302 ctx = device_get_sysctl_ctx(pi->dev);
5307 oid = device_get_sysctl_tree(pi->dev);
5308 children = SYSCTL_CHILDREN(oid);
5310 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5311 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5312 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5313 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5314 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5315 "PHY temperature (in Celsius)");
5316 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5317 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5318 "PHY firmware version");
5321 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5322 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
5323 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5324 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
5325 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
5326 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
5327 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
5328 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
5329 "autonegotiation (-1 = not supported)");
5331 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
5332 port_top_speed(pi), "max speed (in Gbps)");
5334 if (sc->flags & IS_VF)
5338 * dev.(cxgbe|cxl).X.tc.
5340 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
5341 "Tx scheduler traffic classes");
5342 for (i = 0; i < sc->chip_params->nsched_cls; i++) {
5343 struct tx_sched_class *tc = &pi->tc[i];
5345 snprintf(name, sizeof(name), "%d", i);
5346 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
5347 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
5349 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
5350 &tc->flags, 0, "flags");
5351 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
5352 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
5354 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
5355 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
5356 sysctl_tc_params, "A", "traffic class parameters");
5361 * dev.cxgbe.X.stats.
5363 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5364 NULL, "port statistics");
5365 children = SYSCTL_CHILDREN(oid);
5366 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5367 &pi->tx_parse_error, 0,
5368 "# of tx packets with invalid length or # of segments");
5370 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5371 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5372 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5373 sysctl_handle_t4_reg64, "QU", desc)
5375 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5376 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5377 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5378 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5379 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5380 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5381 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5382 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5383 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5384 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5385 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5386 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5387 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5388 "# of tx frames in this range",
5389 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5390 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5391 "# of tx frames in this range",
5392 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5393 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5394 "# of tx frames in this range",
5395 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5396 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5397 "# of tx frames in this range",
5398 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5399 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5400 "# of tx frames in this range",
5401 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5402 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5403 "# of tx frames in this range",
5404 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5405 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5406 "# of tx frames in this range",
5407 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5408 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5409 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5410 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5411 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5412 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5413 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5414 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5415 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5416 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5417 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5418 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5419 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5420 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5421 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5422 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5423 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5424 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5425 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5426 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5427 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5429 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5430 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5431 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5432 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5433 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5434 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5435 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5436 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5437 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5438 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5439 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5440 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5441 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5442 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5443 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5444 "# of frames received with bad FCS",
5445 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5446 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5447 "# of frames received with length error",
5448 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5449 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5450 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5451 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5452 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5453 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5454 "# of rx frames in this range",
5455 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5456 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5457 "# of rx frames in this range",
5458 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5459 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5460 "# of rx frames in this range",
5461 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5462 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5463 "# of rx frames in this range",
5464 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5465 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5466 "# of rx frames in this range",
5467 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5468 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5469 "# of rx frames in this range",
5470 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5471 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5472 "# of rx frames in this range",
5473 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5474 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5475 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5476 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5477 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5478 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5479 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5480 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5481 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5482 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5483 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5484 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5485 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5486 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5487 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5488 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5489 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5490 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5491 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5493 #undef SYSCTL_ADD_T4_REG64
5495 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5496 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5497 &pi->stats.name, desc)
5499 /* We get these from port_stats and they may be stale by up to 1s */
5500 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5501 "# drops due to buffer-group 0 overflows");
5502 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5503 "# drops due to buffer-group 1 overflows");
5504 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5505 "# drops due to buffer-group 2 overflows");
5506 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5507 "# drops due to buffer-group 3 overflows");
5508 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5509 "# of buffer-group 0 truncated packets");
5510 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5511 "# of buffer-group 1 truncated packets");
5512 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5513 "# of buffer-group 2 truncated packets");
5514 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5515 "# of buffer-group 3 truncated packets");
5517 #undef SYSCTL_ADD_T4_PORTSTAT
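/*
 * Handler for the holdoff timer/counter arrays: arg1 points to an array of
 * ints, arg2 is its size in bytes, and the values are printed as a space
 * separated list.
 */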
5521 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5523 int rc, *i, space = 0;
5526 sbuf_new_for_sysctl(&sb, NULL, 64, req);
5527 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5529 sbuf_printf(&sb, " ");
5530 sbuf_printf(&sb, "%d", *i);
5533 rc = sbuf_finish(&sb);
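/*
 * Prints arg2 in the kernel's "%b" bit-field notation using the decoder
 * string in arg1 (see the caps_decoder[] strings above).
 */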
5539 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5544 rc = sysctl_wire_old_buffer(req, 0);
5548 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5552 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5553 rc = sbuf_finish(sb);
5560 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5562 struct port_info *pi = arg1;
5564 struct adapter *sc = pi->adapter;
5568 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
5571 /* XXX: magic numbers */
5572 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5574 end_synchronized_op(sc, 0);
5580 rc = sysctl_handle_int(oidp, &v, 0, req);
5585 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5587 struct vi_info *vi = arg1;
5590 val = vi->rsrv_noflowq;
5591 rc = sysctl_handle_int(oidp, &val, 0, req);
5592 if (rc != 0 || req->newptr == NULL)
5595 if ((val >= 1) && (vi->ntxq > 1))
5596 vi->rsrv_noflowq = 1;
5598 vi->rsrv_noflowq = 0;
5604 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5606 struct vi_info *vi = arg1;
5607 struct adapter *sc = vi->pi->adapter;
5609 struct sge_rxq *rxq;
5611 struct sge_ofld_rxq *ofld_rxq;
5617 rc = sysctl_handle_int(oidp, &idx, 0, req);
5618 if (rc != 0 || req->newptr == NULL)
5621 if (idx < 0 || idx >= SGE_NTIMERS)
5624 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5629 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
5630 for_each_rxq(vi, i, rxq) {
5631 #ifdef atomic_store_rel_8
5632 atomic_store_rel_8(&rxq->iq.intr_params, v);
5634 rxq->iq.intr_params = v;
5638 for_each_ofld_rxq(vi, i, ofld_rxq) {
5639 #ifdef atomic_store_rel_8
5640 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
5642 ofld_rxq->iq.intr_params = v;
5648 end_synchronized_op(sc, LOCK_HELD);
5653 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5655 struct vi_info *vi = arg1;
5656 struct adapter *sc = vi->pi->adapter;
5661 rc = sysctl_handle_int(oidp, &idx, 0, req);
5662 if (rc != 0 || req->newptr == NULL)
5665 if (idx < -1 || idx >= SGE_NCOUNTERS)
5668 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5673 if (vi->flags & VI_INIT_DONE)
5674 rc = EBUSY; /* cannot be changed once the queues are created */
5678 end_synchronized_op(sc, LOCK_HELD);
5683 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5685 struct vi_info *vi = arg1;
5686 struct adapter *sc = vi->pi->adapter;
5689 qsize = vi->qsize_rxq;
5691 rc = sysctl_handle_int(oidp, &qsize, 0, req);
5692 if (rc != 0 || req->newptr == NULL)
5695 if (qsize < 128 || (qsize & 7))
5698 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5703 if (vi->flags & VI_INIT_DONE)
5704 rc = EBUSY; /* cannot be changed once the queues are created */
5706 vi->qsize_rxq = qsize;
5708 end_synchronized_op(sc, LOCK_HELD);
5713 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5715 struct vi_info *vi = arg1;
5716 struct adapter *sc = vi->pi->adapter;
5719 qsize = vi->qsize_txq;
5721 rc = sysctl_handle_int(oidp, &qsize, 0, req);
5722 if (rc != 0 || req->newptr == NULL)
5725 if (qsize < 128 || qsize > 65536)
5728 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5733 if (vi->flags & VI_INIT_DONE)
5734 rc = EBUSY; /* cannot be changed once the queues are created */
5736 vi->qsize_txq = qsize;
5738 end_synchronized_op(sc, LOCK_HELD);
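/*
 * dev.(cxgbe|cxl).X.pause_settings.  A read reports the current flow
 * control state; a write takes a single digit (bit 0 = rx_pause, bit 1 =
 * tx_pause) and, if it differs from the requested settings, reprograms the
 * link with t4_link_l1cfg().
 */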
5743 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
5745 struct port_info *pi = arg1;
5746 struct adapter *sc = pi->adapter;
5747 struct link_config *lc = &pi->link_cfg;
5750 if (req->newptr == NULL) {
5752 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
5754 rc = sysctl_wire_old_buffer(req, 0);
5758 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5762 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
5763 rc = sbuf_finish(sb);
5769 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
5772 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5778 if (s[0] < '0' || s[0] > '9')
5779 return (EINVAL); /* not a number */
5781 if (n & ~(PAUSE_TX | PAUSE_RX))
5782 return (EINVAL); /* some other bit is set too */
5784 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
5788 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
5789 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
5790 lc->requested_fc |= n;
5791 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5793 end_synchronized_op(sc, 0);
5800 sysctl_fec(SYSCTL_HANDLER_ARGS)
5802 struct port_info *pi = arg1;
5803 struct adapter *sc = pi->adapter;
5804 struct link_config *lc = &pi->link_cfg;
5807 if (req->newptr == NULL) {
5809 static char *bits = "\20\1RS\2BASER_RS\3RESERVED";
5811 rc = sysctl_wire_old_buffer(req, 0);
5815 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5819 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
5820 rc = sbuf_finish(sb);
5826 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
5829 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5835 if (s[0] < '0' || s[0] > '9')
5836 return (EINVAL); /* not a number */
5838 if (n & ~M_FW_PORT_CAP_FEC)
5839 return (EINVAL); /* some other bit is set too */
5841 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
5845 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
5846 lc->requested_fec = n &
5847 G_FW_PORT_CAP_FEC(lc->supported);
5848 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5850 end_synchronized_op(sc, 0);
5857 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
5859 struct port_info *pi = arg1;
5860 struct adapter *sc = pi->adapter;
5861 struct link_config *lc = &pi->link_cfg;
5864 if (lc->supported & FW_PORT_CAP_ANEG)
5865 val = lc->autoneg == AUTONEG_ENABLE ? 1 : 0;
5868 rc = sysctl_handle_int(oidp, &val, 0, req);
5869 if (rc != 0 || req->newptr == NULL)
5871 if ((lc->supported & FW_PORT_CAP_ANEG) == 0)
5874 val = val ? AUTONEG_ENABLE : AUTONEG_DISABLE;
5875 if (lc->autoneg == val)
5876 return (0); /* no change */
5878 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
5884 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5891 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5893 struct adapter *sc = arg1;
5897 val = t4_read_reg64(sc, reg);
5899 return (sysctl_handle_64(oidp, &val, 0, req));
5903 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5905 struct adapter *sc = arg1;
5907 uint32_t param, val;
5909 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5912 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5913 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5914 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5915 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5916 end_synchronized_op(sc, 0);
5920 /* unknown is returned as 0 but we display -1 in that case */
5921 t = val == 0 ? -1 : val;
5923 rc = sysctl_handle_int(oidp, &t, 0, req);
5929 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
5931 struct adapter *sc = arg1;
5934 uint16_t incr[NMTUS][NCCTRL_WIN];
5935 static const char *dec_fac[] = {
5936 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
5940 rc = sysctl_wire_old_buffer(req, 0);
5944 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5948 t4_read_cong_tbl(sc, incr);
5950 for (i = 0; i < NCCTRL_WIN; ++i) {
5951 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
5952 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
5953 incr[5][i], incr[6][i], incr[7][i]);
5954 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
5955 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
5956 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
5957 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
5960 rc = sbuf_finish(sb);
5966 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
5967 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
5968 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
5969 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
5973 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5975 struct adapter *sc = arg1;
5977 int rc, i, n, qid = arg2;
5980 u_int cim_num_obq = sc->chip_params->cim_num_obq;
5982 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5983 ("%s: bad qid %d\n", __func__, qid));
5985 if (qid < CIM_NUM_IBQ) {
5988 n = 4 * CIM_IBQ_SIZE;
5989 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5990 rc = t4_read_cim_ibq(sc, qid, buf, n);
5992 /* outbound queue */
5995 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5996 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5997 rc = t4_read_cim_obq(sc, qid, buf, n);
6004 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
6006 rc = sysctl_wire_old_buffer(req, 0);
6010 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6016 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
6017 for (i = 0, p = buf; i < n; i += 16, p += 4)
6018 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6021 rc = sbuf_finish(sb);
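/*
 * CIM logic analyzer dump for T4/T5.  The layout of each 8-word sample
 * depends on whether the LA was configured to capture PC only
 * (F_UPDBGLACAPTPCONLY): either three compressed rows or one full row
 * that includes the LS0 state.
 */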
6029 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6031 struct adapter *sc = arg1;
6037 MPASS(chip_id(sc) <= CHELSIO_T5);
6039 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6043 rc = sysctl_wire_old_buffer(req, 0);
6047 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6051 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6054 rc = -t4_cim_read_la(sc, buf, NULL);
6058 sbuf_printf(sb, "Status Data PC%s",
6059 cfg & F_UPDBGLACAPTPCONLY ? "" :
6060 " LS0Stat LS0Addr LS0Data");
6062 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
6063 if (cfg & F_UPDBGLACAPTPCONLY) {
6064 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
6066 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
6067 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6068 p[4] & 0xff, p[5] >> 8);
6069 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
6070 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6071 p[1] & 0xf, p[2] >> 4);
6074 "\n %02x %x%07x %x%07x %08x %08x "
6076 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6077 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6082 rc = sbuf_finish(sb);
6090 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
6092 struct adapter *sc = arg1;
6098 MPASS(chip_id(sc) > CHELSIO_T5);
6100 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6104 rc = sysctl_wire_old_buffer(req, 0);
6108 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6112 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6115 rc = -t4_cim_read_la(sc, buf, NULL);
6119 sbuf_printf(sb, "Status Inst Data PC%s",
6120 cfg & F_UPDBGLACAPTPCONLY ? "" :
6121 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
6123 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
6124 if (cfg & F_UPDBGLACAPTPCONLY) {
6125 sbuf_printf(sb, "\n %02x %08x %08x %08x",
6126 p[3] & 0xff, p[2], p[1], p[0]);
6127 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
6128 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
6129 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
6130 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
6131 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
6132 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
6135 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
6136 "%08x %08x %08x %08x %08x %08x",
6137 (p[9] >> 16) & 0xff,
6138 p[9] & 0xffff, p[8] >> 16,
6139 p[8] & 0xffff, p[7] >> 16,
6140 p[7] & 0xffff, p[6] >> 16,
6141 p[2], p[1], p[0], p[5], p[4], p[3]);
6145 rc = sbuf_finish(sb);
6153 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6155 struct adapter *sc = arg1;
6161 rc = sysctl_wire_old_buffer(req, 0);
6165 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6169 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6172 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6175 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6176 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6180 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
6181 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6182 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
6183 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6184 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6185 (p[1] >> 2) | ((p[2] & 3) << 30),
6186 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6190 rc = sbuf_finish(sb);
6197 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6199 struct adapter *sc = arg1;
6205 rc = sysctl_wire_old_buffer(req, 0);
6209 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6213 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6216 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6219 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
6220 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6221 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
6222 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6223 p[4], p[3], p[2], p[1], p[0]);
6226 sbuf_printf(sb, "\n\nCntl ID Data");
6227 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6228 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
6229 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6232 rc = sbuf_finish(sb);
6239 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
6241 struct adapter *sc = arg1;
6244 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6245 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6246 uint16_t thres[CIM_NUM_IBQ];
6247 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
6248 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
6249 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
6251 cim_num_obq = sc->chip_params->cim_num_obq;
6253 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
6254 obq_rdaddr = A_UP_OBQ_0_REALADDR;
6256 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
6257 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
6259 nq = CIM_NUM_IBQ + cim_num_obq;
6261 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
6263 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
6267 t4_read_cimq_cfg(sc, base, size, thres);
6269 rc = sysctl_wire_old_buffer(req, 0);
6273 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6278 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
6280 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
6281 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
6282 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
6283 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6284 G_QUEREMFLITS(p[2]) * 16);
6285 for ( ; i < nq; i++, p += 4, wr += 2)
6286 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
6287 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
6288 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6289 G_QUEREMFLITS(p[2]) * 16);
6291 rc = sbuf_finish(sb);
6298 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
6300 struct adapter *sc = arg1;
6303 struct tp_cpl_stats stats;
6305 rc = sysctl_wire_old_buffer(req, 0);
6309 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6313 mtx_lock(&sc->reg_lock);
6314 t4_tp_get_cpl_stats(sc, &stats);
6315 mtx_unlock(&sc->reg_lock);
6317 if (sc->chip_params->nchan > 2) {
6318 sbuf_printf(sb, " channel 0 channel 1"
6319 " channel 2 channel 3");
6320 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
6321 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
6322 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
6323 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
6325 sbuf_printf(sb, " channel 0 channel 1");
6326 sbuf_printf(sb, "\nCPL requests: %10u %10u",
6327 stats.req[0], stats.req[1]);
6328 sbuf_printf(sb, "\nCPL responses: %10u %10u",
6329 stats.rsp[0], stats.rsp[1]);
6332 rc = sbuf_finish(sb);
6339 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
6341 struct adapter *sc = arg1;
6344 struct tp_usm_stats stats;
6346 rc = sysctl_wire_old_buffer(req, 0);
6350 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6354 t4_get_usm_stats(sc, &stats);
6356 sbuf_printf(sb, "Frames: %u\n", stats.frames);
6357 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
6358 sbuf_printf(sb, "Drops: %u", stats.drops);
6360 rc = sbuf_finish(sb);
6366 static const char * const devlog_level_strings[] = {
6367 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
6368 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
6369 [FW_DEVLOG_LEVEL_ERR] = "ERR",
6370 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
6371 [FW_DEVLOG_LEVEL_INFO] = "INFO",
6372 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
6375 static const char * const devlog_facility_strings[] = {
6376 [FW_DEVLOG_FACILITY_CORE] = "CORE",
6377 [FW_DEVLOG_FACILITY_CF] = "CF",
6378 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
6379 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
6380 [FW_DEVLOG_FACILITY_RES] = "RES",
6381 [FW_DEVLOG_FACILITY_HW] = "HW",
6382 [FW_DEVLOG_FACILITY_FLR] = "FLR",
6383 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
6384 [FW_DEVLOG_FACILITY_PHY] = "PHY",
6385 [FW_DEVLOG_FACILITY_MAC] = "MAC",
6386 [FW_DEVLOG_FACILITY_PORT] = "PORT",
6387 [FW_DEVLOG_FACILITY_VI] = "VI",
6388 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
6389 [FW_DEVLOG_FACILITY_ACL] = "ACL",
6390 [FW_DEVLOG_FACILITY_TM] = "TM",
6391 [FW_DEVLOG_FACILITY_QFC] = "QFC",
6392 [FW_DEVLOG_FACILITY_DCB] = "DCB",
6393 [FW_DEVLOG_FACILITY_ETH] = "ETH",
6394 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
6395 [FW_DEVLOG_FACILITY_RI] = "RI",
6396 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
6397 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
6398 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
6399 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
6400 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
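/*
 * Dump the firmware's device log.  The log is a circular buffer of
 * fw_devlog_e entries in adapter memory; the entry with the lowest
 * timestamp is located first so that the log is printed oldest to newest
 * (e.g. sysctl dev.t4nex.0.misc.devlog on the first adapter).
 */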
6404 sysctl_devlog(SYSCTL_HANDLER_ARGS)
6406 struct adapter *sc = arg1;
6407 struct devlog_params *dparams = &sc->params.devlog;
6408 struct fw_devlog_e *buf, *e;
6409 int i, j, rc, nentries, first = 0;
6411 uint64_t ftstamp = UINT64_MAX;
6413 if (dparams->addr == 0)
6416 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
6420 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
6424 nentries = dparams->size / sizeof(struct fw_devlog_e);
6425 for (i = 0; i < nentries; i++) {
6428 if (e->timestamp == 0)
6431 e->timestamp = be64toh(e->timestamp);
6432 e->seqno = be32toh(e->seqno);
6433 for (j = 0; j < 8; j++)
6434 e->params[j] = be32toh(e->params[j]);
6436 if (e->timestamp < ftstamp) {
6437 ftstamp = e->timestamp;
6442 if (buf[first].timestamp == 0)
6443 goto done; /* nothing in the log */
6445 rc = sysctl_wire_old_buffer(req, 0);
6449 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6454 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
6455 "Seq#", "Tstamp", "Level", "Facility", "Message");
6460 if (e->timestamp == 0)
6463 sbuf_printf(sb, "%10d %15ju %8s %8s ",
6464 e->seqno, e->timestamp,
6465 (e->level < nitems(devlog_level_strings) ?
6466 devlog_level_strings[e->level] : "UNKNOWN"),
6467 (e->facility < nitems(devlog_facility_strings) ?
6468 devlog_facility_strings[e->facility] : "UNKNOWN"));
6469 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6470 e->params[2], e->params[3], e->params[4],
6471 e->params[5], e->params[6], e->params[7]);
6473 if (++i == nentries)
6475 } while (i != first);
6477 rc = sbuf_finish(sb);
6485 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6487 struct adapter *sc = arg1;
6490 struct tp_fcoe_stats stats[MAX_NCHAN];
6491 int i, nchan = sc->chip_params->nchan;
6493 rc = sysctl_wire_old_buffer(req, 0);
6497 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6501 for (i = 0; i < nchan; i++)
6502 t4_get_fcoe_stats(sc, i, &stats[i]);
6505 sbuf_printf(sb, " channel 0 channel 1"
6506 " channel 2 channel 3");
6507 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
6508 stats[0].octets_ddp, stats[1].octets_ddp,
6509 stats[2].octets_ddp, stats[3].octets_ddp);
6510 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
6511 stats[0].frames_ddp, stats[1].frames_ddp,
6512 stats[2].frames_ddp, stats[3].frames_ddp);
6513 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
6514 stats[0].frames_drop, stats[1].frames_drop,
6515 stats[2].frames_drop, stats[3].frames_drop);
6517 sbuf_printf(sb, " channel 0 channel 1");
6518 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
6519 stats[0].octets_ddp, stats[1].octets_ddp);
6520 sbuf_printf(sb, "\nframesDDP: %16u %16u",
6521 stats[0].frames_ddp, stats[1].frames_ddp);
6522 sbuf_printf(sb, "\nframesDrop: %16u %16u",
6523 stats[0].frames_drop, stats[1].frames_drop);
6526 rc = sbuf_finish(sb);
6533 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6535 struct adapter *sc = arg1;
6538 unsigned int map, kbps, ipg, mode;
6539 unsigned int pace_tab[NTX_SCHED];
6541 rc = sysctl_wire_old_buffer(req, 0);
6545 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6549 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6550 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6551 t4_read_pace_tbl(sc, pace_tab);
6553 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
6554 "Class IPG (0.1 ns) Flow IPG (us)");
6556 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6557 t4_get_tx_sched(sc, i, &kbps, &ipg);
6558 sbuf_printf(sb, "\n %u %-5s %u ", i,
6559 (mode & (1 << i)) ? "flow" : "class", map & 3);
6561 sbuf_printf(sb, "%9u ", kbps);
6563 sbuf_printf(sb, " disabled ");
6566 sbuf_printf(sb, "%13u ", ipg);
6568 sbuf_printf(sb, " disabled ");
6571 sbuf_printf(sb, "%10u", pace_tab[i]);
6573 sbuf_printf(sb, " disabled");
6576 rc = sbuf_finish(sb);
6583 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6585 struct adapter *sc = arg1;
6589 struct lb_port_stats s[2];
6590 static const char *stat_name[] = {
6591 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6592 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6593 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
6594 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6595 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6596 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6597 "BG2FramesTrunc:", "BG3FramesTrunc:"
6600 rc = sysctl_wire_old_buffer(req, 0);
6604 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6608 memset(s, 0, sizeof(s));
6610 for (i = 0; i < sc->chip_params->nchan; i += 2) {
6611 t4_get_lb_stats(sc, i, &s[0]);
6612 t4_get_lb_stats(sc, i + 1, &s[1]);
6616 sbuf_printf(sb, "%s Loopback %u"
6617 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6619 for (j = 0; j < nitems(stat_name); j++)
6620 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6624 rc = sbuf_finish(sb);
6631 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6634 struct port_info *pi = arg1;
6635 struct link_config *lc = &pi->link_cfg;
6638 rc = sysctl_wire_old_buffer(req, 0);
6641 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6645 if (lc->link_ok || lc->link_down_rc == 255)
6646 sbuf_printf(sb, "n/a");
6648 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
6650 rc = sbuf_finish(sb);
6663 mem_desc_cmp(const void *a, const void *b)
6665 return ((const struct mem_desc *)a)->base -
6666 ((const struct mem_desc *)b)->base;
6670 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
6678 size = to - from + 1;
6682 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
6683 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
6687 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6689 struct adapter *sc = arg1;
6692 uint32_t lo, hi, used, alloc;
6693 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6694 static const char *region[] = {
6695 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6696 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6697 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6698 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6699 "RQUDP region:", "PBL region:", "TXPBL region:",
6700 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6703 struct mem_desc avail[4];
6704 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
6705 struct mem_desc *md = mem;
6707 rc = sysctl_wire_old_buffer(req, 0);
6711 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6715 for (i = 0; i < nitems(mem); i++) {
6720 /* Find and sort the populated memory ranges */
6722 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6723 if (lo & F_EDRAM0_ENABLE) {
6724 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6725 avail[i].base = G_EDRAM0_BASE(hi) << 20;
6726 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6730 if (lo & F_EDRAM1_ENABLE) {
6731 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6732 avail[i].base = G_EDRAM1_BASE(hi) << 20;
6733 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6737 if (lo & F_EXT_MEM_ENABLE) {
6738 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6739 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6740 avail[i].limit = avail[i].base +
6741 (G_EXT_MEM_SIZE(hi) << 20);
6742 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
6745 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
6746 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6747 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6748 avail[i].limit = avail[i].base +
6749 (G_EXT_MEM1_SIZE(hi) << 20);
6753 if (!i) /* no memory available */
6755 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6757 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6758 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6759 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6760 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6761 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6762 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6763 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6764 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6765 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6767 /* the next few have explicit upper bounds */
6768 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6769 md->limit = md->base - 1 +
6770 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6771 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6774 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6775 md->limit = md->base - 1 +
6776 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6777 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6780 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6781 if (chip_id(sc) <= CHELSIO_T5)
6782 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6784 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
6788 md->idx = nitems(region); /* hide it */
6792 #define ulp_region(reg) \
6793 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6794 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6796 ulp_region(RX_ISCSI);
6797 ulp_region(RX_TDDP);
6799 ulp_region(RX_STAG);
6801 ulp_region(RX_RQUDP);
6807 md->idx = nitems(region);
6810 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
6811 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
6814 if (sge_ctrl & F_VFIFO_ENABLE)
6815 size = G_DBVFIFO_SIZE(fifo_size);
6817 size = G_T6_DBVFIFO_SIZE(fifo_size);
6820 md->base = G_BASEADDR(t4_read_reg(sc,
6821 A_SGE_DBVFIFO_BADDR));
6822 md->limit = md->base + (size << 2) - 1;
6827 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6830 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6834 md->base = sc->vres.ocq.start;
6835 if (sc->vres.ocq.size)
6836 md->limit = md->base + sc->vres.ocq.size - 1;
6838 md->idx = nitems(region); /* hide it */
6841 /* add any address-space holes, there can be up to 3 */
6842 for (n = 0; n < i - 1; n++)
6843 if (avail[n].limit < avail[n + 1].base)
6844 (md++)->base = avail[n].limit;
6846 (md++)->base = avail[n].limit;
6849 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6851 for (lo = 0; lo < i; lo++)
6852 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6853 avail[lo].limit - 1);
6855 sbuf_printf(sb, "\n");
6856 for (i = 0; i < n; i++) {
6857 if (mem[i].idx >= nitems(region))
6858 continue; /* skip holes */
6860 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6861 mem_region_show(sb, region[mem[i].idx], mem[i].base,
6865 sbuf_printf(sb, "\n");
6866 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6867 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6868 mem_region_show(sb, "uP RAM:", lo, hi);
6870 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6871 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6872 mem_region_show(sb, "uP Extmem2:", lo, hi);
6874 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6875 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6877 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6878 (lo & F_PMRXNUMCHN) ? 2 : 1);
6880 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6881 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6882 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6884 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6885 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6886 sbuf_printf(sb, "%u p-structs\n",
6887 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6889 for (i = 0; i < 4; i++) {
6890 if (chip_id(sc) > CHELSIO_T5)
6891 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
6893 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6895 used = G_T5_USED(lo);
6896 alloc = G_T5_ALLOC(lo);
6899 alloc = G_ALLOC(lo);
6901 /* For T6 these are MAC buffer groups */
6902 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6905 for (i = 0; i < sc->chip_params->nchan; i++) {
6906 if (chip_id(sc) > CHELSIO_T5)
6907 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
6909 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6911 used = G_T5_USED(lo);
6912 alloc = G_T5_ALLOC(lo);
6915 alloc = G_ALLOC(lo);
6917 /* For T6 these are MAC buffer groups */
6919 "\nLoopback %d using %u pages out of %u allocated",
6923 rc = sbuf_finish(sb);
6930 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
6934 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
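/*
 * The MPS TCAM sysctls below read each entry as an (x, y) word pair.
 * As this code interprets it, y carries the data bits (the Ethernet
 * address is copied out of the big-endian form of y, skipping the two
 * high-order bytes of the 64-bit value), a bit cleared in both x and y
 * is a "don't care", and the derived mask is reported next to the
 * address.
 */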
6938 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
6940 struct adapter *sc = arg1;
6944 MPASS(chip_id(sc) <= CHELSIO_T5);
6946 rc = sysctl_wire_old_buffer(req, 0);
6950 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6955 "Idx Ethernet address Mask Vld Ports PF"
6956 " VF Replication P0 P1 P2 P3 ML");
6957 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
6958 uint64_t tcamx, tcamy, mask;
6959 uint32_t cls_lo, cls_hi;
6960 uint8_t addr[ETHER_ADDR_LEN];
6962 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
6963 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
6966 tcamxy2valmask(tcamx, tcamy, addr, &mask);
6967 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6968 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6969 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
6970 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
6971 addr[3], addr[4], addr[5], (uintmax_t)mask,
6972 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
6973 G_PORTMAP(cls_hi), G_PF(cls_lo),
6974 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
6976 if (cls_lo & F_REPLICATE) {
6977 struct fw_ldst_cmd ldst_cmd;
6979 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6980 ldst_cmd.op_to_addrspace =
6981 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6982 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6983 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6984 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6985 ldst_cmd.u.mps.rplc.fid_idx =
6986 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6987 V_FW_LDST_CMD_IDX(i));
6989 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6993 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
6994 sizeof(ldst_cmd), &ldst_cmd);
6995 end_synchronized_op(sc, 0);
6998 sbuf_printf(sb, "%36d", rc);
7001 sbuf_printf(sb, " %08x %08x %08x %08x",
7002 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7003 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7004 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7005 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7008 sbuf_printf(sb, "%36s", "");
7010 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
7011 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
7012 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
7016 (void) sbuf_finish(sb);
7018 rc = sbuf_finish(sb);
7025 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
7027 struct adapter *sc = arg1;
7031 MPASS(chip_id(sc) > CHELSIO_T5);
7033 rc = sysctl_wire_old_buffer(req, 0);
7037 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7041 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
7042 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
7044 " P0 P1 P2 P3 ML\n");
7046 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7047 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
7049 uint64_t tcamx, tcamy, val, mask;
7050 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
7051 uint8_t addr[ETHER_ADDR_LEN];
7053 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
7055 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
7057 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
7058 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7059 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7060 tcamy = G_DMACH(val) << 32;
7061 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7062 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7063 lookup_type = G_DATALKPTYPE(data2);
7064 port_num = G_DATAPORTNUM(data2);
7065 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7066 /* Inner header VNI */
7067 vniy = ((data2 & F_DATAVIDH2) << 23) |
7068 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7069 dip_hit = data2 & F_DATADIPHIT;
7074 vlan_vld = data2 & F_DATAVIDH2;
7075 ivlan = G_VIDL(val);
7078 ctl |= V_CTLXYBITSEL(1);
7079 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7080 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7081 tcamx = G_DMACH(val) << 32;
7082 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7083 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7084 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7085 /* Inner header VNI mask */
7086 vnix = ((data2 & F_DATAVIDH2) << 23) |
7087 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7093 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7095 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7096 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7098 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7099 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7100 "%012jx %06x %06x - - %3c"
7101 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
7102 addr[1], addr[2], addr[3], addr[4], addr[5],
7103 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
7104 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7105 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7106 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7108 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7109 "%012jx - - ", i, addr[0], addr[1],
7110 addr[2], addr[3], addr[4], addr[5],
7114 sbuf_printf(sb, "%4u Y ", ivlan);
7116 sbuf_printf(sb, " - N ");
7118 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
7119 lookup_type ? 'I' : 'O', port_num,
7120 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7121 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7122 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7126 if (cls_lo & F_T6_REPLICATE) {
7127 struct fw_ldst_cmd ldst_cmd;
7129 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7130 ldst_cmd.op_to_addrspace =
7131 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7132 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7133 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7134 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7135 ldst_cmd.u.mps.rplc.fid_idx =
7136 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7137 V_FW_LDST_CMD_IDX(i));
7139 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7143 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7144 sizeof(ldst_cmd), &ldst_cmd);
7145 end_synchronized_op(sc, 0);
7148 sbuf_printf(sb, "%72d", rc);
7151 sbuf_printf(sb, " %08x %08x %08x %08x"
7152 " %08x %08x %08x %08x",
7153 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
7154 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
7155 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
7156 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
7157 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7158 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7159 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7160 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7163 sbuf_printf(sb, "%72s", "");
7165 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
7166 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
7167 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
7168 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
7172 (void) sbuf_finish(sb);
7174 rc = sbuf_finish(sb);
7181 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
7183 struct adapter *sc = arg1;
7186 uint16_t mtus[NMTUS];
7188 rc = sysctl_wire_old_buffer(req, 0);
7192 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7196 t4_read_mtu_tbl(sc, mtus, NULL);
7198 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
7199 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
7200 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
7201 mtus[14], mtus[15]);
7203 rc = sbuf_finish(sb);
7210 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7212 struct adapter *sc = arg1;
7215 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
7216 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
7217 static const char *tx_stats[MAX_PM_NSTATS] = {
7218 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
7219 "Tx FIFO wait", NULL, "Tx latency"
7221 static const char *rx_stats[MAX_PM_NSTATS] = {
7222 "Read:", "Write bypass:", "Write mem:", "Flush:",
7223 "Rx FIFO wait", NULL, "Rx latency"
7226 rc = sysctl_wire_old_buffer(req, 0);
7230 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7234 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
7235 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
7237 sbuf_printf(sb, " Tx pcmds Tx bytes");
7238 for (i = 0; i < 4; i++) {
7239 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7243 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
7244 for (i = 0; i < 4; i++) {
7245 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7249 if (chip_id(sc) > CHELSIO_T5) {
7251 "\n Total wait Total occupancy");
7252 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7254 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7258 MPASS(i < nitems(tx_stats));
7261 "\n Reads Total wait");
7262 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7264 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7268 rc = sbuf_finish(sb);
7275 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
7277 struct adapter *sc = arg1;
7280 struct tp_rdma_stats stats;
7282 rc = sysctl_wire_old_buffer(req, 0);
7286 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7290 mtx_lock(&sc->reg_lock);
7291 t4_tp_get_rdma_stats(sc, &stats);
7292 mtx_unlock(&sc->reg_lock);
7294 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
7295 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
7297 rc = sbuf_finish(sb);
7304 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
7306 struct adapter *sc = arg1;
7309 struct tp_tcp_stats v4, v6;
7311 rc = sysctl_wire_old_buffer(req, 0);
7315 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7319 mtx_lock(&sc->reg_lock);
7320 t4_tp_get_tcp_stats(sc, &v4, &v6);
7321 mtx_unlock(&sc->reg_lock);
7325 sbuf_printf(sb, "OutRsts: %20u %20u\n",
7326 v4.tcp_out_rsts, v6.tcp_out_rsts);
7327 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
7328 v4.tcp_in_segs, v6.tcp_in_segs);
7329 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
7330 v4.tcp_out_segs, v6.tcp_out_segs);
7331 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
7332 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
7334 rc = sbuf_finish(sb);
7341 sysctl_tids(SYSCTL_HANDLER_ARGS)
7343 struct adapter *sc = arg1;
7346 struct tid_info *t = &sc->tids;
7348 rc = sysctl_wire_old_buffer(req, 0);
7352 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7357 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
7362 sbuf_printf(sb, "TID range: ");
7363 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7366 if (chip_id(sc) <= CHELSIO_T5) {
7367 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
7368 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
7370 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
7371 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
7375 sbuf_printf(sb, "0-%u, ", b - 1);
7376 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
7378 sbuf_printf(sb, "0-%u", t->ntids - 1);
7379 sbuf_printf(sb, ", in use: %u\n",
7380 atomic_load_acq_int(&t->tids_in_use));
7384 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
7385 t->stid_base + t->nstids - 1, t->stids_in_use);
7389 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
7390 t->ftid_base + t->nftids - 1);
7394 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
7395 t->etid_base + t->netids - 1);
7398 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
7399 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
7400 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
7402 rc = sbuf_finish(sb);
7409 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
7411 struct adapter *sc = arg1;
7414 struct tp_err_stats stats;
7416 rc = sysctl_wire_old_buffer(req, 0);
7420 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7424 mtx_lock(&sc->reg_lock);
7425 t4_tp_get_err_stats(sc, &stats);
7426 mtx_unlock(&sc->reg_lock);
7428 if (sc->chip_params->nchan > 2) {
7429 sbuf_printf(sb, " channel 0 channel 1"
7430 " channel 2 channel 3\n");
7431 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
7432 stats.mac_in_errs[0], stats.mac_in_errs[1],
7433 stats.mac_in_errs[2], stats.mac_in_errs[3]);
7434 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
7435 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
7436 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
7437 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
7438 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
7439 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
7440 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
7441 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
7442 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
7443 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
7444 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
7445 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
7446 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
7447 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
7448 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
7449 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
7450 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
7451 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
7452 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
7453 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
7454 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
7456 sbuf_printf(sb, " channel 0 channel 1\n");
7457 sbuf_printf(sb, "macInErrs: %10u %10u\n",
7458 stats.mac_in_errs[0], stats.mac_in_errs[1]);
7459 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
7460 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
7461 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
7462 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
7463 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
7464 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
7465 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
7466 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
7467 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
7468 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
7469 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
7470 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
7471 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
7472 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
7475 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
7476 stats.ofld_no_neigh, stats.ofld_cong_defer);
7478 rc = sbuf_finish(sb);
7485 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
7487 struct adapter *sc = arg1;
7488 struct tp_params *tpp = &sc->params.tp;
7492 mask = tpp->la_mask >> 16;
7493 rc = sysctl_handle_int(oidp, &mask, 0, req);
7494 if (rc != 0 || req->newptr == NULL)
7498 tpp->la_mask = mask << 16;
7499 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
7511 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
7517 uint64_t mask = (1ULL << f->width) - 1;
7518 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
7519 ((uintmax_t)v >> f->start) & mask);
7521 if (line_size + len >= 79) {
7523 sbuf_printf(sb, "\n ");
7525 sbuf_printf(sb, "%s ", buf);
7526 line_size += len + 1;
7529 sbuf_printf(sb, "\n");
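/*
 * field_desc_show() walks an array of field descriptors and prints
 * "name: value" pairs, wrapping at roughly 79 columns.  Each value is
 * extracted as (v >> start) & ((1 << width) - 1), so an entry such as
 * { "CplCmdIn", 56, 8 } reports bits 63:56 of the captured word.
 */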
7532 static const struct field_desc tp_la0[] = {
7533 { "RcfOpCodeOut", 60, 4 },
7535 { "WcfState", 52, 4 },
7536 { "RcfOpcSrcOut", 50, 2 },
7537 { "CRxError", 49, 1 },
7538 { "ERxError", 48, 1 },
7539 { "SanityFailed", 47, 1 },
7540 { "SpuriousMsg", 46, 1 },
7541 { "FlushInputMsg", 45, 1 },
7542 { "FlushInputCpl", 44, 1 },
7543 { "RssUpBit", 43, 1 },
7544 { "RssFilterHit", 42, 1 },
7546 { "InitTcb", 31, 1 },
7547 { "LineNumber", 24, 7 },
7549 { "EdataOut", 22, 1 },
7551 { "CdataOut", 20, 1 },
7552 { "EreadPdu", 19, 1 },
7553 { "CreadPdu", 18, 1 },
7554 { "TunnelPkt", 17, 1 },
7555 { "RcfPeerFin", 16, 1 },
7556 { "RcfReasonOut", 12, 4 },
7557 { "TxCchannel", 10, 2 },
7558 { "RcfTxChannel", 8, 2 },
7559 { "RxEchannel", 6, 2 },
7560 { "RcfRxChannel", 5, 1 },
7561 { "RcfDataOutSrdy", 4, 1 },
7563 { "RxOoDvld", 2, 1 },
7564 { "RxCongestion", 1, 1 },
7565 { "TxCongestion", 0, 1 },
7569 static const struct field_desc tp_la1[] = {
7570 { "CplCmdIn", 56, 8 },
7571 { "CplCmdOut", 48, 8 },
7572 { "ESynOut", 47, 1 },
7573 { "EAckOut", 46, 1 },
7574 { "EFinOut", 45, 1 },
7575 { "ERstOut", 44, 1 },
7580 { "DataIn", 39, 1 },
7581 { "DataInVld", 38, 1 },
7583 { "RxBufEmpty", 36, 1 },
7585 { "RxFbCongestion", 34, 1 },
7586 { "TxFbCongestion", 33, 1 },
7587 { "TxPktSumSrdy", 32, 1 },
7588 { "RcfUlpType", 28, 4 },
7590 { "Ebypass", 26, 1 },
7592 { "Static0", 24, 1 },
7594 { "Cbypass", 22, 1 },
7596 { "CPktOut", 20, 1 },
7597 { "RxPagePoolFull", 18, 2 },
7598 { "RxLpbkPkt", 17, 1 },
7599 { "TxLpbkPkt", 16, 1 },
7600 { "RxVfValid", 15, 1 },
7601 { "SynLearned", 14, 1 },
7602 { "SetDelEntry", 13, 1 },
7603 { "SetInvEntry", 12, 1 },
7604 { "CpcmdDvld", 11, 1 },
7605 { "CpcmdSave", 10, 1 },
7606 { "RxPstructsFull", 8, 2 },
7607 { "EpcmdDvld", 7, 1 },
7608 { "EpcmdFlush", 6, 1 },
7609 { "EpcmdTrimPrefix", 5, 1 },
7610 { "EpcmdTrimPostfix", 4, 1 },
7611 { "ERssIp4Pkt", 3, 1 },
7612 { "ERssIp6Pkt", 2, 1 },
7613 { "ERssTcpUdpPkt", 1, 1 },
7614 { "ERssFceFipPkt", 0, 1 },
7618 static const struct field_desc tp_la2[] = {
7619 { "CplCmdIn", 56, 8 },
7620 { "MpsVfVld", 55, 1 },
7627 { "DataIn", 39, 1 },
7628 { "DataInVld", 38, 1 },
7630 { "RxBufEmpty", 36, 1 },
7632 { "RxFbCongestion", 34, 1 },
7633 { "TxFbCongestion", 33, 1 },
7634 { "TxPktSumSrdy", 32, 1 },
7635 { "RcfUlpType", 28, 4 },
7637 { "Ebypass", 26, 1 },
7639 { "Static0", 24, 1 },
7641 { "Cbypass", 22, 1 },
7643 { "CPktOut", 20, 1 },
7644 { "RxPagePoolFull", 18, 2 },
7645 { "RxLpbkPkt", 17, 1 },
7646 { "TxLpbkPkt", 16, 1 },
7647 { "RxVfValid", 15, 1 },
7648 { "SynLearned", 14, 1 },
7649 { "SetDelEntry", 13, 1 },
7650 { "SetInvEntry", 12, 1 },
7651 { "CpcmdDvld", 11, 1 },
7652 { "CpcmdSave", 10, 1 },
7653 { "RxPstructsFull", 8, 2 },
7654 { "EpcmdDvld", 7, 1 },
7655 { "EpcmdFlush", 6, 1 },
7656 { "EpcmdTrimPrefix", 5, 1 },
7657 { "EpcmdTrimPostfix", 4, 1 },
7658 { "ERssIp4Pkt", 3, 1 },
7659 { "ERssIp6Pkt", 2, 1 },
7660 { "ERssTcpUdpPkt", 1, 1 },
7661 { "ERssFceFipPkt", 0, 1 },
7666 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
7669 field_desc_show(sb, *p, tp_la0);
7673 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
7677 sbuf_printf(sb, "\n");
7678 field_desc_show(sb, p[0], tp_la0);
7679 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7680 field_desc_show(sb, p[1], tp_la0);
7684 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
7688 sbuf_printf(sb, "\n");
7689 field_desc_show(sb, p[0], tp_la0);
7690 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7691 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
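/*
 * sysctl_tp_la() below picks a formatter based on the capture mode
 * (DBGLAMODE): tp_la_show() prints one 64-bit entry at a time using
 * the tp_la0 layout, while the *2 and *3 variants consume two entries
 * per line.  In the mode handled by tp_la_show3(), the layout of the
 * second word depends on bit 17 of the first word (TunnelPkt in
 * tp_la0): tunnelled packets use tp_la2, everything else tp_la1.
 */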
7695 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
7697 struct adapter *sc = arg1;
7702 void (*show_func)(struct sbuf *, uint64_t *, int);
7704 rc = sysctl_wire_old_buffer(req, 0);
7708 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7712 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
7714 t4_tp_read_la(sc, buf, NULL);
7717 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
7720 show_func = tp_la_show2;
7724 show_func = tp_la_show3;
7728 show_func = tp_la_show;
7731 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
7732 (*show_func)(sb, p, i);
7734 rc = sbuf_finish(sb);
7741 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
7743 struct adapter *sc = arg1;
7746 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
7748 rc = sysctl_wire_old_buffer(req, 0);
7752 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7756 t4_get_chan_txrate(sc, nrate, orate);
7758 if (sc->chip_params->nchan > 2) {
7759 sbuf_printf(sb, " channel 0 channel 1"
7760 " channel 2 channel 3\n");
7761 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
7762 nrate[0], nrate[1], nrate[2], nrate[3]);
7763 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
7764 orate[0], orate[1], orate[2], orate[3]);
7766 sbuf_printf(sb, " channel 0 channel 1\n");
7767 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
7768 nrate[0], nrate[1]);
7769 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
7770 orate[0], orate[1]);
7773 rc = sbuf_finish(sb);
7780 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7782 struct adapter *sc = arg1;
7787 rc = sysctl_wire_old_buffer(req, 0);
7791 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7795 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7798 t4_ulprx_read_la(sc, buf);
7801 sbuf_printf(sb, " Pcmd Type Message"
7803 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7804 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
7805 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7808 rc = sbuf_finish(sb);
7815 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
7817 struct adapter *sc = arg1;
7821 MPASS(chip_id(sc) >= CHELSIO_T5);
7823 rc = sysctl_wire_old_buffer(req, 0);
7827 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7831 v = t4_read_reg(sc, A_SGE_STAT_CFG);
7832 if (G_STATSOURCE_T5(v) == 7) {
7835 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
7837 sbuf_printf(sb, "total %d, incomplete %d",
7838 t4_read_reg(sc, A_SGE_STAT_TOTAL),
7839 t4_read_reg(sc, A_SGE_STAT_MATCH));
7840 } else if (mode == 1) {
7841 sbuf_printf(sb, "total %d, data overflow %d",
7842 t4_read_reg(sc, A_SGE_STAT_TOTAL),
7843 t4_read_reg(sc, A_SGE_STAT_MATCH));
7845 sbuf_printf(sb, "unknown mode %d", mode);
7848 rc = sbuf_finish(sb);
7855 sysctl_tc_params(SYSCTL_HANDLER_ARGS)
7857 struct adapter *sc = arg1;
7858 struct tx_sched_class *tc;
7859 struct t4_sched_class_params p;
7861 int i, rc, port_id, flags, mbps, gbps;
7863 rc = sysctl_wire_old_buffer(req, 0);
7867 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7871 port_id = arg2 >> 16;
7872 MPASS(port_id < sc->params.nports);
7873 MPASS(sc->port[port_id] != NULL);
7875 MPASS(i < sc->chip_params->nsched_cls);
7876 tc = &sc->port[port_id]->tc[i];
7878 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7884 end_synchronized_op(sc, LOCK_HELD);
7886 if ((flags & TX_SC_OK) == 0) {
7887 sbuf_printf(sb, "none");
7891 if (p.level == SCHED_CLASS_LEVEL_CL_WRR) {
7892 sbuf_printf(sb, "cl-wrr weight %u", p.weight);
7894 } else if (p.level == SCHED_CLASS_LEVEL_CL_RL)
7895 sbuf_printf(sb, "cl-rl");
7896 else if (p.level == SCHED_CLASS_LEVEL_CH_RL)
7897 sbuf_printf(sb, "ch-rl");
7903 if (p.ratemode == SCHED_CLASS_RATEMODE_REL) {
7904 /* XXX: top speed or actual link speed? */
7905 gbps = port_top_speed(sc->port[port_id]);
7906 sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps);
7908 else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) {
7909 switch (p.rateunit) {
7910 case SCHED_CLASS_RATEUNIT_BITS:
7911 mbps = p.maxrate / 1000;
7912 gbps = p.maxrate / 1000000;
7913 if (p.maxrate == gbps * 1000000)
7914 sbuf_printf(sb, " %uGbps", gbps);
7915 else if (p.maxrate == mbps * 1000)
7916 sbuf_printf(sb, " %uMbps", mbps);
7918 sbuf_printf(sb, " %uKbps", p.maxrate);
7920 case SCHED_CLASS_RATEUNIT_PKTS:
7921 sbuf_printf(sb, " %upps", p.maxrate);
7930 case SCHED_CLASS_MODE_CLASS:
7931 sbuf_printf(sb, " aggregate");
7933 case SCHED_CLASS_MODE_FLOW:
7934 sbuf_printf(sb, " per-flow");
7943 rc = sbuf_finish(sb);
7952 unit_conv(char *buf, size_t len, u_int val, u_int factor)
7954 u_int rem = val % factor;
7957 snprintf(buf, len, "%u", val / factor);
7959 while (rem % 10 == 0)
7961 snprintf(buf, len, "%u.%u", val / factor, rem);
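/*
 * unit_conv() renders val / factor as a decimal string with trailing
 * zeroes trimmed from the fractional part; e.g. unit_conv(buf,
 * sizeof(buf), 12500, 1000) produces "12.5", while an exact multiple
 * such as 12000 produces just "12".
 */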
7966 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
7968 struct adapter *sc = arg1;
7971 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
7973 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
7977 re = G_TIMERRESOLUTION(res);
7980 /* TCP timestamp tick */
7981 re = G_TIMESTAMPRESOLUTION(res);
7985 re = G_DELAYEDACKRESOLUTION(res);
7991 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
7993 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
7997 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
7999 struct adapter *sc = arg1;
8000 u_int res, dack_re, v;
8001 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8003 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8004 dack_re = G_DELAYEDACKRESOLUTION(res);
8005 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
8007 return (sysctl_handle_int(oidp, &v, 0, req));
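/*
 * cclk_ps is the core clock period in picoseconds (vpd.cclk is
 * presumably in kHz, so 10^9 / cclk gives ps).  Shifting it by the
 * relevant RESOLUTION field yields the duration of one TP tick in ps,
 * and dividing by 10^6 converts that to microseconds before it is
 * multiplied by the timer register value, as in the delayed-ACK
 * calculation above and the TP timers below.
 */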
8011 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
8013 struct adapter *sc = arg1;
8016 u_long tp_tick_us, v;
8017 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8019 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
8020 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
8021 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
8022 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
8024 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
8025 tp_tick_us = (cclk_ps << tre) / 1000000;
8027 if (reg == A_TP_INIT_SRTT)
8028 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
8030 v = tp_tick_us * t4_read_reg(sc, reg);
8032 return (sysctl_handle_long(oidp, &v, 0, req));
8037 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
8041 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
8042 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
8044 if (fconf & F_FRAGMENTATION)
8045 mode |= T4_FILTER_IP_FRAGMENT;
8047 if (fconf & F_MPSHITTYPE)
8048 mode |= T4_FILTER_MPS_HIT_TYPE;
8050 if (fconf & F_MACMATCH)
8051 mode |= T4_FILTER_MAC_IDX;
8053 if (fconf & F_ETHERTYPE)
8054 mode |= T4_FILTER_ETH_TYPE;
8056 if (fconf & F_PROTOCOL)
8057 mode |= T4_FILTER_IP_PROTO;
8060 mode |= T4_FILTER_IP_TOS;
8063 mode |= T4_FILTER_VLAN;
8065 if (fconf & F_VNIC_ID) {
8066 mode |= T4_FILTER_VNIC;
8068 mode |= T4_FILTER_IC_VNIC;
8072 mode |= T4_FILTER_PORT;
8075 mode |= T4_FILTER_FCoE;
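/*
 * The filter "mode" exposed to userland is a bitmap of match fields.
 * fconf mirrors the fields enabled in the compressed filter tuple (the
 * cached TP_VLAN_PRI_MAP) and iconf mirrors TP_INGRESS_CONFIG, where
 * F_VNIC appears to select whether the VNIC slot of the tuple carries
 * the PF/VF (T4_FILTER_IC_VNIC) or the outer VLAN.  The helpers below
 * convert a requested mode back into fconf/iconf values.
 */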
8081 mode_to_fconf(uint32_t mode)
8085 if (mode & T4_FILTER_IP_FRAGMENT)
8086 fconf |= F_FRAGMENTATION;
8088 if (mode & T4_FILTER_MPS_HIT_TYPE)
8089 fconf |= F_MPSHITTYPE;
8091 if (mode & T4_FILTER_MAC_IDX)
8092 fconf |= F_MACMATCH;
8094 if (mode & T4_FILTER_ETH_TYPE)
8095 fconf |= F_ETHERTYPE;
8097 if (mode & T4_FILTER_IP_PROTO)
8098 fconf |= F_PROTOCOL;
8100 if (mode & T4_FILTER_IP_TOS)
8103 if (mode & T4_FILTER_VLAN)
8106 if (mode & T4_FILTER_VNIC)
8109 if (mode & T4_FILTER_PORT)
8112 if (mode & T4_FILTER_FCoE)
8119 mode_to_iconf(uint32_t mode)
8122 if (mode & T4_FILTER_IC_VNIC)
8127 static int check_fspec_against_fconf_iconf(struct adapter *sc,
8128 struct t4_filter_specification *fs)
8130 struct tp_params *tpp = &sc->params.tp;
8133 if (fs->val.frag || fs->mask.frag)
8134 fconf |= F_FRAGMENTATION;
8136 if (fs->val.matchtype || fs->mask.matchtype)
8137 fconf |= F_MPSHITTYPE;
8139 if (fs->val.macidx || fs->mask.macidx)
8140 fconf |= F_MACMATCH;
8142 if (fs->val.ethtype || fs->mask.ethtype)
8143 fconf |= F_ETHERTYPE;
8145 if (fs->val.proto || fs->mask.proto)
8146 fconf |= F_PROTOCOL;
8148 if (fs->val.tos || fs->mask.tos)
8151 if (fs->val.vlan_vld || fs->mask.vlan_vld)
8154 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
8156 if (tpp->ingress_config & F_VNIC)
8160 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
8162 if ((tpp->ingress_config & F_VNIC) == 0)
8166 if (fs->val.iport || fs->mask.iport)
8169 if (fs->val.fcoe || fs->mask.fcoe)
8172 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
8179 get_filter_mode(struct adapter *sc, uint32_t *mode)
8181 struct tp_params *tpp = &sc->params.tp;
8184 * We trust the cached values of the relevant TP registers. This means
8185 * things work reliably only if writes to those registers are always via
8186 * t4_set_filter_mode.
8188 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);
8194 set_filter_mode(struct adapter *sc, uint32_t mode)
8196 struct tp_params *tpp = &sc->params.tp;
8197 uint32_t fconf, iconf;
8200 iconf = mode_to_iconf(mode);
8201 if ((iconf ^ tpp->ingress_config) & F_VNIC) {
8203 * For now we just complain if A_TP_INGRESS_CONFIG is not
8204 * already set to the correct value for the requested filter
8205 * mode. It's not clear if it's safe to write to this register
8206 * on the fly. (And we trust the cached value of the register).
8211 fconf = mode_to_fconf(mode);
8213 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
8218 if (sc->tids.ftids_in_use > 0) {
8224 if (uld_active(sc, ULD_TOM)) {
8230 rc = -t4_set_filter_mode(sc, fconf);
8232 end_synchronized_op(sc, LOCK_HELD);
8236 static inline uint64_t
8237 get_filter_hits(struct adapter *sc, uint32_t fid)
8241 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
8242 (fid + sc->tids.ftid_base) * TCB_SIZE;
8247 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
8248 return (be64toh(hits));
8252 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
8253 return (be32toh(hits));
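/*
 * Filter hit counts live in the filter's TCB rather than in a
 * dedicated counter: the TCB address is TP_CMM_TCB_BASE plus
 * (ftid * TCB_SIZE) and the counter is read through memory window 0.
 * The two branches above reflect an apparently chip-dependent
 * difference in the counter's width and offset (64 bits at offset 16
 * vs. 32 bits at offset 24).
 */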
8258 get_filter(struct adapter *sc, struct t4_filter *t)
8260 int i, rc, nfilters = sc->tids.nftids;
8261 struct filter_entry *f;
8263 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
8268 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
8269 t->idx >= nfilters) {
8270 t->idx = 0xffffffff;
8274 f = &sc->tids.ftid_tab[t->idx];
8275 for (i = t->idx; i < nfilters; i++, f++) {
8278 t->l2tidx = f->l2t ? f->l2t->idx : 0;
8279 t->smtidx = f->smtidx;
8281 t->hits = get_filter_hits(sc, t->idx);
8283 t->hits = UINT64_MAX;
8290 t->idx = 0xffffffff;
8292 end_synchronized_op(sc, LOCK_HELD);
8297 set_filter(struct adapter *sc, struct t4_filter *t)
8299 unsigned int nfilters, nports;
8300 struct filter_entry *f;
8303 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
8307 nfilters = sc->tids.nftids;
8308 nports = sc->params.nports;
8310 if (nfilters == 0) {
8315 if (t->idx >= nfilters) {
8320 /* Validate against the global filter mode and ingress config */
8321 rc = check_fspec_against_fconf_iconf(sc, &t->fs);
8325 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
8330 if (t->fs.val.iport >= nports) {
8335 /* Can't specify an iq if not steering to it */
8336 if (!t->fs.dirsteer && t->fs.iq) {
8341 /* IPv6 filter idx must be 4 aligned */
8342 if (t->fs.type == 1 &&
8343 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
8348 if (!(sc->flags & FULL_INIT_DONE) &&
8349 ((rc = adapter_full_init(sc)) != 0))
8352 if (sc->tids.ftid_tab == NULL) {
8353 KASSERT(sc->tids.ftids_in_use == 0,
8354 ("%s: no memory allocated but filters_in_use > 0",
8357 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
8358 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
8359 if (sc->tids.ftid_tab == NULL) {
8363 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
8366 for (i = 0; i < 4; i++) {
8367 f = &sc->tids.ftid_tab[t->idx + i];
8369 if (f->pending || f->valid) {
8378 if (t->fs.type == 0)
8382 f = &sc->tids.ftid_tab[t->idx];
8385 rc = set_filter_wr(sc, t->idx);
8387 end_synchronized_op(sc, 0);
8390 mtx_lock(&sc->tids.ftid_lock);
8392 if (f->pending == 0) {
8393 rc = f->valid ? 0 : EIO;
8397 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
8398 PCATCH, "t4setfw", 0)) {
8403 mtx_unlock(&sc->tids.ftid_lock);
8409 del_filter(struct adapter *sc, struct t4_filter *t)
8411 unsigned int nfilters;
8412 struct filter_entry *f;
8415 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
8419 nfilters = sc->tids.nftids;
8421 if (nfilters == 0) {
8426 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
8427 t->idx >= nfilters) {
8432 if (!(sc->flags & FULL_INIT_DONE)) {
8437 f = &sc->tids.ftid_tab[t->idx];
8449 t->fs = f->fs; /* extra info for the caller */
8450 rc = del_filter_wr(sc, t->idx);
8454 end_synchronized_op(sc, 0);
8457 mtx_lock(&sc->tids.ftid_lock);
8459 if (f->pending == 0) {
8460 rc = f->valid ? EIO : 0;
8464 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
8465 PCATCH, "t4delfw", 0)) {
8470 mtx_unlock(&sc->tids.ftid_lock);
8477 clear_filter(struct filter_entry *f)
8480 t4_l2t_release(f->l2t);
8482 bzero(f, sizeof (*f));
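/*
 * set_filter_wr() pushes a filter to the hardware.  If the filter
 * rewrites the destination MAC or VLAN it first allocates an L2T
 * switching entry, then it builds a FW_FILTER_WR on the management
 * queue; the entry remains "pending" until the firmware's reply is
 * processed by t4_filter_rpl() further down.
 */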
8486 set_filter_wr(struct adapter *sc, int fidx)
8488 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8489 struct fw_filter_wr *fwr;
8490 unsigned int ftid, vnic_vld, vnic_vld_mask;
8491 struct wrq_cookie cookie;
8493 ASSERT_SYNCHRONIZED_OP(sc);
8495 if (f->fs.newdmac || f->fs.newvlan) {
8496 /* This filter needs an L2T entry; allocate one. */
8497 f->l2t = t4_l2t_alloc_switching(sc->l2t);
8500 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
8502 t4_l2t_release(f->l2t);
8508 /* Already validated against fconf, iconf */
8509 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
8510 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
8511 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
8515 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
8520 ftid = sc->tids.ftid_base + fidx;
8522 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8525 bzero(fwr, sizeof(*fwr));
8527 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
8528 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
8530 htobe32(V_FW_FILTER_WR_TID(ftid) |
8531 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
8532 V_FW_FILTER_WR_NOREPLY(0) |
8533 V_FW_FILTER_WR_IQ(f->fs.iq));
8534 fwr->del_filter_to_l2tix =
8535 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
8536 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
8537 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
8538 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
8539 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
8540 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
8541 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
8542 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
8543 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
8544 f->fs.newvlan == VLAN_REWRITE) |
8545 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
8546 f->fs.newvlan == VLAN_REWRITE) |
8547 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
8548 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
8549 V_FW_FILTER_WR_PRIO(f->fs.prio) |
8550 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
8551 fwr->ethtype = htobe16(f->fs.val.ethtype);
8552 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
8553 fwr->frag_to_ovlan_vldm =
8554 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
8555 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
8556 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
8557 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
8558 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
8559 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
8561 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
8562 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
8563 fwr->maci_to_matchtypem =
8564 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
8565 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
8566 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
8567 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
8568 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
8569 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
8570 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
8571 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
8572 fwr->ptcl = f->fs.val.proto;
8573 fwr->ptclm = f->fs.mask.proto;
8574 fwr->ttyp = f->fs.val.tos;
8575 fwr->ttypm = f->fs.mask.tos;
8576 fwr->ivlan = htobe16(f->fs.val.vlan);
8577 fwr->ivlanm = htobe16(f->fs.mask.vlan);
8578 fwr->ovlan = htobe16(f->fs.val.vnic);
8579 fwr->ovlanm = htobe16(f->fs.mask.vnic);
8580 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
8581 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
8582 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
8583 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
8584 fwr->lp = htobe16(f->fs.val.dport);
8585 fwr->lpm = htobe16(f->fs.mask.dport);
8586 fwr->fp = htobe16(f->fs.val.sport);
8587 fwr->fpm = htobe16(f->fs.mask.sport);
8589 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
8592 sc->tids.ftids_in_use++;
8594 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8599 del_filter_wr(struct adapter *sc, int fidx)
8601 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8602 struct fw_filter_wr *fwr;
8604 struct wrq_cookie cookie;
8606 ftid = sc->tids.ftid_base + fidx;
8608 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8611 bzero(fwr, sizeof (*fwr));
8613 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
8616 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
8621 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8623 struct adapter *sc = iq->adapter;
8624 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
8625 unsigned int idx = GET_TID(rpl);
8627 struct filter_entry *f;
8629 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
8631 MPASS(iq == &sc->sge.fwq);
8632 MPASS(is_ftid(sc, idx));
8634 idx -= sc->tids.ftid_base;
8635 f = &sc->tids.ftid_tab[idx];
8636 rc = G_COOKIE(rpl->cookie);
8638 mtx_lock(&sc->tids.ftid_lock);
8639 if (rc == FW_FILTER_WR_FLT_ADDED) {
8640 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
8642 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
8643 f->pending = 0; /* asynchronous setup completed */
8646 if (rc != FW_FILTER_WR_FLT_DELETED) {
8647 /* Add or delete failed, display an error */
8649 "filter %u setup failed with error %u\n",
8654 sc->tids.ftids_in_use--;
8656 wakeup(&sc->tids.ftid_tab);
8657 mtx_unlock(&sc->tids.ftid_lock);
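/*
 * Filter setup and teardown are asynchronous: set_filter() and
 * del_filter() sleep on sc->tids.ftid_tab after posting their work
 * requests, and this reply handler uses the cookie in the
 * CPL_SET_TCB_RPL to tell an add from a delete, clears f->pending,
 * updates the valid state and ftids_in_use accordingly, and wakes the
 * sleepers.
 */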
8663 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8666 MPASS(iq->set_tcb_rpl != NULL);
8667 return (iq->set_tcb_rpl(iq, rss, m));
8671 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8674 MPASS(iq->l2t_write_rpl != NULL);
8675 return (iq->l2t_write_rpl(iq, rss, m));
8679 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8683 if (cntxt->cid > M_CTXTQID)
8686 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8687 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8690 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8694 if (sc->flags & FW_OK) {
8695 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8702 * Read via firmware failed or wasn't even attempted. Read directly via
8705 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
8707 end_synchronized_op(sc, 0);
8712 load_fw(struct adapter *sc, struct t4_data *fw)
8717 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
8721 if (sc->flags & FULL_INIT_DONE) {
8726 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
8727 if (fw_data == NULL) {
8732 rc = copyin(fw->data, fw_data, fw->len);
8734 rc = -t4_load_fw(sc, fw_data, fw->len);
8736 free(fw_data, M_CXGBE);
8738 end_synchronized_op(sc, 0);
8743 load_cfg(struct adapter *sc, struct t4_data *cfg)
8746 uint8_t *cfg_data = NULL;
8748 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
8752 if (cfg->len == 0) {
8754 rc = -t4_load_cfg(sc, NULL, 0);
8758 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
8759 if (cfg_data == NULL) {
8764 rc = copyin(cfg->data, cfg_data, cfg->len);
8766 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
8768 free(cfg_data, M_CXGBE);
8770 end_synchronized_op(sc, 0);
8774 #define MAX_READ_BUF_SIZE (128 * 1024)
8776 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
8778 uint32_t addr, remaining, n;
8783 rc = validate_mem_range(sc, mr->addr, mr->len);
8787 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
8789 remaining = mr->len;
8790 dst = (void *)mr->data;
8793 n = min(remaining, MAX_READ_BUF_SIZE);
8794 read_via_memwin(sc, 2, addr, buf, n);
8796 rc = copyout(buf, dst, n);
8808 #undef MAX_READ_BUF_SIZE
8811 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
8815 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
8818 if (i2cd->len > sizeof(i2cd->data))
8821 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
8824 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
8825 i2cd->offset, i2cd->len, &i2cd->data[0]);
8826 end_synchronized_op(sc, 0);
8832 in_range(int val, int lo, int hi)
8835 return (val < 0 || (val <= hi && val >= lo));
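/*
 * Note that a negative value always passes: the scheduler code below
 * uses negative fields to mean "unset", so in_range(-1, 0, 100) is
 * true while in_range(101, 0, 100) is false.
 */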
8839 set_sched_class_config(struct adapter *sc, int minmax)
8846 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
8849 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
8850 end_synchronized_op(sc, 0);
8856 set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
8859 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
8860 struct port_info *pi;
8861 struct tx_sched_class *tc;
8863 if (p->level == SCHED_CLASS_LEVEL_CL_RL)
8864 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
8865 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
8866 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
8867 else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
8868 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
8872 if (p->mode == SCHED_CLASS_MODE_CLASS)
8873 fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
8874 else if (p->mode == SCHED_CLASS_MODE_FLOW)
8875 fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
8879 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
8880 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
8881 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
8882 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
8886 if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
8887 fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
8888 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
8889 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
8893 /* Vet our parameters ... */
8894 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
8897 pi = sc->port[sc->chan_map[p->channel]];
8900 MPASS(pi->tx_chan == p->channel);
8901 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */
8903 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls) ||
8904 !in_range(p->minrate, 0, top_speed) ||
8905 !in_range(p->maxrate, 0, top_speed) ||
8906 !in_range(p->weight, 0, 100))
8910 * Translate any unset parameters into the firmware's
8911 * nomenclature and/or fail the call if the parameters
8914 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
8919 if (p->maxrate < 0) {
8920 if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
8921 p->level == SCHED_CLASS_LEVEL_CH_RL)
8926 if (p->weight < 0) {
8927 if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
8932 if (p->pktsize < 0) {
8933 if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
8934 p->level == SCHED_CLASS_LEVEL_CH_RL)
8940 rc = begin_synchronized_op(sc, NULL,
8941 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
8944 tc = &pi->tc[p->cl];
8946 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
8947 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
8948 p->weight, p->pktsize, sleep_ok);
8950 tc->flags |= TX_SC_OK;
8953 * Unknown state at this point, see tc->params for what was
8956 tc->flags &= ~TX_SC_OK;
8958 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
8964 t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p)
8967 if (p->type != SCHED_CLASS_TYPE_PACKET)
8970 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
8971 return (set_sched_class_config(sc, p->u.config.minmax));
8973 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
8974 return (set_sched_class_params(sc, &p->u.params, 1));
8980 t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
8982 struct port_info *pi = NULL;
8984 struct sge_txq *txq;
8985 uint32_t fw_mnem, fw_queue, fw_class;
8988 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
8992 if (p->port >= sc->params.nports) {
8997 /* XXX: Only supported for the main VI. */
8998 pi = sc->port[p->port];
9000 if (!(vi->flags & VI_INIT_DONE)) {
9001 /* tx queues not set up yet */
9006 if (!in_range(p->queue, 0, vi->ntxq - 1) ||
9007 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) {
9013 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
9014 * Scheduling Class in this case).
9016 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
9017 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
9018 fw_class = p->cl < 0 ? 0xffffffff : p->cl;
9021 * If op.queue is non-negative, then we're only changing the scheduling
9022 * on a single specified TX queue.
9024 if (p->queue >= 0) {
9025 txq = &sc->sge.txq[vi->first_txq + p->queue];
9026 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
9027 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
9033 * Change the scheduling on all the TX queues for the
9036 for_each_txq(vi, i, txq) {
9037 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
9038 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
9046 end_synchronized_op(sc, 0);
9051 t4_os_find_pci_capability(struct adapter *sc, int cap)
9055 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
9059 t4_os_pci_save_state(struct adapter *sc)
9062 struct pci_devinfo *dinfo;
9065 dinfo = device_get_ivars(dev);
9067 pci_cfg_save(dev, dinfo, 0);
9072 t4_os_pci_restore_state(struct adapter *sc)
9075 struct pci_devinfo *dinfo;
9078 dinfo = device_get_ivars(dev);
9080 pci_cfg_restore(dev, dinfo);
9085 t4_os_portmod_changed(const struct adapter *sc, int idx)
9087 struct port_info *pi = sc->port[idx];
9091 static const char *mod_str[] = {
9092 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
9095 for_each_vi(pi, v, vi) {
9096 build_medialist(pi, &vi->media);
9099 ifp = pi->vi[0].ifp;
9100 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
9101 if_printf(ifp, "transceiver unplugged.\n");
9102 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
9103 if_printf(ifp, "unknown transceiver inserted.\n");
9104 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
9105 if_printf(ifp, "unsupported transceiver inserted.\n");
9106 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
9107 if_printf(ifp, "%s transceiver inserted.\n",
9108 mod_str[pi->mod_type]);
9110 if_printf(ifp, "transceiver (type %d) inserted.\n",
9116 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
9118 struct port_info *pi = sc->port[idx];
9123 for_each_vi(pi, v, vi) {
9129 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
9130 if_link_state_change(ifp, LINK_STATE_UP);
9132 if_link_state_change(ifp, LINK_STATE_DOWN);
9138 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
9142 sx_slock(&t4_list_lock);
9143 SLIST_FOREACH(sc, &t4_list, link) {
9145 * func should not make any assumptions about what state sc is
9146 * in - the only guarantee is that sc->sc_lock is a valid lock.
9150 sx_sunlock(&t4_list_lock);
9154 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
9158 struct adapter *sc = dev->si_drv1;
9160 rc = priv_check(td, PRIV_DRIVER);
9165 case CHELSIO_T4_GETREG: {
9166 struct t4_reg *edata = (struct t4_reg *)data;
9168 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9171 if (edata->size == 4)
9172 edata->val = t4_read_reg(sc, edata->addr);
9173 else if (edata->size == 8)
9174 edata->val = t4_read_reg64(sc, edata->addr);
9180 case CHELSIO_T4_SETREG: {
9181 struct t4_reg *edata = (struct t4_reg *)data;
9183 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9186 if (edata->size == 4) {
9187 if (edata->val & 0xffffffff00000000)
9189 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
9190 } else if (edata->size == 8)
9191 t4_write_reg64(sc, edata->addr, edata->val);
9196 case CHELSIO_T4_REGDUMP: {
9197 struct t4_regdump *regs = (struct t4_regdump *)data;
9198 int reglen = t4_get_regs_len(sc);
9201 if (regs->len < reglen) {
9202 regs->len = reglen; /* hint to the caller */
9207 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
9208 get_regs(sc, regs, buf);
9209 rc = copyout(buf, regs->data, reglen);
9213 case CHELSIO_T4_GET_FILTER_MODE:
9214 rc = get_filter_mode(sc, (uint32_t *)data);
9216 case CHELSIO_T4_SET_FILTER_MODE:
9217 rc = set_filter_mode(sc, *(uint32_t *)data);
9219 case CHELSIO_T4_GET_FILTER:
9220 rc = get_filter(sc, (struct t4_filter *)data);
9222 case CHELSIO_T4_SET_FILTER:
9223 rc = set_filter(sc, (struct t4_filter *)data);
9225 case CHELSIO_T4_DEL_FILTER:
9226 rc = del_filter(sc, (struct t4_filter *)data);
9228 case CHELSIO_T4_GET_SGE_CONTEXT:
9229 rc = get_sge_context(sc, (struct t4_sge_context *)data);
9231 case CHELSIO_T4_LOAD_FW:
9232 rc = load_fw(sc, (struct t4_data *)data);
9234 case CHELSIO_T4_GET_MEM:
9235 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
9237 case CHELSIO_T4_GET_I2C:
9238 rc = read_i2c(sc, (struct t4_i2c_data *)data);
9240 case CHELSIO_T4_CLEAR_STATS: {
9242 u_int port_id = *(uint32_t *)data;
9243 struct port_info *pi;
9246 if (port_id >= sc->params.nports)
9248 pi = sc->port[port_id];
9253 t4_clr_port_stats(sc, pi->tx_chan);
9254 pi->tx_parse_error = 0;
9255 mtx_lock(&sc->reg_lock);
9256 for_each_vi(pi, v, vi) {
9257 if (vi->flags & VI_INIT_DONE)
9258 t4_clr_vi_stats(sc, vi->viid);
9260 mtx_unlock(&sc->reg_lock);
9263 * Since this command accepts a port, clear stats for
9264 * all VIs on this port.
9266 for_each_vi(pi, v, vi) {
9267 if (vi->flags & VI_INIT_DONE) {
9268 struct sge_rxq *rxq;
9269 struct sge_txq *txq;
9270 struct sge_wrq *wrq;
9272 for_each_rxq(vi, i, rxq) {
9273 #if defined(INET) || defined(INET6)
9274 rxq->lro.lro_queued = 0;
9275 rxq->lro.lro_flushed = 0;
9278 rxq->vlan_extraction = 0;
9281 for_each_txq(vi, i, txq) {
9284 txq->vlan_insertion = 0;
9288 txq->txpkts0_wrs = 0;
9289 txq->txpkts1_wrs = 0;
9290 txq->txpkts0_pkts = 0;
9291 txq->txpkts1_pkts = 0;
9292 mp_ring_reset_stats(txq->r);
9296 /* nothing to clear for each ofld_rxq */
9298 for_each_ofld_txq(vi, i, wrq) {
9299 wrq->tx_wrs_direct = 0;
9300 wrq->tx_wrs_copied = 0;
9304 if (IS_MAIN_VI(vi)) {
9305 wrq = &sc->sge.ctrlq[pi->port_id];
9306 wrq->tx_wrs_direct = 0;
9307 wrq->tx_wrs_copied = 0;
9313 case CHELSIO_T4_SCHED_CLASS:
9314 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
9316 case CHELSIO_T4_SCHED_QUEUE:
9317 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
9319 case CHELSIO_T4_GET_TRACER:
9320 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
9322 case CHELSIO_T4_SET_TRACER:
9323 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
9325 case CHELSIO_T4_LOAD_CFG:
9326 rc = load_cfg(sc, (struct t4_data *)data);
9336 t4_db_full(struct adapter *sc)
9339 CXGBE_UNIMPLEMENTED(__func__);
9343 t4_db_dropped(struct adapter *sc)
9346 CXGBE_UNIMPLEMENTED(__func__);
9351 toe_capability(struct vi_info *vi, int enable)
9354 struct port_info *pi = vi->pi;
9355 struct adapter *sc = pi->adapter;
9357 ASSERT_SYNCHRONIZED_OP(sc);
9359 if (!is_offload(sc))
9363 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
9364 /* TOE is already enabled. */
9369 * We need the port's queues around so that we're able to send
9370 * and receive CPLs to/from the TOE even if the ifnet for this
9371 * port has never been UP'd administratively.
9373 if (!(vi->flags & VI_INIT_DONE)) {
9374 rc = vi_full_init(vi);
9378 if (!(pi->vi[0].flags & VI_INIT_DONE)) {
9379 rc = vi_full_init(&pi->vi[0]);
9384 if (isset(&sc->offload_map, pi->port_id)) {
9385 /* TOE is enabled on another VI of this port. */
9390 if (!uld_active(sc, ULD_TOM)) {
9391 rc = t4_activate_uld(sc, ULD_TOM);
9394 "You must kldload t4_tom.ko before trying "
9395 "to enable TOE on a cxgbe interface.\n");
9399 KASSERT(sc->tom_softc != NULL,
9400 ("%s: TOM activated but softc NULL", __func__));
9401 KASSERT(uld_active(sc, ULD_TOM),
9402 ("%s: TOM activated but flag not set", __func__));
9405 /* Activate iWARP and iSCSI too, if the modules are loaded. */
9406 if (!uld_active(sc, ULD_IWARP))
9407 (void) t4_activate_uld(sc, ULD_IWARP);
9408 if (!uld_active(sc, ULD_ISCSI))
9409 (void) t4_activate_uld(sc, ULD_ISCSI);
9412 setbit(&sc->offload_map, pi->port_id);
9416 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
9419 KASSERT(uld_active(sc, ULD_TOM),
9420 ("%s: TOM never initialized?", __func__));
9421 clrbit(&sc->offload_map, pi->port_id);
9428 * Add an upper layer driver to the global list.
9431 t4_register_uld(struct uld_info *ui)
9436 sx_xlock(&t4_uld_list_lock);
9437 SLIST_FOREACH(u, &t4_uld_list, link) {
9438 if (u->uld_id == ui->uld_id) {
9444 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
9447 sx_xunlock(&t4_uld_list_lock);
9452 t4_unregister_uld(struct uld_info *ui)
9457 sx_xlock(&t4_uld_list_lock);
9459 SLIST_FOREACH(u, &t4_uld_list, link) {
9461 if (ui->refcount > 0) {
9466 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
9472 sx_xunlock(&t4_uld_list_lock);
9477 t4_activate_uld(struct adapter *sc, int id)
9480 struct uld_info *ui;
9482 ASSERT_SYNCHRONIZED_OP(sc);
9484 if (id < 0 || id > ULD_MAX)
9486 rc = EAGAIN; /* kldload the module with this ULD and try again. */
9488 sx_slock(&t4_uld_list_lock);
9490 SLIST_FOREACH(ui, &t4_uld_list, link) {
9491 if (ui->uld_id == id) {
9492 if (!(sc->flags & FULL_INIT_DONE)) {
9493 rc = adapter_full_init(sc);
9498 rc = ui->activate(sc);
9500 setbit(&sc->active_ulds, id);
9507 sx_sunlock(&t4_uld_list_lock);
9513 t4_deactivate_uld(struct adapter *sc, int id)
9516 struct uld_info *ui;
9518 ASSERT_SYNCHRONIZED_OP(sc);
9520 if (id < 0 || id > ULD_MAX)
9524 sx_slock(&t4_uld_list_lock);
9526 SLIST_FOREACH(ui, &t4_uld_list, link) {
9527 if (ui->uld_id == id) {
9528 rc = ui->deactivate(sc);
9530 clrbit(&sc->active_ulds, id);
9537 sx_sunlock(&t4_uld_list_lock);
9543 uld_active(struct adapter *sc, int uld_id)
9546 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9548 return (isset(&sc->active_ulds, uld_id));
9553 * t = ptr to tunable.
9554 * nc = number of CPUs.
9555 * c = compiled in default for that tunable.
9558 calculate_nqueues(int *t, int nc, const int c)
9564 nq = *t < 0 ? -*t : c;
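/*
 * A tunable that is already positive is presumably honored as-is by
 * the elided early return; zero falls back to the compiled-in default
 * c, and a negative value is treated as a request for -*t queues, with
 * the result expected to be capped by the number of CPUs (nc).
 */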
9569 * Come up with reasonable defaults for some of the tunables, provided they're
9570 * not set by the user (in which case we'll use the values as is).
9573 tweak_tunables(void)
9575 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
9577 if (t4_ntxq10g < 1) {
9579 t4_ntxq10g = rss_getnumbuckets();
9581 calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G);
9585 if (t4_ntxq1g < 1) {
9587 /* XXX: way too many for 1GbE? */
9588 t4_ntxq1g = rss_getnumbuckets();
9590 calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G);
9594 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
9596 if (t4_nrxq10g < 1) {
9598 t4_nrxq10g = rss_getnumbuckets();
9600 calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G);
9604 if (t4_nrxq1g < 1) {
9606 /* XXX: way too many for 1GbE? */
9607 t4_nrxq1g = rss_getnumbuckets();
9609 calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G);
9613 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
9616 calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G);
9617 calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G);
9618 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
9619 calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G);
9620 calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G);
9621 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
9623 if (t4_toecaps_allowed == -1)
9624 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
9626 if (t4_rdmacaps_allowed == -1) {
9627 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
9628 FW_CAPS_CONFIG_RDMA_RDMAC;
9631 if (t4_iscsicaps_allowed == -1) {
9632 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
9633 FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
9634 FW_CAPS_CONFIG_ISCSI_T10DIF;
9637 if (t4_toecaps_allowed == -1)
9638 t4_toecaps_allowed = 0;
9640 if (t4_rdmacaps_allowed == -1)
9641 t4_rdmacaps_allowed = 0;
9643 if (t4_iscsicaps_allowed == -1)
9644 t4_iscsicaps_allowed = 0;
9648 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
9649 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
9652 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
9653 t4_tmr_idx_10g = TMR_IDX_10G;
9655 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
9656 t4_pktc_idx_10g = PKTC_IDX_10G;
9658 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
9659 t4_tmr_idx_1g = TMR_IDX_1G;
9661 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
9662 t4_pktc_idx_1g = PKTC_IDX_1G;
9664 if (t4_qsize_txq < 128)
9667 if (t4_qsize_rxq < 128)
9669 while (t4_qsize_rxq & 7)
9672 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
#ifdef DDB
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);

	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];

		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	/* Restore the memory window to what it was. */
	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}
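/*
 * Worked example for the address math above (illustrative numbers; assumes a
 * 128-byte TCB and a T5/T6 part where the window start must be 128B aligned):
 * if A_TP_CMM_TCB_BASE reads 0x1000000 and tid = 100, then
 *	tcb_addr = 0x1000000 + 100 * 128 = 0x1003200
 *	win_pos  = 0x1003200 & ~0x7f     = 0x1003200
 *	off      = tcb_addr - win_pos    = 0
 * and the loop reads 4 * 8 * 4 = 128 bytes of TCB through memory window 2.
 */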
static void
t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;	/* end of log */

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)
		return;

	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			return;

		if (e.timestamp == 0)
			return;

		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d %15ju %8s %8s ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}
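/*
 * Sample output line from the loop above (fabricated entry; the level and
 * facility names come from devlog_level_strings and devlog_facility_strings,
 * the message text from the firmware's e.fmt and parameters):
 *	      1234   1234567890123     INFO     PORT link up on port 0
 */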
static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t;
	bool valid;

	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}
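/*
 * Example (hypothetical nexus name): from the in-kernel debugger,
 *	db> show t4 devlog t4nex0
 * dumps the firmware device log of the first T4 adapter.
 */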
DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t, tid;
	bool valid;

	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
		if (t == tNUMBER) {
			tid = db_tok_number;
			valid = true;
		}
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}
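/*
 * Example (hypothetical nexus name and tid): from the in-kernel debugger,
 *	db> show t4 tcb t4nex0 1234
 * dumps the TCB of tid 1234 on adapter t4nex0 via t4_dump_tcb().
 */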
#endif /* DDB */

static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
			t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}
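/*
 * Example of the MOD_UNLOAD path above (illustrative; assumes the module is
 * packaged as if_cxgbe.ko): "kldunload if_cxgbe" returns EBUSY ("Device
 * busy") while any adapter is still on t4_list or any ULD is registered, and
 * only succeeds once all ports are detached and no clusters with a custom
 * free routine remain outstanding.
 */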
static devclass_t t4_devclass, t5_devclass, t6_devclass;
static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
MODULE_VERSION(cc, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
MODULE_VERSION(vcc, 1);
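/*
 * Example (illustrative; loader.conf syntax assumed, module name per the
 * cxgbe(4) build that packages the drivers declared above as if_cxgbe.ko):
 *
 *	# /boot/loader.conf
 *	if_cxgbe_load="YES"
 *
 * loads the t4nex/t5nex/t6nex nexus drivers together with their
 * cxgbe/cxl/cc port and vcxgbe/vcxl/vcc VI drivers at boot.
 */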