/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#include <crypto/rijndael/rijndael.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"
#include "t4_smt.h"
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};

/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif
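
/*
 * Illustrative sketch only (not a code path in this file): code that walks
 * t4_list and needs per-adapter state honors the lock order above by taking
 * t4_list_lock before the adapter lock.
 *
 *	struct adapter *sc;
 *
 *	sx_slock(&t4_list_lock);
 *	SLIST_FOREACH(sc, &t4_list, link) {
 *		ADAPTER_LOCK(sc);
 *		// ... inspect or update *sc ...
 *		ADAPTER_UNLOCK(sc);
 *	}
 *	sx_sunlock(&t4_list_lock);
 */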
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
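
/*
 * For example, tunables are set in loader.conf(5) before the module is
 * loaded.  A minimal sketch (the values are illustrative, not
 * recommendations):
 *
 *	hw.cxgbe.ntxq="8"
 *	hw.cxgbe.nrxq="8"
 *	hw.cxgbe.fw_install="1"
 */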
/*
 * Number of queues for tx and rx, NIC and offload.
 */
#define NTXQ 16
int t4_ntxq = -NTXQ;
TUNABLE_INT("hw.cxgbe.ntxq", &t4_ntxq);
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */

#define NRXQ 8
int t4_nrxq = -NRXQ;
TUNABLE_INT("hw.cxgbe.nrxq", &t4_nrxq);
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
TUNABLE_INT("hw.cxgbe.nofldtxq", &t4_nofldtxq);

#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
TUNABLE_INT("hw.cxgbe.nofldrxq", &t4_nofldrxq);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_ofld", &t4_tmr_idx_ofld);

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_ofld", &t4_pktc_idx_ofld);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
#endif
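
/*
 * For example, to override only the first few backoffs and leave the rest at
 * the chip/fw default (a sketch only; the values are illustrative):
 *
 *	hw.cxgbe.toe.rexmt_backoff.0="1"
 *	hw.cxgbe.toe.rexmt_backoff.1="2"
 *	hw.cxgbe.toe.rexmt_backoff.2="4"
 */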
#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for ports.
 */
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx", &t4_tmr_idx);
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx", &t4_pktc_idx);
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
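
/*
 * The default (INTR_MSIX | INTR_MSI | INTR_INTX = 7) allows all three; e.g.
 * hw.cxgbe.interrupt_types="2" in loader.conf(5) would restrict the driver
 * to plain MSI (bit 1, per the comment above).  Illustrative example only.
 */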
/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF	"default"
#define BUILTIN_CF	"built-in"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
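
/*
 * E.g. hw.cxgbe.config_file="flash" selects the FLASH_CF special name; the
 * special names pick a configuration source (default, built-in, the card's
 * flash, etc.) rather than an arbitrary file.  Illustrative note; see
 * prep_firmware()/partition_resources() for how the name is consumed.
 */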
/*
 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
 *                 negotiated settings will override rx_pause/tx_pause.
 *                 Otherwise rx_pause/tx_pause are applied forcibly.
 */
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
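
/*
 * With rx_pause, tx_pause, and pause_autoneg on bits 0, 1, and 2, the default
 * above is 7.  E.g. hw.cxgbe.pause_settings="0" in loader.conf(5) would turn
 * all three off, and "3" would force rx_pause/tx_pause with PAUSE
 * autonegotiation disabled.  Illustrative arithmetic only.
 */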
/*
 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
 * -1 to run with the firmware default.  Same as FEC_AUTO (bit 5)
 * 0 to disable FEC.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 * 0 to disable.
 * 1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
	FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
	FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 * 0: disable RO.
 * 1: enable RO.
 * 2: leave RO alone.
 */
static int pcie_relaxed_ordering = -1;
TUNABLE_INT("hw.cxgbe.pcie_relaxed_ordering", &pcie_relaxed_ordering);
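
/*
 * Note: only the values 0 and 1 cause the driver to touch the PCIe Device
 * Control register at attach time; anything else (including the -1 default)
 * leaves whatever the system already set up alone.  See the
 * pcie_relaxed_ordering checks in t4_attach() below.
 */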
static int t4_panic_on_fatal_err = 0;
TUNABLE_INT("hw.cxgbe.panic_on_fatal_err", &t4_panic_on_fatal_err);

#ifdef TCP_OFFLOAD
/*
 * TOE tunables.
 */
static int t4_cop_managed_offloading = 0;
TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading);
#endif

/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t num_vis;	/* number of VIs for each port */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t ntxq;		/* # of NIC txq's for each port */
	uint16_t nrxq;		/* # of NIC rxq's for each port */
	uint16_t nofldtxq;	/* # of TOE/ETHOFLD txq's for each port */
	uint16_t nofldrxq;	/* # of TOE rxq's for each port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
	{0x5418, "Chelsio T540-BT"},		/* 4 x 10GBaseT */
	{0x5419, "Chelsio T540-LP-BT"},		/* 4 x 10GBaseT */
	{0x541a, "Chelsio T540-SO-BT"},		/* 4 x 10GBaseT, nomem */
	{0x541b, "Chelsio T540-SO-CR"},		/* 4 x 10G, nomem */
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Custom T6225-CR"},
	{0x6481, "Custom T62100-CR"},
	{0x6482, "Custom T6225-CR"},
	{0x6483, "Custom T62100-CR"},
	{0x6484, "Custom T64100-CR"},
	{0x6485, "Custom T6240-SO"},
	{0x6486, "Custom T6225-SO-CR"},
	{0x6487, "Custom T6225-CR"},
};

#ifdef TCP_OFFLOAD
/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}
static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, rqidx, tqidx, nports;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint32_t *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	int ofld_tqidx;
#endif
#ifdef TCP_OFFLOAD
	int ofld_rqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
		if (pcie_relaxed_ordering == 0 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		} else if (pcie_relaxed_ordering == 1 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		}
	}
	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	sc->policy = NULL;
	rw_init(&sc->policy_lock, "connection offload policy");

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}
	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */
	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		PORT_LOCK(pi);
		init_link_config(pi);
		fixup_link_config(pi);
		build_medialist(pi);
		if (fixed_ifmedia(pi))
			pi->flags |= FIXED_IFMEDIA;
		PORT_UNLOCK(pi);

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}
	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	nports = sc->params.nports;
	rc = cfg_itype_and_nqueues(sc, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	num_vis = iaq.num_vis;
	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = nports * iaq.nrxq;
	s->ntxq = nports * iaq.ntxq;
	if (num_vis > 1) {
		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += nports;		/* ctrl queues: 1 per port */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (is_offload(sc) || is_ethoffload(sc)) {
		s->nofldtxq = nports * iaq.nofldtxq;
		if (num_vis > 1)
			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
		s->neq += s->nofldtxq;

		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = nports * iaq.nofldrxq;
		if (num_vis > 1)
			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
		s->neq += s->nofldrxq;	/* free list */
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_smt(sc, M_WAITOK);
	t4_init_tx_sched(sc);
#ifdef RATELIMIT
	t4_init_etid_table(sc);
#endif
	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	ofld_tqidx = 0;
#endif
#ifdef TCP_OFFLOAD
	ofld_rqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			vi->tmr_idx = t4_tmr_idx;
			vi->pktc_idx = t4_pktc_idx;
			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			vi->first_ofld_txq = ofld_tqidx;
			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;

			ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}
	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}
static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}
static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}
/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}
int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);
	if (sc->smt)
		t4_free_smt(sc->smt);
#ifdef RATELIMIT
	t4_free_etid_table(sc);
#endif

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tids.hpftid_tab, M_CXGBE);
	free_hftid_hash(&sc->tids);
	free(sc->tids.atid_tab, M_CXGBE);
	free(sc->tids.tid_tab, M_CXGBE);
	free(sc->tt.tls_rx_ports, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock)) {
		mtx_destroy(&sc->tids.ftid_lock);
		cv_destroy(&sc->tids.ftid_cv);
	}
	if (mtx_initialized(&sc->tids.atid_lock))
		mtx_destroy(&sc->tids.atid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	if (rw_initialized(&sc->policy_lock)) {
		rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
		if (sc->policy != NULL)
			free_offload_policy(sc->policy);
#endif
	}

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}
static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
    IFCAP_HWRXTSTMP)
#define T4_CAP_ENABLE (T4_CAP)
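
/*
 * Capabilities start out as T4_CAP_ENABLE and are toggled at runtime through
 * SIOCSIFCAP (see the IFCAP_* handling in cxgbe_ioctl() below).  A usage
 * sketch from userland (illustrative only):
 *
 *	# ifconfig cxgbe0 -txcsum	# the handler also drops tso4
 *	# ifconfig cxgbe0 txcsum tso4
 */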
static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;
#ifdef RATELIMIT
	ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
	ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
	ifp->if_snd_tag_query = cxgbe_snd_tag_query;
	ifp->if_snd_tag_free = cxgbe_snd_tag_free;
#endif

	ifp->if_capabilities = T4_CAP;
	ifp->if_capenable = T4_CAP_ENABLE;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		ifp->if_capabilities |= IFCAP_NETMAP;
#endif
#ifdef RATELIMIT
	if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0) {
		ifp->if_capabilities |= IFCAP_TXRTLMT;
		ifp->if_capenable |= IFCAP_TXRTLMT;
	}
#endif
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
	case IFCAP_TOE:
		sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
		break;
	case IFCAP_TOE | IFCAP_TXRTLMT:
		sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
		break;
	case IFCAP_TXRTLMT:
		sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
		break;
	}
#endif
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}
static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	if_free(vi->ifp);
	vi->ifp = NULL;
}
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);
	ifmedia_removeall(&pi->media);

	end_synchronized_op(sc, 0);

	return (0);
}
static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
#ifdef RATELIMIT
		if (mask & IFCAP_TXRTLMT)
			ifp->if_capenable ^= IFCAP_TXRTLMT;
#endif
		if (mask & IFCAP_HWRXTSTMP) {
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_HWRXTSTMP;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_HWRXTSTMP)
					rxq->iq.flags |= IQ_RX_TIMESTAMP;
				else
					rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
			}
		}

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == false)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}
#ifdef RATELIMIT
	if (m->m_pkthdr.snd_tag != NULL) {
		/* EAGAIN tells the stack we are not the correct interface. */
		if (__predict_false(ifp != m->m_pkthdr.snd_tag->ifp)) {
			m_freem(m);
			return (EAGAIN);
		}

		return (ethofld_transmit(ifp, m));
	}
#endif

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);
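
	/*
	 * Worked example of the selection above (illustrative): with ntxq = 4
	 * and rsrv_noflowq = 1, packets with a flowid map to txq[1..3]
	 * (flowid % 3 + 1) while txq[0] is reserved for traffic without a
	 * flowid.
	 */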
	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags |= EQ_QFLUSH;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_QFLUSH;
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}
static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	if (pi->nvi > 1 || sc->flags & IS_VF)
		return (vi_get_counter(ifp, c));

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
/*
 * The kernel picks a media from the list we had provided but we still validate
 * the requested media.
 */
int
cxgbe_media_change(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct ifmedia *ifm = &pi->media;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
	if (rc != 0)
		return (rc);
	PORT_LOCK(pi);
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		/* ifconfig .. media autoselect */
		if (!(lc->supported & FW_PORT_CAP32_ANEG)) {
			rc = ENOTSUP; /* AN not supported by transceiver */
			goto done;
		}
		lc->requested_aneg = AUTONEG_ENABLE;
		lc->requested_speed = 0;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->requested_aneg = AUTONEG_DISABLE;
		lc->requested_speed =
		    ifmedia_baudrate(ifm->ifm_media) / 1000000;
		lc->requested_fc = 0;
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
			lc->requested_fc |= PAUSE_RX;
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
			lc->requested_fc |= PAUSE_TX;
	}
	if (pi->up_vis > 0) {
		fixup_link_config(pi);
		rc = apply_link_config(pi);
	}
done:
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
	return (rc);
}
2130 * Base media word (without ETHER, pause, link active, etc.) for the port at the
2134 port_mword(struct port_info *pi, uint32_t speed)
2137 MPASS(speed & M_FW_PORT_CAP32_SPEED);
2138 MPASS(powerof2(speed));
2140 switch(pi->port_type) {
2141 case FW_PORT_TYPE_BT_SGMII:
2142 case FW_PORT_TYPE_BT_XFI:
2143 case FW_PORT_TYPE_BT_XAUI:
2146 case FW_PORT_CAP32_SPEED_100M:
2148 case FW_PORT_CAP32_SPEED_1G:
2149 return (IFM_1000_T);
2150 case FW_PORT_CAP32_SPEED_10G:
2154 case FW_PORT_TYPE_KX4:
2155 if (speed == FW_PORT_CAP32_SPEED_10G)
2156 return (IFM_10G_KX4);
2158 case FW_PORT_TYPE_CX4:
2159 if (speed == FW_PORT_CAP32_SPEED_10G)
2160 return (IFM_10G_CX4);
2162 case FW_PORT_TYPE_KX:
2163 if (speed == FW_PORT_CAP32_SPEED_1G)
2164 return (IFM_1000_KX);
2166 case FW_PORT_TYPE_KR:
2167 case FW_PORT_TYPE_BP_AP:
2168 case FW_PORT_TYPE_BP4_AP:
2169 case FW_PORT_TYPE_BP40_BA:
2170 case FW_PORT_TYPE_KR4_100G:
2171 case FW_PORT_TYPE_KR_SFP28:
2172 case FW_PORT_TYPE_KR_XLAUI:
2174 case FW_PORT_CAP32_SPEED_1G:
2175 return (IFM_1000_KX);
2176 case FW_PORT_CAP32_SPEED_10G:
2177 return (IFM_10G_KR);
2178 case FW_PORT_CAP32_SPEED_25G:
2179 return (IFM_25G_KR);
2180 case FW_PORT_CAP32_SPEED_40G:
2181 return (IFM_40G_KR4);
2182 case FW_PORT_CAP32_SPEED_50G:
2183 return (IFM_50G_KR2);
2184 case FW_PORT_CAP32_SPEED_100G:
2185 return (IFM_100G_KR4);
2188 case FW_PORT_TYPE_FIBER_XFI:
2189 case FW_PORT_TYPE_FIBER_XAUI:
2190 case FW_PORT_TYPE_SFP:
2191 case FW_PORT_TYPE_QSFP_10G:
2192 case FW_PORT_TYPE_QSA:
2193 case FW_PORT_TYPE_QSFP:
2194 case FW_PORT_TYPE_CR4_QSFP:
2195 case FW_PORT_TYPE_CR_QSFP:
2196 case FW_PORT_TYPE_CR2_QSFP:
2197 case FW_PORT_TYPE_SFP28:
2198 /* Pluggable transceiver */
2199 switch (pi->mod_type) {
2200 case FW_PORT_MOD_TYPE_LR:
2202 case FW_PORT_CAP32_SPEED_1G:
2203 return (IFM_1000_LX);
2204 case FW_PORT_CAP32_SPEED_10G:
2205 return (IFM_10G_LR);
2206 case FW_PORT_CAP32_SPEED_25G:
2207 return (IFM_25G_LR);
2208 case FW_PORT_CAP32_SPEED_40G:
2209 return (IFM_40G_LR4);
2210 case FW_PORT_CAP32_SPEED_50G:
2211 return (IFM_50G_LR2);
2212 case FW_PORT_CAP32_SPEED_100G:
2213 return (IFM_100G_LR4);
2216 case FW_PORT_MOD_TYPE_SR:
2218 case FW_PORT_CAP32_SPEED_1G:
2219 return (IFM_1000_SX);
2220 case FW_PORT_CAP32_SPEED_10G:
2221 return (IFM_10G_SR);
2222 case FW_PORT_CAP32_SPEED_25G:
2223 return (IFM_25G_SR);
2224 case FW_PORT_CAP32_SPEED_40G:
2225 return (IFM_40G_SR4);
2226 case FW_PORT_CAP32_SPEED_50G:
2227 return (IFM_50G_SR2);
2228 case FW_PORT_CAP32_SPEED_100G:
2229 return (IFM_100G_SR4);
2232 case FW_PORT_MOD_TYPE_ER:
2233 if (speed == FW_PORT_CAP32_SPEED_10G)
2234 return (IFM_10G_ER);
2236 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2237 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2239 case FW_PORT_CAP32_SPEED_1G:
2240 return (IFM_1000_CX);
2241 case FW_PORT_CAP32_SPEED_10G:
2242 return (IFM_10G_TWINAX);
2243 case FW_PORT_CAP32_SPEED_25G:
2244 return (IFM_25G_CR);
2245 case FW_PORT_CAP32_SPEED_40G:
2246 return (IFM_40G_CR4);
2247 case FW_PORT_CAP32_SPEED_50G:
2248 return (IFM_50G_CR2);
2249 case FW_PORT_CAP32_SPEED_100G:
2250 return (IFM_100G_CR4);
2253 case FW_PORT_MOD_TYPE_LRM:
2254 if (speed == FW_PORT_CAP32_SPEED_10G)
2255 return (IFM_10G_LRM);
2257 case FW_PORT_MOD_TYPE_NA:
2258 MPASS(0); /* Not pluggable? */
2260 case FW_PORT_MOD_TYPE_ERROR:
2261 case FW_PORT_MOD_TYPE_UNKNOWN:
2262 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
2264 case FW_PORT_MOD_TYPE_NONE:
2268 case FW_PORT_TYPE_NONE:
2272 return (IFM_UNKNOWN);
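/*
 * Illustrative composition (mirrors cxgbe_media_status() below): the word
 * returned here is OR'ed with the common Ethernet flags, e.g.
 *
 *	ifmr->ifm_active = IFM_ETHER | IFM_FDX |
 *	    port_mword(pi, speed_to_fwcap(lc->speed));
 *
 * so a 25G SFP28 port with an SR module ends up reporting IFM_25G_SR, and
 * an unrecognized module type falls back to IFM_UNKNOWN.
 */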
2276 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2278 struct vi_info *vi = ifp->if_softc;
2279 struct port_info *pi = vi->pi;
2280 struct adapter *sc = pi->adapter;
2281 struct link_config *lc = &pi->link_cfg;
2283 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2287 if (pi->up_vis == 0) {
2289 * If all the interfaces are administratively down, the firmware
2290 * does not report transceiver changes. Refresh port info here
2291 * so that ifconfig displays accurate ifmedia at all times.
2292 * This is the only reason we have a synchronized op in this
2293 * function. Just PORT_LOCK would have been enough otherwise.
2295 t4_update_port_info(pi);
2296 build_medialist(pi);
2300 ifmr->ifm_status = IFM_AVALID;
2301 if (lc->link_ok == false)
2303 ifmr->ifm_status |= IFM_ACTIVE;
2306 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2307 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2308 if (lc->fc & PAUSE_RX)
2309 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2310 if (lc->fc & PAUSE_TX)
2311 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2312 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
2315 end_synchronized_op(sc, 0);
2319 vcxgbe_probe(device_t dev)
2322 struct vi_info *vi = device_get_softc(dev);
2324 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2326 device_set_desc_copy(dev, buf);
2328 return (BUS_PROBE_DEFAULT);
2332 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2334 int func, index, rc;
2335 uint32_t param, val;
2337 ASSERT_SYNCHRONIZED_OP(sc);
2339 index = vi - pi->vi;
2340 MPASS(index > 0); /* This function deals with _extra_ VIs only */
2341 KASSERT(index < nitems(vi_mac_funcs),
2342 ("%s: VI %s doesn't have a MAC func", __func__,
2343 device_get_nameunit(vi->dev)));
2344 func = vi_mac_funcs[index];
2345 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2346 vi->hw_addr, &vi->rss_size, func, 0);
2348 device_printf(vi->dev, "failed to allocate virtual interface %d"
2349 " for port %d: %d\n", index, pi->port_id, -rc);
2353 if (chip_id(sc) <= CHELSIO_T5)
2354 vi->smt_idx = (rc & 0x7f) << 1;
2356 vi->smt_idx = (rc & 0x7f);
2358 if (vi->rss_size == 1) {
2360 * This VI didn't get a slice of the RSS table. Reduce the
2361 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2362 * configuration file (nvi, rssnvi for this PF) if this is a
2365 device_printf(vi->dev, "RSS table not available.\n");
2366 vi->rss_base = 0xffff;
2371 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2372 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2373 V_FW_PARAMS_PARAM_YZ(vi->viid);
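/*
 * The RSSINFO reply packs the size of this VI's slice of the RSS table in
 * the upper 16 bits of val and the table's base entry in the lower 16
 * bits; the decode below relies on that layout.
 */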
2374 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2376 vi->rss_base = 0xffff;
2378 MPASS((val >> 16) == vi->rss_size);
2379 vi->rss_base = val & 0xffff;
2386 vcxgbe_attach(device_t dev)
2389 struct port_info *pi;
2393 vi = device_get_softc(dev);
2397 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2400 rc = alloc_extra_vi(sc, pi, vi);
2401 end_synchronized_op(sc, 0);
2405 rc = cxgbe_vi_attach(dev, vi);
2407 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2414 vcxgbe_detach(device_t dev)
2419 vi = device_get_softc(dev);
2420 sc = vi->pi->adapter;
2424 cxgbe_vi_detach(vi);
2425 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2427 end_synchronized_op(sc, 0);
2433 t4_fatal_err(struct adapter *sc)
2435 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2436 t4_intr_disable(sc);
2437 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2438 device_get_nameunit(sc->dev));
2439 if (t4_panic_on_fatal_err)
2440 panic("panic requested on fatal error");
2444 t4_add_adapter(struct adapter *sc)
2446 sx_xlock(&t4_list_lock);
2447 SLIST_INSERT_HEAD(&t4_list, sc, link);
2448 sx_xunlock(&t4_list_lock);
2452 t4_map_bars_0_and_4(struct adapter *sc)
2454 sc->regs_rid = PCIR_BAR(0);
2455 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2456 &sc->regs_rid, RF_ACTIVE);
2457 if (sc->regs_res == NULL) {
2458 device_printf(sc->dev, "cannot map registers.\n");
2461 sc->bt = rman_get_bustag(sc->regs_res);
2462 sc->bh = rman_get_bushandle(sc->regs_res);
2463 sc->mmio_len = rman_get_size(sc->regs_res);
2464 setbit(&sc->doorbells, DOORBELL_KDB);
2466 sc->msix_rid = PCIR_BAR(4);
2467 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2468 &sc->msix_rid, RF_ACTIVE);
2469 if (sc->msix_res == NULL) {
2470 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2478 t4_map_bar_2(struct adapter *sc)
2482 * T4: only the iWARP driver uses the userspace doorbells. There is no
2483 * need to map them if RDMA is disabled.
2485 if (is_t4(sc) && sc->rdmacaps == 0)
2488 sc->udbs_rid = PCIR_BAR(2);
2489 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2490 &sc->udbs_rid, RF_ACTIVE);
2491 if (sc->udbs_res == NULL) {
2492 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2495 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2497 if (chip_id(sc) >= CHELSIO_T5) {
2498 setbit(&sc->doorbells, DOORBELL_UDB);
2499 #if defined(__i386__) || defined(__amd64__)
2500 if (t5_write_combine) {
2504 * Enable write combining on BAR2. This is the
2505 * userspace doorbell BAR and is split into 128B
2506 * (UDBS_SEG_SIZE) doorbell regions, each associated
2507 * with an egress queue. The first 64B has the doorbell
2508 * and the second 64B can be used to submit a tx work
2509 * request with an implicit doorbell.
2512 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2513 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2515 clrbit(&sc->doorbells, DOORBELL_UDB);
2516 setbit(&sc->doorbells, DOORBELL_WCWR);
2517 setbit(&sc->doorbells, DOORBELL_UDBWC);
2519 device_printf(sc->dev,
2520 "couldn't enable write combining: %d\n",
2524 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2525 t4_write_reg(sc, A_SGE_STAT_CFG,
2526 V_STATSOURCE_T5(7) | mode);
2530 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
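/*
 * Sketch of how the WC doorbell is used (illustrative; the real queue code
 * is in t4_sge.c): with DOORBELL_WCWR available, a small tx work request
 * can be written straight into the second 64B half of the queue's
 * UDBS_SEG_SIZE doorbell segment, ringing the doorbell implicitly and
 * saving a separate PCIe write per work request.
 */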
2535 struct memwin_init {
2540 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2541 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2542 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2543 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2546 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2547 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2548 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2549 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2553 setup_memwin(struct adapter *sc)
2555 const struct memwin_init *mw_init;
2562 * Read low 32b of bar0 indirectly via the hardware backdoor
2563 * mechanism. Works from within PCI passthrough environments
2564 * too, where rman_get_start() can return a different value. We
2565 * need to program the T4 memory window decoders with the actual
2566 * addresses that will be coming across the PCIe link.
2568 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2569 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2571 mw_init = &t4_memwin[0];
2573 /* T5+ use the relative offset inside the PCIe BAR */
2576 mw_init = &t5_memwin[0];
2579 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2580 rw_init(&mw->mw_lock, "memory window access");
2581 mw->mw_base = mw_init->base;
2582 mw->mw_aperture = mw_init->aperture;
2585 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2586 (mw->mw_base + bar0) | V_BIR(0) |
2587 V_WINDOW(ilog2(mw->mw_aperture) - 10));
2588 rw_wlock(&mw->mw_lock);
2589 position_memwin(sc, i, 0);
2590 rw_wunlock(&mw->mw_lock);
2594 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
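/*
 * A memory window is a movable aperture into the card's address space:
 * position_memwin() below points the window at (or just below, because of
 * alignment) the target address, after which BAR0 accesses at mw_base plus
 * the offset from mw_curpos reach card memory. rw_via_memwin() is the
 * generic accessor built on top of this.
 */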
2598 * Positions the memory window at the given address in the card's address space.
2599 * There are some alignment requirements and the actual position may be at an
2600 * address prior to the requested address. mw->mw_curpos always has the actual
2601 * position of the window.
2604 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2610 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2611 mw = &sc->memwin[idx];
2612 rw_assert(&mw->mw_lock, RA_WLOCKED);
2616 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
2618 pf = V_PFNUM(sc->pf);
2619 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
2621 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2622 t4_write_reg(sc, reg, mw->mw_curpos | pf);
2623 t4_read_reg(sc, reg); /* flush */
2627 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2633 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2635 /* Memory can only be accessed in naturally aligned 4 byte units */
2636 if (addr & 3 || len & 3 || len <= 0)
2639 mw = &sc->memwin[idx];
2641 rw_rlock(&mw->mw_lock);
2642 mw_end = mw->mw_curpos + mw->mw_aperture;
2643 if (addr >= mw_end || addr < mw->mw_curpos) {
2644 /* Will need to reposition the window */
2645 if (!rw_try_upgrade(&mw->mw_lock)) {
2646 rw_runlock(&mw->mw_lock);
2647 rw_wlock(&mw->mw_lock);
2649 rw_assert(&mw->mw_lock, RA_WLOCKED);
2650 position_memwin(sc, idx, addr);
2651 rw_downgrade(&mw->mw_lock);
2652 mw_end = mw->mw_curpos + mw->mw_aperture;
2654 rw_assert(&mw->mw_lock, RA_RLOCKED);
2655 while (addr < mw_end && len > 0) {
2657 v = t4_read_reg(sc, mw->mw_base + addr -
2659 *val++ = le32toh(v);
2662 t4_write_reg(sc, mw->mw_base + addr -
2663 mw->mw_curpos, htole32(v));
2668 rw_runlock(&mw->mw_lock);
2675 alloc_atid_tab(struct tid_info *t, int flags)
2679 MPASS(t->natids > 0);
2680 MPASS(t->atid_tab == NULL);
2682 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
2684 if (t->atid_tab == NULL)
2686 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
2687 t->afree = t->atid_tab;
2688 t->atids_in_use = 0;
2689 for (i = 1; i < t->natids; i++)
2690 t->atid_tab[i - 1].next = &t->atid_tab[i];
2691 t->atid_tab[t->natids - 1].next = NULL;
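/*
 * The atid table doubles as its own free list: each free entry's next
 * pointer links to the following free entry and t->afree points at the
 * head, so alloc_atid() and free_atid() below are simple pop and push
 * operations. An atid is just the entry's index within atid_tab.
 */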
2697 free_atid_tab(struct tid_info *t)
2700 KASSERT(t->atids_in_use == 0,
2701 ("%s: %d atids still in use.", __func__, t->atids_in_use));
2703 if (mtx_initialized(&t->atid_lock))
2704 mtx_destroy(&t->atid_lock);
2705 free(t->atid_tab, M_CXGBE);
2710 alloc_atid(struct adapter *sc, void *ctx)
2712 struct tid_info *t = &sc->tids;
2715 mtx_lock(&t->atid_lock);
2717 union aopen_entry *p = t->afree;
2719 atid = p - t->atid_tab;
2720 MPASS(atid <= M_TID_TID);
2725 mtx_unlock(&t->atid_lock);
2730 lookup_atid(struct adapter *sc, int atid)
2732 struct tid_info *t = &sc->tids;
2734 return (t->atid_tab[atid].data);
2738 free_atid(struct adapter *sc, int atid)
2740 struct tid_info *t = &sc->tids;
2741 union aopen_entry *p = &t->atid_tab[atid];
2743 mtx_lock(&t->atid_lock);
2747 mtx_unlock(&t->atid_lock);
2751 queue_tid_release(struct adapter *sc, int tid)
2754 CXGBE_UNIMPLEMENTED("deferred tid release");
2758 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
2761 struct cpl_tid_release *req;
2763 wr = alloc_wrqe(sizeof(*req), ctrlq);
2765 queue_tid_release(sc, tid); /* defer */
2770 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
2776 t4_range_cmp(const void *a, const void *b)
2778 return ((const struct t4_range *)a)->start -
2779 ((const struct t4_range *)b)->start;
2783 * Verify that the memory range specified by the addr/len pair is valid within
2784 * the card's address space.
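*
* For example (illustrative sizes): if EDC0 covers [0, 1GB) and EDC1 covers
* [1GB, 2GB), the sort-and-merge below collapses them into a single
* [0, 2GB) range, so a request that straddles the EDC0/EDC1 boundary is
* still considered valid.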
2787 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2789 struct t4_range mem_ranges[4], *r, *next;
2790 uint32_t em, addr_len;
2791 int i, n, remaining;
2793 /* Memory can only be accessed in naturally aligned 4 byte units */
2794 if (addr & 3 || len & 3 || len <= 0)
2797 /* Enabled memories */
2798 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2802 bzero(r, sizeof(mem_ranges));
2803 if (em & F_EDRAM0_ENABLE) {
2804 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2805 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2807 r->start = G_EDRAM0_BASE(addr_len) << 20;
2808 if (addr >= r->start &&
2809 addr + len <= r->start + r->size)
2815 if (em & F_EDRAM1_ENABLE) {
2816 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2817 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2819 r->start = G_EDRAM1_BASE(addr_len) << 20;
2820 if (addr >= r->start &&
2821 addr + len <= r->start + r->size)
2827 if (em & F_EXT_MEM_ENABLE) {
2828 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2829 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2831 r->start = G_EXT_MEM_BASE(addr_len) << 20;
2832 if (addr >= r->start &&
2833 addr + len <= r->start + r->size)
2839 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2840 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2841 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2843 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2844 if (addr >= r->start &&
2845 addr + len <= r->start + r->size)
2851 MPASS(n <= nitems(mem_ranges));
2854 /* Sort and merge the ranges. */
2855 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2857 /* Start from index 0 and examine the next n - 1 entries. */
2859 for (remaining = n - 1; remaining > 0; remaining--, r++) {
2861 MPASS(r->size > 0); /* r is a valid entry. */
2863 MPASS(next->size > 0); /* and so is the next one. */
2865 while (r->start + r->size >= next->start) {
2866 /* Merge the next one into the current entry. */
2867 r->size = max(r->start + r->size,
2868 next->start + next->size) - r->start;
2869 n--; /* One fewer entry in total. */
2870 if (--remaining == 0)
2871 goto done; /* short circuit */
2874 if (next != r + 1) {
2876 * Some entries were merged into r and next
2877 * points to the first valid entry that couldn't
2880 MPASS(next->size > 0); /* must be valid */
2881 memcpy(r + 1, next, remaining * sizeof(*r));
2884 * This is so that the next->size assertion in the
2885 * next iteration of the loop does the right
2886 * thing for entries that were pulled up and are
2889 MPASS(n < nitems(mem_ranges));
2890 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2891 sizeof(struct t4_range));
2896 /* Done merging the ranges. */
2899 for (i = 0; i < n; i++, r++) {
2900 if (addr >= r->start &&
2901 addr + len <= r->start + r->size)
2910 fwmtype_to_hwmtype(int mtype)
2914 case FW_MEMTYPE_EDC0:
2916 case FW_MEMTYPE_EDC1:
2918 case FW_MEMTYPE_EXTMEM:
2920 case FW_MEMTYPE_EXTMEM1:
2923 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2928 * Verify that the memory range specified by the memtype/offset/len pair is
2929 * valid and lies entirely within the memtype specified. The global address of
2930 * the start of the range is returned in addr.
2933 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2936 uint32_t em, addr_len, maddr;
2938 /* Memory can only be accessed in naturally aligned 4 byte units */
2939 if (off & 3 || len & 3 || len == 0)
2942 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2943 switch (fwmtype_to_hwmtype(mtype)) {
2945 if (!(em & F_EDRAM0_ENABLE))
2947 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2948 maddr = G_EDRAM0_BASE(addr_len) << 20;
2951 if (!(em & F_EDRAM1_ENABLE))
2953 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2954 maddr = G_EDRAM1_BASE(addr_len) << 20;
2957 if (!(em & F_EXT_MEM_ENABLE))
2959 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2960 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2963 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2965 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2966 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2972 *addr = maddr + off; /* global address */
2973 return (validate_mem_range(sc, *addr, len));
2977 fixup_devlog_params(struct adapter *sc)
2979 struct devlog_params *dparams = &sc->params.devlog;
2982 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2983 dparams->size, &dparams->addr);
2989 update_nirq(struct intrs_and_queues *iaq, int nports)
2991 int extra = T4_EXTRA_INTR;
2994 iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
2995 iaq->nirq += nports * (iaq->num_vis - 1) *
2996 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
2997 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
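/*
 * Worked example with made-up counts: a 2-port adapter with nrxq = 8,
 * nofldrxq = 2, and num_vis = 1 adds 2 * (8 + 2) = 20 queue interrupts on
 * top of the T4_EXTRA_INTR vectors; each extra VI on a port would add its
 * own rx queue counts to the total as well.
 */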
3001 * Adjust requirements to fit the number of interrupts available.
3004 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
3008 const int nports = sc->params.nports;
3013 bzero(iaq, sizeof(*iaq));
3014 iaq->intr_type = itype;
3015 iaq->num_vis = t4_num_vis;
3016 iaq->ntxq = t4_ntxq;
3017 iaq->ntxq_vi = t4_ntxq_vi;
3018 iaq->nrxq = t4_nrxq;
3019 iaq->nrxq_vi = t4_nrxq_vi;
3020 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3021 if (is_offload(sc) || is_ethoffload(sc)) {
3022 iaq->nofldtxq = t4_nofldtxq;
3023 iaq->nofldtxq_vi = t4_nofldtxq_vi;
3027 if (is_offload(sc)) {
3028 iaq->nofldrxq = t4_nofldrxq;
3029 iaq->nofldrxq_vi = t4_nofldrxq_vi;
3033 iaq->nnmtxq_vi = t4_nnmtxq_vi;
3034 iaq->nnmrxq_vi = t4_nnmrxq_vi;
3037 update_nirq(iaq, nports);
3038 if (iaq->nirq <= navail &&
3039 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3041 * This is the normal case -- there are enough interrupts for
3048 * If extra VIs have been configured, try reducing their count and see if
3051 while (iaq->num_vis > 1) {
3053 update_nirq(iaq, nports);
3054 if (iaq->nirq <= navail &&
3055 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3056 device_printf(sc->dev, "virtual interfaces per port "
3057 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
3058 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
3059 "itype %d, navail %u, nirq %d.\n",
3060 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3061 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3062 itype, navail, iaq->nirq);
3068 * Extra VIs will not be created. Log a message if they were requested.
3070 MPASS(iaq->num_vis == 1);
3071 iaq->ntxq_vi = iaq->nrxq_vi = 0;
3072 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3073 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3074 if (iaq->num_vis != t4_num_vis) {
3075 device_printf(sc->dev, "extra virtual interfaces disabled. "
3076 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3077 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
3078 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3079 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3083 * Keep reducing the number of NIC rx queues to the next lower power of
3084 * 2 (for even RSS distribution), halving the TOE rx queues too, and see
3088 if (iaq->nrxq > 1) {
3091 } while (!powerof2(iaq->nrxq));
3093 if (iaq->nofldrxq > 1)
3094 iaq->nofldrxq >>= 1;
3096 old_nirq = iaq->nirq;
3097 update_nirq(iaq, nports);
3098 if (iaq->nirq <= navail &&
3099 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3100 device_printf(sc->dev, "running with reduced number of "
3101 "rx queues because of shortage of interrupts. "
3102 "nrxq=%u, nofldrxq=%u. "
3103 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3104 iaq->nofldrxq, itype, navail, iaq->nirq);
3107 } while (old_nirq != iaq->nirq);
3109 /* One interrupt for everything. Ugh. */
3110 device_printf(sc->dev, "running with minimal number of queues. "
3111 "itype %d, navail %u.\n", itype, navail);
3113 MPASS(iaq->nrxq == 1);
3115 if (iaq->nofldrxq > 1)
3118 MPASS(iaq->num_vis > 0);
3119 if (iaq->num_vis > 1) {
3120 MPASS(iaq->nrxq_vi > 0);
3121 MPASS(iaq->ntxq_vi > 0);
3123 MPASS(iaq->nirq > 0);
3124 MPASS(iaq->nrxq > 0);
3125 MPASS(iaq->ntxq > 0);
3126 if (itype == INTR_MSI) {
3127 MPASS(powerof2(iaq->nirq));
3132 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3134 int rc, itype, navail, nalloc;
3136 for (itype = INTR_MSIX; itype; itype >>= 1) {
3138 if ((itype & t4_intr_types) == 0)
3139 continue; /* not allowed */
3141 if (itype == INTR_MSIX)
3142 navail = pci_msix_count(sc->dev);
3143 else if (itype == INTR_MSI)
3144 navail = pci_msi_count(sc->dev);
3151 calculate_iaq(sc, iaq, itype, navail);
3154 if (itype == INTR_MSIX)
3155 rc = pci_alloc_msix(sc->dev, &nalloc);
3156 else if (itype == INTR_MSI)
3157 rc = pci_alloc_msi(sc->dev, &nalloc);
3159 if (rc == 0 && nalloc > 0) {
3160 if (nalloc == iaq->nirq)
3164 * Didn't get the number requested. Use whatever number
3165 * the kernel is willing to allocate.
3167 device_printf(sc->dev, "fewer vectors than requested, "
3168 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3169 itype, iaq->nirq, nalloc);
3170 pci_release_msi(sc->dev);
3175 device_printf(sc->dev,
3176 "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n",
3177 itype, rc, iaq->nirq, nalloc);
3180 device_printf(sc->dev,
3181 "failed to find a usable interrupt type. "
3182 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
3183 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3188 #define FW_VERSION(chip) ( \
3189 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3190 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3191 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3192 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3193 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
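/*
 * FW_VERSION(T5), for example, packs the T5 firmware's
 * major/minor/micro/build numbers into a single 32b word using the
 * V_FW_HDR_FW_VER_* shifts; this matches the encoding of fw_ver in struct
 * fw_hdr and is unpacked elsewhere with the G_FW_HDR_FW_VER_* macros.
 */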
3199 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
3203 .kld_name = "t4fw_cfg",
3204 .fw_mod_name = "t4fw",
3206 .chip = FW_HDR_CHIP_T4,
3207 .fw_ver = htobe32(FW_VERSION(T4)),
3208 .intfver_nic = FW_INTFVER(T4, NIC),
3209 .intfver_vnic = FW_INTFVER(T4, VNIC),
3210 .intfver_ofld = FW_INTFVER(T4, OFLD),
3211 .intfver_ri = FW_INTFVER(T4, RI),
3212 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3213 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3214 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3215 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3219 .kld_name = "t5fw_cfg",
3220 .fw_mod_name = "t5fw",
3222 .chip = FW_HDR_CHIP_T5,
3223 .fw_ver = htobe32(FW_VERSION(T5)),
3224 .intfver_nic = FW_INTFVER(T5, NIC),
3225 .intfver_vnic = FW_INTFVER(T5, VNIC),
3226 .intfver_ofld = FW_INTFVER(T5, OFLD),
3227 .intfver_ri = FW_INTFVER(T5, RI),
3228 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3229 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3230 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3231 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3235 .kld_name = "t6fw_cfg",
3236 .fw_mod_name = "t6fw",
3238 .chip = FW_HDR_CHIP_T6,
3239 .fw_ver = htobe32(FW_VERSION(T6)),
3240 .intfver_nic = FW_INTFVER(T6, NIC),
3241 .intfver_vnic = FW_INTFVER(T6, VNIC),
3242 .intfver_ofld = FW_INTFVER(T6, OFLD),
3243 .intfver_ri = FW_INTFVER(T6, RI),
3244 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3245 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3246 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3247 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3252 static struct fw_info *
3253 find_fw_info(int chip)
3257 for (i = 0; i < nitems(fw_info); i++) {
3258 if (fw_info[i].chip == chip)
3259 return (&fw_info[i]);
3265 * Is the given firmware API compatible with the one the driver was compiled
3269 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3272 /* short circuit if it's the exact same firmware version */
3273 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3277 * XXX: Is this too conservative? Perhaps I should limit this to the
3278 * features that are supported in the driver.
3280 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3281 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3282 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3283 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3291 * The firmware in the KLD is usable, but should it be installed? This routine
3292 * explains itself in detail if it indicates the KLD firmware should be
3296 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
3300 if (!card_fw_usable) {
3301 reason = "incompatible or unusable";
3306 reason = "older than the version bundled with this driver";
3310 if (t4_fw_install == 2 && k != c) {
3311 reason = "different than the version bundled with this driver";
3318 if (t4_fw_install == 0) {
3319 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3320 "but the driver is prohibited from installing a different "
3321 "firmware on the card.\n",
3322 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3323 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3328 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3329 "installing firmware %u.%u.%u.%u on card.\n",
3330 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3331 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3332 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3333 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3339 * Establish contact with the firmware and determine if we are the master driver
3340 * or not, and whether we are responsible for chip initialization.
3343 prep_firmware(struct adapter *sc)
3345 const struct firmware *fw = NULL, *default_cfg;
3346 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
3347 enum dev_state state;
3348 struct fw_info *fw_info;
3349 struct fw_hdr *card_fw; /* fw on the card */
3350 const struct fw_hdr *kld_fw; /* fw in the KLD */
3351 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
3354 /* This is the firmware whose headers the driver was compiled against */
3355 fw_info = find_fw_info(chip_id(sc));
3356 if (fw_info == NULL) {
3357 device_printf(sc->dev,
3358 "unable to look up firmware information for chip %d.\n",
3362 drv_fw = &fw_info->fw_hdr;
3365 * The firmware KLD contains many modules. The KLD name is also the
3366 * name of the module that contains the default config file.
3368 default_cfg = firmware_get(fw_info->kld_name);
3370 /* This is the firmware in the KLD */
3371 fw = firmware_get(fw_info->fw_mod_name);
3373 kld_fw = (const void *)fw->data;
3374 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3380 /* Read the header of the firmware on the card */
3381 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3382 rc = -t4_read_flash(sc, FLASH_FW_START,
3383 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
3385 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
3386 if (card_fw->fw_ver == be32toh(0xffffffff)) {
3387 uint32_t d = be32toh(kld_fw->fw_ver);
3389 if (!kld_fw_usable) {
3390 device_printf(sc->dev,
3391 "no firmware on the card and no usable "
3392 "firmware bundled with the driver.\n");
3395 } else if (t4_fw_install == 0) {
3396 device_printf(sc->dev,
3397 "no firmware on the card and the driver "
3398 "is prohibited from installing new "
3404 device_printf(sc->dev, "no firmware on the card, "
3405 "installing firmware %d.%d.%d.%d\n",
3406 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3407 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3408 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize);
3411 device_printf(sc->dev,
3412 "firmware install failed: %d.\n", rc);
3415 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3420 device_printf(sc->dev,
3421 "Unable to read card's firmware header: %d\n", rc);
3425 /* Contact firmware. */
3426 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3427 if (rc < 0 || state == DEV_STATE_ERR) {
3429 device_printf(sc->dev,
3430 "failed to connect to the firmware: %d, %d.\n", rc, state);
3435 sc->flags |= MASTER_PF;
3436 else if (state == DEV_STATE_UNINIT) {
3438 * We didn't get to be the master so we definitely won't be
3439 * configuring the chip. It's a bug if someone else hasn't
3440 * configured it already.
3442 device_printf(sc->dev, "couldn't be master(%d), "
3443 "device not already initialized either(%d).\n", rc, state);
3448 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3449 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3451 * Common case: the firmware on the card is an exact match and
3452 * the KLD is an exact match too, or the KLD is
3453 * absent/incompatible. Note that t4_fw_install = 2 is ignored
3454 * here -- use cxgbetool loadfw if you want to reinstall the
3455 * same firmware as the one on the card.
3457 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3458 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3459 be32toh(card_fw->fw_ver))) {
3461 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3463 device_printf(sc->dev,
3464 "failed to install firmware: %d\n", rc);
3468 /* Installed successfully, update the cached header too. */
3469 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3471 need_fw_reset = 0; /* already reset as part of load_fw */
3474 if (!card_fw_usable) {
3477 d = ntohl(drv_fw->fw_ver);
3478 c = ntohl(card_fw->fw_ver);
3479 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3481 device_printf(sc->dev, "Cannot find a usable firmware: "
3482 "fw_install %d, chip state %d, "
3483 "driver compiled with %d.%d.%d.%d, "
3484 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3485 t4_fw_install, state,
3486 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3487 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3488 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3489 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3490 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3491 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3497 if (need_fw_reset &&
3498 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3499 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3500 if (rc != ETIMEDOUT && rc != EIO)
3501 t4_fw_bye(sc, sc->mbox);
3506 rc = get_params__pre_init(sc);
3508 goto done; /* error message displayed already */
3510 /* Partition adapter resources as specified in the config file. */
3511 if (state == DEV_STATE_UNINIT) {
3513 KASSERT(sc->flags & MASTER_PF,
3514 ("%s: trying to change chip settings when not master.",
3517 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3519 goto done; /* error message displayed already */
3521 t4_tweak_chip_settings(sc);
3523 /* get basic stuff going */
3524 rc = -t4_fw_initialize(sc, sc->mbox);
3526 device_printf(sc->dev, "fw init failed: %d.\n", rc);
3530 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3535 free(card_fw, M_CXGBE);
3537 firmware_put(fw, FIRMWARE_UNLOAD);
3538 if (default_cfg != NULL)
3539 firmware_put(default_cfg, FIRMWARE_UNLOAD);
3544 #define FW_PARAM_DEV(param) \
3545 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3546 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3547 #define FW_PARAM_PFVF(param) \
3548 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3549 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
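/*
 * Typical usage, as in get_params__pre_init() below: build the param words
 * with these macros and hand them to the firmware, e.g.
 *
 *	param[0] = FW_PARAM_DEV(PORTVEC);
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
 *
 * with each answer returned in the corresponding val[] slot.
 */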
3552 * Partition chip resources for use between various PFs, VFs, etc.
3555 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3556 const char *name_prefix)
3558 const struct firmware *cfg = NULL;
3560 struct fw_caps_config_cmd caps;
3561 uint32_t mtype, moff, finicsum, cfcsum;
3564 * Figure out what configuration file to use. Pick the default config
3565 * file for the card if the user hasn't specified one explicitly.
3567 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3568 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3569 /* Card specific overrides go here. */
3570 if (pci_get_device(sc->dev) == 0x440a)
3571 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3573 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3574 } else if (strncmp(t4_cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0)
3575 goto use_built_in_config; /* go straight to config. */
3578 * We need to load another module if the profile is anything except
3579 * "default" or "flash".
3581 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3582 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3585 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3586 cfg = firmware_get(s);
3588 if (default_cfg != NULL) {
3589 device_printf(sc->dev,
3590 "unable to load module \"%s\" for "
3591 "configuration profile \"%s\", will use "
3592 "the default config file instead.\n",
3594 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3597 device_printf(sc->dev,
3598 "unable to load module \"%s\" for "
3599 "configuration profile \"%s\", will use "
3600 "the config file on the card's flash "
3601 "instead.\n", s, sc->cfg_file);
3602 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3608 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3609 default_cfg == NULL) {
3610 device_printf(sc->dev,
3611 "default config file not available, will use the config "
3612 "file on the card's flash instead.\n");
3613 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3616 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3618 const uint32_t *cfdata;
3619 uint32_t param, val, addr;
3621 KASSERT(cfg != NULL || default_cfg != NULL,
3622 ("%s: no config to upload", __func__));
3625 * Ask the firmware where it wants us to upload the config file.
3627 param = FW_PARAM_DEV(CF);
3628 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3630 /* No support for config file? Shouldn't happen. */
3631 device_printf(sc->dev,
3632 "failed to query config file location: %d.\n", rc);
3635 mtype = G_FW_PARAMS_PARAM_Y(val);
3636 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3639 * XXX: sheer laziness. We deliberately added 4 bytes of
3640 * useless stuffing/comments at the end of the config file so
3641 * it's ok to simply throw away the last remaining bytes when
3642 * the config file is not an exact multiple of 4. This also
3643 * helps with the validate_mt_off_len check.
3646 cflen = cfg->datasize & ~3;
3649 cflen = default_cfg->datasize & ~3;
3650 cfdata = default_cfg->data;
3653 if (cflen > FLASH_CFG_MAX_SIZE) {
3654 device_printf(sc->dev,
3655 "config file too long (%d, max allowed is %d). "
3656 "Will try to use the config on the card, if any.\n",
3657 cflen, FLASH_CFG_MAX_SIZE);
3658 goto use_config_on_flash;
3661 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3663 device_printf(sc->dev,
3664 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
3665 "Will try to use the config on the card, if any.\n",
3666 __func__, mtype, moff, cflen, rc);
3667 goto use_config_on_flash;
3669 write_via_memwin(sc, 2, addr, cfdata, cflen);
3671 use_config_on_flash:
3672 mtype = FW_MEMTYPE_FLASH;
3673 moff = t4_flash_cfg_addr(sc);
3676 bzero(&caps, sizeof(caps));
3677 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3678 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3679 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3680 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3681 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3682 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3684 device_printf(sc->dev,
3685 "failed to pre-process config file: %d "
3686 "(mtype %d, moff 0x%x). Will reset the firmware and retry "
3687 "with the built-in configuration.\n", rc, mtype, moff);
3689 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
3691 device_printf(sc->dev,
3692 "firmware reset failed: %d.\n", rc);
3693 if (rc != ETIMEDOUT && rc != EIO) {
3694 t4_fw_bye(sc, sc->mbox);
3695 sc->flags &= ~FW_OK;
3699 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "built-in");
3700 use_built_in_config:
3701 bzero(&caps, sizeof(caps));
3702 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3703 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3704 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3705 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3707 device_printf(sc->dev,
3708 "built-in configuration failed: %d.\n", rc);
3713 finicsum = be32toh(caps.finicsum);
3714 cfcsum = be32toh(caps.cfcsum);
3715 if (finicsum != cfcsum) {
3716 device_printf(sc->dev,
3717 "WARNING: config file checksum mismatch: %08x %08x\n",
3720 sc->cfcsum = cfcsum;
3722 #define LIMIT_CAPS(x) do { \
3723 caps.x &= htobe16(t4_##x##_allowed); \
3727 * Let the firmware know what features will (not) be used so it can tune
3728 * things accordingly.
3730 LIMIT_CAPS(nbmcaps);
3731 LIMIT_CAPS(linkcaps);
3732 LIMIT_CAPS(switchcaps);
3733 LIMIT_CAPS(niccaps);
3734 LIMIT_CAPS(toecaps);
3735 LIMIT_CAPS(rdmacaps);
3736 LIMIT_CAPS(cryptocaps);
3737 LIMIT_CAPS(iscsicaps);
3738 LIMIT_CAPS(fcoecaps);
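/*
 * Each t4_<x>_allowed value above comes from a driver tunable (hw.cxgbe.*),
 * so an administrator can mask off a capability (zero toecaps_allowed to
 * run without TOE, for example) before the trimmed capabilities are
 * written back to the firmware below.
 */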
3741 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
3743 * TOE and hashfilters are mutually exclusive. It is a config
3744 * file or firmware bug if both are reported as available. Try
3745 * to cope with the situation in non-debug builds by disabling
3748 MPASS(caps.toecaps == 0);
3755 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3756 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3757 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3758 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3760 device_printf(sc->dev,
3761 "failed to process config file: %d.\n", rc);
3765 firmware_put(cfg, FIRMWARE_UNLOAD);
3770 * Retrieve parameters that are needed (or nice to have) very early.
3773 get_params__pre_init(struct adapter *sc)
3776 uint32_t param[2], val[2];
3778 t4_get_version_info(sc);
3780 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3781 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3782 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3783 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3784 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3786 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3787 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3788 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3789 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3790 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3792 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3793 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3794 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3795 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3796 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3798 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3799 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3800 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3801 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3802 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3804 param[0] = FW_PARAM_DEV(PORTVEC);
3805 param[1] = FW_PARAM_DEV(CCLK);
3806 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3808 device_printf(sc->dev,
3809 "failed to query parameters (pre_init): %d.\n", rc);
3813 sc->params.portvec = val[0];
3814 sc->params.nports = bitcount32(val[0]);
3815 sc->params.vpd.cclk = val[1];
3817 /* Read device log parameters. */
3818 rc = -t4_init_devlog_params(sc, 1);
3820 fixup_devlog_params(sc);
3822 device_printf(sc->dev,
3823 "failed to get devlog parameters: %d.\n", rc);
3824 rc = 0; /* devlog isn't critical for device operation */
3831 * Retrieve various parameters that are of interest to the driver. The device
3832 * has been initialized by the firmware at this point.
3835 get_params__post_init(struct adapter *sc)
3838 uint32_t param[7], val[7];
3839 struct fw_caps_config_cmd caps;
3841 param[0] = FW_PARAM_PFVF(IQFLINT_START);
3842 param[1] = FW_PARAM_PFVF(EQ_START);
3843 param[2] = FW_PARAM_PFVF(FILTER_START);
3844 param[3] = FW_PARAM_PFVF(FILTER_END);
3845 param[4] = FW_PARAM_PFVF(L2T_START);
3846 param[5] = FW_PARAM_PFVF(L2T_END);
3847 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3848 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
3849 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
3850 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
3852 device_printf(sc->dev,
3853 "failed to query parameters (post_init): %d.\n", rc);
3857 sc->sge.iq_start = val[0];
3858 sc->sge.eq_start = val[1];
3859 if ((int)val[3] > (int)val[2]) {
3860 sc->tids.ftid_base = val[2];
3861 sc->tids.ftid_end = val[3];
3862 sc->tids.nftids = val[3] - val[2] + 1;
3864 sc->vres.l2t.start = val[4];
3865 sc->vres.l2t.size = val[5] - val[4] + 1;
3866 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3867 ("%s: L2 table size (%u) larger than expected (%u)",
3868 __func__, sc->vres.l2t.size, L2T_SIZE));
3869 sc->params.core_vdd = val[6];
3871 if (chip_id(sc) >= CHELSIO_T6) {
3874 if (sc->params.fw_vers >=
3875 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
3876 V_FW_HDR_FW_VER_MICRO(1) | V_FW_HDR_FW_VER_BUILD(0))) {
3878 * Note that the code to enable the region should run
3879 * before t4_fw_initialize and not here. This is just a
3880 * reminder to add said code.
3882 device_printf(sc->dev,
3883 "hpfilter region not enabled.\n");
3887 sc->tids.tid_base = t4_read_reg(sc,
3888 A_LE_DB_ACTIVE_TABLE_START_INDEX);
3890 param[0] = FW_PARAM_PFVF(HPFILTER_START);
3891 param[1] = FW_PARAM_PFVF(HPFILTER_END);
3892 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3894 device_printf(sc->dev,
3895 "failed to query hpfilter parameters: %d.\n", rc);
3898 if ((int)val[1] > (int)val[0]) {
3899 sc->tids.hpftid_base = val[0];
3900 sc->tids.hpftid_end = val[1];
3901 sc->tids.nhpftids = val[1] - val[0] + 1;
3904 * These should go off if the layout changes and the
3905 * driver needs to catch up.
3907 MPASS(sc->tids.hpftid_base == 0);
3908 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
3913 * MPSBGMAP is queried separately because only recent firmwares support
3914 * it as a parameter and we don't want the compound query above to fail
3915 * on older firmwares.
3917 param[0] = FW_PARAM_DEV(MPSBGMAP);
3919 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3921 sc->params.mps_bg_map = val[0];
3923 sc->params.mps_bg_map = 0;
3926 * Determine whether the firmware supports the filter2 work request.
3927 * This is queried separately for the same reason as MPSBGMAP above.
3929 param[0] = FW_PARAM_DEV(FILTER2_WR);
3931 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3933 sc->params.filter2_wr_support = val[0] != 0;
3935 sc->params.filter2_wr_support = 0;
3937 /* get capabilities */
3938 bzero(&caps, sizeof(caps));
3939 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3940 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3941 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3942 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3944 device_printf(sc->dev,
3945 "failed to get card capabilities: %d.\n", rc);
3949 #define READ_CAPS(x) do { \
3950 sc->x = htobe16(caps.x); \
3953 READ_CAPS(linkcaps);
3954 READ_CAPS(switchcaps);
3957 READ_CAPS(rdmacaps);
3958 READ_CAPS(cryptocaps);
3959 READ_CAPS(iscsicaps);
3960 READ_CAPS(fcoecaps);
3962 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
3963 MPASS(chip_id(sc) > CHELSIO_T4);
3964 MPASS(sc->toecaps == 0);
3967 param[0] = FW_PARAM_DEV(NTID);
3968 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3970 device_printf(sc->dev,
3971 "failed to query HASHFILTER parameters: %d.\n", rc);
3974 sc->tids.ntids = val[0];
3975 if (sc->params.fw_vers <
3976 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
3977 V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) {
3978 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
3979 sc->tids.ntids -= sc->tids.nhpftids;
3981 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3982 sc->params.hash_filter = 1;
3984 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3985 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3986 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3987 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3988 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3990 device_printf(sc->dev,
3991 "failed to query NIC parameters: %d.\n", rc);
3994 if ((int)val[1] > (int)val[0]) {
3995 sc->tids.etid_base = val[0];
3996 sc->tids.etid_end = val[1];
3997 sc->tids.netids = val[1] - val[0] + 1;
3998 sc->params.eo_wr_cred = val[2];
3999 sc->params.ethoffload = 1;
4003 /* query offload-related parameters */
4004 param[0] = FW_PARAM_DEV(NTID);
4005 param[1] = FW_PARAM_PFVF(SERVER_START);
4006 param[2] = FW_PARAM_PFVF(SERVER_END);
4007 param[3] = FW_PARAM_PFVF(TDDP_START);
4008 param[4] = FW_PARAM_PFVF(TDDP_END);
4009 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4010 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4012 device_printf(sc->dev,
4013 "failed to query TOE parameters: %d.\n", rc);
4016 sc->tids.ntids = val[0];
4017 if (sc->params.fw_vers <
4018 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
4019 V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) {
4020 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4021 sc->tids.ntids -= sc->tids.nhpftids;
4023 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4024 if ((int)val[2] > (int)val[1]) {
4025 sc->tids.stid_base = val[1];
4026 sc->tids.nstids = val[2] - val[1] + 1;
4028 sc->vres.ddp.start = val[3];
4029 sc->vres.ddp.size = val[4] - val[3] + 1;
4030 sc->params.ofldq_wr_cred = val[5];
4031 sc->params.offload = 1;
4034 * The firmware attempts memfree TOE configuration for -SO cards
4035 * and will report toecaps=0 if it runs out of resources (this
4036 * depends on the config file). It may not report 0 for other
4037 * capabilities dependent on the TOE in this case. Set them to
4038 * 0 here so that the driver doesn't bother tracking resources
4039 * that will never be used.
4045 param[0] = FW_PARAM_PFVF(STAG_START);
4046 param[1] = FW_PARAM_PFVF(STAG_END);
4047 param[2] = FW_PARAM_PFVF(RQ_START);
4048 param[3] = FW_PARAM_PFVF(RQ_END);
4049 param[4] = FW_PARAM_PFVF(PBL_START);
4050 param[5] = FW_PARAM_PFVF(PBL_END);
4051 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4053 device_printf(sc->dev,
4054 "failed to query RDMA parameters(1): %d.\n", rc);
4057 sc->vres.stag.start = val[0];
4058 sc->vres.stag.size = val[1] - val[0] + 1;
4059 sc->vres.rq.start = val[2];
4060 sc->vres.rq.size = val[3] - val[2] + 1;
4061 sc->vres.pbl.start = val[4];
4062 sc->vres.pbl.size = val[5] - val[4] + 1;
4064 param[0] = FW_PARAM_PFVF(SQRQ_START);
4065 param[1] = FW_PARAM_PFVF(SQRQ_END);
4066 param[2] = FW_PARAM_PFVF(CQ_START);
4067 param[3] = FW_PARAM_PFVF(CQ_END);
4068 param[4] = FW_PARAM_PFVF(OCQ_START);
4069 param[5] = FW_PARAM_PFVF(OCQ_END);
4070 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4072 device_printf(sc->dev,
4073 "failed to query RDMA parameters(2): %d.\n", rc);
4076 sc->vres.qp.start = val[0];
4077 sc->vres.qp.size = val[1] - val[0] + 1;
4078 sc->vres.cq.start = val[2];
4079 sc->vres.cq.size = val[3] - val[2] + 1;
4080 sc->vres.ocq.start = val[4];
4081 sc->vres.ocq.size = val[5] - val[4] + 1;
4083 param[0] = FW_PARAM_PFVF(SRQ_START);
4084 param[1] = FW_PARAM_PFVF(SRQ_END);
4085 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4086 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4087 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4089 device_printf(sc->dev,
4090 "failed to query RDMA parameters(3): %d.\n", rc);
4093 sc->vres.srq.start = val[0];
4094 sc->vres.srq.size = val[1] - val[0] + 1;
4095 sc->params.max_ordird_qp = val[2];
4096 sc->params.max_ird_adapter = val[3];
4098 if (sc->iscsicaps) {
4099 param[0] = FW_PARAM_PFVF(ISCSI_START);
4100 param[1] = FW_PARAM_PFVF(ISCSI_END);
4101 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4103 device_printf(sc->dev,
4104 "failed to query iSCSI parameters: %d.\n", rc);
4107 sc->vres.iscsi.start = val[0];
4108 sc->vres.iscsi.size = val[1] - val[0] + 1;
4110 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4111 param[0] = FW_PARAM_PFVF(TLS_START);
4112 param[1] = FW_PARAM_PFVF(TLS_END);
4113 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4115 device_printf(sc->dev,
4116 "failed to query TLS parameters: %d.\n", rc);
4119 sc->vres.key.start = val[0];
4120 sc->vres.key.size = val[1] - val[0] + 1;
4123 t4_init_sge_params(sc);
4126 * We've got the params we wanted to query via the firmware. Now grab
4127 * some others directly from the chip.
4129 rc = t4_read_chip_settings(sc);
4135 set_params__post_init(struct adapter *sc)
4137 uint32_t param, val;
4142 /* ask for encapsulated CPLs */
4143 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4145 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4147 /* Enable 32b port caps if the firmware supports it. */
4148 param = FW_PARAM_PFVF(PORT_CAPS32);
4150 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
4151 sc->params.port_caps32 = 1;
4155 * Override the TOE timers with user-provided tunables. This is not the
4156 * recommended way to change the timers (the firmware config file is), so
4157 * these tunables are not documented.
4159 * All the timer tunables are in microseconds.
4161 if (t4_toe_keepalive_idle != 0) {
4162 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4163 v &= M_KEEPALIVEIDLE;
4164 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4165 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4167 if (t4_toe_keepalive_interval != 0) {
4168 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4169 v &= M_KEEPALIVEINTVL;
4170 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4171 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4173 if (t4_toe_keepalive_count != 0) {
4174 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4175 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4176 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4177 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4178 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4180 if (t4_toe_rexmt_min != 0) {
4181 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4183 t4_set_reg_field(sc, A_TP_RXT_MIN,
4184 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4186 if (t4_toe_rexmt_max != 0) {
4187 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4189 t4_set_reg_field(sc, A_TP_RXT_MAX,
4190 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4192 if (t4_toe_rexmt_count != 0) {
4193 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4194 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4195 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4196 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4197 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
4199 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4200 if (t4_toe_rexmt_backoff[i] != -1) {
4201 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4202 shift = (i & 3) << 3;
4203 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4204 M_TIMERBACKOFFINDEX0 << shift, v << shift);
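/*
 * The backoff indices are packed four to a register in 8-bit fields:
 * entry i lives at bit offset (i & 3) * 8 of register
 * A_TP_TCP_BACKOFF_REG0 + (i & ~3), which is what the shift arithmetic
 * above implements.
 */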
4211 #undef FW_PARAM_PFVF
4215 t4_set_desc(struct adapter *sc)
4218 struct adapter_params *p = &sc->params;
4220 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4222 device_set_desc_copy(sc->dev, buf);
4226 ifmedia_add4(struct ifmedia *ifm, int m)
4229 ifmedia_add(ifm, m, 0, NULL);
4230 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4231 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4232 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
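/*
 * Every media word is added in all four pause flavors (none, txpause,
 * rxpause, both) so that any mediaopt combination a user requests has an
 * exact match in the ifmedia list.
 */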
4236 * This is the selected media, which is not quite the same as the active media.
4237 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
4238 * and active are not the same, and "media: Ethernet selected" otherwise.
4241 set_current_media(struct port_info *pi)
4243 struct link_config *lc;
4244 struct ifmedia *ifm;
4248 PORT_LOCK_ASSERT_OWNED(pi);
4250 /* Leave current media alone if it's already set to IFM_NONE. */
4252 if (ifm->ifm_cur != NULL &&
4253 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4257 if (lc->requested_aneg != AUTONEG_DISABLE &&
4258 lc->supported & FW_PORT_CAP32_ANEG) {
4259 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4262 mword = IFM_ETHER | IFM_FDX;
4263 if (lc->requested_fc & PAUSE_TX)
4264 mword |= IFM_ETH_TXPAUSE;
4265 if (lc->requested_fc & PAUSE_RX)
4266 mword |= IFM_ETH_RXPAUSE;
4267 if (lc->requested_speed == 0)
4268 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
4270 speed = lc->requested_speed;
4271 mword |= port_mword(pi, speed_to_fwcap(speed));
4272 ifmedia_set(ifm, mword);
4276 * Returns true if the ifmedia list for the port cannot change.
4279 fixed_ifmedia(struct port_info *pi)
4282 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
4283 pi->port_type == FW_PORT_TYPE_BT_XFI ||
4284 pi->port_type == FW_PORT_TYPE_BT_XAUI ||
4285 pi->port_type == FW_PORT_TYPE_KX4 ||
4286 pi->port_type == FW_PORT_TYPE_KX ||
4287 pi->port_type == FW_PORT_TYPE_KR ||
4288 pi->port_type == FW_PORT_TYPE_BP_AP ||
4289 pi->port_type == FW_PORT_TYPE_BP4_AP ||
4290 pi->port_type == FW_PORT_TYPE_BP40_BA ||
4291 pi->port_type == FW_PORT_TYPE_KR4_100G ||
4292 pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
4293 pi->port_type == FW_PORT_TYPE_KR_XLAUI);
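/*
 * All of the above are BASE-T or backplane port types with no pluggable
 * transceiver, so the set of supported media can never change at runtime
 * and the ifmedia list needs to be built only once.
 */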
4297 build_medialist(struct port_info *pi)
4300 int unknown, mword, bit;
4301 struct link_config *lc;
4302 struct ifmedia *ifm;
4304 PORT_LOCK_ASSERT_OWNED(pi);
4306 if (pi->flags & FIXED_IFMEDIA)
4310 * Rebuild the ifmedia list.
4313 ifmedia_removeall(ifm);
4315 ss = G_FW_PORT_CAP32_SPEED(lc->supported); /* Supported Speeds */
4316 if (__predict_false(ss == 0)) { /* not supposed to happen. */
4319 MPASS(LIST_EMPTY(&ifm->ifm_list));
4320 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
4321 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
4326 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
4328 MPASS(speed & M_FW_PORT_CAP32_SPEED);
4330 mword = port_mword(pi, speed);
4331 if (mword == IFM_NONE) {
4333 } else if (mword == IFM_UNKNOWN)
4336 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
4339 if (unknown > 0) /* Add one unknown for all unknown media types. */
4340 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
4341 if (lc->supported & FW_PORT_CAP32_ANEG)
4342 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
4344 set_current_media(pi);
4348 * Initialize the requested fields in the link config based on driver tunables.
4351 init_link_config(struct port_info *pi)
4353 struct link_config *lc = &pi->link_cfg;
4355 PORT_LOCK_ASSERT_OWNED(pi);
4357 lc->requested_speed = 0;
4359 if (t4_autoneg == 0)
4360 lc->requested_aneg = AUTONEG_DISABLE;
4361 else if (t4_autoneg == 1)
4362 lc->requested_aneg = AUTONEG_ENABLE;
4364 lc->requested_aneg = AUTONEG_AUTO;
4366 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
4369 if (t4_fec == -1 || t4_fec & FEC_AUTO)
4370 lc->requested_fec = FEC_AUTO;
4372 lc->requested_fec = FEC_NONE;
4373 if (t4_fec & FEC_RS)
4374 lc->requested_fec |= FEC_RS;
4375 if (t4_fec & FEC_BASER_RS)
4376 lc->requested_fec |= FEC_BASER_RS;
4381 * Makes sure that all requested settings comply with what's supported by the
4382 * port. Returns the number of settings that were invalid and had to be fixed.
4385 fixup_link_config(struct port_info *pi)
4388 struct link_config *lc = &pi->link_cfg;
4391 PORT_LOCK_ASSERT_OWNED(pi);
4393 /* Speed (when not autonegotiating) */
4394 if (lc->requested_speed != 0) {
4395 fwspeed = speed_to_fwcap(lc->requested_speed);
4396 if ((fwspeed & lc->supported) == 0) {
4398 lc->requested_speed = 0;
4402 /* Link autonegotiation */
4403 MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
4404 lc->requested_aneg == AUTONEG_DISABLE ||
4405 lc->requested_aneg == AUTONEG_AUTO);
4406 if (lc->requested_aneg == AUTONEG_ENABLE &&
4407 !(lc->supported & FW_PORT_CAP32_ANEG)) {
4409 lc->requested_aneg = AUTONEG_AUTO;
4413 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
4414 if (lc->requested_fc & PAUSE_TX &&
4415 !(lc->supported & FW_PORT_CAP32_FC_TX)) {
4417 lc->requested_fc &= ~PAUSE_TX;
4419 if (lc->requested_fc & PAUSE_RX &&
4420 !(lc->supported & FW_PORT_CAP32_FC_RX)) {
4422 lc->requested_fc &= ~PAUSE_RX;
4424 if (!(lc->requested_fc & PAUSE_AUTONEG) &&
4425 !(lc->supported & FW_PORT_CAP32_FORCE_PAUSE)) {
4427 lc->requested_fc |= PAUSE_AUTONEG;
4431 if ((lc->requested_fec & FEC_RS &&
4432 !(lc->supported & FW_PORT_CAP32_FEC_RS)) ||
4433 (lc->requested_fec & FEC_BASER_RS &&
4434 !(lc->supported & FW_PORT_CAP32_FEC_BASER_RS))) {
4436 lc->requested_fec = FEC_AUTO;
4443 * Apply the requested L1 settings, which are expected to be valid, to the
4447 apply_link_config(struct port_info *pi)
4449 struct adapter *sc = pi->adapter;
4450 struct link_config *lc = &pi->link_cfg;
4454 ASSERT_SYNCHRONIZED_OP(sc);
4455 PORT_LOCK_ASSERT_OWNED(pi);
4457 if (lc->requested_aneg == AUTONEG_ENABLE)
4458 MPASS(lc->supported & FW_PORT_CAP32_ANEG);
4459 if (!(lc->requested_fc & PAUSE_AUTONEG))
4460 MPASS(lc->supported & FW_PORT_CAP32_FORCE_PAUSE);
4461 if (lc->requested_fc & PAUSE_TX)
4462 MPASS(lc->supported & FW_PORT_CAP32_FC_TX);
4463 if (lc->requested_fc & PAUSE_RX)
4464 MPASS(lc->supported & FW_PORT_CAP32_FC_RX);
4465 if (lc->requested_fec & FEC_RS)
4466 MPASS(lc->supported & FW_PORT_CAP32_FEC_RS);
4467 if (lc->requested_fec & FEC_BASER_RS)
4468 MPASS(lc->supported & FW_PORT_CAP32_FEC_BASER_RS);
4470 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
4472 /* Don't complain if the VF driver gets back an EPERM. */
4473 if (!(sc->flags & IS_VF) || rc != FW_EPERM)
4474 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
4477 * An L1_CFG will almost always result in a link-change event if
4478 * the link is up, and the driver will refresh the actual
4479 * fec/fc/etc. when the notification is processed. If the link
4480 * is down then the actual settings are meaningless.
4482 * This takes care of the case where a change in the L1 settings
4483 * may not result in a notification.
4485 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
4486 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
4491 #define FW_MAC_EXACT_CHUNK 7
4494 * Program the port's XGMAC based on parameters in ifnet. The caller also
4495 * indicates which parameters should be programmed (the rest are left alone).
4498 update_mac_settings(struct ifnet *ifp, int flags)
4501 struct vi_info *vi = ifp->if_softc;
4502 struct port_info *pi = vi->pi;
4503 struct adapter *sc = pi->adapter;
4504 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
4506 ASSERT_SYNCHRONIZED_OP(sc);
4507 KASSERT(flags, ("%s: not told what to update.", __func__));
4509 if (flags & XGMAC_MTU)
4512 if (flags & XGMAC_PROMISC)
4513 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
4515 if (flags & XGMAC_ALLMULTI)
4516 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
4518 if (flags & XGMAC_VLANEX)
4519 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
4521 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
4522 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
4523 allmulti, 1, vlanex, false);
4525 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
4531 if (flags & XGMAC_UCADDR) {
4532 uint8_t ucaddr[ETHER_ADDR_LEN];
4534 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
4535 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
4536 ucaddr, true, true);
4539 if_printf(ifp, "change_mac failed: %d\n", rc);
4542 vi->xact_addr_filt = rc;
4547 if (flags & XGMAC_MCADDRS) {
4548 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
4551 struct ifmultiaddr *ifma;
4554 if_maddr_rlock(ifp);
4555 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4556 if (ifma->ifma_addr->sa_family != AF_LINK)
4559 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
4560 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
4563 if (i == FW_MAC_EXACT_CHUNK) {
4564 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
4565 del, i, mcaddr, NULL, &hash, 0);
4568 for (j = 0; j < i; j++) {
4570 "failed to add mc address"
4572 "%02x:%02x:%02x rc=%d\n",
4573 mcaddr[j][0], mcaddr[j][1],
4574 mcaddr[j][2], mcaddr[j][3],
4575 mcaddr[j][4], mcaddr[j][5],
4585 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
4586 mcaddr, NULL, &hash, 0);
4589 for (j = 0; j < i; j++) {
4591 "failed to add mc address"
4593 "%02x:%02x:%02x rc=%d\n",
4594 mcaddr[j][0], mcaddr[j][1],
4595 mcaddr[j][2], mcaddr[j][3],
4596 mcaddr[j][4], mcaddr[j][5],
4603 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
4605 if_printf(ifp, "failed to set mc address hash: %d", rc);
4607 if_maddr_runlock(ifp);
4614 * {begin|end}_synchronized_op must be called from the same thread.
4617 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
4623 /* the caller thinks it's ok to sleep, but is it really? */
4624 if (flags & SLEEP_OK)
4625 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
4626 "begin_synchronized_op");
4637 if (vi && IS_DOOMED(vi)) {
4647 if (!(flags & SLEEP_OK)) {
4652 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
4658 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
4661 sc->last_op = wmesg;
4662 sc->last_op_thr = curthread;
4663 sc->last_op_flags = flags;
4667 if (!(flags & HOLD_LOCK) || rc)
4674 * Tell if_ioctl and if_init that the VI is going away. This is
4675 * a special variant of begin_synchronized_op and must be paired with a
4676 * call to end_synchronized_op.
4679 doom_vi(struct adapter *sc, struct vi_info *vi)
4686 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
4689 sc->last_op = "t4detach";
4690 sc->last_op_thr = curthread;
4691 sc->last_op_flags = 0;
4697 * {begin|end}_synchronized_op must be called from the same thread.
4700 end_synchronized_op(struct adapter *sc, int flags)
4703 if (flags & LOCK_HELD)
4704 ADAPTER_LOCK_ASSERT_OWNED(sc);
4708 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
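/*
 * Idempotent ifnet init for a VI: performs adapter-wide and VI-wide
 * initialization if needed, programs the MAC and (for the first VI up on
 * the port) the link, enables the VI, and marks the ifnet running.
 */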
4715 cxgbe_init_synchronized(struct vi_info *vi)
4717 struct port_info *pi = vi->pi;
4718 struct adapter *sc = pi->adapter;
4719 struct ifnet *ifp = vi->ifp;
4721 struct sge_txq *txq;
4723 ASSERT_SYNCHRONIZED_OP(sc);
4725 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4726 return (0); /* already running */
4728 if (!(sc->flags & FULL_INIT_DONE) &&
4729 ((rc = adapter_full_init(sc)) != 0))
4730 return (rc); /* error message displayed already */
4732 if (!(vi->flags & VI_INIT_DONE) &&
4733 ((rc = vi_full_init(vi)) != 0))
4734 return (rc); /* error message displayed already */
4736 rc = update_mac_settings(ifp, XGMAC_ALL);
4738 goto done; /* error message displayed already */
4741 if (pi->up_vis == 0) {
4742 t4_update_port_info(pi);
4743 fixup_link_config(pi);
4744 build_medialist(pi);
4745 apply_link_config(pi);
4748 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
4750 if_printf(ifp, "enable_vi failed: %d\n", rc);
4756 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
4760 for_each_txq(vi, i, txq) {
4762 txq->eq.flags |= EQ_ENABLED;
4767 * The first iq of the first port to come up is used for tracing.
4769 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
4770 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
4771 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
4772 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
4773 V_QUEUENUMBER(sc->traceq));
4774 pi->flags |= HAS_TRACEQ;
4779 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4781 if (pi->nvi > 1 || sc->flags & IS_VF)
4782 callout_reset(&vi->tick, hz, vi_tick, vi);
4784 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
4788 cxgbe_uninit_synchronized(vi);
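/*
 * Idempotent counterpart of cxgbe_init_synchronized: disables the VI and
 * its tx queues, stops the tick callouts, and reports the link down once
 * the last VI on the port has been taken down.
 */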
4797 cxgbe_uninit_synchronized(struct vi_info *vi)
4799 struct port_info *pi = vi->pi;
4800 struct adapter *sc = pi->adapter;
4801 struct ifnet *ifp = vi->ifp;
4803 struct sge_txq *txq;
4805 ASSERT_SYNCHRONIZED_OP(sc);
4807 if (!(vi->flags & VI_INIT_DONE)) {
4808 if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4809 KASSERT(0, ("uninited VI is running"));
4810 if_printf(ifp, "uninited VI with running ifnet. "
4811 "vi->flags 0x%016lx, if_flags 0x%08x, "
4812 "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
4819 * Disable the VI so that all its data in either direction is discarded
4820 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
4821 * tick) intact as the TP can deliver negative advice or data that it's
4822 * holding in its RAM (for an offloaded connection) even after the VI is
4825 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
4827 if_printf(ifp, "disable_vi failed: %d\n", rc);
4831 for_each_txq(vi, i, txq) {
4833 txq->eq.flags &= ~EQ_ENABLED;
4838 if (pi->nvi > 1 || sc->flags & IS_VF)
4839 callout_stop(&vi->tick);
4841 callout_stop(&pi->tick);
4842 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4846 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4848 if (pi->up_vis > 0) {
4853 pi->link_cfg.link_ok = false;
4854 pi->link_cfg.speed = 0;
4855 pi->link_cfg.link_down_rc = 255;
4856 t4_os_link_changed(pi);
4863 * It is ok for this function to fail midway and return right away. t4_detach
4864 * will walk the entire sc->irq list and clean up whatever is valid.
4867 t4_setup_intr_handlers(struct adapter *sc)
4869 int rc, rid, p, q, v;
4872 struct port_info *pi;
4874 struct sge *sge = &sc->sge;
4875 struct sge_rxq *rxq;
4877 struct sge_ofld_rxq *ofld_rxq;
4880 struct sge_nm_rxq *nm_rxq;
4883 int nbuckets = rss_getnumbuckets();
4890 rid = sc->intr_type == INTR_INTX ? 0 : 1;
4891 if (forwarding_intr_to_fwq(sc))
4892 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
4894 /* Multiple interrupts. */
4895 if (sc->flags & IS_VF)
4896 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
4897 ("%s: too few intr.", __func__));
4899 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
4900 ("%s: too few intr.", __func__));
4902 /* The first one is always error intr on PFs */
4903 if (!(sc->flags & IS_VF)) {
4904 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
4911 /* The second one is always the firmware event queue (first on VFs) */
4912 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
4918 for_each_port(sc, p) {
4920 for_each_vi(pi, v, vi) {
4921 vi->first_intr = rid - 1;
4923 if (vi->nnmrxq > 0) {
4924 int n = max(vi->nrxq, vi->nnmrxq);
4926 rxq = &sge->rxq[vi->first_rxq];
4928 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
4930 for (q = 0; q < n; q++) {
4931 snprintf(s, sizeof(s), "%x%c%x", p,
4937 irq->nm_rxq = nm_rxq++;
4939 if (irq->nm_rxq != NULL &&
4941 /* Netmap rx only */
4942 rc = t4_alloc_irq(sc, irq, rid,
4943 t4_nm_intr, irq->nm_rxq, s);
4945 if (irq->nm_rxq != NULL &&
4947 /* NIC and Netmap rx */
4948 rc = t4_alloc_irq(sc, irq, rid,
4949 t4_vi_intr, irq, s);
4952 if (irq->rxq != NULL &&
4953 irq->nm_rxq == NULL) {
4955 rc = t4_alloc_irq(sc, irq, rid,
4956 t4_intr, irq->rxq, s);
4962 bus_bind_intr(sc->dev, irq->res,
4963 rss_getcpu(q % nbuckets));
4971 for_each_rxq(vi, q, rxq) {
4972 snprintf(s, sizeof(s), "%x%c%x", p,
4974 rc = t4_alloc_irq(sc, irq, rid,
4979 bus_bind_intr(sc->dev, irq->res,
4980 rss_getcpu(q % nbuckets));
4988 for_each_ofld_rxq(vi, q, ofld_rxq) {
4989 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
4990 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
5001 MPASS(irq == &sc->irq[sc->intr_count]);
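/*
 * One-time, adapter-wide initialization: the adapter's own queues, the
 * per-adapter taskqueues, and (on a PF) the global RSS key.  Guarded by
 * FULL_INIT_DONE.
 */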
5007 adapter_full_init(struct adapter *sc)
5011 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5012 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5015 ASSERT_SYNCHRONIZED_OP(sc);
5016 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5017 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
5018 ("%s: FULL_INIT_DONE already", __func__));
5021 * queues that belong to the adapter (not any particular port).
5023 rc = t4_setup_adapter_queues(sc);
5027 for (i = 0; i < nitems(sc->tq); i++) {
5028 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
5029 taskqueue_thread_enqueue, &sc->tq[i]);
5030 if (sc->tq[i] == NULL) {
5031 device_printf(sc->dev,
5032 "failed to allocate task queue %d\n", i);
5036 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
5037 device_get_nameunit(sc->dev), i);
5040 MPASS(RSS_KEYSIZE == 40);
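/*
 * Hand the kernel's 40-byte RSS key to the chip.  The key is word-reversed
 * and byte-swapped to big-endian before being written with
 * t4_write_rss_key.
 */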
5041 rss_getkey((void *)&raw_rss_key[0]);
5042 for (i = 0; i < nitems(rss_key); i++) {
5043 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
5045 t4_write_rss_key(sc, &rss_key[0], -1, 1);
5048 if (!(sc->flags & IS_VF))
5050 sc->flags |= FULL_INIT_DONE;
5053 adapter_full_uninit(sc);
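/*
 * Undo adapter_full_init: tear down the adapter queues, free the
 * taskqueues, and clear FULL_INIT_DONE.
 */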
5059 adapter_full_uninit(struct adapter *sc)
5063 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5065 t4_teardown_adapter_queues(sc);
5067 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
5068 taskqueue_free(sc->tq[i]);
5072 sc->flags &= ~FULL_INIT_DONE;
5078 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
5079 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
5080 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
5081 RSS_HASHTYPE_RSS_UDP_IPV6)
5083 /* Translates kernel hash types to hardware. */
5085 hashconfig_to_hashen(int hashconfig)
5089 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
5090 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
5091 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
5092 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
5093 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
5094 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5095 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5097 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
5098 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5099 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5101 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
5102 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5103 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
5104 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5109 /* Translates hardware hash types to kernel. */
5111 hashen_to_hashconfig(int hashen)
5115 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
5117 * If UDP hashing was enabled it must have been enabled for
5118 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
5119 * enabling any 4-tuple hash is a nonsensical configuration.
5121 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5122 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
5124 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5125 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
5126 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5127 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
5129 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5130 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
5131 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5132 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
5133 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
5134 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
5135 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
5136 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
5138 return (hashconfig);
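/*
 * One-time init of a VI: allocate its queues, program the RSS indirection
 * table (warning about nrxq/table-size mismatches), and set up the VI's
 * RSS hash types.
 */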
5143 vi_full_init(struct vi_info *vi)
5145 struct adapter *sc = vi->pi->adapter;
5146 struct ifnet *ifp = vi->ifp;
5148 struct sge_rxq *rxq;
5149 int rc, i, j, hashen;
5151 int nbuckets = rss_getnumbuckets();
5152 int hashconfig = rss_gethashconfig();
5156 ASSERT_SYNCHRONIZED_OP(sc);
5157 KASSERT((vi->flags & VI_INIT_DONE) == 0,
5158 ("%s: VI_INIT_DONE already", __func__));
5160 sysctl_ctx_init(&vi->ctx);
5161 vi->flags |= VI_SYSCTL_CTX;
5164 * Allocate tx/rx/fl queues for this VI.
5166 rc = t4_setup_vi_queues(vi);
5168 goto done; /* error message displayed already */
5171 * Setup RSS for this VI. Save a copy of the RSS table for later use.
5173 if (vi->nrxq > vi->rss_size) {
5174 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
5175 "some queues will never receive traffic.\n", vi->nrxq,
5177 } else if (vi->rss_size % vi->nrxq) {
5178 if_printf(ifp, "nrxq (%d) does not divide hw RSS table size (%d); "
5179 "expect uneven traffic distribution.\n", vi->nrxq,
5183 if (vi->nrxq != nbuckets) {
5184 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
5185 "performance will be impacted.\n", vi->nrxq, nbuckets);
5188 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
5189 for (i = 0; i < vi->rss_size;) {
5191 j = rss_get_indirection_to_bucket(i);
5193 rxq = &sc->sge.rxq[vi->first_rxq + j];
5194 rss[i++] = rxq->iq.abs_id;
5196 for_each_rxq(vi, j, rxq) {
5197 rss[i++] = rxq->iq.abs_id;
5198 if (i == vi->rss_size)
5204 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
5208 if_printf(ifp, "rss_config failed: %d\n", rc);
5213 hashen = hashconfig_to_hashen(hashconfig);
5216 * We may have had to enable some hashes even though the global config
5217 * wants them disabled. This is a potential problem that must be
5218 * reported to the user.
5220 extra = hashen_to_hashconfig(hashen) ^ hashconfig;
5223 * If we consider only the supported hash types, then the enabled hashes
5224 * are a superset of the requested hashes. In other words, there cannot
5225 * be any supported hash that was requested but not enabled, but there
5226 * can be hashes that were not requested but had to be enabled.
5228 extra &= SUPPORTED_RSS_HASHTYPES;
5229 MPASS((extra & hashconfig) == 0);
5233 "global RSS config (0x%x) cannot be accommodated.\n",
5236 if (extra & RSS_HASHTYPE_RSS_IPV4)
5237 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
5238 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
5239 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
5240 if (extra & RSS_HASHTYPE_RSS_IPV6)
5241 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
5242 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
5243 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
5244 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
5245 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
5246 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
5247 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
5249 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
5250 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
5251 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5252 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
5254 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
5257 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
5262 vi->flags |= VI_INIT_DONE;
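/*
 * Undo vi_full_init: quiesce and tear down all of the VI's queues and
 * free its RSS state.
 */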
5274 vi_full_uninit(struct vi_info *vi)
5276 struct port_info *pi = vi->pi;
5277 struct adapter *sc = pi->adapter;
5279 struct sge_rxq *rxq;
5280 struct sge_txq *txq;
5282 struct sge_ofld_rxq *ofld_rxq;
5283 struct sge_wrq *ofld_txq;
5286 if (vi->flags & VI_INIT_DONE) {
5288 /* Need to quiesce queues. */
5290 /* XXX: Only for the first VI? */
5291 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
5292 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
5294 for_each_txq(vi, i, txq) {
5295 quiesce_txq(sc, txq);
5299 for_each_ofld_txq(vi, i, ofld_txq) {
5300 quiesce_wrq(sc, ofld_txq);
5304 for_each_rxq(vi, i, rxq) {
5305 quiesce_iq(sc, &rxq->iq);
5306 quiesce_fl(sc, &rxq->fl);
5310 for_each_ofld_rxq(vi, i, ofld_rxq) {
5311 quiesce_iq(sc, &ofld_rxq->iq);
5312 quiesce_fl(sc, &ofld_rxq->fl);
5315 free(vi->rss, M_CXGBE);
5316 free(vi->nm_rss, M_CXGBE);
5319 t4_teardown_vi_queues(vi);
5320 vi->flags &= ~VI_INIT_DONE;
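/*
 * Drain a tx queue in three stages: the software mp_ring, the hardware,
 * and finally the driver's own descriptor reclamation.
 */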
5326 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
5328 struct sge_eq *eq = &txq->eq;
5329 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
5331 (void) sc; /* unused */
5335 MPASS((eq->flags & EQ_ENABLED) == 0);
5339 /* Wait for the mp_ring to empty. */
5340 while (!mp_ring_is_idle(txq->r)) {
5341 mp_ring_check_drainage(txq->r, 0);
5342 pause("rquiesce", 1);
5345 /* Then wait for the hardware to finish. */
5346 while (spg->cidx != htobe16(eq->pidx))
5347 pause("equiesce", 1);
5349 /* Finally, wait for the driver to reclaim all descriptors. */
5350 while (eq->cidx != eq->pidx)
5351 pause("dquiesce", 1);
5355 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
5362 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
5364 (void) sc; /* unused */
5366 /* Synchronize with the interrupt handler */
5367 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
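/*
 * Doom the freelist so that the starvation machinery leaves it alone from
 * now on, and assert that it isn't starving when we return.
 */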
5372 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
5374 mtx_lock(&sc->sfl_lock);
5376 fl->flags |= FL_DOOMED;
5378 callout_stop(&sc->sfl_callout);
5379 mtx_unlock(&sc->sfl_lock);
5381 KASSERT((fl->flags & FL_STARVING) == 0,
5382 ("%s: still starving", __func__));
5386 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
5387 driver_intr_t *handler, void *arg, char *name)
5392 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
5393 RF_SHAREABLE | RF_ACTIVE);
5394 if (irq->res == NULL) {
5395 device_printf(sc->dev,
5396 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
5400 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
5401 NULL, handler, arg, &irq->tag);
5403 device_printf(sc->dev,
5404 "failed to setup interrupt for rid %d, name %s: %d\n",
5407 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
5413 t4_free_irq(struct adapter *sc, struct irq *irq)
5416 bus_teardown_intr(sc->dev, irq->res, irq->tag);
5418 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
5420 bzero(irq, sizeof(*irq));
5426 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
5429 regs->version = chip_id(sc) | chip_rev(sc) << 10;
5430 t4_get_regs(sc, buf, regs->len);
5433 #define A_PL_INDIR_CMD 0x1f8
5435 #define S_PL_AUTOINC 31
5436 #define M_PL_AUTOINC 0x1U
5437 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
5438 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
5440 #define S_PL_VFID 20
5441 #define M_PL_VFID 0xffU
5442 #define V_PL_VFID(x) ((x) << S_PL_VFID)
5443 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
5446 #define M_PL_ADDR 0xfffffU
5447 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
5448 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
5450 #define A_PL_INDIR_DATA 0x1fc
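/*
 * Read one 64-bit VF statistic from the MPS.  A VF reads its own registers
 * directly; the PF reaches into the VF's register space via the PL
 * indirect interface, with auto-increment covering the high word.
 */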
5453 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
5457 mtx_assert(&sc->reg_lock, MA_OWNED);
5458 if (sc->flags & IS_VF) {
5459 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
5460 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
5462 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5463 V_PL_VFID(G_FW_VIID_VIN(viid)) |
5464 V_PL_ADDR(VF_MPS_REG(reg)));
5465 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
5466 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
5468 return (((uint64_t)stats[1]) << 32 | stats[0]);
5472 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
5473 struct fw_vi_stats_vf *stats)
5476 #define GET_STAT(name) \
5477 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
5479 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
5480 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
5481 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
5482 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
5483 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
5484 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
5485 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
5486 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
5487 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
5488 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
5489 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
5490 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
5491 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
5492 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
5493 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
5494 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
5500 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
5504 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5505 V_PL_VFID(G_FW_VIID_VIN(viid)) |
5506 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
5507 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
5508 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
5509 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
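/*
 * Refresh the VI's stats from the hardware, rate-limited to once every
 * 250ms.
 */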
5513 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
5516 const struct timeval interval = {0, 250000}; /* 250ms */
5518 if (!(vi->flags & VI_INIT_DONE))
5522 timevalsub(&tv, &interval);
5523 if (timevalcmp(&tv, &vi->last_refreshed, <))
5526 mtx_lock(&sc->reg_lock);
5527 t4_get_vi_stats(sc, vi->viid, &vi->stats);
5528 getmicrotime(&vi->last_refreshed);
5529 mtx_unlock(&sc->reg_lock);
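/*
 * Refresh the port's MPS stats and its tunnel congestion drop counts,
 * also rate-limited to once every 250ms.
 */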
5533 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
5535 u_int i, v, tnl_cong_drops, bg_map;
5537 const struct timeval interval = {0, 250000}; /* 250ms */
5540 timevalsub(&tv, &interval);
5541 if (timevalcmp(&tv, &pi->last_refreshed, <))
5545 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
5546 bg_map = pi->mps_bg_map;
5548 i = ffs(bg_map) - 1;
5549 mtx_lock(&sc->reg_lock);
5550 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
5551 A_TP_MIB_TNL_CNG_DROP_0 + i);
5552 mtx_unlock(&sc->reg_lock);
5553 tnl_cong_drops += v;
5554 bg_map &= ~(1 << i);
5556 pi->tnl_cong_drops = tnl_cong_drops;
5557 getmicrotime(&pi->last_refreshed);
5561 cxgbe_tick(void *arg)
5563 struct port_info *pi = arg;
5564 struct adapter *sc = pi->adapter;
5566 PORT_LOCK_ASSERT_OWNED(pi);
5567 cxgbe_refresh_stats(sc, pi);
5569 callout_schedule(&pi->tick, hz);
5575 struct vi_info *vi = arg;
5576 struct adapter *sc = vi->pi->adapter;
5578 vi_refresh_stats(sc, vi);
5580 callout_schedule(&vi->tick, hz);
5584 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
5586 static char *caps_decoder[] = {
5587 "\20\001IPMI\002NCSI", /* 0: NBM */
5588 "\20\001PPP\002QFC\003DCBX", /* 1: link */
5589 "\20\001INGRESS\002EGRESS", /* 2: switch */
5590 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
5591 "\006HASHFILTER\007ETHOFLD",
5592 "\20\001TOE", /* 4: TOE */
5593 "\20\001RDDP\002RDMAC", /* 5: RDMA */
5594 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
5595 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
5596 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
5598 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
5599 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */
5600 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
5601 "\004PO_INITIATOR\005PO_TARGET",
5605 t4_sysctls(struct adapter *sc)
5607 struct sysctl_ctx_list *ctx;
5608 struct sysctl_oid *oid;
5609 struct sysctl_oid_list *children, *c0;
5610 static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
5612 ctx = device_get_sysctl_ctx(sc->dev);
5617 oid = device_get_sysctl_tree(sc->dev);
5618 c0 = children = SYSCTL_CHILDREN(oid);
5620 sc->sc_do_rxcopy = 1;
5621 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5622 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5624 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5625 sc->params.nports, "# of ports");
5627 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5628 CTLTYPE_STRING | CTLFLAG_RD, doorbells, (uintptr_t)&sc->doorbells,
5629 sysctl_bitfield_8b, "A", "available doorbells");
5631 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
5632 sc->params.vpd.cclk, "core clock frequency (in kHz)");
5634 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5635 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
5636 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
5637 "interrupt holdoff timer values (us)");
5639 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5640 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
5641 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
5642 "interrupt holdoff packet counter values");
5644 t4_sge_sysctls(sc, ctx, children);
5646 sc->lro_timeout = 100;
5647 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5648 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5650 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
5651 &sc->debug_flags, 0, "flags to enable runtime debugging");
5653 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
5654 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
5656 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5657 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5659 if (sc->flags & IS_VF)
5662 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5663 NULL, chip_rev(sc), "chip hardware revision");
5665 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
5666 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
5668 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
5669 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
5671 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
5672 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
5674 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
5675 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
5677 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
5678 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
5680 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
5681 sc->er_version, 0, "expansion ROM version");
5683 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
5684 sc->bs_version, 0, "bootstrap firmware version");
5686 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
5687 NULL, sc->params.scfg_vers, "serial config version");
5689 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
5690 NULL, sc->params.vpd_vers, "VPD version");
5692 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5693 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5695 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5696 sc->cfcsum, "config file checksum");
5698 #define SYSCTL_CAP(name, n, text) \
5699 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
5700 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], (uintptr_t)&sc->name, \
5701 sysctl_bitfield_16b, "A", "available " text " capabilities")
5703 SYSCTL_CAP(nbmcaps, 0, "NBM");
5704 SYSCTL_CAP(linkcaps, 1, "link");
5705 SYSCTL_CAP(switchcaps, 2, "switch");
5706 SYSCTL_CAP(niccaps, 3, "NIC");
5707 SYSCTL_CAP(toecaps, 4, "TCP offload");
5708 SYSCTL_CAP(rdmacaps, 5, "RDMA");
5709 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
5710 SYSCTL_CAP(cryptocaps, 7, "crypto");
5711 SYSCTL_CAP(fcoecaps, 8, "FCoE");
5714 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
5715 NULL, sc->tids.nftids, "number of filters");
5717 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
5718 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
5719 "chip temperature (in Celsius)");
5721 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING |
5722 CTLFLAG_RD, sc, 0, sysctl_loadavg, "A",
5723 "microprocessor load averages (debug firmwares only)");
5725 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD,
5726 &sc->params.core_vdd, 0, "core Vdd (in mV)");
5728 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
5729 CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS,
5730 sysctl_cpus, "A", "local CPUs");
5732 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
5733 CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS,
5734 sysctl_cpus, "A", "preferred CPUs for interrupts");
5737 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
5739 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
5740 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
5741 "logs and miscellaneous information");
5742 children = SYSCTL_CHILDREN(oid);
5744 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
5745 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5746 sysctl_cctrl, "A", "congestion control");
5748 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
5749 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5750 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
5752 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
5753 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
5754 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
5756 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
5757 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
5758 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
5760 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
5761 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
5762 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
5764 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
5765 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
5766 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
5768 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
5769 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
5770 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
5772 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
5773 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5774 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
5775 "A", "CIM logic analyzer");
5777 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5778 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5779 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5781 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5782 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5783 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5785 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5786 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5787 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5789 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5790 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5791 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5793 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5794 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5795 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5797 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5798 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5799 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5801 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5802 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5803 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5805 if (chip_id(sc) > CHELSIO_T4) {
5806 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5807 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5808 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5810 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5811 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5812 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5816 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5817 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5820 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5821 sysctl_cim_qcfg, "A", "CIM queue configuration");
5823 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5824 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5825 sysctl_cpl_stats, "A", "CPL statistics");
5827 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5828 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5829 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5831 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5832 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5833 sysctl_devlog, "A", "firmware's device log");
5835 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5836 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5837 sysctl_fcoe_stats, "A", "FCoE statistics");
5839 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5840 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5841 sysctl_hw_sched, "A", "hardware scheduler");
5843 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5844 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5845 sysctl_l2t, "A", "hardware L2 table");
5847 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
5848 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5849 sysctl_smt, "A", "hardware source MAC table");
5851 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5852 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5853 sysctl_lb_stats, "A", "loopback statistics");
5855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5856 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5857 sysctl_meminfo, "A", "memory regions");
5859 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5860 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5861 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
5862 "A", "MPS TCAM entries");
5864 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5865 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5866 sysctl_path_mtus, "A", "path MTUs");
5868 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5869 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5870 sysctl_pm_stats, "A", "PM statistics");
5872 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5873 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5874 sysctl_rdma_stats, "A", "RDMA statistics");
5876 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5877 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5878 sysctl_tcp_stats, "A", "TCP statistics");
5880 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5881 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5882 sysctl_tids, "A", "TID information");
5884 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5885 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5886 sysctl_tp_err_stats, "A", "TP error statistics");
5888 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
5889 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
5890 "TP logic analyzer event capture mask");
5892 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5893 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5894 sysctl_tp_la, "A", "TP logic analyzer");
5896 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5897 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5898 sysctl_tx_rate, "A", "Tx rate");
5900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5901 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5902 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5904 if (chip_id(sc) >= CHELSIO_T5) {
5905 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5906 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5907 sysctl_wcwr_stats, "A", "write combined work requests");
5911 if (is_offload(sc)) {
5918 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5919 NULL, "TOE parameters");
5920 children = SYSCTL_CHILDREN(oid);
5922 sc->tt.cong_algorithm = -1;
5923 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
5924 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
5925 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
5928 sc->tt.sndbuf = 256 * 1024;
5929 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5930 &sc->tt.sndbuf, 0, "max hardware send buffer size");
5933 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5934 &sc->tt.ddp, 0, "DDP allowed");
5936 sc->tt.rx_coalesce = 1;
5937 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5938 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5941 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW,
5942 &sc->tt.tls, 0, "Inline TLS allowed");
5944 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
5945 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
5946 "I", "TCP ports that use inline TLS+TOE RX");
5948 sc->tt.tx_align = 1;
5949 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5950 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5952 sc->tt.tx_zcopy = 0;
5953 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
5954 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
5955 "Enable zero-copy aio_write(2)");
5957 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
5958 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5959 "cop_managed_offloading", CTLFLAG_RW,
5960 &sc->tt.cop_managed_offloading, 0,
5961 "COP (Connection Offload Policy) controls all TOE offload");
5963 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
5964 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
5965 "TP timer tick (us)");
5967 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
5968 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
5969 "TCP timestamp tick (us)");
5971 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
5972 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
5975 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
5976 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
5977 "IU", "DACK timer (us)");
5979 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
5980 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
5981 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)");
5983 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
5984 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
5985 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)");
5987 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
5988 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
5989 sysctl_tp_timer, "LU", "Persist timer min (us)");
5991 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
5992 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
5993 sysctl_tp_timer, "LU", "Persist timer max (us)");
5995 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
5996 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
5997 sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
5999 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
6000 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
6001 sysctl_tp_timer, "LU", "Keepalive interval timer (us)");
6003 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
6004 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
6005 sysctl_tp_timer, "LU", "Initial SRTT (us)");
6007 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
6008 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
6009 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
6011 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
6012 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
6013 sysctl_tp_shift_cnt, "IU",
6014 "Number of SYN retransmissions before abort");
6016 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
6017 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
6018 sysctl_tp_shift_cnt, "IU",
6019 "Number of retransmissions before abort");
6021 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
6022 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
6023 sysctl_tp_shift_cnt, "IU",
6024 "Number of keepalive probes before abort");
6026 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
6027 CTLFLAG_RD, NULL, "TOE retransmit backoffs");
6028 children = SYSCTL_CHILDREN(oid);
6029 for (i = 0; i < 16; i++) {
6030 snprintf(s, sizeof(s), "%u", i);
6031 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
6032 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
6033 "IU", "TOE retransmit backoff");
6040 vi_sysctls(struct vi_info *vi)
6042 struct sysctl_ctx_list *ctx;
6043 struct sysctl_oid *oid;
6044 struct sysctl_oid_list *children;
6046 ctx = device_get_sysctl_ctx(vi->dev);
6049 * dev.v?(cxgbe|cxl).X.
6051 oid = device_get_sysctl_tree(vi->dev);
6052 children = SYSCTL_CHILDREN(oid);
6054 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
6055 vi->viid, "VI identifier");
6056 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
6057 &vi->nrxq, 0, "# of rx queues");
6058 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
6059 &vi->ntxq, 0, "# of tx queues");
6060 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
6061 &vi->first_rxq, 0, "index of first rx queue");
6062 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
6063 &vi->first_txq, 0, "index of first tx queue");
6064 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
6065 vi->rss_size, "size of RSS indirection table");
6067 if (IS_MAIN_VI(vi)) {
6068 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
6069 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
6070 "Reserve queue 0 for non-flowid packets");
6074 if (vi->nofldrxq != 0) {
6075 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
6077 "# of rx queues for offloaded TCP connections");
6078 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
6080 "# of tx queues for offloaded TCP connections");
6081 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
6082 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
6083 "index of first TOE rx queue");
6084 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
6085 CTLFLAG_RD, &vi->first_ofld_txq, 0,
6086 "index of first TOE tx queue");
6087 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
6088 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
6089 sysctl_holdoff_tmr_idx_ofld, "I",
6090 "holdoff timer index for TOE queues");
6091 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
6092 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
6093 sysctl_holdoff_pktc_idx_ofld, "I",
6094 "holdoff packet counter index for TOE queues");
6098 if (vi->nnmrxq != 0) {
6099 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
6100 &vi->nnmrxq, 0, "# of netmap rx queues");
6101 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
6102 &vi->nnmtxq, 0, "# of netmap tx queues");
6103 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
6104 CTLFLAG_RD, &vi->first_nm_rxq, 0,
6105 "index of first netmap rx queue");
6106 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
6107 CTLFLAG_RD, &vi->first_nm_txq, 0,
6108 "index of first netmap tx queue");
6112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
6113 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
6114 "holdoff timer index");
6115 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
6116 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
6117 "holdoff packet counter index");
6119 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
6120 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
6122 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
6123 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
6128 cxgbe_sysctls(struct port_info *pi)
6130 struct sysctl_ctx_list *ctx;
6131 struct sysctl_oid *oid;
6132 struct sysctl_oid_list *children, *children2;
6133 struct adapter *sc = pi->adapter;
6136 static char *tc_flags = "\20\1USER\2SYNC\3ASYNC\4ERR";
6138 ctx = device_get_sysctl_ctx(pi->dev);
6143 oid = device_get_sysctl_tree(pi->dev);
6144 children = SYSCTL_CHILDREN(oid);
6146 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
6147 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
6148 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
6149 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
6150 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
6151 "PHY temperature (in Celsius)");
6152 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
6153 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
6154 "PHY firmware version");
6157 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
6158 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
6159 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
6160 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
6161 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
6162 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
6163 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
6164 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
6165 "autonegotiation (-1 = not supported)");
6167 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
6168 port_top_speed(pi), "max speed (in Gbps)");
6169 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
6170 pi->mps_bg_map, "MPS buffer group map");
6171 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
6172 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
6174 if (sc->flags & IS_VF)
6178 * dev.(cxgbe|cxl).X.tc.
6180 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
6181 "Tx scheduler traffic classes (cl_rl)");
6182 children2 = SYSCTL_CHILDREN(oid);
6183 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
6184 CTLFLAG_RW, &pi->sched_params->pktsize, 0,
6185 "pktsize for per-flow cl-rl (0 means up to the driver )");
6186 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
6187 CTLFLAG_RW, &pi->sched_params->burstsize, 0,
6188 "burstsize for per-flow cl-rl (0 means up to the driver)");
6189 for (i = 0; i < sc->chip_params->nsched_cls; i++) {
6190 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
6192 snprintf(name, sizeof(name), "%d", i);
6193 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
6194 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
6196 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
6197 CTLTYPE_STRING | CTLFLAG_RD, tc_flags, (uintptr_t)&tc->flags,
6198 sysctl_bitfield_8b, "A", "flags");
6199 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
6200 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
6201 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
6202 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
6203 sysctl_tc_params, "A", "traffic class parameters");
6207 * dev.cxgbe.X.stats.
6209 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
6210 NULL, "port statistics");
6211 children = SYSCTL_CHILDREN(oid);
6212 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
6213 &pi->tx_parse_error, 0,
6214 "# of tx packets with invalid length or # of segments");
6216 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
6217 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
6218 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
6219 sysctl_handle_t4_reg64, "QU", desc)
6221 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
6222 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
6223 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
6224 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
6225 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
6226 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
6227 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
6228 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
6229 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
6230 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
6231 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
6232 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
6233 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
6234 "# of tx frames in this range",
6235 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
6236 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
6237 "# of tx frames in this range",
6238 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
6239 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
6240 "# of tx frames in this range",
6241 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
6242 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
6243 "# of tx frames in this range",
6244 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
6245 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
6246 "# of tx frames in this range",
6247 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
6248 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
6249 "# of tx frames in this range",
6250 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
6251 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
6252 "# of tx frames in this range",
6253 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
6254 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
6255 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
6256 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
6257 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
6258 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
6259 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
6260 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
6261 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
6262 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
6263 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
6264 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
6265 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
6266 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
6267 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
6268 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
6269 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
6270 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
6271 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
6272 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
6273 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
6275 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
6276 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
6277 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
6278 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
6279 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
6280 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
6281 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
6282 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
6283 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
6284 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
6285 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
6286 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
6287 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
6288 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
6289 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
6290 "# of frames received with bad FCS",
6291 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
6292 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
6293 "# of frames received with length error",
6294 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
6295 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
6296 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
6297 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
6298 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
6299 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
6300 "# of rx frames in this range",
6301 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
6302 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
6303 "# of rx frames in this range",
6304 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
6305 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
6306 "# of rx frames in this range",
6307 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
6308 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
6309 "# of rx frames in this range",
6310 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
6311 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
6312 "# of rx frames in this range",
6313 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
6314 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
6315 "# of rx frames in this range",
6316 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
6317 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
6318 "# of rx frames in this range",
6319 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
6320 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
6321 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
6322 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
6323 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
6324 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
6325 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
6326 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
6327 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
6328 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
6329 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
6330 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
6331 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
6332 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
6333 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
6334 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
6335 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
6336 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
6337 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
6339 #undef SYSCTL_ADD_T4_REG64
6341 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
6342 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
6343 &pi->stats.name, desc)
6345 /* We get these from port_stats and they may be stale by up to 1s */
6346 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
6347 "# drops due to buffer-group 0 overflows");
6348 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
6349 "# drops due to buffer-group 1 overflows");
6350 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
6351 "# drops due to buffer-group 2 overflows");
6352 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
6353 "# drops due to buffer-group 3 overflows");
6354 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
6355 "# of buffer-group 0 truncated packets");
6356 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
6357 "# of buffer-group 1 truncated packets");
6358 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
6359 "# of buffer-group 2 truncated packets");
6360 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
6361 "# of buffer-group 3 truncated packets");
6363 #undef SYSCTL_ADD_T4_PORTSTAT
6365 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records",
6366 CTLFLAG_RD, &pi->tx_tls_records,
6367 "# of TLS records transmitted");
6368 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets",
6369 CTLFLAG_RD, &pi->tx_tls_octets,
6370 "# of payload octets in transmitted TLS records");
6371 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records",
6372 CTLFLAG_RD, &pi->rx_tls_records,
6373 "# of TLS records received");
6374 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets",
6375 CTLFLAG_RD, &pi->rx_tls_octets,
6376 "# of payload octets in received TLS records");
6380 sysctl_int_array(SYSCTL_HANDLER_ARGS)
6382 int rc, *i, space = 0;
6385 sbuf_new_for_sysctl(&sb, NULL, 64, req);
6386 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
6388 sbuf_printf(&sb, " ");
6389 sbuf_printf(&sb, "%d", *i);
6392 rc = sbuf_finish(&sb);
6398 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
6403 rc = sysctl_wire_old_buffer(req, 0);
6407 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6411 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
6412 rc = sbuf_finish(sb);
6419 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
6424 rc = sysctl_wire_old_buffer(req, 0);
6428 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6432 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
6433 rc = sbuf_finish(sb);
6440 sysctl_btphy(SYSCTL_HANDLER_ARGS)
6442 struct port_info *pi = arg1;
6444 struct adapter *sc = pi->adapter;
6448 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
6451 /* XXX: magic numbers */
6452 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
6454 end_synchronized_op(sc, 0);
6460 rc = sysctl_handle_int(oidp, &v, 0, req);
6465 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
6467 struct vi_info *vi = arg1;
6470 val = vi->rsrv_noflowq;
6471 rc = sysctl_handle_int(oidp, &val, 0, req);
6472 if (rc != 0 || req->newptr == NULL)
6475 if ((val >= 1) && (vi->ntxq > 1))
6476 vi->rsrv_noflowq = 1;
6478 vi->rsrv_noflowq = 0;
6484 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
6486 struct vi_info *vi = arg1;
6487 struct adapter *sc = vi->pi->adapter;
6489 struct sge_rxq *rxq;
6494 rc = sysctl_handle_int(oidp, &idx, 0, req);
6495 if (rc != 0 || req->newptr == NULL)
6498 if (idx < 0 || idx >= SGE_NTIMERS)
6501 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6506 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
6507 for_each_rxq(vi, i, rxq) {
6508 #ifdef atomic_store_rel_8
6509 atomic_store_rel_8(&rxq->iq.intr_params, v);
6511 rxq->iq.intr_params = v;
6516 end_synchronized_op(sc, LOCK_HELD);
6521 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
6523 struct vi_info *vi = arg1;
6524 struct adapter *sc = vi->pi->adapter;
6529 rc = sysctl_handle_int(oidp, &idx, 0, req);
6530 if (rc != 0 || req->newptr == NULL)
6533 if (idx < -1 || idx >= SGE_NCOUNTERS)
6536 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6541 if (vi->flags & VI_INIT_DONE)
6542 rc = EBUSY; /* cannot be changed once the queues are created */
6546 end_synchronized_op(sc, LOCK_HELD);
6551 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
6553 struct vi_info *vi = arg1;
6554 struct adapter *sc = vi->pi->adapter;
6557 qsize = vi->qsize_rxq;
6559 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6560 if (rc != 0 || req->newptr == NULL)
6563 if (qsize < 128 || (qsize & 7))
6566 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6571 if (vi->flags & VI_INIT_DONE)
6572 rc = EBUSY; /* cannot be changed once the queues are created */
6574 vi->qsize_rxq = qsize;
6576 end_synchronized_op(sc, LOCK_HELD);
6581 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
6583 struct vi_info *vi = arg1;
6584 struct adapter *sc = vi->pi->adapter;
6587 qsize = vi->qsize_txq;
6589 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6590 if (rc != 0 || req->newptr == NULL)
6593 if (qsize < 128 || qsize > 65536)
6596 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6601 if (vi->flags & VI_INIT_DONE)
6602 rc = EBUSY; /* cannot be changed once the queues are created */
6604 vi->qsize_txq = qsize;
6606 end_synchronized_op(sc, LOCK_HELD);
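/*
 * Flow-control settings travel as a 3-bit value decoded by the bits
 * string below: bit 0 = RX pause, bit 1 = TX pause, bit 2 = autonegotiate
 * pause.  As an illustrative example (the node path depends on how the
 * port probed), requesting RX+TX pause with pause autoneg off might look
 * like:
 *
 *	# sysctl dev.cxgbe.0.pause_settings=3
 */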
6611 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
6613 struct port_info *pi = arg1;
6614 struct adapter *sc = pi->adapter;
6615 struct link_config *lc = &pi->link_cfg;
6618 if (req->newptr == NULL) {
6620 static char *bits = "\20\1RX\2TX\3AUTO";
6622 rc = sysctl_wire_old_buffer(req, 0);
6626 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6631 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
6632 (lc->requested_fc & PAUSE_AUTONEG), bits);
6634 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
6635 PAUSE_RX | PAUSE_AUTONEG), bits);
6637 rc = sbuf_finish(sb);
6643 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
6647 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6653 if (s[0] < '0' || s[0] > '9')
6654 return (EINVAL); /* not a number */
6656 if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
6657 return (EINVAL); /* some other bit is set too */
6659 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6664 lc->requested_fc = n;
6665 fixup_link_config(pi);
6667 rc = apply_link_config(pi);
6668 set_current_media(pi);
6670 end_synchronized_op(sc, 0);
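/*
 * FEC is configured the same way, but with the raw FW_PORT_CAP32 FEC
 * bitmap: -1 (or any value with the AUTO bit set) selects FEC_AUTO, 0
 * selects FEC_NONE, and otherwise exactly one FEC bit (e.g. RS or BASE-R)
 * may be set and must be within the port's supported capabilities.
 */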
6677 sysctl_fec(SYSCTL_HANDLER_ARGS)
6679 struct port_info *pi = arg1;
6680 struct adapter *sc = pi->adapter;
6681 struct link_config *lc = &pi->link_cfg;
6685 if (req->newptr == NULL) {
6687 static char *bits = "\20\1RS\2BASE-R\3RSVD1\4RSVD2\5RSVD3\6AUTO";
6689 rc = sysctl_wire_old_buffer(req, 0);
6693 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6698 * Display the requested_fec when the link is down -- the actual
6699 * FEC makes sense only when the link is up.
6702 sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) |
6703 (lc->requested_fec & FEC_AUTO), bits);
6705 sbuf_printf(sb, "%b", lc->requested_fec, bits);
6707 rc = sbuf_finish(sb);
6713 snprintf(s, sizeof(s), "%d",
6714 lc->requested_fec == FEC_AUTO ? -1 :
6715 lc->requested_fec & M_FW_PORT_CAP32_FEC);
6717 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6721 n = strtol(&s[0], NULL, 0);
6722 if (n < 0 || n & FEC_AUTO)
6725 if (n & ~M_FW_PORT_CAP32_FEC)
6726 return (EINVAL); /* some other bit is set too */
6728 return (EINVAL); /* one bit can be set at most */
6731 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6736 old = lc->requested_fec;
6738 lc->requested_fec = FEC_AUTO;
6740 lc->requested_fec = FEC_NONE;
6742 if ((lc->supported | V_FW_PORT_CAP32_FEC(n)) !=
6747 lc->requested_fec = n;
6749 fixup_link_config(pi);
6750 if (pi->up_vis > 0) {
6751 rc = apply_link_config(pi);
6753 lc->requested_fec = old;
6754 if (rc == FW_EPROTO)
6760 end_synchronized_op(sc, 0);
6767 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
6769 struct port_info *pi = arg1;
6770 struct adapter *sc = pi->adapter;
6771 struct link_config *lc = &pi->link_cfg;
6774 if (lc->supported & FW_PORT_CAP32_ANEG)
6775 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
6778 rc = sysctl_handle_int(oidp, &val, 0, req);
6779 if (rc != 0 || req->newptr == NULL)
6782 val = AUTONEG_DISABLE;
6784 val = AUTONEG_ENABLE;
6788 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6793 if (val == AUTONEG_ENABLE && !(lc->supported & FW_PORT_CAP32_ANEG)) {
6797 lc->requested_aneg = val;
6798 fixup_link_config(pi);
6800 rc = apply_link_config(pi);
6801 set_current_media(pi);
6804 end_synchronized_op(sc, 0);
6809 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
6811 struct adapter *sc = arg1;
6815 val = t4_read_reg64(sc, reg);
6817 return (sysctl_handle_64(oidp, &val, 0, req));
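/*
 * The chip temperature is not in a register; it is fetched from the
 * firmware with a FW_PARAMS query (mnemonic DEV, param DIAG, index TMP),
 * which is why the read takes a synchronized op and goes through the
 * mailbox.  sysctl_loadavg below uses the same mechanism with the
 * DEV_LOAD parameter.
 */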
6821 sysctl_temperature(SYSCTL_HANDLER_ARGS)
6823 struct adapter *sc = arg1;
6825 uint32_t param, val;
6827 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
6830 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6831 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
6832 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
6833 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6834 end_synchronized_op(sc, 0);
6838 /* unknown is returned as 0 but we display -1 in that case */
6839 t = val == 0 ? -1 : val;
6841 rc = sysctl_handle_int(oidp, &t, 0, req);
6846 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
6848 struct adapter *sc = arg1;
6851 uint32_t param, val;
6853 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
6856 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6857 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
6858 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6859 end_synchronized_op(sc, 0);
6863 rc = sysctl_wire_old_buffer(req, 0);
6867 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6871 if (val == 0xffffffff) {
6872 /* Only debug and custom firmwares report load averages. */
6873 sbuf_printf(sb, "not available");
6875 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
6876 (val >> 16) & 0xff);
6878 rc = sbuf_finish(sb);
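/*
 * TP congestion control table: t4_read_cong_tbl() fills incr[mtu][wnd]
 * with the additive increment for each (MTU, congestion window) pair.
 * Every window below prints as two rows of eight per-MTU increments,
 * followed by its a_wnd parameter and decrement factor (dec_fac).
 */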
6885 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
6887 struct adapter *sc = arg1;
6890 uint16_t incr[NMTUS][NCCTRL_WIN];
6891 static const char *dec_fac[] = {
6892 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
6896 rc = sysctl_wire_old_buffer(req, 0);
6900 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6904 t4_read_cong_tbl(sc, incr);
6906 for (i = 0; i < NCCTRL_WIN; ++i) {
6907 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
6908 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6909 incr[5][i], incr[6][i], incr[7][i]);
6910 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6911 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6912 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6913 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6916 rc = sbuf_finish(sb);
6922 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6923 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
6924 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
6925 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
6929 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6931 struct adapter *sc = arg1;
6933 int rc, i, n, qid = arg2;
6936 u_int cim_num_obq = sc->chip_params->cim_num_obq;
6938 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6939 ("%s: bad qid %d\n", __func__, qid));
6941 if (qid < CIM_NUM_IBQ) {
6944 n = 4 * CIM_IBQ_SIZE;
6945 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6946 rc = t4_read_cim_ibq(sc, qid, buf, n);
6948 /* outbound queue */
6951 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6952 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6953 rc = t4_read_cim_obq(sc, qid, buf, n);
6960 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
6962 rc = sysctl_wire_old_buffer(req, 0);
6966 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6972 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
6973 for (i = 0, p = buf; i < n; i += 16, p += 4)
6974 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6977 rc = sbuf_finish(sb);
6985 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6987 struct adapter *sc = arg1;
6993 MPASS(chip_id(sc) <= CHELSIO_T5);
6995 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6999 rc = sysctl_wire_old_buffer(req, 0);
7003 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7007 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
7010 rc = -t4_cim_read_la(sc, buf, NULL);
7014 sbuf_printf(sb, "Status Data PC%s",
7015 cfg & F_UPDBGLACAPTPCONLY ? "" :
7016 " LS0Stat LS0Addr LS0Data");
7018 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
7019 if (cfg & F_UPDBGLACAPTPCONLY) {
7020 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
7022 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
7023 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
7024 p[4] & 0xff, p[5] >> 8);
7025 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
7026 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
7027 p[1] & 0xf, p[2] >> 4);
7030 "\n %02x %x%07x %x%07x %08x %08x "
7032 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
7033 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
7038 rc = sbuf_finish(sb);
7046 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
7048 struct adapter *sc = arg1;
7054 MPASS(chip_id(sc) > CHELSIO_T5);
7056 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
7060 rc = sysctl_wire_old_buffer(req, 0);
7064 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7068 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
7071 rc = -t4_cim_read_la(sc, buf, NULL);
7075 sbuf_printf(sb, "Status Inst Data PC%s",
7076 cfg & F_UPDBGLACAPTPCONLY ? "" :
7077 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
7079 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
7080 if (cfg & F_UPDBGLACAPTPCONLY) {
7081 sbuf_printf(sb, "\n %02x %08x %08x %08x",
7082 p[3] & 0xff, p[2], p[1], p[0]);
7083 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
7084 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
7085 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
7086 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
7087 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
7088 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
7091 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
7092 "%08x %08x %08x %08x %08x %08x",
7093 (p[9] >> 16) & 0xff,
7094 p[9] & 0xffff, p[8] >> 16,
7095 p[8] & 0xffff, p[7] >> 16,
7096 p[7] & 0xffff, p[6] >> 16,
7097 p[2], p[1], p[0], p[5], p[4], p[3]);
7101 rc = sbuf_finish(sb);
7109 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
7111 struct adapter *sc = arg1;
7117 rc = sysctl_wire_old_buffer(req, 0);
7121 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7125 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
7128 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
7131 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7132 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
7136 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
7137 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7138 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
7139 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
7140 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
7141 (p[1] >> 2) | ((p[2] & 3) << 30),
7142 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
7146 rc = sbuf_finish(sb);
7153 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
7155 struct adapter *sc = arg1;
7161 rc = sysctl_wire_old_buffer(req, 0);
7165 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7169 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
7172 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
7175 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
7176 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7177 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
7178 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
7179 p[4], p[3], p[2], p[1], p[0]);
7182 sbuf_printf(sb, "\n\nCntl ID Data");
7183 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7184 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
7185 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
7188 rc = sbuf_finish(sb);
7195 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
7197 struct adapter *sc = arg1;
7200 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7201 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7202 uint16_t thres[CIM_NUM_IBQ];
7203 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
7204 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
7205 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
7207 cim_num_obq = sc->chip_params->cim_num_obq;
7209 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
7210 obq_rdaddr = A_UP_OBQ_0_REALADDR;
7212 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
7213 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
7215 nq = CIM_NUM_IBQ + cim_num_obq;
7217 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
7219 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
7223 t4_read_cimq_cfg(sc, base, size, thres);
7225 rc = sysctl_wire_old_buffer(req, 0);
7229 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7234 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
7236 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
7237 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
7238 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
7239 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7240 G_QUEREMFLITS(p[2]) * 16);
7241 for ( ; i < nq; i++, p += 4, wr += 2)
7242 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
7243 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
7244 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7245 G_QUEREMFLITS(p[2]) * 16);
7247 rc = sbuf_finish(sb);
7254 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
7256 struct adapter *sc = arg1;
7259 struct tp_cpl_stats stats;
7261 rc = sysctl_wire_old_buffer(req, 0);
7265 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7269 mtx_lock(&sc->reg_lock);
7270 t4_tp_get_cpl_stats(sc, &stats, 0);
7271 mtx_unlock(&sc->reg_lock);
7273 if (sc->chip_params->nchan > 2) {
7274 sbuf_printf(sb, " channel 0 channel 1"
7275 " channel 2 channel 3");
7276 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
7277 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
7278 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
7279 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
7281 sbuf_printf(sb, " channel 0 channel 1");
7282 sbuf_printf(sb, "\nCPL requests: %10u %10u",
7283 stats.req[0], stats.req[1]);
7284 sbuf_printf(sb, "\nCPL responses: %10u %10u",
7285 stats.rsp[0], stats.rsp[1]);
7288 rc = sbuf_finish(sb);
7295 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
7297 struct adapter *sc = arg1;
7300 struct tp_usm_stats stats;
7302 rc = sysctl_wire_old_buffer(req, 0);
7306 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7310 t4_get_usm_stats(sc, &stats, 1);
7312 sbuf_printf(sb, "Frames: %u\n", stats.frames);
7313 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
7314 sbuf_printf(sb, "Drops: %u", stats.drops);
7316 rc = sbuf_finish(sb);
7322 static const char * const devlog_level_strings[] = {
7323 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
7324 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
7325 [FW_DEVLOG_LEVEL_ERR] = "ERR",
7326 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
7327 [FW_DEVLOG_LEVEL_INFO] = "INFO",
7328 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
7331 static const char * const devlog_facility_strings[] = {
7332 [FW_DEVLOG_FACILITY_CORE] = "CORE",
7333 [FW_DEVLOG_FACILITY_CF] = "CF",
7334 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
7335 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
7336 [FW_DEVLOG_FACILITY_RES] = "RES",
7337 [FW_DEVLOG_FACILITY_HW] = "HW",
7338 [FW_DEVLOG_FACILITY_FLR] = "FLR",
7339 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
7340 [FW_DEVLOG_FACILITY_PHY] = "PHY",
7341 [FW_DEVLOG_FACILITY_MAC] = "MAC",
7342 [FW_DEVLOG_FACILITY_PORT] = "PORT",
7343 [FW_DEVLOG_FACILITY_VI] = "VI",
7344 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
7345 [FW_DEVLOG_FACILITY_ACL] = "ACL",
7346 [FW_DEVLOG_FACILITY_TM] = "TM",
7347 [FW_DEVLOG_FACILITY_QFC] = "QFC",
7348 [FW_DEVLOG_FACILITY_DCB] = "DCB",
7349 [FW_DEVLOG_FACILITY_ETH] = "ETH",
7350 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
7351 [FW_DEVLOG_FACILITY_RI] = "RI",
7352 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
7353 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
7354 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
7355 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
7356 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
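/*
 * The firmware devlog lives in adapter memory as a fixed-size circular
 * buffer of fw_devlog_e entries.  The handler below snapshots the whole
 * buffer, byte-swaps every entry, locates the entry with the oldest
 * timestamp to use as the starting point, and then walks the ring exactly
 * once, skipping unused (timestamp == 0) slots.
 */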
7360 sysctl_devlog(SYSCTL_HANDLER_ARGS)
7362 struct adapter *sc = arg1;
7363 struct devlog_params *dparams = &sc->params.devlog;
7364 struct fw_devlog_e *buf, *e;
7365 int i, j, rc, nentries, first = 0;
7367 uint64_t ftstamp = UINT64_MAX;
7369 if (dparams->addr == 0)
7372 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
7376 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
7380 nentries = dparams->size / sizeof(struct fw_devlog_e);
7381 for (i = 0; i < nentries; i++) {
7384 if (e->timestamp == 0)
7387 e->timestamp = be64toh(e->timestamp);
7388 e->seqno = be32toh(e->seqno);
7389 for (j = 0; j < 8; j++)
7390 e->params[j] = be32toh(e->params[j]);
7392 if (e->timestamp < ftstamp) {
7393 ftstamp = e->timestamp;
7398 if (buf[first].timestamp == 0)
7399 goto done; /* nothing in the log */
7401 rc = sysctl_wire_old_buffer(req, 0);
7405 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7410 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
7411 "Seq#", "Tstamp", "Level", "Facility", "Message");
7416 if (e->timestamp == 0)
7419 sbuf_printf(sb, "%10d %15ju %8s %8s ",
7420 e->seqno, e->timestamp,
7421 (e->level < nitems(devlog_level_strings) ?
7422 devlog_level_strings[e->level] : "UNKNOWN"),
7423 (e->facility < nitems(devlog_facility_strings) ?
7424 devlog_facility_strings[e->facility] : "UNKNOWN"));
7425 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
7426 e->params[2], e->params[3], e->params[4],
7427 e->params[5], e->params[6], e->params[7]);
7429 if (++i == nentries)
7431 } while (i != first);
7433 rc = sbuf_finish(sb);
7441 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
7443 struct adapter *sc = arg1;
7446 struct tp_fcoe_stats stats[MAX_NCHAN];
7447 int i, nchan = sc->chip_params->nchan;
7449 rc = sysctl_wire_old_buffer(req, 0);
7453 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7457 for (i = 0; i < nchan; i++)
7458 t4_get_fcoe_stats(sc, i, &stats[i], 1);
7461 sbuf_printf(sb, " channel 0 channel 1"
7462 " channel 2 channel 3");
7463 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
7464 stats[0].octets_ddp, stats[1].octets_ddp,
7465 stats[2].octets_ddp, stats[3].octets_ddp);
7466 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
7467 stats[0].frames_ddp, stats[1].frames_ddp,
7468 stats[2].frames_ddp, stats[3].frames_ddp);
7469 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
7470 stats[0].frames_drop, stats[1].frames_drop,
7471 stats[2].frames_drop, stats[3].frames_drop);
7473 sbuf_printf(sb, " channel 0 channel 1");
7474 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
7475 stats[0].octets_ddp, stats[1].octets_ddp);
7476 sbuf_printf(sb, "\nframesDDP: %16u %16u",
7477 stats[0].frames_ddp, stats[1].frames_ddp);
7478 sbuf_printf(sb, "\nframesDrop: %16u %16u",
7479 stats[0].frames_drop, stats[1].frames_drop);
7482 rc = sbuf_finish(sb);
7489 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
7491 struct adapter *sc = arg1;
7494 unsigned int map, kbps, ipg, mode;
7495 unsigned int pace_tab[NTX_SCHED];
7497 rc = sysctl_wire_old_buffer(req, 0);
7501 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7505 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
7506 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
7507 t4_read_pace_tbl(sc, pace_tab);
7509 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
7510 "Class IPG (0.1 ns) Flow IPG (us)");
7512 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
7513 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
7514 sbuf_printf(sb, "\n %u %-5s %u ", i,
7515 (mode & (1 << i)) ? "flow" : "class", map & 3);
7517 sbuf_printf(sb, "%9u ", kbps);
7519 sbuf_printf(sb, " disabled ");
7522 sbuf_printf(sb, "%13u ", ipg);
7524 sbuf_printf(sb, " disabled ");
7527 sbuf_printf(sb, "%10u", pace_tab[i]);
7529 sbuf_printf(sb, " disabled");
7532 rc = sbuf_finish(sb);
7539 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
7541 struct adapter *sc = arg1;
7545 struct lb_port_stats s[2];
7546 static const char *stat_name[] = {
7547 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
7548 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
7549 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
7550 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
7551 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
7552 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
7553 "BG2FramesTrunc:", "BG3FramesTrunc:"
7556 rc = sysctl_wire_old_buffer(req, 0);
7560 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7564 memset(s, 0, sizeof(s));
7566 for (i = 0; i < sc->chip_params->nchan; i += 2) {
7567 t4_get_lb_stats(sc, i, &s[0]);
7568 t4_get_lb_stats(sc, i + 1, &s[1]);
7572 sbuf_printf(sb, "%s Loopback %u"
7573 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
7575 for (j = 0; j < nitems(stat_name); j++)
7576 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
7580 rc = sbuf_finish(sb);
7587 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
7590 struct port_info *pi = arg1;
7591 struct link_config *lc = &pi->link_cfg;
7594 rc = sysctl_wire_old_buffer(req, 0);
7597 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
7601 if (lc->link_ok || lc->link_down_rc == 255)
7602 sbuf_printf(sb, "n/a");
7604 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
7606 rc = sbuf_finish(sb);
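/*
 * sysctl_meminfo reconstructs the card's memory map: the enabled EDC/MC
 * ranges are collected into avail[], the base address of every hardware
 * region is recorded in mem[], both arrays are sorted with qsort(9) using
 * the comparator below, and each region's limit is then inferred from the
 * base of the region that follows (with synthetic entries covering holes
 * in the address space).
 */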
7619 mem_desc_cmp(const void *a, const void *b)
7621 return ((const struct mem_desc *)a)->base -
7622 ((const struct mem_desc *)b)->base;
7626 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
7634 size = to - from + 1;
7638 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
7639 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
7643 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
7645 struct adapter *sc = arg1;
7648 uint32_t lo, hi, used, alloc;
7649 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
7650 static const char *region[] = {
7651 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
7652 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
7653 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
7654 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
7655 "RQUDP region:", "PBL region:", "TXPBL region:",
7656 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
7657 "On-chip queues:", "TLS keys:",
7659 struct mem_desc avail[4];
7660 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
7661 struct mem_desc *md = mem;
7663 rc = sysctl_wire_old_buffer(req, 0);
7667 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7671 for (i = 0; i < nitems(mem); i++) {
7676 /* Find and sort the populated memory ranges */
7678 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
7679 if (lo & F_EDRAM0_ENABLE) {
7680 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
7681 avail[i].base = G_EDRAM0_BASE(hi) << 20;
7682 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
7686 if (lo & F_EDRAM1_ENABLE) {
7687 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
7688 avail[i].base = G_EDRAM1_BASE(hi) << 20;
7689 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
7693 if (lo & F_EXT_MEM_ENABLE) {
7694 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
7695 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
7696 avail[i].limit = avail[i].base +
7697 (G_EXT_MEM_SIZE(hi) << 20);
7698 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
7701 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
7702 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
7703 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
7704 avail[i].limit = avail[i].base +
7705 (G_EXT_MEM1_SIZE(hi) << 20);
7709 if (!i) /* no memory available */
7711 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
7713 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
7714 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
7715 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
7716 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7717 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
7718 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
7719 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
7720 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
7721 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
7723 /* the next few have explicit upper bounds */
7724 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
7725 md->limit = md->base - 1 +
7726 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
7727 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
7730 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
7731 md->limit = md->base - 1 +
7732 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
7733 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
7736 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7737 if (chip_id(sc) <= CHELSIO_T5)
7738 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
7740 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
7744 md->idx = nitems(region); /* hide it */
7748 #define ulp_region(reg) \
7749 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
7750 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
7752 ulp_region(RX_ISCSI);
7753 ulp_region(RX_TDDP);
7755 ulp_region(RX_STAG);
7757 ulp_region(RX_RQUDP);
7763 md->idx = nitems(region);
7766 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
7767 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
7770 if (sge_ctrl & F_VFIFO_ENABLE)
7771 size = G_DBVFIFO_SIZE(fifo_size);
7773 size = G_T6_DBVFIFO_SIZE(fifo_size);
7776 md->base = G_BASEADDR(t4_read_reg(sc,
7777 A_SGE_DBVFIFO_BADDR));
7778 md->limit = md->base + (size << 2) - 1;
7783 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
7786 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
7790 md->base = sc->vres.ocq.start;
7791 if (sc->vres.ocq.size)
7792 md->limit = md->base + sc->vres.ocq.size - 1;
7794 md->idx = nitems(region); /* hide it */
7797 md->base = sc->vres.key.start;
7798 if (sc->vres.key.size)
7799 md->limit = md->base + sc->vres.key.size - 1;
7801 md->idx = nitems(region); /* hide it */
7804 /* add any address-space holes, there can be up to 3 */
7805 for (n = 0; n < i - 1; n++)
7806 if (avail[n].limit < avail[n + 1].base)
7807 (md++)->base = avail[n].limit;
7809 (md++)->base = avail[n].limit;
7812 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
7814 for (lo = 0; lo < i; lo++)
7815 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
7816 avail[lo].limit - 1);
7818 sbuf_printf(sb, "\n");
7819 for (i = 0; i < n; i++) {
7820 if (mem[i].idx >= nitems(region))
7821 continue; /* skip holes */
7823 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
7824 mem_region_show(sb, region[mem[i].idx], mem[i].base,
7828 sbuf_printf(sb, "\n");
7829 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
7830 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
7831 mem_region_show(sb, "uP RAM:", lo, hi);
7833 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
7834 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
7835 mem_region_show(sb, "uP Extmem2:", lo, hi);
7837 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
7838 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
7840 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
7841 (lo & F_PMRXNUMCHN) ? 2 : 1);
7843 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
7844 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
7845 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
7847 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
7848 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
7849 sbuf_printf(sb, "%u p-structs\n",
7850 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
7852 for (i = 0; i < 4; i++) {
7853 if (chip_id(sc) > CHELSIO_T5)
7854 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
7856 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
7858 used = G_T5_USED(lo);
7859 alloc = G_T5_ALLOC(lo);
7862 alloc = G_ALLOC(lo);
7864 /* For T6 these are MAC buffer groups */
7865 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
7868 for (i = 0; i < sc->chip_params->nchan; i++) {
7869 if (chip_id(sc) > CHELSIO_T5)
7870 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
7872 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
7874 used = G_T5_USED(lo);
7875 alloc = G_T5_ALLOC(lo);
7878 alloc = G_ALLOC(lo);
7880 /* For T6 these are MAC buffer groups */
7882 "\nLoopback %d using %u pages out of %u allocated",
7886 rc = sbuf_finish(sb);
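/*
 * MPS TCAM cells are stored as an X/Y pair per bit: the match value is
 * carried in Y and the care-mask is X | Y (a bit clear in both X and Y
 * is "don't care").  The helper below converts a raw X/Y pair into an
 * Ethernet address plus mask for display.
 */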
7893 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
7897 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
7901 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
7903 struct adapter *sc = arg1;
7907 MPASS(chip_id(sc) <= CHELSIO_T5);
7909 rc = sysctl_wire_old_buffer(req, 0);
7913 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7918 "Idx Ethernet address Mask Vld Ports PF"
7919 " VF Replication P0 P1 P2 P3 ML");
7920 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7921 uint64_t tcamx, tcamy, mask;
7922 uint32_t cls_lo, cls_hi;
7923 uint8_t addr[ETHER_ADDR_LEN];
7925 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
7926 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
7929 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7930 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7931 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7932 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
7933 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
7934 addr[3], addr[4], addr[5], (uintmax_t)mask,
7935 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
7936 G_PORTMAP(cls_hi), G_PF(cls_lo),
7937 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
7939 if (cls_lo & F_REPLICATE) {
7940 struct fw_ldst_cmd ldst_cmd;
7942 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7943 ldst_cmd.op_to_addrspace =
7944 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7945 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7946 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7947 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7948 ldst_cmd.u.mps.rplc.fid_idx =
7949 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7950 V_FW_LDST_CMD_IDX(i));
7952 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7956 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7957 sizeof(ldst_cmd), &ldst_cmd);
7958 end_synchronized_op(sc, 0);
7961 sbuf_printf(sb, "%36d", rc);
7964 sbuf_printf(sb, " %08x %08x %08x %08x",
7965 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7966 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7967 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7968 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7971 sbuf_printf(sb, "%36s", "");
7973 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
7974 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
7975 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
7979 (void) sbuf_finish(sb);
7981 rc = sbuf_finish(sb);
7988 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
7990 struct adapter *sc = arg1;
7994 MPASS(chip_id(sc) > CHELSIO_T5);
7996 rc = sysctl_wire_old_buffer(req, 0);
8000 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8004 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
8005 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
8007 " P0 P1 P2 P3 ML\n");
8009 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
8010 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
8012 uint64_t tcamx, tcamy, val, mask;
8013 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
8014 uint8_t addr[ETHER_ADDR_LEN];
8016 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
8018 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
8020 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
8021 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
8022 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
8023 tcamy = G_DMACH(val) << 32;
8024 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
8025 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
8026 lookup_type = G_DATALKPTYPE(data2);
8027 port_num = G_DATAPORTNUM(data2);
8028 if (lookup_type && lookup_type != M_DATALKPTYPE) {
8029 /* Inner header VNI */
8030 vniy = ((data2 & F_DATAVIDH2) << 23) |
8031 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
8032 dip_hit = data2 & F_DATADIPHIT;
8037 vlan_vld = data2 & F_DATAVIDH2;
8038 ivlan = G_VIDL(val);
8041 ctl |= V_CTLXYBITSEL(1);
8042 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
8043 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
8044 tcamx = G_DMACH(val) << 32;
8045 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
8046 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
8047 if (lookup_type && lookup_type != M_DATALKPTYPE) {
8048 /* Inner header VNI mask */
8049 vnix = ((data2 & F_DATAVIDH2) << 23) |
8050 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
8056 tcamxy2valmask(tcamx, tcamy, addr, &mask);
8058 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
8059 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
8061 if (lookup_type && lookup_type != M_DATALKPTYPE) {
8062 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
8063 "%012jx %06x %06x - - %3c"
8064 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
8065 addr[1], addr[2], addr[3], addr[4], addr[5],
8066 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
8067 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
8068 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
8069 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
8071 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
8072 "%012jx - - ", i, addr[0], addr[1],
8073 addr[2], addr[3], addr[4], addr[5],
8077 sbuf_printf(sb, "%4u Y ", ivlan);
8079 sbuf_printf(sb, " - N ");
8081 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
8082 lookup_type ? 'I' : 'O', port_num,
8083 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
8084 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
8085 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
8089 if (cls_lo & F_T6_REPLICATE) {
8090 struct fw_ldst_cmd ldst_cmd;
8092 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
8093 ldst_cmd.op_to_addrspace =
8094 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
8095 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8096 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
8097 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
8098 ldst_cmd.u.mps.rplc.fid_idx =
8099 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
8100 V_FW_LDST_CMD_IDX(i));
8102 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8106 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
8107 sizeof(ldst_cmd), &ldst_cmd);
8108 end_synchronized_op(sc, 0);
8111 sbuf_printf(sb, "%72d", rc);
8114 sbuf_printf(sb, " %08x %08x %08x %08x"
8115 " %08x %08x %08x %08x",
8116 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
8117 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
8118 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
8119 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
8120 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
8121 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
8122 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
8123 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
8126 sbuf_printf(sb, "%72s", "");
8128 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
8129 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
8130 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
8131 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
8135 (void) sbuf_finish(sb);
8137 rc = sbuf_finish(sb);
8144 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
8146 struct adapter *sc = arg1;
8149 uint16_t mtus[NMTUS];
8151 rc = sysctl_wire_old_buffer(req, 0);
8155 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8159 t4_read_mtu_tbl(sc, mtus, NULL);
8161 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
8162 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
8163 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
8164 mtus[14], mtus[15]);
8166 rc = sbuf_finish(sb);
8173 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
8175 struct adapter *sc = arg1;
8178 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
8179 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
8180 static const char *tx_stats[MAX_PM_NSTATS] = {
8181 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
8182 "Tx FIFO wait", NULL, "Tx latency"
8184 static const char *rx_stats[MAX_PM_NSTATS] = {
8185 "Read:", "Write bypass:", "Write mem:", "Flush:",
8186 "Rx FIFO wait", NULL, "Rx latency"
8189 rc = sysctl_wire_old_buffer(req, 0);
8193 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8197 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
8198 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
8200 sbuf_printf(sb, " Tx pcmds Tx bytes");
8201 for (i = 0; i < 4; i++) {
8202 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8206 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
8207 for (i = 0; i < 4; i++) {
8208 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8212 if (chip_id(sc) > CHELSIO_T5) {
8214 "\n Total wait Total occupancy");
8215 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8217 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8221 MPASS(i < nitems(tx_stats));
8224 "\n Reads Total wait");
8225 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8227 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8231 rc = sbuf_finish(sb);
8238 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
8240 struct adapter *sc = arg1;
8243 struct tp_rdma_stats stats;
8245 rc = sysctl_wire_old_buffer(req, 0);
8249 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8253 mtx_lock(&sc->reg_lock);
8254 t4_tp_get_rdma_stats(sc, &stats, 0);
8255 mtx_unlock(&sc->reg_lock);
8257 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
8258 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
8260 rc = sbuf_finish(sb);
8267 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
8269 struct adapter *sc = arg1;
8272 struct tp_tcp_stats v4, v6;
8274 rc = sysctl_wire_old_buffer(req, 0);
8278 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8282 mtx_lock(&sc->reg_lock);
8283 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
8284 mtx_unlock(&sc->reg_lock);
8288 sbuf_printf(sb, "OutRsts: %20u %20u\n",
8289 v4.tcp_out_rsts, v6.tcp_out_rsts);
8290 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
8291 v4.tcp_in_segs, v6.tcp_in_segs);
8292 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
8293 v4.tcp_out_segs, v6.tcp_out_segs);
8294 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
8295 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
8297 rc = sbuf_finish(sb);
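/*
 * TID accounting summary: each TID class (ATID, HPFTID, TID, STID, FTID,
 * ETID) is reported as its range plus in-use count.  When the LE hash is
 * enabled the plain TID range is printed in two pieces: connection TIDs
 * below the server index and hash TIDs from the hash base up.
 */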
8304 sysctl_tids(SYSCTL_HANDLER_ARGS)
8306 struct adapter *sc = arg1;
8309 struct tid_info *t = &sc->tids;
8311 rc = sysctl_wire_old_buffer(req, 0);
8315 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8320 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
8325 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
8326 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
8330 sbuf_printf(sb, "TID range: ");
8331 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8334 if (chip_id(sc) <= CHELSIO_T5) {
8335 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
8336 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
8338 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
8339 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
8343 sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1);
8344 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
8346 sbuf_printf(sb, "%u-%u", t->tid_base, t->ntids - 1);
8347 sbuf_printf(sb, ", in use: %u\n",
8348 atomic_load_acq_int(&t->tids_in_use));
8352 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
8353 t->stid_base + t->nstids - 1, t->stids_in_use);
8357 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
8358 t->ftid_end, t->ftids_in_use);
8362 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
8363 t->etid_base + t->netids - 1, t->etids_in_use);
8366 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
8367 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
8368 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
8370 rc = sbuf_finish(sb);
8377 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
8379 struct adapter *sc = arg1;
8382 struct tp_err_stats stats;
8384 rc = sysctl_wire_old_buffer(req, 0);
8388 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8392 mtx_lock(&sc->reg_lock);
8393 t4_tp_get_err_stats(sc, &stats, 0);
8394 mtx_unlock(&sc->reg_lock);
8396 if (sc->chip_params->nchan > 2) {
8397 sbuf_printf(sb, " channel 0 channel 1"
8398 " channel 2 channel 3\n");
8399 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
8400 stats.mac_in_errs[0], stats.mac_in_errs[1],
8401 stats.mac_in_errs[2], stats.mac_in_errs[3]);
8402 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
8403 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
8404 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
8405 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
8406 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
8407 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
8408 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
8409 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
8410 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
8411 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
8412 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
8413 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
8414 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
8415 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
8416 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
8417 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
8418 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
8419 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
8420 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
8421 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
8422 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
8424 sbuf_printf(sb, " channel 0 channel 1\n");
8425 sbuf_printf(sb, "macInErrs: %10u %10u\n",
8426 stats.mac_in_errs[0], stats.mac_in_errs[1]);
8427 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
8428 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
8429 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
8430 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
8431 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
8432 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
8433 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
8434 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
8435 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
8436 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
8437 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
8438 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
8439 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
8440 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
8443 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
8444 stats.ofld_no_neigh, stats.ofld_cong_defer);
8446 rc = sbuf_finish(sb);
8453 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
8455 struct adapter *sc = arg1;
8456 struct tp_params *tpp = &sc->params.tp;
8460 mask = tpp->la_mask >> 16;
8461 rc = sysctl_handle_int(oidp, &mask, 0, req);
8462 if (rc != 0 || req->newptr == NULL)
8466 tpp->la_mask = mask << 16;
8467 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
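/*
 * TP logic-analyzer words are decoded with { name, start, width } tables
 * (struct field_desc) walked by field_desc_show() below.  An entry such
 * as { "RcfOpCodeOut", 60, 4 } extracts bits 63:60 of the 64-bit LA word,
 * i.e. (v >> 60) & ((1ULL << 4) - 1).
 */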
8479 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
8485 uint64_t mask = (1ULL << f->width) - 1;
8486 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
8487 ((uintmax_t)v >> f->start) & mask);
8489 if (line_size + len >= 79) {
8491 sbuf_printf(sb, "\n ");
8493 sbuf_printf(sb, "%s ", buf);
8494 line_size += len + 1;
8497 sbuf_printf(sb, "\n");
8500 static const struct field_desc tp_la0[] = {
8501 { "RcfOpCodeOut", 60, 4 },
8503 { "WcfState", 52, 4 },
8504 { "RcfOpcSrcOut", 50, 2 },
8505 { "CRxError", 49, 1 },
8506 { "ERxError", 48, 1 },
8507 { "SanityFailed", 47, 1 },
8508 { "SpuriousMsg", 46, 1 },
8509 { "FlushInputMsg", 45, 1 },
8510 { "FlushInputCpl", 44, 1 },
8511 { "RssUpBit", 43, 1 },
8512 { "RssFilterHit", 42, 1 },
8514 { "InitTcb", 31, 1 },
8515 { "LineNumber", 24, 7 },
8517 { "EdataOut", 22, 1 },
8519 { "CdataOut", 20, 1 },
8520 { "EreadPdu", 19, 1 },
8521 { "CreadPdu", 18, 1 },
8522 { "TunnelPkt", 17, 1 },
8523 { "RcfPeerFin", 16, 1 },
8524 { "RcfReasonOut", 12, 4 },
8525 { "TxCchannel", 10, 2 },
8526 { "RcfTxChannel", 8, 2 },
8527 { "RxEchannel", 6, 2 },
8528 { "RcfRxChannel", 5, 1 },
8529 { "RcfDataOutSrdy", 4, 1 },
8531 { "RxOoDvld", 2, 1 },
8532 { "RxCongestion", 1, 1 },
8533 { "TxCongestion", 0, 1 },
8537 static const struct field_desc tp_la1[] = {
8538 { "CplCmdIn", 56, 8 },
8539 { "CplCmdOut", 48, 8 },
8540 { "ESynOut", 47, 1 },
8541 { "EAckOut", 46, 1 },
8542 { "EFinOut", 45, 1 },
8543 { "ERstOut", 44, 1 },
8548 { "DataIn", 39, 1 },
8549 { "DataInVld", 38, 1 },
8551 { "RxBufEmpty", 36, 1 },
8553 { "RxFbCongestion", 34, 1 },
8554 { "TxFbCongestion", 33, 1 },
8555 { "TxPktSumSrdy", 32, 1 },
8556 { "RcfUlpType", 28, 4 },
8558 { "Ebypass", 26, 1 },
8560 { "Static0", 24, 1 },
8562 { "Cbypass", 22, 1 },
8564 { "CPktOut", 20, 1 },
8565 { "RxPagePoolFull", 18, 2 },
8566 { "RxLpbkPkt", 17, 1 },
8567 { "TxLpbkPkt", 16, 1 },
8568 { "RxVfValid", 15, 1 },
8569 { "SynLearned", 14, 1 },
8570 { "SetDelEntry", 13, 1 },
8571 { "SetInvEntry", 12, 1 },
8572 { "CpcmdDvld", 11, 1 },
8573 { "CpcmdSave", 10, 1 },
8574 { "RxPstructsFull", 8, 2 },
8575 { "EpcmdDvld", 7, 1 },
8576 { "EpcmdFlush", 6, 1 },
8577 { "EpcmdTrimPrefix", 5, 1 },
8578 { "EpcmdTrimPostfix", 4, 1 },
8579 { "ERssIp4Pkt", 3, 1 },
8580 { "ERssIp6Pkt", 2, 1 },
8581 { "ERssTcpUdpPkt", 1, 1 },
8582 { "ERssFceFipPkt", 0, 1 },
8586 static const struct field_desc tp_la2[] = {
8587 { "CplCmdIn", 56, 8 },
8588 { "MpsVfVld", 55, 1 },
8595 { "DataIn", 39, 1 },
8596 { "DataInVld", 38, 1 },
8598 { "RxBufEmpty", 36, 1 },
8600 { "RxFbCongestion", 34, 1 },
8601 { "TxFbCongestion", 33, 1 },
8602 { "TxPktSumSrdy", 32, 1 },
8603 { "RcfUlpType", 28, 4 },
8605 { "Ebypass", 26, 1 },
8607 { "Static0", 24, 1 },
8609 { "Cbypass", 22, 1 },
8611 { "CPktOut", 20, 1 },
8612 { "RxPagePoolFull", 18, 2 },
8613 { "RxLpbkPkt", 17, 1 },
8614 { "TxLpbkPkt", 16, 1 },
8615 { "RxVfValid", 15, 1 },
8616 { "SynLearned", 14, 1 },
8617 { "SetDelEntry", 13, 1 },
8618 { "SetInvEntry", 12, 1 },
8619 { "CpcmdDvld", 11, 1 },
8620 { "CpcmdSave", 10, 1 },
8621 { "RxPstructsFull", 8, 2 },
8622 { "EpcmdDvld", 7, 1 },
8623 { "EpcmdFlush", 6, 1 },
8624 { "EpcmdTrimPrefix", 5, 1 },
8625 { "EpcmdTrimPostfix", 4, 1 },
8626 { "ERssIp4Pkt", 3, 1 },
8627 { "ERssIp6Pkt", 2, 1 },
8628 { "ERssTcpUdpPkt", 1, 1 },
8629 { "ERssFceFipPkt", 0, 1 },
8634 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
8637 field_desc_show(sb, *p, tp_la0);
8641 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
8645 sbuf_printf(sb, "\n");
8646 field_desc_show(sb, p[0], tp_la0);
8647 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8648 field_desc_show(sb, p[1], tp_la0);
8652 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
8656 sbuf_printf(sb, "\n");
8657 field_desc_show(sb, p[0], tp_la0);
8658 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8659 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
8663 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
8665 struct adapter *sc = arg1;
8670 void (*show_func)(struct sbuf *, uint64_t *, int);
8672 rc = sysctl_wire_old_buffer(req, 0);
8676 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8680 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
8682 t4_tp_read_la(sc, buf, NULL);
8685 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
8688 show_func = tp_la_show2;
8692 show_func = tp_la_show3;
8696 show_func = tp_la_show;
8699 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
8700 (*show_func)(sb, p, i);
8702 rc = sbuf_finish(sb);
8709 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
8711 struct adapter *sc = arg1;
8714 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
8716 rc = sysctl_wire_old_buffer(req, 0);
8720 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8724 t4_get_chan_txrate(sc, nrate, orate);
8726 if (sc->chip_params->nchan > 2) {
8727 sbuf_printf(sb, " channel 0 channel 1"
8728 " channel 2 channel 3\n");
8729 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
8730 nrate[0], nrate[1], nrate[2], nrate[3]);
8731 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
8732 orate[0], orate[1], orate[2], orate[3]);
8734 sbuf_printf(sb, " channel 0 channel 1\n");
8735 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
8736 nrate[0], nrate[1]);
8737 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
8738 orate[0], orate[1]);
8741 rc = sbuf_finish(sb);
8748 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
8750 struct adapter *sc = arg1;
8755 rc = sysctl_wire_old_buffer(req, 0);
8759 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8763 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
8766 t4_ulprx_read_la(sc, buf);
8769 sbuf_printf(sb, " Pcmd Type Message"
8771 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
8772 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
8773 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
8776 rc = sbuf_finish(sb);
8783 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
8785 struct adapter *sc = arg1;
8789 MPASS(chip_id(sc) >= CHELSIO_T5);
8791 rc = sysctl_wire_old_buffer(req, 0);
8795 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8799 v = t4_read_reg(sc, A_SGE_STAT_CFG);
8800 if (G_STATSOURCE_T5(v) == 7) {
8803 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
8805 sbuf_printf(sb, "total %d, incomplete %d",
8806 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8807 t4_read_reg(sc, A_SGE_STAT_MATCH));
8808 } else if (mode == 1) {
8809 sbuf_printf(sb, "total %d, data overflow %d",
8810 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8811 t4_read_reg(sc, A_SGE_STAT_MATCH));
8813 sbuf_printf(sb, "unknown mode %d", mode);
8816 rc = sbuf_finish(sb);
8823 sysctl_cpus(SYSCTL_HANDLER_ARGS)
8825 struct adapter *sc = arg1;
8826 enum cpu_sets op = arg2;
8831 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
8834 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
8838 rc = sysctl_wire_old_buffer(req, 0);
8842 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8847 sbuf_printf(sb, "%d ", i);
8848 rc = sbuf_finish(sb);
8856 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
8858 struct adapter *sc = arg1;
8859 int *old_ports, *new_ports;
8860 int i, new_count, rc;
8862 if (req->newptr == NULL && req->oldptr == NULL)
8863 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
8864 sizeof(sc->tt.tls_rx_ports[0])));
8866 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
8870 if (sc->tt.num_tls_rx_ports == 0) {
8872 rc = SYSCTL_OUT(req, &i, sizeof(i));
8874 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
8875 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
8876 if (rc == 0 && req->newptr != NULL) {
8877 new_count = req->newlen / sizeof(new_ports[0]);
8878 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
8880 rc = SYSCTL_IN(req, new_ports, new_count *
8881 sizeof(new_ports[0]));
8885 /* Allow setting to a single '-1' to clear the list. */
8886 if (new_count == 1 && new_ports[0] == -1) {
8888 old_ports = sc->tt.tls_rx_ports;
8889 sc->tt.tls_rx_ports = NULL;
8890 sc->tt.num_tls_rx_ports = 0;
8892 free(old_ports, M_CXGBE);
8894 for (i = 0; i < new_count; i++) {
8895 if (new_ports[i] < 1 ||
8896 new_ports[i] > IPPORT_MAX) {
8903 old_ports = sc->tt.tls_rx_ports;
8904 sc->tt.tls_rx_ports = new_ports;
8905 sc->tt.num_tls_rx_ports = new_count;
8907 free(old_ports, M_CXGBE);
8911 free(new_ports, M_CXGBE);
8913 end_synchronized_op(sc, 0);
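/*
 * unit_conv() renders val/factor with the fractional part stripped of
 * trailing zeroes; for example val = 2500, factor = 1000 produces "2.5".
 * It is used below to report core-clock-derived TP timer ticks in
 * microseconds.
 */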
8918 unit_conv(char *buf, size_t len, u_int val, u_int factor)
8920 u_int rem = val % factor;
8923 snprintf(buf, len, "%u", val / factor);
8925 while (rem % 10 == 0)
8927 snprintf(buf, len, "%u.%u", val / factor, rem);
8932 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
8934 struct adapter *sc = arg1;
8937 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8939 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8943 re = G_TIMERRESOLUTION(res);
8946 /* TCP timestamp tick */
8947 re = G_TIMESTAMPRESOLUTION(res);
8951 re = G_DELAYEDACKRESOLUTION(res);
8957 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
8959 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
8963 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
8965 struct adapter *sc = arg1;
8966 u_int res, dack_re, v;
8967 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8969 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8970 dack_re = G_DELAYEDACKRESOLUTION(res);
8971 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
8973 return (sysctl_handle_int(oidp, &v, 0, req));
8977 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
8979 struct adapter *sc = arg1;
8982 u_long tp_tick_us, v;
8983 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8985 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
8986 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
8987 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
8988 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
8990 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
8991 tp_tick_us = (cclk_ps << tre) / 1000000;
8993 if (reg == A_TP_INIT_SRTT)
8994 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
8996 v = tp_tick_us * t4_read_reg(sc, reg);
8998 return (sysctl_handle_long(oidp, &v, 0, req));
9002 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
9003 * passed to this function.
9006 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
9008 struct adapter *sc = arg1;
9012 MPASS(idx >= 0 && idx <= 24);
9014 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
9016 return (sysctl_handle_int(oidp, &v, 0, req));
9020 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
9022 struct adapter *sc = arg1;
9026 MPASS(idx >= 0 && idx < 16);
9028 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
9029 shift = (idx & 3) << 3;
9030 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
9032 return (sysctl_handle_int(oidp, &v, 0, req));
9036 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
9038 struct vi_info *vi = arg1;
9039 struct adapter *sc = vi->pi->adapter;
9041 struct sge_ofld_rxq *ofld_rxq;
9044 idx = vi->ofld_tmr_idx;
9046 rc = sysctl_handle_int(oidp, &idx, 0, req);
9047 if (rc != 0 || req->newptr == NULL)
9050 if (idx < 0 || idx >= SGE_NTIMERS)
9053 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
9058 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
9059 for_each_ofld_rxq(vi, i, ofld_rxq) {
9060 #ifdef atomic_store_rel_8
9061 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
9063 ofld_rxq->iq.intr_params = v;
9066 vi->ofld_tmr_idx = idx;
9068 end_synchronized_op(sc, LOCK_HELD);
9073 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
9075 struct vi_info *vi = arg1;
9076 struct adapter *sc = vi->pi->adapter;
9079 idx = vi->ofld_pktc_idx;
9081 rc = sysctl_handle_int(oidp, &idx, 0, req);
9082 if (rc != 0 || req->newptr == NULL)
9085 if (idx < -1 || idx >= SGE_NCOUNTERS)
9088 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
9093 if (vi->flags & VI_INIT_DONE)
9094 rc = EBUSY; /* cannot be changed once the queues are created */
9096 vi->ofld_pktc_idx = idx;
9098 end_synchronized_op(sc, LOCK_HELD);
9104 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
9108 if (cntxt->cid > M_CTXTQID)
9111 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
9112 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
9115 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
9119 if (sc->flags & FW_OK) {
9120 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
9127 * Read via firmware failed or wasn't even attempted. Read directly via
9130 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
9132 end_synchronized_op(sc, 0);
9137 load_fw(struct adapter *sc, struct t4_data *fw)
9142 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
9147 * The firmware, with the sole exception of the memory parity error
9148 * handler, runs from memory and not flash. It is almost always safe to
9149 * install a new firmware on a running system. Just set bit 1 in
9150 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
	if (sc->flags & FULL_INIT_DONE &&
	    (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
load_cfg(struct adapter *sc, struct t4_data *cfg)
{
	int rc;
	uint8_t *cfg_data = NULL;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
	if (rc)
		return (rc);

	if (cfg->len == 0) {
		/* clear */
		rc = -t4_load_cfg(sc, NULL, 0);
		goto done;
	}

	cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
	if (cfg_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(cfg->data, cfg_data, cfg->len);
	if (rc == 0)
		rc = -t4_load_cfg(sc, cfg_data, cfg->len);

	free(cfg_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
load_boot(struct adapter *sc, struct t4_bootrom *br)
{
	int rc;
	uint8_t *br_data = NULL;
	u_int offset;

	if (br->len > 1024 * 1024)
		return (EFBIG);

	if (br->pf_offset == 0) {
		/* pfidx */
		if (br->pfidx_addr > 7)
			return (EINVAL);
		offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
		    A_PCIE_PF_EXPROM_OFST)));
	} else if (br->pf_offset == 1) {
		/* offset */
		offset = G_OFFSET(br->pfidx_addr);
	} else {
		return (EINVAL);
	}

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
	if (rc)
		return (rc);

	if (br->len == 0) {
		/* clear */
		rc = -t4_load_boot(sc, NULL, offset, 0);
		goto done;
	}

	br_data = malloc(br->len, M_CXGBE, M_WAITOK);
	if (br_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(br->data, br_data, br->len);
	if (rc == 0)
		rc = -t4_load_boot(sc, br_data, offset, br->len);

	free(br_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
load_bootcfg(struct adapter *sc, struct t4_data *bc)
{
	int rc;
	uint8_t *bc_data = NULL;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
	if (rc)
		return (rc);

	if (bc->len == 0) {
		/* clear */
		rc = -t4_load_bootcfg(sc, NULL, 0);
		goto done;
	}

	bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
	if (bc_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(bc->data, bc_data, bc->len);
	if (rc == 0)
		rc = -t4_load_bootcfg(sc, bc_data, bc->len);

	free(bc_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
{
	int rc;
	struct cudbg_init *cudbg;
	void *handle, *buf;

	/* buf is large, don't block if no memory is available */
	buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
	if (buf == NULL)
		return (ENOMEM);

	handle = cudbg_alloc_handle();
	if (handle == NULL) {
		free(buf, M_CXGBE);
		return (ENOMEM);
	}

	cudbg = cudbg_get_init(handle);
	cudbg->adap = sc;
	cudbg->print = (cudbg_print_cb)printf;

	device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
	    __func__, dump->wr_flash, dump->len, dump->data);

	if (dump->wr_flash)
		cudbg->use_flash = 1;
	MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
	memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));

	rc = cudbg_collect(handle, buf, &dump->len);
	if (rc == 0)
		rc = copyout(buf, dump->data, dump->len);

	cudbg_free_handle(handle);
	free(buf, M_CXGBE);
	return (rc);
}

static void
free_offload_policy(struct t4_offload_policy *op)
{
	struct offload_rule *r;
	int i;

	if (op == NULL)
		return;

	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		free(r->bpf_prog.bf_insns, M_CXGBE);
	}
	free(op->rule, M_CXGBE);
	free(op, M_CXGBE);
}

static int
set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
{
	int i, rc, len;
	struct t4_offload_policy *op, *old;
	struct bpf_program *bf;
	const struct offload_settings *s;
	struct offload_rule *r;
	void *u;

	if (!is_offload(sc))
		return (ENODEV);

	if (uop->nrules == 0) {
		/* Delete installed policies. */
		op = NULL;
		goto set_policy;
	} else if (uop->nrules > 256) { /* arbitrary */
		return (E2BIG);
	}

	/* Copy userspace offload policy to kernel */
	op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
	op->nrules = uop->nrules;
	len = op->nrules * sizeof(struct offload_rule);
	op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
	rc = copyin(uop->rule, op->rule, len);
	if (rc != 0) {
		free(op->rule, M_CXGBE);
		free(op, M_CXGBE);
		return (rc);
	}

	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {

		/* Validate open_type */
		if (r->open_type != OPEN_TYPE_LISTEN &&
		    r->open_type != OPEN_TYPE_ACTIVE &&
		    r->open_type != OPEN_TYPE_PASSIVE &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			rc = EINVAL;
error:
			/*
			 * Rules 0 to i have malloc'd filters that need to be
			 * freed.  Rules i+1 to nrules have userspace pointers
			 * and should be left alone.
			 */
			op->nrules = i;
			free_offload_policy(op);
			return (rc);
		}

		/* Validate settings */
		s = &r->settings;
		if ((s->offload != 0 && s->offload != 1) ||
		    s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
		    s->sched_class < -1 ||
		    s->sched_class >= sc->chip_params->nsched_cls) {
			rc = EINVAL;
			goto error;
		}

		bf = &r->bpf_prog;
		u = bf->bf_insns;	/* userspace ptr */
		bf->bf_insns = NULL;
		if (bf->bf_len == 0) {
			/* legal, matches everything */
			continue;
		}
		len = bf->bf_len * sizeof(*bf->bf_insns);
		bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
		rc = copyin(u, bf->bf_insns, len);
		if (rc != 0)
			goto error;

		if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
			rc = EINVAL;
			goto error;
		}
	}
set_policy:
	rw_wlock(&sc->policy_lock);
	old = sc->policy;
	sc->policy = op;
	rw_wunlock(&sc->policy_lock);
	free_offload_policy(old);

	return (0);
}
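
/*
 * Illustrative usage (sketch): a policy is normally installed from userspace
 * with the CHELSIO_T4_SET_OFLD_POLICY ioctl (e.g. via cxgbetool's policy
 * subcommand); passing nrules == 0 removes the installed policy, as handled
 * above.
 */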
#define MAX_READ_BUF_SIZE (128 * 1024)
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, remaining, n;
	void *buf;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		n = min(remaining, MAX_READ_BUF_SIZE);
		read_via_memwin(sc, 2, addr, buf, n);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
#undef MAX_READ_BUF_SIZE
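
/*
 * Sizing note (illustrative): reads are bounced through a bounded kernel
 * buffer, so e.g. a 1 MB request is serviced as eight 128 KB
 * read_via_memwin() + copyout() round trips rather than one large copy.
 */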
static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > sizeof(i2cd->data))
		return (EFBIG);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}

int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}

int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	struct ifnet *ifp;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
	    ("%s: port_type %u", __func__, pi->port_type));

	vi = &pi->vi[0];
	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
		PORT_LOCK(pi);
		build_medialist(pi);
		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
			fixup_link_config(pi);
			apply_link_config(pi);
		}
		PORT_UNLOCK(pi);
		end_synchronized_op(sc, LOCK_HELD);
	}

	ifp = vi->ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
		    port_top_speed(pi), mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct port_info *pi)
{
	struct vi_info *vi;
	struct ifnet *ifp;
	struct link_config *lc;
	int v;

	PORT_LOCK_ASSERT_OWNED(pi);

	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)
			continue;

		lc = &pi->link_cfg;
		if (lc->link_ok) {
			ifp->if_baudrate = IF_Mbps(lc->speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}

void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}
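
/*
 * Hypothetical userspace usage (sketch, not part of the driver; the device
 * node name and register address are illustrative):
 *
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	struct t4_reg reg = { .addr = 0x7e40, .size = 4 };
 *
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("0x%x: 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 *
 * All of these ioctls require PRIV_DRIVER, as checked below.
 */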
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = t4_get_regs_len(sc);
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			rc = ENOBUFS;
			break;
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v, bg_map;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];
		if (pi == NULL)
			return (EIO);

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		pi->tnl_cong_drops = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		bg_map = pi->mps_bg_map;
		v = 0;	/* reuse */
		while (bg_map) {
			i = ffs(bg_map) - 1;
			t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
			bg_map &= ~(1 << i);
		}
		mtx_unlock(&sc->reg_lock);

		/*
		 * Since this command accepts a port, clear stats for
		 * all VIs on this port.
		 */
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE) {
				struct sge_rxq *rxq;
				struct sge_txq *txq;
				struct sge_wrq *wrq;

				for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
					rxq->lro.lro_queued = 0;
					rxq->lro.lro_flushed = 0;
#endif
					rxq->rxcsum = 0;
					rxq->vlan_extraction = 0;
				}

				for_each_txq(vi, i, txq) {
					txq->txcsum = 0;
					txq->tso_wrs = 0;
					txq->vlan_insertion = 0;
					txq->imm_wrs = 0;
					txq->sgl_wrs = 0;
					txq->txpkt_wrs = 0;
					txq->txpkts0_wrs = 0;
					txq->txpkts1_wrs = 0;
					txq->txpkts0_pkts = 0;
					txq->txpkts1_pkts = 0;
					mp_ring_reset_stats(txq->r);
				}

#ifdef TCP_OFFLOAD
				/* nothing to clear for each ofld_rxq */

				for_each_ofld_txq(vi, i, wrq) {
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
#endif

				if (IS_MAIN_VI(vi)) {
					wrq = &sc->sge.ctrlq[pi->port_id];
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
			}
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_LOAD_CFG:
		rc = load_cfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_LOAD_BOOT:
		rc = load_boot(sc, (struct t4_bootrom *)data);
		break;
	case CHELSIO_T4_LOAD_BOOTCFG:
		rc = load_bootcfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_CUDBG_DUMP:
		rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
		break;
	case CHELSIO_T4_SET_OFLD_POLICY:
		rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
		break;
	default:
		rc = ENOTTY;
	}

	return (rc);
}
void
t4_db_full(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_db_dropped(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

#ifdef TCP_OFFLOAD
static int
toe_capability(struct vi_info *vi, int enable)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
			/* TOE is already enabled. */
			return (0);
		}

		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(vi->flags & VI_INIT_DONE)) {
			rc = vi_full_init(vi);
			if (rc)
				return (rc);
		}
		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
			rc = vi_full_init(&pi->vi[0]);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;
		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
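
/*
 * Typical workflow (sketch, not from this file): load the TOE module and
 * then enable the capability on an interface:
 *
 *   # kldload t4_tom
 *   # ifconfig cxgbe0 toe
 *
 * The ifconfig path lands in toe_capability() with enable != 0.
 */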
/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}
	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}
			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif /* TCP_OFFLOAD */
/*
 * t  = ptr to tunable.
 * nc = number of CPUs.
 * c  = compiled in default for that tunable.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
	int nq;

	if (*t > 0)
		return;
	nq = *t < 0 ? -*t : c;
	*t = min(nq, nc);
}
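
/*
 * Worked example (illustrative, nc == 8): a tunable left at 0 becomes
 * min(c, 8); one set to -16 is treated as a request for 16 and is capped at
 * 8; one explicitly set to a positive value, say 4, is left untouched.
 */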
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq < 1) {
#ifdef RSS
		t4_ntxq = rss_getnumbuckets();
#endif
		calculate_nqueues(&t4_ntxq, nc, NTXQ);
	}

	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);

	if (t4_nrxq < 1) {
#ifdef RSS
		t4_nrxq = rss_getnumbuckets();
#endif
		calculate_nqueues(&t4_nrxq, nc, NRXQ);
	}

	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
#endif
#ifdef TCP_OFFLOAD
	calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;

	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}

	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
		t4_tmr_idx_ofld = TMR_IDX_OFLD;

	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;

	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

	if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
		t4_tmr_idx = TMR_IDX;

	if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
		t4_pktc_idx = PKTC_IDX;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;

	/*
	 * Number of VIs to create per-port.  The first VI is the "main" regular
	 * VI for the port.  The rest are additional virtual interfaces on the
	 * same physical port.  Note that the main VI does not have native
	 * netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis < 1)
		t4_num_vis = 1;
	if (t4_num_vis > nitems(vi_mac_funcs)) {
		t4_num_vis = nitems(vi_mac_funcs);
		printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
	}
	if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
		pcie_relaxed_ordering = 1;
#if defined(__i386__) || defined(__amd64__)
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			pcie_relaxed_ordering = 0;
#endif
	}
}
#ifdef DDB
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);

	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];

		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}
static void
t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)
		return;

	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d  %15ju  %8s  %8s  ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}
static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);

DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t;
	bool valid;

	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}

DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int radix, tid, t;
	bool valid;

	valid = false;
	radix = db_radix;
	db_radix = 10;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
		if (t == tNUMBER) {
			tid = db_tok_number;
			valid = true;
		}
	}
	db_radix = radix;
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}
#endif /* DDB */
/*
 * Borrowed from cesa_prep_aes_key().
 *
 * NB: The crypto engine wants the words in the decryption key in reverse
 * order.
 */
void
t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
{
	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
	uint32_t *dkey;
	int i;

	rijndaelKeySetupEnc(ek, enc_key, kbits);
	dkey = dec_key;
	dkey += (kbits / 8) / 4;

	switch (kbits) {
	case 128:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 10 + i]);
		break;
	case 192:
		for (i = 0; i < 2; i++)
			*--dkey = htobe32(ek[4 * 11 + 2 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 12 + i]);
		break;
	case 256:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 13 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 14 + i]);
		break;
	}
	MPASS(dkey == dec_key);
}
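
/*
 * Illustrative call (sketch): for AES-256 the caller supplies a 32-byte
 * destination buffer and gets the expanded decryption key back in the byte
 * order the crypto engine expects:
 *
 *	uint32_t dk[8];
 *
 *	t4_aes_getdeckey(dk, key, 256);
 */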
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
			    t4_filter_rpl, CPL_COOKIE_FILTER);
			t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
			    do_l2t_write_rpl, CPL_COOKIE_FILTER);
			t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
			    t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
			    t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
			    t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
			t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
			    do_smt_write_rpl);
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}
static devclass_t t4_devclass, t5_devclass, t6_devclass;
static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
MODULE_VERSION(cc, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
MODULE_VERSION(vcc, 1);
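
/*
 * Loading the driver (sketch): the whole family of modules above comes in
 * with the if_cxgbe module, e.g.:
 *
 *   # kldload if_cxgbe
 */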