/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#endif
#include <crypto/rijndael/rijndael.h>
#ifdef DDB
#include <ddb/db_lex.h>
#endif
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_mp_ring.h"
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),

	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};
/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe, vcxgbe_probe),
	DEVMETHOD(device_attach, vcxgbe_attach),
	DEVMETHOD(device_detach, vcxgbe_detach),

	DEVMETHOD_END
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};
static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};
/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};
/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe, t6_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
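
/*
 * Illustrative example (not part of the original source; the value 16 below
 * is only an assumed placeholder):
 *
 *	#define NTXQ 16
 *	static int t4_ntxq = -NTXQ;
 *	TUNABLE_INT("hw.cxgbe.ntxq", &t4_ntxq);
 *
 * declared this way, the tunable asks tweak_tunables() to pick a reasonable
 * default of at most 16 txq's at load time; setting hw.cxgbe.ntxq=4 in
 * loader.conf(5) overrides that and uses exactly 4.
 */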
/*
 * Number of queues for tx and rx, NIC and offload.
 */
TUNABLE_INT("hw.cxgbe.ntxq", &t4_ntxq);
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */

TUNABLE_INT("hw.cxgbe.nrxq", &t4_nrxq);
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */

static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
static int t4_nofldtxq = -NOFLDTXQ;
TUNABLE_INT("hw.cxgbe.nofldtxq", &t4_nofldtxq);

static int t4_nofldrxq = -NOFLDRXQ;
TUNABLE_INT("hw.cxgbe.nofldrxq", &t4_nofldrxq);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_ofld", &t4_tmr_idx_ofld);

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_ofld", &t4_pktc_idx_ofld);
/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
#endif
#ifdef DEV_NETMAP
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif
/*
 * Holdoff parameters for ports.
 */
int t4_tmr_idx = TMR_IDX;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx", &t4_tmr_idx);
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx", &t4_pktc_idx);
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */
/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
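
/*
 * Example: the default of 7 (INTx | MSI | MSI-X) lets the driver pick the
 * best type available; hw.cxgbe.interrupt_types=2 in loader.conf(5) would
 * restrict it to MSI.
 */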
/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF	"default"
#define BUILTIN_CF	"built-in"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
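
/*
 * Example: hw.cxgbe.config_file="flash" in loader.conf(5) selects the
 * configuration file stored in the card's flash instead of the default one
 * that ships with the firmware.
 */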
/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
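
/*
 * Example: hw.cxgbe.pause_settings=1 heeds incoming PAUSE frames (bit 0) but
 * never emits them (bit 1 clear); the default of 3 enables both directions.
 */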
/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);
/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 * 0 to disable.
 * 1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
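
/*
 * Example: hw.cxgbe.num_vis=2 gives each port one extra virtual interface
 * (vcxgbe/vcxl/vcc) in addition to the main one (cxgbe/cxl/cc).
 */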
/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 */
static int pcie_relaxed_ordering = -1;
TUNABLE_INT("hw.cxgbe.pcie_relaxed_ordering", &pcie_relaxed_ordering);
static int t4_panic_on_fatal_err = 0;
TUNABLE_INT("hw.cxgbe.panic_on_fatal_err", &t4_panic_on_fatal_err);

static int t4_cop_managed_offloading = 0;
TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading);
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FIFCOE,
};
struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t num_vis;	/* number of VIs for each port */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t ntxq;		/* # of NIC txq's for each port */
	uint16_t nrxq;		/* # of NIC rxq's for each port */
	uint16_t nofldtxq;	/* # of TOE/ETHOFLD txq's for each port */
	uint16_t nofldrxq;	/* # of TOE rxq's for each port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static void init_l1cfg(struct port_info *);
static int apply_l1cfg(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
	{0x5418, "Chelsio T540-BT"},		/* 4 x 10GBaseT */
	{0x5419, "Chelsio T540-LP-BT"},		/* 4 x 10GBaseT */
	{0x541a, "Chelsio T540-SO-BT"},		/* 4 x 10GBaseT, nomem */
	{0x541b, "Chelsio T540-SO-CR"},		/* 4 x 10G, nomem */
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	{0x6480, "Custom T6225-CR"},
	{0x6481, "Custom T62100-CR"},
	{0x6482, "Custom T6225-CR"},
	{0x6483, "Custom T62100-CR"},
	{0x6484, "Custom T64100-CR"},
	{0x6485, "Custom T6240-SO"},
	{0x6486, "Custom T6225-SO-CR"},
	{0x6487, "Custom T6225-CR"},
};
/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}
static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};
void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, rqidx, tqidx, nports;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
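
		/*
		 * Honor hw.cxgbe.pcie_relaxed_ordering: 0 forces the PCIe
		 * relaxed ordering enable bit off, 1 forces it on, and any
		 * other value leaves whatever is already configured alone.
		 */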
		if (pcie_relaxed_ordering == 0 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		} else if (pcie_relaxed_ordering == 1 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		}
	}
	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rw_init(&sc->policy_lock, "connection offload policy");

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */
	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif
	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}
	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	nports = sc->params.nports;
	rc = cfg_itype_and_nqueues(sc, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	num_vis = iaq.num_vis;
	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = nports * iaq.nrxq;
	s->ntxq = nports * iaq.ntxq;
	if (num_vis > 1) {
		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += nports;		/* ctrl queues: 1 per port */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (is_offload(sc) || is_ethoffload(sc)) {
		s->nofldtxq = nports * iaq.nofldtxq;
		if (num_vis > 1)
			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
		s->neq += s->nofldtxq;

		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = nports * iaq.nofldrxq;
		if (num_vis > 1)
			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
		s->neq += s->nofldrxq;	/* free list */
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif
	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_smt(sc, M_WAITOK);
	t4_init_tx_sched(sc);
	t4_init_etid_table(sc);
	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			vi->tmr_idx = t4_tmr_idx;
			vi->pktc_idx = t4_pktc_idx;
			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			vi->first_ofld_txq = ofld_tqidx;
			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;

			ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
			vi->first_nm_rxq = nm_rqidx;
			vi->first_nm_txq = nm_tqidx;
			vi->nnmrxq = iaq.nnmrxq_vi;
			vi->nnmtxq = iaq.nnmtxq_vi;
			nm_rqidx += vi->nnmrxq;
			nm_tqidx += vi->nnmtxq;
#endif
		}
	}
	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}
static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}
static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc != 0) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}
int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}
	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);
	if (sc->smt)
		t4_free_smt(sc->smt);
	t4_free_etid_table(sc);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tids.hpftid_tab, M_CXGBE);
	free_hftid_hash(&sc->tids);
	free(sc->tids.atid_tab, M_CXGBE);
	free(sc->tids.tid_tab, M_CXGBE);
	free(sc->tt.tls_rx_ports, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}
	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock)) {
		mtx_destroy(&sc->tids.ftid_lock);
		cv_destroy(&sc->tids.ftid_cv);
	}
	if (mtx_initialized(&sc->tids.atid_lock))
		mtx_destroy(&sc->tids.atid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	if (rw_initialized(&sc->policy_lock)) {
		rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
		if (sc->policy != NULL)
			free_offload_policy(sc->policy);
#endif
	}

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}
static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
    IFCAP_HWRXTSTMP)
#define T4_CAP_ENABLE (T4_CAP)
static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;
#ifdef RATELIMIT
	ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
	ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
	ifp->if_snd_tag_query = cxgbe_snd_tag_query;
	ifp->if_snd_tag_free = cxgbe_snd_tag_free;
#endif

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		ifp->if_capabilities |= IFCAP_NETMAP;
#endif
#ifdef RATELIMIT
	if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0)
		ifp->if_capabilities |= IFCAP_TXRTLMT;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
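
	/*
	 * The stack counts the L2 header against if_hw_tsomax, so leave room
	 * for an Ethernet header plus a VLAN tag in the 64K TSO budget.
	 */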
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
	case IFCAP_TOE:
		sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
		break;
	case IFCAP_TOE | IFCAP_TXRTLMT:
		sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
		break;
	case IFCAP_TXRTLMT:
		sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
		break;
	}
#endif
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc != 0)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}
static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	if_free(vi->ifp);
	vi->ifp = NULL;
}
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);
	ifmedia_removeall(&pi->media);

	end_synchronized_op(sc, 0);

	return (0);
}
static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
#ifdef RATELIMIT
		if (mask & IFCAP_TXRTLMT)
			ifp->if_capenable ^= IFCAP_TXRTLMT;
#endif
		if (mask & IFCAP_HWRXTSTMP) {
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_HWRXTSTMP;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_HWRXTSTMP)
					rxq->iq.flags |= IQ_RX_TIMESTAMP;
				else
					rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
			}
		}

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (rc != 0)
			break;
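		/*
		 * 0xA0 and 0xA2 are the conventional SFF-8472 i2c addresses of
		 * a transceiver module's EEPROM and diagnostic pages; nothing
		 * else may be read through this ioctl.
		 */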
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			break;
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}
#ifdef RATELIMIT
	if (m->m_pkthdr.snd_tag != NULL) {
		/* EAGAIN tells the stack we are not the correct interface. */
		if (__predict_false(ifp != m->m_pkthdr.snd_tag->ifp)) {
			m_freem(m);
			return (EAGAIN);
		}

		return (ethofld_transmit(ifp, m));
	}
#endif

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
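	/*
	 * Worked example: with ntxq = 8 and rsrv_noflowq = 1, flowid-hashed
	 * traffic below is spread over txq 1..7 while txq 0 stays reserved
	 * for packets without a flowid.
	 */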
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags |= EQ_QFLUSH;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_QFLUSH;
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}
static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}
	default:
		return (if_get_counter_default(ifp, c));
	}
}
static uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	if (pi->nvi > 1 || sc->flags & IS_VF)
		return (vi_get_counter(ifp, c));

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
/*
 * The kernel picks a media from the list we had provided so we do not have to
 * validate the request.
 */
static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct ifmedia *ifm = &pi->media;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
	if (rc != 0)
		return (rc);
	PORT_LOCK(pi);
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		MPASS(lc->supported & FW_PORT_CAP_ANEG);
		lc->requested_aneg = AUTONEG_ENABLE;
	} else {
		lc->requested_aneg = AUTONEG_DISABLE;
		lc->requested_speed =
		    ifmedia_baudrate(ifm->ifm_media) / 1000000;
		lc->requested_fc = 0;
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
			lc->requested_fc |= PAUSE_RX;
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
			lc->requested_fc |= PAUSE_TX;
	}
	rc = apply_l1cfg(pi);
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
	return (rc);
}
/*
 * Mbps to FW_PORT_CAP_SPEED_* bit.
 */
static uint16_t
speed_to_fwspeed(int speed)
{
	switch (speed) {
	case 100000:
		return (FW_PORT_CAP_SPEED_100G);
	case 40000:
		return (FW_PORT_CAP_SPEED_40G);
	case 25000:
		return (FW_PORT_CAP_SPEED_25G);
	case 10000:
		return (FW_PORT_CAP_SPEED_10G);
	case 1000:
		return (FW_PORT_CAP_SPEED_1G);
	case 100:
		return (FW_PORT_CAP_SPEED_100M);
	default:
		return (0);
	}
}
/*
 * Base media word (without ETHER, pause, link active, etc.) for the port at the
 * given speed.
 */
static int
port_mword(struct port_info *pi, uint16_t speed)
{

	MPASS(speed & M_FW_PORT_CAP_SPEED);
	MPASS(powerof2(speed));

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		/* BaseT */
		switch (speed) {
		case FW_PORT_CAP_SPEED_100M:
			return (IFM_100_T);
		case FW_PORT_CAP_SPEED_1G:
			return (IFM_1000_T);
		case FW_PORT_CAP_SPEED_10G:
			return (IFM_10G_T);
		}
		break;
	case FW_PORT_TYPE_KX4:
		if (speed == FW_PORT_CAP_SPEED_10G)
			return (IFM_10G_KX4);
		break;
	case FW_PORT_TYPE_CX4:
		if (speed == FW_PORT_CAP_SPEED_10G)
			return (IFM_10G_CX4);
		break;
	case FW_PORT_TYPE_KX:
		if (speed == FW_PORT_CAP_SPEED_1G)
			return (IFM_1000_KX);
		break;
	case FW_PORT_TYPE_KR:
	case FW_PORT_TYPE_BP_AP:
	case FW_PORT_TYPE_BP4_AP:
	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_KR_SFP28:
	case FW_PORT_TYPE_KR_XLAUI:
		switch (speed) {
		case FW_PORT_CAP_SPEED_1G:
			return (IFM_1000_KX);
		case FW_PORT_CAP_SPEED_10G:
			return (IFM_10G_KR);
		case FW_PORT_CAP_SPEED_25G:
			return (IFM_25G_KR);
		case FW_PORT_CAP_SPEED_40G:
			return (IFM_40G_KR4);
		case FW_PORT_CAP_SPEED_100G:
			return (IFM_100G_KR4);
		}
		break;
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
	case FW_PORT_TYPE_QSFP:
	case FW_PORT_TYPE_CR4_QSFP:
	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_CR2_QSFP:
	case FW_PORT_TYPE_SFP28:
		/* Pluggable transceiver */
		switch (pi->mod_type) {
		case FW_PORT_MOD_TYPE_LR:
			switch (speed) {
			case FW_PORT_CAP_SPEED_1G:
				return (IFM_1000_LX);
			case FW_PORT_CAP_SPEED_10G:
				return (IFM_10G_LR);
			case FW_PORT_CAP_SPEED_25G:
				return (IFM_25G_LR);
			case FW_PORT_CAP_SPEED_40G:
				return (IFM_40G_LR4);
			case FW_PORT_CAP_SPEED_100G:
				return (IFM_100G_LR4);
			}
			break;
		case FW_PORT_MOD_TYPE_SR:
			switch (speed) {
			case FW_PORT_CAP_SPEED_1G:
				return (IFM_1000_SX);
			case FW_PORT_CAP_SPEED_10G:
				return (IFM_10G_SR);
			case FW_PORT_CAP_SPEED_25G:
				return (IFM_25G_SR);
			case FW_PORT_CAP_SPEED_40G:
				return (IFM_40G_SR4);
			case FW_PORT_CAP_SPEED_100G:
				return (IFM_100G_SR4);
			}
			break;
		case FW_PORT_MOD_TYPE_ER:
			if (speed == FW_PORT_CAP_SPEED_10G)
				return (IFM_10G_ER);
			break;
		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			switch (speed) {
			case FW_PORT_CAP_SPEED_1G:
				return (IFM_1000_CX);
			case FW_PORT_CAP_SPEED_10G:
				return (IFM_10G_TWINAX);
			case FW_PORT_CAP_SPEED_25G:
				return (IFM_25G_CR);
			case FW_PORT_CAP_SPEED_40G:
				return (IFM_40G_CR4);
			case FW_PORT_CAP_SPEED_100G:
				return (IFM_100G_CR4);
			}
			break;
		case FW_PORT_MOD_TYPE_LRM:
			if (speed == FW_PORT_CAP_SPEED_10G)
				return (IFM_10G_LRM);
			break;
		case FW_PORT_MOD_TYPE_NA:
			MPASS(0);	/* Not pluggable? */
			/* fall through */
		case FW_PORT_MOD_TYPE_ERROR:
		case FW_PORT_MOD_TYPE_UNKNOWN:
		case FW_PORT_MOD_TYPE_NOTSUPPORTED:
			break;
		case FW_PORT_MOD_TYPE_NONE:
			return (IFM_NONE);
		}
		break;
	case FW_PORT_TYPE_NONE:
		return (IFM_NONE);
	}

	return (IFM_UNKNOWN);
}
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
		return;
	PORT_LOCK(pi);
	if (pi->up_vis == 0) {
		/*
		 * If all the interfaces are administratively down the firmware
		 * does not report transceiver changes.  Refresh port info here
		 * so that ifconfig displays accurate ifmedia at all times.
		 * This is the only reason we have a synchronized op in this
		 * function.  Just PORT_LOCK would have been enough otherwise.
		 */
		t4_update_port_info(pi);
		build_medialist(pi, &pi->media);
	}

	/* ifm_status */
	ifmr->ifm_status = IFM_AVALID;
	if (lc->link_ok == 0)
		goto done;
	ifmr->ifm_status |= IFM_ACTIVE;

	/* ifm_active */
	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
	if (lc->fc & PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (lc->fc & PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	ifmr->ifm_active |= port_mword(pi, speed_to_fwspeed(lc->speed));
done:
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
}
2310 vcxgbe_probe(device_t dev)
2313 struct vi_info *vi = device_get_softc(dev);
2315 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2317 device_set_desc_copy(dev, buf);
2319 return (BUS_PROBE_DEFAULT);
2323 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2325 int func, index, rc;
2326 uint32_t param, val;
2328 ASSERT_SYNCHRONIZED_OP(sc);
2330 index = vi - pi->vi;
2331 MPASS(index > 0); /* This function deals with _extra_ VIs only */
2332 KASSERT(index < nitems(vi_mac_funcs),
2333 ("%s: VI %s doesn't have a MAC func", __func__,
2334 device_get_nameunit(vi->dev)));
2335 func = vi_mac_funcs[index];
2336 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2337 vi->hw_addr, &vi->rss_size, func, 0);
2339 device_printf(vi->dev, "failed to allocate virtual interface %d"
2340 "for port %d: %d\n", index, pi->port_id, -rc);
2344 if (chip_id(sc) <= CHELSIO_T5)
2345 vi->smt_idx = (rc & 0x7f) << 1;
2347 vi->smt_idx = (rc & 0x7f);
2349 if (vi->rss_size == 1) {
2351 * This VI didn't get a slice of the RSS table. Reduce the
2352 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2353 * configuration file (nvi, rssnvi for this PF) if this is a problem.
2356 device_printf(vi->dev, "RSS table not available.\n");
2357 vi->rss_base = 0xffff;
2362 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2363 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2364 V_FW_PARAMS_PARAM_YZ(vi->viid);
2365 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2367 vi->rss_base = 0xffff;
2369 MPASS((val >> 16) == vi->rss_size);
2370 vi->rss_base = val & 0xffff;
2377 vcxgbe_attach(device_t dev)
2380 struct port_info *pi;
2384 vi = device_get_softc(dev);
2388 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2391 rc = alloc_extra_vi(sc, pi, vi);
2392 end_synchronized_op(sc, 0);
2396 rc = cxgbe_vi_attach(dev, vi);
2398 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2405 vcxgbe_detach(device_t dev)
2410 vi = device_get_softc(dev);
2411 sc = vi->pi->adapter;
2415 cxgbe_vi_detach(vi);
2416 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2418 end_synchronized_op(sc, 0);
2424 t4_fatal_err(struct adapter *sc)
2426 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2427 t4_intr_disable(sc);
2428 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2429 device_get_nameunit(sc->dev));
2430 if (t4_panic_on_fatal_err)
2431 panic("panic requested on fatal error");
2435 t4_add_adapter(struct adapter *sc)
2437 sx_xlock(&t4_list_lock);
2438 SLIST_INSERT_HEAD(&t4_list, sc, link);
2439 sx_xunlock(&t4_list_lock);
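/*
 * Sketch of the matching traversal (assuming read-only access is enough):
 * any walk of t4_list must hold t4_list_lock, just like the insertion
 * above.
 *
 *	struct adapter *iter;
 *
 *	sx_slock(&t4_list_lock);
 *	SLIST_FOREACH(iter, &t4_list, link) {
 *		... use iter ...
 *	}
 *	sx_sunlock(&t4_list_lock);
 */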
2443 t4_map_bars_0_and_4(struct adapter *sc)
2445 sc->regs_rid = PCIR_BAR(0);
2446 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2447 &sc->regs_rid, RF_ACTIVE);
2448 if (sc->regs_res == NULL) {
2449 device_printf(sc->dev, "cannot map registers.\n");
2452 sc->bt = rman_get_bustag(sc->regs_res);
2453 sc->bh = rman_get_bushandle(sc->regs_res);
2454 sc->mmio_len = rman_get_size(sc->regs_res);
2455 setbit(&sc->doorbells, DOORBELL_KDB);
2457 sc->msix_rid = PCIR_BAR(4);
2458 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2459 &sc->msix_rid, RF_ACTIVE);
2460 if (sc->msix_res == NULL) {
2461 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2469 t4_map_bar_2(struct adapter *sc)
2473 * T4: only the iWARP driver uses the userspace doorbells. There is no
2474 * need to map the doorbell BAR if RDMA is disabled.
2476 if (is_t4(sc) && sc->rdmacaps == 0)
2479 sc->udbs_rid = PCIR_BAR(2);
2480 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2481 &sc->udbs_rid, RF_ACTIVE);
2482 if (sc->udbs_res == NULL) {
2483 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2486 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2488 if (chip_id(sc) >= CHELSIO_T5) {
2489 setbit(&sc->doorbells, DOORBELL_UDB);
2490 #if defined(__i386__) || defined(__amd64__)
2491 if (t5_write_combine) {
2495 * Enable write combining on BAR2. This is the
2496 * userspace doorbell BAR and is split into 128B
2497 * (UDBS_SEG_SIZE) doorbell regions, each associated
2498 * with an egress queue. The first 64B has the doorbell
2499 * and the second 64B can be used to submit a tx work
2500 * request with an implicit doorbell.
2503 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2504 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2506 clrbit(&sc->doorbells, DOORBELL_UDB);
2507 setbit(&sc->doorbells, DOORBELL_WCWR);
2508 setbit(&sc->doorbells, DOORBELL_UDBWC);
2510 device_printf(sc->dev,
2511 "couldn't enable write combining: %d\n",
2515 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2516 t4_write_reg(sc, A_SGE_STAT_CFG,
2517 V_STATSOURCE_T5(7) | mode);
2521 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
2526 struct memwin_init {
2531 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2532 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2533 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2534 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2537 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2538 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2539 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2540 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2544 setup_memwin(struct adapter *sc)
2546 const struct memwin_init *mw_init;
2553 * Read low 32b of bar0 indirectly via the hardware backdoor
2554 * mechanism. Works from within PCI passthrough environments
2555 * too, where rman_get_start() can return a different value. We
2556 * need to program the T4 memory window decoders with the actual
2557 * addresses that will be coming across the PCIe link.
2559 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2560 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2562 mw_init = &t4_memwin[0];
2564 /* T5+ use the relative offset inside the PCIe BAR */
2567 mw_init = &t5_memwin[0];
2570 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2571 rw_init(&mw->mw_lock, "memory window access");
2572 mw->mw_base = mw_init->base;
2573 mw->mw_aperture = mw_init->aperture;
2576 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2577 (mw->mw_base + bar0) | V_BIR(0) |
2578 V_WINDOW(ilog2(mw->mw_aperture) - 10));
2579 rw_wlock(&mw->mw_lock);
2580 position_memwin(sc, i, 0);
2581 rw_wunlock(&mw->mw_lock);
2585 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2589 * Positions the memory window at the given address in the card's address space.
2590 * There are some alignment requirements and the actual position may be at an
2591 * address prior to the requested address. mw->mw_curpos always has the actual
2592 * position of the window.
2595 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2601 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2602 mw = &sc->memwin[idx];
2603 rw_assert(&mw->mw_lock, RA_WLOCKED);
2607 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
2609 pf = V_PFNUM(sc->pf);
2610 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
2612 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2613 t4_write_reg(sc, reg, mw->mw_curpos | pf);
2614 t4_read_reg(sc, reg); /* flush */
2618 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2624 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2626 /* Memory can only be accessed in naturally aligned 4 byte units */
2627 if (addr & 3 || len & 3 || len <= 0)
2630 mw = &sc->memwin[idx];
2632 rw_rlock(&mw->mw_lock);
2633 mw_end = mw->mw_curpos + mw->mw_aperture;
2634 if (addr >= mw_end || addr < mw->mw_curpos) {
2635 /* Will need to reposition the window */
2636 if (!rw_try_upgrade(&mw->mw_lock)) {
2637 rw_runlock(&mw->mw_lock);
2638 rw_wlock(&mw->mw_lock);
2640 rw_assert(&mw->mw_lock, RA_WLOCKED);
2641 position_memwin(sc, idx, addr);
2642 rw_downgrade(&mw->mw_lock);
2643 mw_end = mw->mw_curpos + mw->mw_aperture;
2645 rw_assert(&mw->mw_lock, RA_RLOCKED);
2646 while (addr < mw_end && len > 0) {
2648 v = t4_read_reg(sc, mw->mw_base + addr -
2650 *val++ = le32toh(v);
2653 t4_write_reg(sc, mw->mw_base + addr -
2654 mw->mw_curpos, htole32(v));
2659 rw_runlock(&mw->mw_lock);
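/*
 * Usage sketch (the trailing length and direction arguments are assumed
 * from context; the full prototype is not visible here): reading four
 * 32-bit words of adapter memory through memory window 0 would look like
 *
 *	uint32_t buf[4];
 *
 *	rw_via_memwin(sc, 0, addr, buf, sizeof(buf), 0);
 *
 * with 0 selecting a read.  The function repositions the window on its
 * own if addr falls outside the current aperture.
 */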
2666 alloc_atid_tab(struct tid_info *t, int flags)
2670 MPASS(t->natids > 0);
2671 MPASS(t->atid_tab == NULL);
2673 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
2675 if (t->atid_tab == NULL)
2677 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
2678 t->afree = t->atid_tab;
2679 t->atids_in_use = 0;
2680 for (i = 1; i < t->natids; i++)
2681 t->atid_tab[i - 1].next = &t->atid_tab[i];
2682 t->atid_tab[t->natids - 1].next = NULL;
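/*
 * After the loop above the whole table is one free list threaded through
 * the entries, e.g. for natids == 4:
 *
 *	afree -> tab[0] -> tab[1] -> tab[2] -> tab[3] -> NULL
 *
 * so alloc_atid() below can pop from the head and free_atid() can push
 * back onto it, both in O(1) under atid_lock.
 */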
2688 free_atid_tab(struct tid_info *t)
2691 KASSERT(t->atids_in_use == 0,
2692 ("%s: %d atids still in use.", __func__, t->atids_in_use));
2694 if (mtx_initialized(&t->atid_lock))
2695 mtx_destroy(&t->atid_lock);
2696 free(t->atid_tab, M_CXGBE);
2701 alloc_atid(struct adapter *sc, void *ctx)
2703 struct tid_info *t = &sc->tids;
2706 mtx_lock(&t->atid_lock);
2708 union aopen_entry *p = t->afree;
2710 atid = p - t->atid_tab;
2711 MPASS(atid <= M_TID_TID);
2716 mtx_unlock(&t->atid_lock);
2721 lookup_atid(struct adapter *sc, int atid)
2723 struct tid_info *t = &sc->tids;
2725 return (t->atid_tab[atid].data);
2729 free_atid(struct adapter *sc, int atid)
2731 struct tid_info *t = &sc->tids;
2732 union aopen_entry *p = &t->atid_tab[atid];
2734 mtx_lock(&t->atid_lock);
2738 mtx_unlock(&t->atid_lock);
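/*
 * Typical lifecycle, sketched (ctx is whatever per-connection state the
 * caller wants back from lookups; a negative atid is assumed to mean the
 * free list was empty):
 *
 *	atid = alloc_atid(sc, ctx);
 *	if (atid >= 0) {
 *		...
 *		MPASS(lookup_atid(sc, atid) == ctx);
 *		free_atid(sc, atid);
 *	}
 */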
2742 queue_tid_release(struct adapter *sc, int tid)
2745 CXGBE_UNIMPLEMENTED("deferred tid release");
2749 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
2752 struct cpl_tid_release *req;
2754 wr = alloc_wrqe(sizeof(*req), ctrlq);
2756 queue_tid_release(sc, tid); /* defer */
2761 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
2767 t4_range_cmp(const void *a, const void *b)
2769 return ((const struct t4_range *)a)->start -
2770 ((const struct t4_range *)b)->start;
2774 * Verify that the memory range specified by the addr/len pair is valid within
2775 * the card's address space.
2778 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2780 struct t4_range mem_ranges[4], *r, *next;
2781 uint32_t em, addr_len;
2782 int i, n, remaining;
2784 /* Memory can only be accessed in naturally aligned 4 byte units */
2785 if (addr & 3 || len & 3 || len <= 0)
2788 /* Enabled memories */
2789 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2793 bzero(r, sizeof(mem_ranges));
2794 if (em & F_EDRAM0_ENABLE) {
2795 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2796 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2798 r->start = G_EDRAM0_BASE(addr_len) << 20;
2799 if (addr >= r->start &&
2800 addr + len <= r->start + r->size)
2806 if (em & F_EDRAM1_ENABLE) {
2807 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2808 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2810 r->start = G_EDRAM1_BASE(addr_len) << 20;
2811 if (addr >= r->start &&
2812 addr + len <= r->start + r->size)
2818 if (em & F_EXT_MEM_ENABLE) {
2819 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2820 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2822 r->start = G_EXT_MEM_BASE(addr_len) << 20;
2823 if (addr >= r->start &&
2824 addr + len <= r->start + r->size)
2830 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2831 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2832 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2834 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2835 if (addr >= r->start &&
2836 addr + len <= r->start + r->size)
2842 MPASS(n <= nitems(mem_ranges));
2845 /* Sort and merge the ranges. */
2846 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2848 /* Start from index 0 and examine the next n - 1 entries. */
2850 for (remaining = n - 1; remaining > 0; remaining--, r++) {
2852 MPASS(r->size > 0); /* r is a valid entry. */
2854 MPASS(next->size > 0); /* and so is the next one. */
2856 while (r->start + r->size >= next->start) {
2857 /* Merge the next one into the current entry. */
2858 r->size = max(r->start + r->size,
2859 next->start + next->size) - r->start;
2860 n--; /* One fewer entry in total. */
2861 if (--remaining == 0)
2862 goto done; /* short circuit */
2865 if (next != r + 1) {
2867 * Some entries were merged into r and next
2868 * points to the first valid entry that couldn't be merged.
2871 MPASS(next->size > 0); /* must be valid */
2872 memcpy(r + 1, next, remaining * sizeof(*r));
2875 * This is so that the size assertions in the
2876 * next iteration of the loop do the right
2877 * thing for entries that were pulled up and are no longer valid.
2880 MPASS(n < nitems(mem_ranges));
2881 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2882 sizeof(struct t4_range));
2887 /* Done merging the ranges. */
2890 for (i = 0; i < n; i++, r++) {
2891 if (addr >= r->start &&
2892 addr + len <= r->start + r->size)
2901 fwmtype_to_hwmtype(int mtype)
2905 case FW_MEMTYPE_EDC0:
2907 case FW_MEMTYPE_EDC1:
2909 case FW_MEMTYPE_EXTMEM:
2911 case FW_MEMTYPE_EXTMEM1:
2914 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2919 * Verify that the memory range specified by the memtype/offset/len pair is
2920 * valid and lies entirely within the memtype specified. The global address of
2921 * the start of the range is returned in addr.
2924 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2927 uint32_t em, addr_len, maddr;
2929 /* Memory can only be accessed in naturally aligned 4 byte units */
2930 if (off & 3 || len & 3 || len == 0)
2933 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2934 switch (fwmtype_to_hwmtype(mtype)) {
2936 if (!(em & F_EDRAM0_ENABLE))
2938 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2939 maddr = G_EDRAM0_BASE(addr_len) << 20;
2942 if (!(em & F_EDRAM1_ENABLE))
2944 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2945 maddr = G_EDRAM1_BASE(addr_len) << 20;
2948 if (!(em & F_EXT_MEM_ENABLE))
2950 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2951 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2954 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2956 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2957 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2963 *addr = maddr + off; /* global address */
2964 return (validate_mem_range(sc, *addr, len));
2968 fixup_devlog_params(struct adapter *sc)
2970 struct devlog_params *dparams = &sc->params.devlog;
2973 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2974 dparams->size, &dparams->addr);
2980 update_nirq(struct intrs_and_queues *iaq, int nports)
2982 int extra = T4_EXTRA_INTR;
2985 iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
2986 iaq->nirq += nports * (iaq->num_vis - 1) *
2987 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
2988 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
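/*
 * Worked example with hypothetical counts: nports = 2, nrxq = 4,
 * nofldrxq = 2, num_vis = 2, nrxq_vi = 1, nnmrxq_vi = 0, nofldrxq_vi = 0:
 *
 *	nirq = T4_EXTRA_INTR + 2 * (4 + 2) + 2 * 1 * max(1, 0) + 0
 *	     = T4_EXTRA_INTR + 14
 */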
2992 * Adjust requirements to fit the number of interrupts available.
2995 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
2999 const int nports = sc->params.nports;
3004 bzero(iaq, sizeof(*iaq));
3005 iaq->intr_type = itype;
3006 iaq->num_vis = t4_num_vis;
3007 iaq->ntxq = t4_ntxq;
3008 iaq->ntxq_vi = t4_ntxq_vi;
3009 iaq->nrxq = t4_nrxq;
3010 iaq->nrxq_vi = t4_nrxq_vi;
3011 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3012 if (is_offload(sc) || is_ethoffload(sc)) {
3013 iaq->nofldtxq = t4_nofldtxq;
3014 iaq->nofldtxq_vi = t4_nofldtxq_vi;
3018 if (is_offload(sc)) {
3019 iaq->nofldrxq = t4_nofldrxq;
3020 iaq->nofldrxq_vi = t4_nofldrxq_vi;
3024 iaq->nnmtxq_vi = t4_nnmtxq_vi;
3025 iaq->nnmrxq_vi = t4_nnmrxq_vi;
3028 update_nirq(iaq, nports);
3029 if (iaq->nirq <= navail &&
3030 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3032 * This is the normal case -- there are enough interrupts for
3039 * If extra VIs have been configured, try reducing their count and see if that helps.
3042 while (iaq->num_vis > 1) {
3044 update_nirq(iaq, nports);
3045 if (iaq->nirq <= navail &&
3046 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3047 device_printf(sc->dev, "virtual interfaces per port "
3048 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
3049 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
3050 "itype %d, navail %u, nirq %d.\n",
3051 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3052 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3053 itype, navail, iaq->nirq);
3059 * Extra VIs will not be created. Log a message if they were requested.
3061 MPASS(iaq->num_vis == 1);
3062 iaq->ntxq_vi = iaq->nrxq_vi = 0;
3063 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3064 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3065 if (iaq->num_vis != t4_num_vis) {
3066 device_printf(sc->dev, "extra virtual interfaces disabled. "
3067 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3068 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
3069 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3070 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3074 * Keep reducing the number of NIC rx queues to the next lower power of
3075 * 2 (for even RSS distribution) and halving the TOE rx queues, and see if that helps.
3079 if (iaq->nrxq > 1) {
3082 } while (!powerof2(iaq->nrxq));
3084 if (iaq->nofldrxq > 1)
3085 iaq->nofldrxq >>= 1;
3087 old_nirq = iaq->nirq;
3088 update_nirq(iaq, nports);
3089 if (iaq->nirq <= navail &&
3090 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3091 device_printf(sc->dev, "running with reduced number of "
3092 "rx queues because of shortage of interrupts. "
3093 "nrxq=%u, nofldrxq=%u. "
3094 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3095 iaq->nofldrxq, itype, navail, iaq->nirq);
3098 } while (old_nirq != iaq->nirq);
3100 /* One interrupt for everything. Ugh. */
3101 device_printf(sc->dev, "running with minimal number of queues. "
3102 "itype %d, navail %u.\n", itype, navail);
3104 MPASS(iaq->nrxq == 1);
3106 if (iaq->nofldrxq > 1)
3109 MPASS(iaq->num_vis > 0);
3110 if (iaq->num_vis > 1) {
3111 MPASS(iaq->nrxq_vi > 0);
3112 MPASS(iaq->ntxq_vi > 0);
3114 MPASS(iaq->nirq > 0);
3115 MPASS(iaq->nrxq > 0);
3116 MPASS(iaq->ntxq > 0);
3117 if (itype == INTR_MSI) {
3118 MPASS(powerof2(iaq->nirq));
3123 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3125 int rc, itype, navail, nalloc;
3127 for (itype = INTR_MSIX; itype; itype >>= 1) {
3129 if ((itype & t4_intr_types) == 0)
3130 continue; /* not allowed */
3132 if (itype == INTR_MSIX)
3133 navail = pci_msix_count(sc->dev);
3134 else if (itype == INTR_MSI)
3135 navail = pci_msi_count(sc->dev);
3142 calculate_iaq(sc, iaq, itype, navail);
3145 if (itype == INTR_MSIX)
3146 rc = pci_alloc_msix(sc->dev, &nalloc);
3147 else if (itype == INTR_MSI)
3148 rc = pci_alloc_msi(sc->dev, &nalloc);
3150 if (rc == 0 && nalloc > 0) {
3151 if (nalloc == iaq->nirq)
3155 * Didn't get the number requested. Use whatever number
3156 * the kernel is willing to allocate.
3158 device_printf(sc->dev, "fewer vectors than requested, "
3159 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3160 itype, iaq->nirq, nalloc);
3161 pci_release_msi(sc->dev);
3166 device_printf(sc->dev,
3167 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
3168 itype, rc, iaq->nirq, nalloc);
3171 device_printf(sc->dev,
3172 "failed to find a usable interrupt type. "
3173 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
3174 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3179 #define FW_VERSION(chip) ( \
3180 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3181 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3182 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3183 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3184 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
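/*
 * Example (hypothetical component values): a 1.20.8.0 firmware packs into
 * a single 32-bit word as
 *
 *	V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
 *	    V_FW_HDR_FW_VER_MICRO(8) | V_FW_HDR_FW_VER_BUILD(0)
 *
 * which is 0x01140800 assuming the usual 8-bit fields with the major
 * number in the most significant byte.
 */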
3190 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
3194 .kld_name = "t4fw_cfg",
3195 .fw_mod_name = "t4fw",
3197 .chip = FW_HDR_CHIP_T4,
3198 .fw_ver = htobe32(FW_VERSION(T4)),
3199 .intfver_nic = FW_INTFVER(T4, NIC),
3200 .intfver_vnic = FW_INTFVER(T4, VNIC),
3201 .intfver_ofld = FW_INTFVER(T4, OFLD),
3202 .intfver_ri = FW_INTFVER(T4, RI),
3203 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3204 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3205 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3206 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3210 .kld_name = "t5fw_cfg",
3211 .fw_mod_name = "t5fw",
3213 .chip = FW_HDR_CHIP_T5,
3214 .fw_ver = htobe32(FW_VERSION(T5)),
3215 .intfver_nic = FW_INTFVER(T5, NIC),
3216 .intfver_vnic = FW_INTFVER(T5, VNIC),
3217 .intfver_ofld = FW_INTFVER(T5, OFLD),
3218 .intfver_ri = FW_INTFVER(T5, RI),
3219 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3220 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3221 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3222 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3226 .kld_name = "t6fw_cfg",
3227 .fw_mod_name = "t6fw",
3229 .chip = FW_HDR_CHIP_T6,
3230 .fw_ver = htobe32(FW_VERSION(T6)),
3231 .intfver_nic = FW_INTFVER(T6, NIC),
3232 .intfver_vnic = FW_INTFVER(T6, VNIC),
3233 .intfver_ofld = FW_INTFVER(T6, OFLD),
3234 .intfver_ri = FW_INTFVER(T6, RI),
3235 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3236 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3237 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3238 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3243 static struct fw_info *
3244 find_fw_info(int chip)
3248 for (i = 0; i < nitems(fw_info); i++) {
3249 if (fw_info[i].chip == chip)
3250 return (&fw_info[i]);
3256 * Is the given firmware API compatible with the one the driver was compiled against?
3260 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3263 /* short circuit if it's the exact same firmware version */
3264 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3268 * XXX: Is this too conservative? Perhaps I should limit this to the
3269 * features that are supported in the driver.
3271 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3272 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3273 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3274 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3282 * The firmware in the KLD is usable, but should it be installed? This routine
3283 * explains itself in detail if it indicates the KLD firmware should be installed.
3287 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
3291 if (!card_fw_usable) {
3292 reason = "incompatible or unusable";
3297 reason = "older than the version bundled with this driver";
3301 if (t4_fw_install == 2 && k != c) {
3302 reason = "different than the version bundled with this driver";
3309 if (t4_fw_install == 0) {
3310 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3311 "but the driver is prohibited from installing a different "
3312 "firmware on the card.\n",
3313 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3314 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3319 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3320 "installing firmware %u.%u.%u.%u on card.\n",
3321 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3322 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3323 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3324 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3330 * Establish contact with the firmware and determine if we are the master driver
3331 * or not, and whether we are responsible for chip initialization.
3334 prep_firmware(struct adapter *sc)
3336 const struct firmware *fw = NULL, *default_cfg;
3337 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
3338 enum dev_state state;
3339 struct fw_info *fw_info;
3340 struct fw_hdr *card_fw; /* fw on the card */
3341 const struct fw_hdr *kld_fw; /* fw in the KLD */
3342 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
3345 /* This is the firmware whose headers the driver was compiled against */
3346 fw_info = find_fw_info(chip_id(sc));
3347 if (fw_info == NULL) {
3348 device_printf(sc->dev,
3349 "unable to look up firmware information for chip %d.\n",
3353 drv_fw = &fw_info->fw_hdr;
3356 * The firmware KLD contains many modules. The KLD name is also the
3357 * name of the module that contains the default config file.
3359 default_cfg = firmware_get(fw_info->kld_name);
3361 /* This is the firmware in the KLD */
3362 fw = firmware_get(fw_info->fw_mod_name);
3364 kld_fw = (const void *)fw->data;
3365 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3371 /* Read the header of the firmware on the card */
3372 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3373 rc = -t4_read_flash(sc, FLASH_FW_START,
3374 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
3376 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
3377 if (card_fw->fw_ver == be32toh(0xffffffff)) {
3378 uint32_t d = be32toh(kld_fw->fw_ver);
3380 if (!kld_fw_usable) {
3381 device_printf(sc->dev,
3382 "no firmware on the card and no usable "
3383 "firmware bundled with the driver.\n");
3386 } else if (t4_fw_install == 0) {
3387 device_printf(sc->dev,
3388 "no firmware on the card and the driver "
3389 "is prohibited from installing new "
3395 device_printf(sc->dev, "no firmware on the card, "
3396 "installing firmware %d.%d.%d.%d\n",
3397 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3398 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3399 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize);
3402 device_printf(sc->dev,
3403 "firmware install failed: %d.\n", rc);
3406 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3411 device_printf(sc->dev,
3412 "Unable to read card's firmware header: %d\n", rc);
3416 /* Contact firmware. */
3417 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3418 if (rc < 0 || state == DEV_STATE_ERR) {
3420 device_printf(sc->dev,
3421 "failed to connect to the firmware: %d, %d.\n", rc, state);
3426 sc->flags |= MASTER_PF;
3427 else if (state == DEV_STATE_UNINIT) {
3429 * We didn't get to be the master so we definitely won't be
3430 * configuring the chip. It's a bug if someone else hasn't
3431 * configured it already.
3433 device_printf(sc->dev, "couldn't be master(%d), "
3434 "device not already initialized either(%d).\n", rc, state);
3439 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3440 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3442 * Common case: the firmware on the card is an exact match and
3443 * the KLD is an exact match too, or the KLD is
3444 * absent/incompatible. Note that t4_fw_install = 2 is ignored
3445 * here -- use cxgbetool loadfw if you want to reinstall the
3446 * same firmware as the one on the card.
3448 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3449 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3450 be32toh(card_fw->fw_ver))) {
3452 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3454 device_printf(sc->dev,
3455 "failed to install firmware: %d\n", rc);
3459 /* Installed successfully, update the cached header too. */
3460 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3462 need_fw_reset = 0; /* already reset as part of load_fw */
3465 if (!card_fw_usable) {
3468 d = ntohl(drv_fw->fw_ver);
3469 c = ntohl(card_fw->fw_ver);
3470 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3472 device_printf(sc->dev, "Cannot find a usable firmware: "
3473 "fw_install %d, chip state %d, "
3474 "driver compiled with %d.%d.%d.%d, "
3475 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3476 t4_fw_install, state,
3477 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3478 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3479 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3480 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3481 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3482 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3488 if (need_fw_reset &&
3489 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3490 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3491 if (rc != ETIMEDOUT && rc != EIO)
3492 t4_fw_bye(sc, sc->mbox);
3497 rc = get_params__pre_init(sc);
3499 goto done; /* error message displayed already */
3501 /* Partition adapter resources as specified in the config file. */
3502 if (state == DEV_STATE_UNINIT) {
3504 KASSERT(sc->flags & MASTER_PF,
3505 ("%s: trying to change chip settings when not master.",
3508 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3510 goto done; /* error message displayed already */
3512 t4_tweak_chip_settings(sc);
3514 /* get basic stuff going */
3515 rc = -t4_fw_initialize(sc, sc->mbox);
3517 device_printf(sc->dev, "fw init failed: %d.\n", rc);
3521 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3526 free(card_fw, M_CXGBE);
3528 firmware_put(fw, FIRMWARE_UNLOAD);
3529 if (default_cfg != NULL)
3530 firmware_put(default_cfg, FIRMWARE_UNLOAD);
3535 #define FW_PARAM_DEV(param) \
3536 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3537 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3538 #define FW_PARAM_PFVF(param) \
3539 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3540 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
3543 * Partition chip resources for use between various PFs, VFs, etc.
3546 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3547 const char *name_prefix)
3549 const struct firmware *cfg = NULL;
3551 struct fw_caps_config_cmd caps;
3552 uint32_t mtype, moff, finicsum, cfcsum;
3555 * Figure out what configuration file to use. Pick the default config
3556 * file for the card if the user hasn't specified one explicitly.
3558 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3559 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3560 /* Card specific overrides go here. */
3561 if (pci_get_device(sc->dev) == 0x440a)
3562 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3563 if (is_fpga(sc))
3564 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3565 } else if (strncmp(t4_cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0)
3566 goto use_built_in_config; /* go straight to config. */
3569 * We need to load another module if the profile is anything except
3570 * "default" or "flash".
3572 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3573 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3576 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3577 cfg = firmware_get(s);
3579 if (default_cfg != NULL) {
3580 device_printf(sc->dev,
3581 "unable to load module \"%s\" for "
3582 "configuration profile \"%s\", will use "
3583 "the default config file instead.\n",
3585 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3588 device_printf(sc->dev,
3589 "unable to load module \"%s\" for "
3590 "configuration profile \"%s\", will use "
3591 "the config file on the card's flash "
3592 "instead.\n", s, sc->cfg_file);
3593 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3599 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3600 default_cfg == NULL) {
3601 device_printf(sc->dev,
3602 "default config file not available, will use the config "
3603 "file on the card's flash instead.\n");
3604 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3607 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3609 const uint32_t *cfdata;
3610 uint32_t param, val, addr;
3612 KASSERT(cfg != NULL || default_cfg != NULL,
3613 ("%s: no config to upload", __func__));
3616 * Ask the firmware where it wants us to upload the config file.
3618 param = FW_PARAM_DEV(CF);
3619 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3621 /* No support for config file? Shouldn't happen. */
3622 device_printf(sc->dev,
3623 "failed to query config file location: %d.\n", rc);
3626 mtype = G_FW_PARAMS_PARAM_Y(val);
3627 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3630 * XXX: sheer laziness. We deliberately added 4 bytes of
3631 * useless stuffing/comments at the end of the config file so
3632 * it's ok to simply throw away the last remaining bytes when
3633 * the config file is not an exact multiple of 4. This also
3634 * helps with the validate_mt_off_len check.
3637 cflen = cfg->datasize & ~3;
3640 cflen = default_cfg->datasize & ~3;
3641 cfdata = default_cfg->data;
3644 if (cflen > FLASH_CFG_MAX_SIZE) {
3645 device_printf(sc->dev,
3646 "config file too long (%d, max allowed is %d). "
3647 "Will try to use the config on the card, if any.\n",
3648 cflen, FLASH_CFG_MAX_SIZE);
3649 goto use_config_on_flash;
3652 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3654 device_printf(sc->dev,
3655 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
3656 "Will try to use the config on the card, if any.\n",
3657 __func__, mtype, moff, cflen, rc);
3658 goto use_config_on_flash;
3660 write_via_memwin(sc, 2, addr, cfdata, cflen);
3662 use_config_on_flash:
3663 mtype = FW_MEMTYPE_FLASH;
3664 moff = t4_flash_cfg_addr(sc);
3667 bzero(&caps, sizeof(caps));
3668 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3669 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3670 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3671 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3672 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3673 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3675 device_printf(sc->dev,
3676 "failed to pre-process config file: %d "
3677 "(mtype %d, moff 0x%x). Will reset the firmware and retry "
3678 "with the built-in configuration.\n", rc, mtype, moff);
3680 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
3682 device_printf(sc->dev,
3683 "firmware reset failed: %d.\n", rc);
3684 if (rc != ETIMEDOUT && rc != EIO) {
3685 t4_fw_bye(sc, sc->mbox);
3686 sc->flags &= ~FW_OK;
3690 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "built-in");
3691 use_built_in_config:
3692 bzero(&caps, sizeof(caps));
3693 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3694 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3695 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3696 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3698 device_printf(sc->dev,
3699 "built-in configuration failed: %d.\n", rc);
3704 finicsum = be32toh(caps.finicsum);
3705 cfcsum = be32toh(caps.cfcsum);
3706 if (finicsum != cfcsum) {
3707 device_printf(sc->dev,
3708 "WARNING: config file checksum mismatch: %08x %08x\n",
3711 sc->cfcsum = cfcsum;
3713 #define LIMIT_CAPS(x) do { \
3714 caps.x &= htobe16(t4_##x##_allowed); \
3718 * Let the firmware know what features will (not) be used so it can tune
3719 * things accordingly.
3721 LIMIT_CAPS(nbmcaps);
3722 LIMIT_CAPS(linkcaps);
3723 LIMIT_CAPS(switchcaps);
3724 LIMIT_CAPS(niccaps);
3725 LIMIT_CAPS(toecaps);
3726 LIMIT_CAPS(rdmacaps);
3727 LIMIT_CAPS(cryptocaps);
3728 LIMIT_CAPS(iscsicaps);
3729 LIMIT_CAPS(fcoecaps);
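/*
 * For instance, with the tunable t4_toecaps_allowed set to 0,
 * LIMIT_CAPS(toecaps) above clears caps.toecaps entirely and the
 * write-back of the caps command below tells the firmware that TOE will
 * not be used on this adapter.
 */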
3732 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
3734 * TOE and hashfilters are mutually exclusive. It is a config
3735 * file or firmware bug if both are reported as available. Try
3736 * to cope with the situation in non-debug builds by disabling TOE.
3739 MPASS(caps.toecaps == 0);
3746 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3747 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3748 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3749 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3751 device_printf(sc->dev,
3752 "failed to process config file: %d.\n", rc);
3756 firmware_put(cfg, FIRMWARE_UNLOAD);
3761 * Retrieve parameters that are needed (or nice to have) very early.
3764 get_params__pre_init(struct adapter *sc)
3767 uint32_t param[2], val[2];
3769 t4_get_version_info(sc);
3771 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3772 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3773 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3774 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3775 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3777 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3778 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3779 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3780 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3781 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3783 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3784 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3785 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3786 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3787 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3789 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3790 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3791 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3792 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3793 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3795 param[0] = FW_PARAM_DEV(PORTVEC);
3796 param[1] = FW_PARAM_DEV(CCLK);
3797 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3799 device_printf(sc->dev,
3800 "failed to query parameters (pre_init): %d.\n", rc);
3804 sc->params.portvec = val[0];
3805 sc->params.nports = bitcount32(val[0]);
3806 sc->params.vpd.cclk = val[1];
3808 /* Read device log parameters. */
3809 rc = -t4_init_devlog_params(sc, 1);
3811 fixup_devlog_params(sc);
3813 device_printf(sc->dev,
3814 "failed to get devlog parameters: %d.\n", rc);
3815 rc = 0; /* devlog isn't critical for device operation */
3822 * Retrieve various parameters that are of interest to the driver. The device
3823 * has been initialized by the firmware at this point.
3826 get_params__post_init(struct adapter *sc)
3829 uint32_t param[7], val[7];
3830 struct fw_caps_config_cmd caps;
3832 param[0] = FW_PARAM_PFVF(IQFLINT_START);
3833 param[1] = FW_PARAM_PFVF(EQ_START);
3834 param[2] = FW_PARAM_PFVF(FILTER_START);
3835 param[3] = FW_PARAM_PFVF(FILTER_END);
3836 param[4] = FW_PARAM_PFVF(L2T_START);
3837 param[5] = FW_PARAM_PFVF(L2T_END);
3838 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3839 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
3840 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
3841 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
3843 device_printf(sc->dev,
3844 "failed to query parameters (post_init): %d.\n", rc);
3848 sc->sge.iq_start = val[0];
3849 sc->sge.eq_start = val[1];
3850 if (val[3] > val[2]) {
3851 sc->tids.ftid_base = val[2];
3852 sc->tids.ftid_end = val[3];
3853 sc->tids.nftids = val[3] - val[2] + 1;
3855 sc->vres.l2t.start = val[4];
3856 sc->vres.l2t.size = val[5] - val[4] + 1;
3857 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3858 ("%s: L2 table size (%u) larger than expected (%u)",
3859 __func__, sc->vres.l2t.size, L2T_SIZE));
3860 sc->params.core_vdd = val[6];
3862 if (chip_id(sc) >= CHELSIO_T6) {
3865 if (sc->params.fw_vers >=
3866 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
3867 V_FW_HDR_FW_VER_MICRO(1) | V_FW_HDR_FW_VER_BUILD(0))) {
3869 * Note that the code to enable the region should run
3870 * before t4_fw_initialize and not here. This is just a
3871 * reminder to add said code.
3873 device_printf(sc->dev,
3874 "hpfilter region not enabled.\n");
3878 sc->tids.tid_base = t4_read_reg(sc,
3879 A_LE_DB_ACTIVE_TABLE_START_INDEX);
3881 param[0] = FW_PARAM_PFVF(HPFILTER_START);
3882 param[1] = FW_PARAM_PFVF(HPFILTER_END);
3883 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3885 device_printf(sc->dev,
3886 "failed to query hpfilter parameters: %d.\n", rc);
3889 if ((int)val[1] > (int)val[0]) {
3890 sc->tids.hpftid_base = val[0];
3891 sc->tids.hpftid_end = val[1];
3892 sc->tids.nhpftids = val[1] - val[0] + 1;
3895 * These should go off if the layout changes and the
3896 * driver needs to catch up.
3898 MPASS(sc->tids.hpftid_base == 0);
3899 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
3904 * MPSBGMAP is queried separately because only recent firmwares support
3905 * it as a parameter and we don't want the compound query above to fail
3906 * on older firmwares.
3908 param[0] = FW_PARAM_DEV(MPSBGMAP);
3910 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3912 sc->params.mps_bg_map = val[0];
3914 sc->params.mps_bg_map = 0;
3917 * Determine whether the firmware supports the filter2 work request.
3918 * This is queried separately for the same reason as MPSBGMAP above.
3920 param[0] = FW_PARAM_DEV(FILTER2_WR);
3922 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3924 sc->params.filter2_wr_support = val[0] != 0;
3926 sc->params.filter2_wr_support = 0;
3928 /* get capabilities */
3929 bzero(&caps, sizeof(caps));
3930 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3931 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3932 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3933 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3935 device_printf(sc->dev,
3936 "failed to get card capabilities: %d.\n", rc);
3940 #define READ_CAPS(x) do { \
3941 sc->x = htobe16(caps.x); \
3944 READ_CAPS(linkcaps);
3945 READ_CAPS(switchcaps);
3948 READ_CAPS(rdmacaps);
3949 READ_CAPS(cryptocaps);
3950 READ_CAPS(iscsicaps);
3951 READ_CAPS(fcoecaps);
3953 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
3954 MPASS(chip_id(sc) > CHELSIO_T4);
3955 MPASS(sc->toecaps == 0);
3958 param[0] = FW_PARAM_DEV(NTID);
3959 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3961 device_printf(sc->dev,
3962 "failed to query HASHFILTER parameters: %d.\n", rc);
3965 sc->tids.ntids = val[0];
3966 if (sc->params.fw_vers <
3967 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
3968 V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) {
3969 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
3970 sc->tids.ntids -= sc->tids.nhpftids;
3972 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3973 sc->params.hash_filter = 1;
3975 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3976 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3977 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3978 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3979 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3981 device_printf(sc->dev,
3982 "failed to query NIC parameters: %d.\n", rc);
3985 if (val[1] > val[0]) {
3986 sc->tids.etid_base = val[0];
3987 sc->tids.etid_end = val[1];
3988 sc->tids.netids = val[1] - val[0] + 1;
3989 sc->params.eo_wr_cred = val[2];
3990 sc->params.ethoffload = 1;
3994 /* query offload-related parameters */
3995 param[0] = FW_PARAM_DEV(NTID);
3996 param[1] = FW_PARAM_PFVF(SERVER_START);
3997 param[2] = FW_PARAM_PFVF(SERVER_END);
3998 param[3] = FW_PARAM_PFVF(TDDP_START);
3999 param[4] = FW_PARAM_PFVF(TDDP_END);
4000 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4001 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4003 device_printf(sc->dev,
4004 "failed to query TOE parameters: %d.\n", rc);
4007 sc->tids.ntids = val[0];
4008 if (sc->params.fw_vers <
4009 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
4010 V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) {
4011 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4012 sc->tids.ntids -= sc->tids.nhpftids;
4014 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4015 if (val[2] > val[1]) {
4016 sc->tids.stid_base = val[1];
4017 sc->tids.nstids = val[2] - val[1] + 1;
4019 sc->vres.ddp.start = val[3];
4020 sc->vres.ddp.size = val[4] - val[3] + 1;
4021 sc->params.ofldq_wr_cred = val[5];
4022 sc->params.offload = 1;
4025 * The firmware attempts memfree TOE configuration for -SO cards
4026 * and will report toecaps=0 if it runs out of resources (this
4027 * depends on the config file). It may not report 0 for other
4028 * capabilities dependent on the TOE in this case. Set them to
4029 * 0 here so that the driver doesn't bother tracking resources
4030 * that will never be used.
4036 param[0] = FW_PARAM_PFVF(STAG_START);
4037 param[1] = FW_PARAM_PFVF(STAG_END);
4038 param[2] = FW_PARAM_PFVF(RQ_START);
4039 param[3] = FW_PARAM_PFVF(RQ_END);
4040 param[4] = FW_PARAM_PFVF(PBL_START);
4041 param[5] = FW_PARAM_PFVF(PBL_END);
4042 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4044 device_printf(sc->dev,
4045 "failed to query RDMA parameters(1): %d.\n", rc);
4048 sc->vres.stag.start = val[0];
4049 sc->vres.stag.size = val[1] - val[0] + 1;
4050 sc->vres.rq.start = val[2];
4051 sc->vres.rq.size = val[3] - val[2] + 1;
4052 sc->vres.pbl.start = val[4];
4053 sc->vres.pbl.size = val[5] - val[4] + 1;
4055 param[0] = FW_PARAM_PFVF(SQRQ_START);
4056 param[1] = FW_PARAM_PFVF(SQRQ_END);
4057 param[2] = FW_PARAM_PFVF(CQ_START);
4058 param[3] = FW_PARAM_PFVF(CQ_END);
4059 param[4] = FW_PARAM_PFVF(OCQ_START);
4060 param[5] = FW_PARAM_PFVF(OCQ_END);
4061 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4063 device_printf(sc->dev,
4064 "failed to query RDMA parameters(2): %d.\n", rc);
4067 sc->vres.qp.start = val[0];
4068 sc->vres.qp.size = val[1] - val[0] + 1;
4069 sc->vres.cq.start = val[2];
4070 sc->vres.cq.size = val[3] - val[2] + 1;
4071 sc->vres.ocq.start = val[4];
4072 sc->vres.ocq.size = val[5] - val[4] + 1;
4074 param[0] = FW_PARAM_PFVF(SRQ_START);
4075 param[1] = FW_PARAM_PFVF(SRQ_END);
4076 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4077 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4078 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4080 device_printf(sc->dev,
4081 "failed to query RDMA parameters(3): %d.\n", rc);
4084 sc->vres.srq.start = val[0];
4085 sc->vres.srq.size = val[1] - val[0] + 1;
4086 sc->params.max_ordird_qp = val[2];
4087 sc->params.max_ird_adapter = val[3];
4089 if (sc->iscsicaps) {
4090 param[0] = FW_PARAM_PFVF(ISCSI_START);
4091 param[1] = FW_PARAM_PFVF(ISCSI_END);
4092 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4094 device_printf(sc->dev,
4095 "failed to query iSCSI parameters: %d.\n", rc);
4098 sc->vres.iscsi.start = val[0];
4099 sc->vres.iscsi.size = val[1] - val[0] + 1;
4101 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4102 param[0] = FW_PARAM_PFVF(TLS_START);
4103 param[1] = FW_PARAM_PFVF(TLS_END);
4104 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4106 device_printf(sc->dev,
4107 "failed to query TLS parameters: %d.\n", rc);
4110 sc->vres.key.start = val[0];
4111 sc->vres.key.size = val[1] - val[0] + 1;
4114 t4_init_sge_params(sc);
4117 * We've got the params we wanted to query via the firmware. Now grab
4118 * some others directly from the chip.
4120 rc = t4_read_chip_settings(sc);
4126 set_params__post_init(struct adapter *sc)
4128 uint32_t param, val;
4133 /* ask for encapsulated CPLs */
4134 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4136 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4140 * Override the TOE timers with user-provided tunables. This is not the
4141 * recommended way to change the timers (the firmware config file is), so
4142 * these tunables are not documented.
4144 * All the timer tunables are in microseconds.
4146 if (t4_toe_keepalive_idle != 0) {
4147 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4148 v &= M_KEEPALIVEIDLE;
4149 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4150 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4152 if (t4_toe_keepalive_interval != 0) {
4153 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4154 v &= M_KEEPALIVEINTVL;
4155 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4156 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4158 if (t4_toe_keepalive_count != 0) {
4159 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4160 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4161 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4162 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4163 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4165 if (t4_toe_rexmt_min != 0) {
4166 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4168 t4_set_reg_field(sc, A_TP_RXT_MIN,
4169 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4171 if (t4_toe_rexmt_max != 0) {
4172 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4174 t4_set_reg_field(sc, A_TP_RXT_MAX,
4175 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4177 if (t4_toe_rexmt_count != 0) {
4178 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4179 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4180 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4181 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4182 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
4184 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4185 if (t4_toe_rexmt_backoff[i] != -1) {
4186 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4187 shift = (i & 3) << 3;
4188 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4189 M_TIMERBACKOFFINDEX0 << shift, v << shift);
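/*
 * Worked example for the register arithmetic above: backoff index i = 5
 * lands in byte 1 of the second backoff register because
 *
 *	shift = (5 & 3) << 3 = 8
 *	reg   = A_TP_TCP_BACKOFF_REG0 + (5 & ~3) = A_TP_TCP_BACKOFF_REG0 + 4
 *
 * i.e. each 32-bit register holds four 8-bit backoff indices.
 */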
4196 #undef FW_PARAM_PFVF
4200 t4_set_desc(struct adapter *sc)
4203 struct adapter_params *p = &sc->params;
4205 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4207 device_set_desc_copy(sc->dev, buf);
4211 ifmedia_add4(struct ifmedia *ifm, int m)
4214 ifmedia_add(ifm, m, 0, NULL);
4215 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4216 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4217 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
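/*
 * For example, ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_25G_SR)
 * registers the plain media word plus its TXPAUSE, RXPAUSE, and
 * TXPAUSE | RXPAUSE variants so that every flow-control combination is
 * selectable via ifconfig.
 */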
4221 set_current_media(struct port_info *pi, struct ifmedia *ifm)
4223 struct link_config *lc;
4226 PORT_LOCK_ASSERT_OWNED(pi);
4228 /* Leave current media alone if it's already set to IFM_NONE. */
4229 if (ifm->ifm_cur != NULL &&
4230 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4234 if (lc->requested_aneg == AUTONEG_ENABLE &&
4235 lc->supported & FW_PORT_CAP_ANEG) {
4236 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4239 mword = IFM_ETHER | IFM_FDX;
4240 if (lc->requested_fc & PAUSE_TX)
4241 mword |= IFM_ETH_TXPAUSE;
4242 if (lc->requested_fc & PAUSE_RX)
4243 mword |= IFM_ETH_RXPAUSE;
4244 mword |= port_mword(pi, speed_to_fwspeed(lc->requested_speed));
4245 ifmedia_set(ifm, mword);
4249 build_medialist(struct port_info *pi, struct ifmedia *ifm)
4252 int unknown, mword, bit;
4253 struct link_config *lc;
4255 PORT_LOCK_ASSERT_OWNED(pi);
4257 if (pi->flags & FIXED_IFMEDIA)
4261 * First set up all the requested_ fields so that they comply with what's
4262 * supported by the port + transceiver. Note that this clobbers any
4263 * user preferences set via sysctl_pause_settings or sysctl_autoneg.
4268 * Now (re)build the ifmedia list.
4270 ifmedia_removeall(ifm);
4272 ss = G_FW_PORT_CAP_SPEED(lc->supported); /* Supported Speeds */
4273 if (__predict_false(ss == 0)) { /* not supposed to happen. */
4276 MPASS(LIST_EMPTY(&ifm->ifm_list));
4277 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
4278 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
4283 for (bit = 0; bit < fls(ss); bit++) {
4285 MPASS(speed & M_FW_PORT_CAP_SPEED);
4287 mword = port_mword(pi, speed);
4288 if (mword == IFM_NONE) {
4290 } else if (mword == IFM_UNKNOWN)
4293 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
4296 if (unknown > 0) /* Add one unknown for all unknown media types. */
4297 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
4298 if (lc->supported & FW_PORT_CAP_ANEG)
4299 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
4301 set_current_media(pi, ifm);
4305 * Update all the requested_* fields in the link config to something valid (and reasonable).
4309 init_l1cfg(struct port_info *pi)
4311 struct link_config *lc = &pi->link_cfg;
4313 PORT_LOCK_ASSERT_OWNED(pi);
4316 lc->requested_speed = port_top_speed(pi) * 1000;
4318 if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) {
4319 lc->requested_aneg = AUTONEG_ENABLE;
4321 lc->requested_aneg = AUTONEG_DISABLE;
4324 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX);
4327 if (t4_fec & FEC_RS && lc->supported & FW_PORT_CAP_FEC_RS) {
4328 lc->requested_fec = FEC_RS;
4329 } else if (t4_fec & FEC_BASER_RS &&
4330 lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
4331 lc->requested_fec = FEC_BASER_RS;
4333 lc->requested_fec = 0;
4336 /* Use the suggested value provided by the firmware in acaps */
4337 if (lc->advertising & FW_PORT_CAP_FEC_RS &&
4338 lc->supported & FW_PORT_CAP_FEC_RS) {
4339 lc->requested_fec = FEC_RS;
4340 } else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS &&
4341 lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
4342 lc->requested_fec = FEC_BASER_RS;
4344 lc->requested_fec = 0;
4350 * Apply the settings in requested_* to the hardware. The parameters are
4351 * expected to be sane.
4354 apply_l1cfg(struct port_info *pi)
4356 struct adapter *sc = pi->adapter;
4357 struct link_config *lc = &pi->link_cfg;
4362 ASSERT_SYNCHRONIZED_OP(sc);
4363 PORT_LOCK_ASSERT_OWNED(pi);
4365 if (lc->requested_aneg == AUTONEG_ENABLE)
4366 MPASS(lc->supported & FW_PORT_CAP_ANEG);
4367 if (lc->requested_fc & PAUSE_TX)
4368 MPASS(lc->supported & FW_PORT_CAP_FC_TX);
4369 if (lc->requested_fc & PAUSE_RX)
4370 MPASS(lc->supported & FW_PORT_CAP_FC_RX);
4371 if (lc->requested_fec == FEC_RS)
4372 MPASS(lc->supported & FW_PORT_CAP_FEC_RS);
4373 if (lc->requested_fec == FEC_BASER_RS)
4374 MPASS(lc->supported & FW_PORT_CAP_FEC_BASER_RS);
4375 fwspeed = speed_to_fwspeed(lc->requested_speed);
4376 MPASS(fwspeed != 0);
4377 MPASS(lc->supported & fwspeed);
4379 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
4381 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
4383 lc->fc = lc->requested_fc;
4384 lc->fec = lc->requested_fec;
4389 #define FW_MAC_EXACT_CHUNK 7
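/*
 * 7 is assumed to be the number of exact-match MAC addresses that fit in
 * one firmware mailbox command; update_mac_settings() below batches the
 * multicast list into chunks of this size before calling
 * t4_alloc_mac_filt().
 */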
4392 * Program the port's XGMAC based on parameters in ifnet. The caller also
4393 * indicates which parameters should be programmed (the rest are left alone).
4396 update_mac_settings(struct ifnet *ifp, int flags)
4399 struct vi_info *vi = ifp->if_softc;
4400 struct port_info *pi = vi->pi;
4401 struct adapter *sc = pi->adapter;
4402 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
4404 ASSERT_SYNCHRONIZED_OP(sc);
4405 KASSERT(flags, ("%s: not told what to update.", __func__));
4407 if (flags & XGMAC_MTU)
4410 if (flags & XGMAC_PROMISC)
4411 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
4413 if (flags & XGMAC_ALLMULTI)
4414 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
4416 if (flags & XGMAC_VLANEX)
4417 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
4419 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
4420 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
4421 allmulti, 1, vlanex, false);
4423 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
4429 if (flags & XGMAC_UCADDR) {
4430 uint8_t ucaddr[ETHER_ADDR_LEN];
4432 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
4433 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
4434 ucaddr, true, true);
4437 if_printf(ifp, "change_mac failed: %d\n", rc);
4440 vi->xact_addr_filt = rc;
4445 if (flags & XGMAC_MCADDRS) {
4446 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
4449 struct ifmultiaddr *ifma;
4452 if_maddr_rlock(ifp);
4453 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4454 if (ifma->ifma_addr->sa_family != AF_LINK)
4457 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
4458 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
4461 if (i == FW_MAC_EXACT_CHUNK) {
4462 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
4463 del, i, mcaddr, NULL, &hash, 0);
4466 for (j = 0; j < i; j++) {
4468 "failed to add mc address"
4470 "%02x:%02x:%02x rc=%d\n",
4471 mcaddr[j][0], mcaddr[j][1],
4472 mcaddr[j][2], mcaddr[j][3],
4473 mcaddr[j][4], mcaddr[j][5],
4483 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
4484 mcaddr, NULL, &hash, 0);
4487 for (j = 0; j < i; j++) {
4489 "failed to add mc address"
4491 "%02x:%02x:%02x rc=%d\n",
4492 mcaddr[j][0], mcaddr[j][1],
4493 mcaddr[j][2], mcaddr[j][3],
4494 mcaddr[j][4], mcaddr[j][5],
4501 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
4503 if_printf(ifp, "failed to set mc address hash: %d", rc);
4505 if_maddr_runlock(ifp);
4512 * {begin|end}_synchronized_op must be called from the same thread.
4515 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
4521 /* the caller thinks it's ok to sleep, but is it really? */
4522 if (flags & SLEEP_OK)
4523 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
4524 "begin_synchronized_op");
4535 if (vi && IS_DOOMED(vi)) {
4545 if (!(flags & SLEEP_OK)) {
4550 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
4556 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
4559 sc->last_op = wmesg;
4560 sc->last_op_thr = curthread;
4561 sc->last_op_flags = flags;
4565 if (!(flags & HOLD_LOCK) || rc)
4572 * Tell if_ioctl and if_init that the VI is going away. This is a
4573 * special variant of begin_synchronized_op and must be paired with a
4574 * call to end_synchronized_op.
4577 doom_vi(struct adapter *sc, struct vi_info *vi)
4584 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
4587 sc->last_op = "t4detach";
4588 sc->last_op_thr = curthread;
4589 sc->last_op_flags = 0;
4595 * {begin|end}_synchronized_op must be called from the same thread.
4598 end_synchronized_op(struct adapter *sc, int flags)
4601 if (flags & LOCK_HELD)
4602 ADAPTER_LOCK_ASSERT_OWNED(sc);
4606 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
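/*
 * Canonical use of the pair above (see vcxgbe_attach for a live instance;
 * the wmesg "t4xxxx" here is a placeholder):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xxxx");
 *	if (rc != 0)
 *		return (rc);
 *	... access adapter/VI state ...
 *	end_synchronized_op(sc, 0);
 */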
static int
cxgbe_init_synchronized(struct vi_info *vi)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	int rc = 0, i;
	struct sge_txq *txq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(vi->flags & VI_INIT_DONE) &&
	    ((rc = vi_full_init(vi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
	 * if this changes.
	 */
	for_each_txq(vi, i, txq) {
		TXQ_LOCK(txq);
		txq->eq.flags |= EQ_ENABLED;
		TXQ_UNLOCK(txq);
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	PORT_LOCK(pi);
	if (pi->up_vis++ == 0) {
		t4_update_port_info(pi);
		build_medialist(pi, &pi->media);
	}
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	if (pi->nvi > 1 || sc->flags & IS_VF)
		callout_reset(&vi->tick, hz, vi_tick, vi);
	else
		callout_reset(&pi->tick, hz, cxgbe_tick, pi);
	PORT_UNLOCK(pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(vi);

	return (rc);
}
static int
cxgbe_uninit_synchronized(struct vi_info *vi)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	int rc, i;
	struct sge_txq *txq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!(vi->flags & VI_INIT_DONE)) {
		if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			KASSERT(0, ("uninited VI is running"));
			if_printf(ifp, "uninited VI with running ifnet.  "
			    "vi->flags 0x%016lx, if_flags 0x%08x, "
			    "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
			    ifp->if_drv_flags);
		}
		return (0);
	}

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	for_each_txq(vi, i, txq) {
		TXQ_LOCK(txq);
		txq->eq.flags &= ~EQ_ENABLED;
		TXQ_UNLOCK(txq);
	}

	PORT_LOCK(pi);
	if (pi->nvi > 1 || sc->flags & IS_VF)
		callout_stop(&vi->tick);
	else
		callout_stop(&pi->tick);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return (0);
	}
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	pi->up_vis--;
	if (pi->up_vis > 0) {
		PORT_UNLOCK(pi);
		return (0);
	}
	PORT_UNLOCK(pi);

	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->link_cfg.link_down_rc = 255;
	t4_os_link_changed(pi);
	pi->old_link_cfg = pi->link_cfg;

	return (0);
}
/*
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 */
int
t4_setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q, v;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct vi_info *vi;
	struct sge *sge = &sc->sge;
	struct sge_rxq *rxq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_nm_rxq *nm_rxq;
	int nbuckets = rss_getnumbuckets();

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (forwarding_intr_to_fwq(sc))
		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));

	/* Multiple interrupts. */
	if (sc->flags & IS_VF)
		KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));
	else
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

	/* The first one is always error intr on PFs */
	if (!(sc->flags & IS_VF)) {
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;
	}

	/* The second one is always the firmware event queue (first on VFs) */
	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
	if (rc != 0)
		return (rc);
	irq++;
	rid++;

	for_each_port(sc, p) {
		pi = sc->port[p];
		for_each_vi(pi, v, vi) {
			vi->first_intr = rid - 1;

			if (vi->nnmrxq > 0) {
				int n = max(vi->nrxq, vi->nnmrxq);

				rxq = &sge->rxq[vi->first_rxq];
				nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
				for (q = 0; q < n; q++) {
					snprintf(s, sizeof(s), "%x%c%x", p,
					    'a' + v, q);
					if (q < vi->nrxq)
						irq->rxq = rxq++;
					if (q < vi->nnmrxq)
						irq->nm_rxq = nm_rxq++;

					if (irq->nm_rxq != NULL &&
					    irq->rxq == NULL) {
						/* Netmap rx only */
						rc = t4_alloc_irq(sc, irq, rid,
						    t4_nm_intr, irq->nm_rxq, s);
					}
					if (irq->nm_rxq != NULL &&
					    irq->rxq != NULL) {
						/* NIC and Netmap rx */
						rc = t4_alloc_irq(sc, irq, rid,
						    t4_vi_intr, irq, s);
					}
					if (irq->rxq != NULL &&
					    irq->nm_rxq == NULL) {
						/* NIC rx only */
						rc = t4_alloc_irq(sc, irq, rid,
						    t4_intr, irq->rxq, s);
					}
					if (rc != 0)
						return (rc);
					if (q < vi->nrxq) {
						bus_bind_intr(sc->dev, irq->res,
						    rss_getcpu(q % nbuckets));
					}
					irq++;
					rid++;
					vi->nintr++;
				}
			} else {
				for_each_rxq(vi, q, rxq) {
					snprintf(s, sizeof(s), "%x%c%x", p,
					    'a' + v, q);
					rc = t4_alloc_irq(sc, irq, rid,
					    t4_intr, rxq, s);
					if (rc != 0)
						return (rc);
					bus_bind_intr(sc->dev, irq->res,
					    rss_getcpu(q % nbuckets));
					irq++;
					rid++;
					vi->nintr++;
				}
			}
			for_each_ofld_rxq(vi, q, ofld_rxq) {
				snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
				vi->nintr++;
			}
		}
	}
	MPASS(irq == &sc->irq[sc->intr_count]);

	return (0);
}
int
adapter_full_init(struct adapter *sc)
{
	int rc, i;
	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];

	ASSERT_SYNCHRONIZED_OP(sc);
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	MPASS(RSS_KEYSIZE == 40);
	rss_getkey((void *)&raw_rss_key[0]);
	for (i = 0; i < nitems(rss_key); i++) {
		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
	}
	t4_write_rss_key(sc, &rss_key[0], -1, 1);
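	/*
	 * Note on the loop above (illustrative): the 40-byte kernel RSS key
	 * is programmed word-reversed and byte-swapped.  For host-order
	 * words w0..w9 the chip receives htobe32(w9), htobe32(w8), ...,
	 * htobe32(w0), which appears to be the layout the firmware expects.
	 */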
	if (!(sc->flags & IS_VF))
		t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);

	return (rc);
}
int
adapter_full_uninit(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	t4_teardown_adapter_queues(sc);

	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
		taskqueue_free(sc->tq[i]);
		sc->tq[i] = NULL;
	}

	sc->flags &= ~FULL_INIT_DONE;

	return (0);
}
#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
    RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
    RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
    RSS_HASHTYPE_RSS_UDP_IPV6)

/* Translates kernel hash types to hardware. */
static int
hashconfig_to_hashen(int hashconfig)
{
	int hashen = 0;

	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
	}
	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
	}
	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

	return (hashen);
}
/* Translates hardware hash types to kernel. */
static int
hashen_to_hashconfig(int hashen)
{
	int hashconfig = 0;

	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
		/*
		 * If UDP hashing was enabled it must have been enabled for
		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
		 * enabling any 4-tuple hash is a nonsense configuration.
		 */
		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));

		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
	}
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_IPV6;

	return (hashconfig);
}
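
/*
 * Worked example (illustrative): if the kernel's global config asks for
 * UDP/IPv4 hashing only, then
 *
 *	hashen = hashconfig_to_hashen(RSS_HASHTYPE_RSS_UDP_IPV4);
 *	    // == F_FW_RSS_VI_CONFIG_CMD_UDPEN | ..._IP4FOURTUPEN
 *	extra = hashen_to_hashconfig(hashen) ^ RSS_HASHTYPE_RSS_UDP_IPV4;
 *	    // == RSS_HASHTYPE_RSS_TCP_IPV4
 *
 * i.e. 4-tuple TCP/IPv4 hashing is forced on as a side effect, and
 * vi_full_init() below warns about exactly this kind of "extra" hash.
 */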
int
vi_full_init(struct vi_info *vi)
{
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i, j, hashen;
#ifdef RSS
	int nbuckets = rss_getnumbuckets();
	int hashconfig = rss_gethashconfig();
	int extra;
#endif

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((vi->flags & VI_INIT_DONE) == 0,
	    ("%s: VI_INIT_DONE already", __func__));

	sysctl_ctx_init(&vi->ctx);
	vi->flags |= VI_SYSCTL_CTX;

	/*
	 * Allocate tx/rx/fl queues for this VI.
	 */
	rc = t4_setup_vi_queues(vi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
	 */
	if (vi->nrxq > vi->rss_size) {
		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
		    "some queues will never receive traffic.\n", vi->nrxq,
		    vi->rss_size);
	} else if (vi->rss_size % vi->nrxq) {
		if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
		    "expect uneven traffic distribution.\n", vi->nrxq,
		    vi->rss_size);
	}
#ifdef RSS
	if (vi->nrxq != nbuckets) {
		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
		    "performance will be impacted.\n", vi->nrxq, nbuckets);
	}
#endif
	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
	for (i = 0; i < vi->rss_size;) {
#ifdef RSS
		j = rss_get_indirection_to_bucket(i);
		j %= vi->nrxq;
		rxq = &sc->sge.rxq[vi->first_rxq + j];
		rss[i++] = rxq->iq.abs_id;
#else
		for_each_rxq(vi, j, rxq) {
			rss[i++] = rxq->iq.abs_id;
			if (i == vi->rss_size)
				break;
		}
#endif
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
	    vi->rss_size);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

#ifdef RSS
	hashen = hashconfig_to_hashen(hashconfig);

	/*
	 * We may have had to enable some hashes even though the global config
	 * wants them disabled.  This is a potential problem that must be
	 * reported to the user.
	 */
	extra = hashen_to_hashconfig(hashen) ^ hashconfig;

	/*
	 * If we consider only the supported hash types, then the enabled
	 * hashes are a superset of the requested hashes.  In other words,
	 * there cannot be any supported hash that was requested but not
	 * enabled, but there can be hashes that were not requested but had
	 * to be enabled.
	 */
	extra &= SUPPORTED_RSS_HASHTYPES;
	MPASS((extra & hashconfig) == 0);

	if (extra) {
		if_printf(ifp,
		    "global RSS config (0x%x) cannot be accommodated.\n",
		    hashconfig);
	}
	if (extra & RSS_HASHTYPE_RSS_IPV4)
		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
	if (extra & RSS_HASHTYPE_RSS_IPV6)
		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
#else
	hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
#endif
	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
	if (rc != 0) {
		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
		goto done;
	}

	vi->rss = rss;
	vi->flags |= VI_INIT_DONE;
done:
	if (rc != 0)
		vi_full_uninit(vi);

	return (rc);
}
int
vi_full_uninit(struct vi_info *vi)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;

	if (vi->flags & VI_INIT_DONE) {

		/* Need to quiesce queues.  */

		/* XXX: Only for the first VI? */
		if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
			quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);

		for_each_txq(vi, i, txq) {
			quiesce_txq(sc, txq);
		}

		for_each_ofld_txq(vi, i, ofld_txq) {
			quiesce_wrq(sc, ofld_txq);
		}

		for_each_rxq(vi, i, rxq) {
			quiesce_iq(sc, &rxq->iq);
			quiesce_fl(sc, &rxq->fl);
		}

		for_each_ofld_rxq(vi, i, ofld_rxq) {
			quiesce_iq(sc, &ofld_rxq->iq);
			quiesce_fl(sc, &ofld_rxq->fl);
		}

		free(vi->rss, M_CXGBE);
		free(vi->nm_rss, M_CXGBE);
	}

	t4_teardown_vi_queues(vi);
	vi->flags &= ~VI_INIT_DONE;

	return (0);
}
static void
quiesce_txq(struct adapter *sc, struct sge_txq *txq)
{
	struct sge_eq *eq = &txq->eq;
	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];

	(void) sc;	/* unused */

#ifdef INVARIANTS
	TXQ_LOCK(txq);
	MPASS((eq->flags & EQ_ENABLED) == 0);
	TXQ_UNLOCK(txq);
#endif

	/* Wait for the mp_ring to empty. */
	while (!mp_ring_is_idle(txq->r)) {
		mp_ring_check_drainage(txq->r, 0);
		pause("rquiesce", 1);
	}

	/* Then wait for the hardware to finish. */
	while (spg->cidx != htobe16(eq->pidx))
		pause("equiesce", 1);

	/* Finally, wait for the driver to reclaim all descriptors. */
	while (eq->cidx != eq->pidx)
		pause("dquiesce", 1);
}
static void
quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
{

	/* XXXTX */
}

static void
quiesce_iq(struct adapter *sc, struct sge_iq *iq)
{
	(void) sc;	/* unused */

	/* Synchronize with the interrupt handler */
	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
		pause("iqfree", 1);
}
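
/*
 * The cmpset loop above relies on the rx interrupt path moving the iq
 * IDLE -> BUSY -> IDLE around each service run (see t4_sge.c).  Once the
 * state here reaches IQS_DISABLED the handler's own IDLE -> BUSY cmpset
 * can no longer succeed, so the iq is never serviced again.
 */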
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	callout_stop(&sc->sfl_callout);
	mtx_unlock(&sc->sfl_lock);

	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
static int
t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
    driver_intr_t *handler, void *arg, char *name)
{
	int rc;

	irq->rid = rid;
	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(sc->dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}

	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, arg, &irq->tag);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name, rc);
	} else if (name)
		bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);

	return (rc);
}
static int
t4_free_irq(struct adapter *sc, struct irq *irq)
{
	if (irq->tag)
		bus_teardown_intr(sc->dev, irq->res, irq->tag);
	if (irq->res)
		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);

	bzero(irq, sizeof(*irq));

	return (0);
}
static void
get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{

	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	t4_get_regs(sc, buf, regs->len);
}
#define	A_PL_INDIR_CMD	0x1f8

#define	S_PL_AUTOINC	31
#define	M_PL_AUTOINC	0x1U
#define	V_PL_AUTOINC(x)	((x) << S_PL_AUTOINC)
#define	G_PL_AUTOINC(x)	(((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)

#define	S_PL_VFID	20
#define	M_PL_VFID	0xffU
#define	V_PL_VFID(x)	((x) << S_PL_VFID)
#define	G_PL_VFID(x)	(((x) >> S_PL_VFID) & M_PL_VFID)

#define	S_PL_ADDR	0
#define	M_PL_ADDR	0xfffffU
#define	V_PL_ADDR(x)	((x) << S_PL_ADDR)
#define	G_PL_ADDR(x)	(((x) >> S_PL_ADDR) & M_PL_ADDR)

#define	A_PL_INDIR_DATA	0x1fc
static uint64_t
read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
{
	uint32_t stats[2];

	mtx_assert(&sc->reg_lock, MA_OWNED);
	if (sc->flags & IS_VF) {
		stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
		stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
	} else {
		t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
		    V_PL_VFID(G_FW_VIID_VIN(viid)) |
		    V_PL_ADDR(VF_MPS_REG(reg)));
		stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
		stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
	}
	return (((uint64_t)stats[1]) << 32 | stats[0]);
}
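
/*
 * Access pattern used above, spelled out (illustrative; "vin" and "reg"
 * stand for the VF number and a VF-relative MPS stat offset):
 *
 *	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
 *	    V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
 *	lo = t4_read_reg(sc, A_PL_INDIR_DATA);	// reads reg
 *	hi = t4_read_reg(sc, A_PL_INDIR_DATA);	// autoincrement: reg + 4
 *
 * With AUTOINC set every read of A_PL_INDIR_DATA advances the target
 * address by 4, so the _L/_H halves of a 64-bit counter come back in two
 * back-to-back reads and are combined as (hi << 32) | lo.
 */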
static void
t4_get_vi_stats(struct adapter *sc, unsigned int viid,
    struct fw_vi_stats_vf *stats)
{

#define GET_STAT(name) \
	read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)

	stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
	stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
	stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
	stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
	stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
	stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
	stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
	stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
	stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
	stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
	stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
	stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
	stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
	stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
	stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);

#undef GET_STAT
}
static void
t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
{
	int reg;

	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
	    V_PL_VFID(G_FW_VIID_VIN(viid)) |
	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
}
static void
vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
{
	struct timeval tv;
	const struct timeval interval = {0, 250000};	/* 250ms */

	if (!(vi->flags & VI_INIT_DONE))
		return;

	getmicrotime(&tv);
	timevalsub(&tv, &interval);
	if (timevalcmp(&tv, &vi->last_refreshed, <))
		return;

	mtx_lock(&sc->reg_lock);
	t4_get_vi_stats(sc, vi->viid, &vi->stats);
	getmicrotime(&vi->last_refreshed);
	mtx_unlock(&sc->reg_lock);
}
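
/*
 * The timeval check above is a simple rate limiter: stats are re-read
 * only if now - 250ms >= last_refreshed, i.e. at most ~4 register sweeps
 * per second per VI no matter how often callers poll this function.
 */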
static void
cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
{
	u_int i, v, tnl_cong_drops, bg_map;
	struct timeval tv;
	const struct timeval interval = {0, 250000};	/* 250ms */

	getmicrotime(&tv);
	timevalsub(&tv, &interval);
	if (timevalcmp(&tv, &pi->last_refreshed, <))
		return;

	tnl_cong_drops = 0;
	t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
	bg_map = pi->mps_bg_map;
	while (bg_map) {
		i = ffs(bg_map) - 1;
		mtx_lock(&sc->reg_lock);
		t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
		    A_TP_MIB_TNL_CNG_DROP_0 + i);
		mtx_unlock(&sc->reg_lock);
		tnl_cong_drops += v;
		bg_map &= ~(1 << i);
	}
	pi->tnl_cong_drops = tnl_cong_drops;
	getmicrotime(&pi->last_refreshed);
}
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	PORT_LOCK_ASSERT_OWNED(pi);
	cxgbe_refresh_stats(sc, pi);

	callout_schedule(&pi->tick, hz);
}

static void
vi_tick(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	vi_refresh_stats(sc, vi);

	callout_schedule(&vi->tick, hz);
}
/*
 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
 */
static char *caps_decoder[] = {
	"\20\001IPMI\002NCSI",				/* 0: NBM */
	"\20\001PPP\002QFC\003DCBX",			/* 1: link */
	"\20\001INGRESS\002EGRESS",			/* 2: switch */
	"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"	/* 3: NIC */
	    "\006HASHFILTER\007ETHOFLD",
	"\20\001TOE",					/* 4: TOE */
	"\20\001RDDP\002RDMAC",				/* 5: RDMA */
	"\20\001INITIATOR_PDU\002TARGET_PDU"		/* 6: iSCSI */
	    "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
	    "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
	    "\007T10DIF"
	    "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
	"\20\001LOOKASIDE\002TLSKEYS",			/* 7: Crypto */
	"\20\001INITIATOR\002TARGET\003CTRL_OFLD"	/* 8: FCoE */
	    "\004PO_INITIATOR\005PO_TARGET",
};
void
t4_sysctls(struct adapter *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children, *c0;
	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.t4nex.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	c0 = children = SYSCTL_CHILDREN(oid);

	sc->sc_do_rxcopy = 1;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
	    sc->params.nports, "# of ports");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, (uintptr_t)&sc->doorbells,
	    sysctl_bitfield_8b, "A", "available doorbells");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
	    sc->params.vpd.cclk, "core clock frequency (in kHz)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
	    sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
	    "interrupt holdoff timer values (us)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
	    sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
	    "interrupt holdoff packet counter values");

	t4_sge_sysctls(sc, ctx, children);

	sc->lro_timeout = 100;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
	    &sc->debug_flags, 0, "flags to enable runtime debugging");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
	    CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");

	if (sc->flags & IS_VF)
		return;

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
	    NULL, chip_rev(sc), "chip hardware revision");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
	    CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
	    CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
	    CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
	    CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
	    CTLFLAG_RD, sc->params.vpd.na, 0, "network address");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
	    sc->er_version, 0, "expansion ROM version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
	    sc->bs_version, 0, "bootstrap firmware version");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
	    NULL, sc->params.scfg_vers, "serial config version");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
	    NULL, sc->params.vpd_vers, "VPD version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
	    sc->cfcsum, "config file checksum");

#define SYSCTL_CAP(name, n, text) \
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
	    CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], (uintptr_t)&sc->name, \
	    sysctl_bitfield_16b, "A", "available " text " capabilities")

	SYSCTL_CAP(nbmcaps, 0, "NBM");
	SYSCTL_CAP(linkcaps, 1, "link");
	SYSCTL_CAP(switchcaps, 2, "switch");
	SYSCTL_CAP(niccaps, 3, "NIC");
	SYSCTL_CAP(toecaps, 4, "TCP offload");
	SYSCTL_CAP(rdmacaps, 5, "RDMA");
	SYSCTL_CAP(iscsicaps, 6, "iSCSI");
	SYSCTL_CAP(cryptocaps, 7, "crypto");
	SYSCTL_CAP(fcoecaps, 8, "FCoE");
#undef SYSCTL_CAP

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
	    NULL, sc->tids.nftids, "number of filters");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
	    "chip temperature (in Celsius)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING |
	    CTLFLAG_RD, sc, 0, sysctl_loadavg, "A",
	    "microprocessor load averages (debug firmwares only)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD,
	    &sc->params.core_vdd, 0, "core Vdd (in mV)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS,
	    sysctl_cpus, "A", "local CPUs");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS,
	    sysctl_cpus, "A", "preferred CPUs for interrupts");
	/*
	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
	 */
	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
	    "logs and miscellaneous information");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cctrl, "A", "congestion control");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
	    "A", "CIM logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");

	if (chip_id(sc) > CHELSIO_T4) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_qcfg, "A", "CIM queue configuration");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cpl_stats, "A", "CPL statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_devlog, "A", "firmware's device log");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_fcoe_stats, "A", "FCoE statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_hw_sched, "A", "hardware scheduler");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_l2t, "A", "hardware L2 table");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_smt, "A", "hardware source MAC table");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_lb_stats, "A", "loopback statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_meminfo, "A", "memory regions");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
	    "A", "MPS TCAM entries");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_path_mtus, "A", "path MTUs");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_pm_stats, "A", "PM statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_rdma_stats, "A", "RDMA statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tcp_stats, "A", "TCP statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tids, "A", "TID information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tp_err_stats, "A", "TP error statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
	    "TP logic analyzer event capture mask");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tp_la, "A", "TP logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tx_rate, "A", "Tx rate");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");

	if (chip_id(sc) >= CHELSIO_T5) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
		    sysctl_wcwr_stats, "A", "write combined work requests");
	}
	if (is_offload(sc)) {
		int i;
		char s[4];

		/*
		 * dev.t4nex.X.toe.
		 */
		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
		    NULL, "TOE parameters");
		children = SYSCTL_CHILDREN(oid);

		sc->tt.cong_algorithm = -1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
		    CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
		    "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
		    "3 = highspeed)");

		sc->tt.sndbuf = 256 * 1024;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
		    &sc->tt.sndbuf, 0, "max hardware send buffer size");

		sc->tt.ddp = 0;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
		    &sc->tt.ddp, 0, "DDP allowed");

		sc->tt.rx_coalesce = 1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");

		sc->tt.tls = 0;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW,
		    &sc->tt.tls, 0, "Inline TLS allowed");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
		    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
		    "I", "TCP ports that use inline TLS+TOE RX");

		sc->tt.tx_align = 1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");

		sc->tt.tx_zcopy = 0;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
		    CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
		    "Enable zero-copy aio_write(2)");

		sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO,
		    "cop_managed_offloading", CTLFLAG_RW,
		    &sc->tt.cop_managed_offloading, 0,
		    "COP (Connection Offload Policy) controls all TOE offload");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
		    "TP timer tick (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
		    "TCP timestamp tick (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
		    "DACK tick (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
		    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
		    "IU", "DACK timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
		    sysctl_tp_timer, "LU", "Minimum retransmit interval (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
		    sysctl_tp_timer, "LU", "Maximum retransmit interval (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
		    sysctl_tp_timer, "LU", "Persist timer min (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
		    sysctl_tp_timer, "LU", "Persist timer max (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
		    sysctl_tp_timer, "LU", "Keepalive idle timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
		    sysctl_tp_timer, "LU", "Keepalive interval timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
		    sysctl_tp_timer, "LU", "Initial SRTT (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
		    sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
		    sysctl_tp_shift_cnt, "IU",
		    "Number of SYN retransmissions before abort");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
		    sysctl_tp_shift_cnt, "IU",
		    "Number of retransmissions before abort");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
		    sysctl_tp_shift_cnt, "IU",
		    "Number of keepalive probes before abort");

		oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
		    CTLFLAG_RD, NULL, "TOE retransmit backoffs");
		children = SYSCTL_CHILDREN(oid);
		for (i = 0; i < 16; i++) {
			snprintf(s, sizeof(s), "%u", i);
			SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
			    CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
			    "IU", "TOE retransmit backoff");
		}
	}
}
void
vi_sysctls(struct vi_info *vi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(vi->dev);

	/*
	 * dev.v?(cxgbe|cxl).X.
	 */
	oid = device_get_sysctl_tree(vi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
	    vi->viid, "VI identifier");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &vi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &vi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &vi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &vi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
	    vi->rss_size, "size of RSS indirection table");

	if (IS_MAIN_VI(vi)) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
		    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
		    "Reserve queue 0 for non-flowid packets");
	}

	if (vi->nofldrxq != 0) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &vi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &vi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
		    CTLTYPE_INT | CTLFLAG_RW, vi, 0,
		    sysctl_holdoff_tmr_idx_ofld, "I",
		    "holdoff timer index for TOE queues");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
		    CTLTYPE_INT | CTLFLAG_RW, vi, 0,
		    sysctl_holdoff_pktc_idx_ofld, "I",
		    "holdoff packet counter index for TOE queues");
	}

	if (vi->nnmrxq != 0) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
		    &vi->nnmrxq, 0, "# of netmap rx queues");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
		    &vi->nnmtxq, 0, "# of netmap tx queues");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
		    CTLFLAG_RD, &vi->first_nm_rxq, 0,
		    "index of first netmap rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
		    CTLFLAG_RD, &vi->first_nm_txq, 0,
		    "index of first netmap tx queue");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");
}
static void
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children, *children2;
	struct adapter *sc = pi->adapter;
	int i;
	char name[16];
	static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"};

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
	    CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
		    "PHY temperature (in Celsius)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
		    "PHY firmware version");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
	    CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
	    "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
	    CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
	    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
	    "autonegotiation (-1 = not supported)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
	    port_top_speed(pi), "max speed (in Gbps)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
	    pi->mps_bg_map, "MPS buffer group map");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
	    NULL, pi->rx_e_chan_map, "TP rx e-channel map");

	if (sc->flags & IS_VF)
		return;

	/*
	 * dev.(cxgbe|cxl).X.tc.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
	    "Tx scheduler traffic classes (cl_rl)");
	children2 = SYSCTL_CHILDREN(oid);
	SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
	    CTLFLAG_RW, &pi->sched_params->pktsize, 0,
	    "pktsize for per-flow cl-rl (0 means up to the driver)");
	SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
	    CTLFLAG_RW, &pi->sched_params->burstsize, 0,
	    "burstsize for per-flow cl-rl (0 means up to the driver)");
	for (i = 0; i < sc->chip_params->nsched_cls; i++) {
		struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];

		snprintf(name, sizeof(name), "%d", i);
		children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
		    "traffic class"));
		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
		    CTLTYPE_STRING | CTLFLAG_RD, tc_flags, (uintptr_t)&tc->flags,
		    sysctl_bitfield_8b, "A", "flags");
		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
		    CTLFLAG_RD, &tc->refcount, 0, "references to this class");
		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
		    sysctl_tc_params, "A", "traffic class parameters");
	}

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
	    &pi->tx_parse_error, 0,
	    "# of tx packets with invalid length or # of segments");
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records",
	    CTLFLAG_RD, &pi->tx_tls_records,
	    "# of TLS records transmitted");
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets",
	    CTLFLAG_RD, &pi->tx_tls_octets,
	    "# of payload octets in transmitted TLS records");
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records",
	    CTLFLAG_RD, &pi->rx_tls_records,
	    "# of TLS records received");
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets",
	    CTLFLAG_RD, &pi->rx_tls_octets,
	    "# of payload octets in received TLS records");
}
static int
sysctl_int_array(SYSCTL_HANDLER_ARGS)
{
	int rc, *i, space = 0;
	struct sbuf sb;

	sbuf_new_for_sysctl(&sb, NULL, 64, req);
	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
		if (space)
			sbuf_printf(&sb, " ");
		else
			space = 1;
		sbuf_printf(&sb, "%d", *i);
	}
	rc = sbuf_finish(&sb);
	sbuf_delete(&sb);

	return (rc);
}
static int
sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
{
	int rc;
	struct sbuf *sb;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
{
	int rc;
	struct sbuf *sb;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
static int
sysctl_noflowq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	int rc, val;

	val = vi->rsrv_noflowq;
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if ((val >= 1) && (vi->ntxq > 1))
		vi->rsrv_noflowq = 1;
	else
		vi->rsrv_noflowq = 0;

	return (rc);
}
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
	uint8_t v;

	idx = vi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
	for_each_rxq(vi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		rxq->iq.intr_params = v;
#endif
	}
	vi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
static int
sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int idx, rc;

	idx = vi->pktc_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < -1 || idx >= SGE_NCOUNTERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4pktc");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY; /* cannot be changed once the queues are created */
	else
		vi->pktc_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
static int
sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int qsize, rc;

	qsize = vi->qsize_rxq;

	rc = sysctl_handle_int(oidp, &qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (qsize < 128 || (qsize & 7))
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4rxqs");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY; /* cannot be changed once the queues are created */
	else
		vi->qsize_rxq = qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
static int
sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int qsize, rc;

	qsize = vi->qsize_txq;

	rc = sysctl_handle_int(oidp, &qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (qsize < 128 || qsize > 65536)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4txqs");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY; /* cannot be changed once the queues are created */
	else
		vi->qsize_txq = qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
static int
sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		struct sbuf *sb;
		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return (rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return (rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);	/* not a number */
		n = s[0] - '0';
		if (n & ~(PAUSE_TX | PAUSE_RX))
			return (EINVAL);	/* some other bit is set too */

		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
		    "t4PAUSE");
		if (rc)
			return (rc);
		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
			lc->requested_fc |= n;
			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
			if (rc == 0) {
				lc->fc = lc->requested_fc;
				set_current_media(pi, &pi->media);
			}
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}
static int
sysctl_fec(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		struct sbuf *sb;
		static char *bits = "\20\1RS\2BASER_RS\3RESERVED";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return (rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return (rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);	/* not a number */
		n = s[0] - '0';
		if (n & ~M_FW_PORT_CAP_FEC)
			return (EINVAL);	/* some other bit is set too */
		if (n & (n - 1))
			return (EINVAL);	/* one bit can be set at most */

		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
		    "t4fec");
		if (rc)
			return (rc);
		if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
			lc->requested_fec = n &
			    G_FW_PORT_CAP_FEC(lc->supported);
			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
			if (rc == 0)
				lc->fec = lc->requested_fec;
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}
6631 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
6633 struct port_info *pi = arg1;
6634 struct adapter *sc = pi->adapter;
6635 struct link_config *lc = &pi->link_cfg;
6638 if (lc->supported & FW_PORT_CAP_ANEG)
6639 val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0;
6642 rc = sysctl_handle_int(oidp, &val, 0, req);
6643 if (rc != 0 || req->newptr == NULL)
6646 val = AUTONEG_DISABLE;
6648 val = AUTONEG_ENABLE;
6652 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6657 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) {
6661 if (lc->requested_aneg == val) {
6662 rc = 0; /* no change, do nothing. */
6665 old = lc->requested_aneg;
6666 lc->requested_aneg = val;
6667 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6669 lc->requested_aneg = old;
6671 set_current_media(pi, &pi->media);
6674 end_synchronized_op(sc, 0);
6679 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
6681 struct adapter *sc = arg1;
6685 val = t4_read_reg64(sc, reg);
6687 return (sysctl_handle_64(oidp, &val, 0, req));
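/*
 * Chip temperature, queried from the firmware via the DEV_DIAG/TMP
 * device parameter.
 */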
6691 sysctl_temperature(SYSCTL_HANDLER_ARGS)
6693 struct adapter *sc = arg1;
6695 uint32_t param, val;
6697 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
6700 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6701 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
6702 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
6703 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6704 end_synchronized_op(sc, 0);
6708 /* unknown is returned as 0 but we display -1 in that case */
6709 t = val == 0 ? -1 : val;
6711 rc = sysctl_handle_int(oidp, &t, 0, req);
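/*
 * Firmware load averages: one byte each, packed into the low 24 bits of
 * the DEV_LOAD device parameter.  A value of 0xffffffff means the running
 * firmware does not keep load averages.
 */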
6716 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
6718 struct adapter *sc = arg1;
6721 uint32_t param, val;
6723 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
6726 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6727 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
6728 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6729 end_synchronized_op(sc, 0);
6733 rc = sysctl_wire_old_buffer(req, 0);
6737 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6741 if (val == 0xffffffff) {
6742 /* Only debug and custom firmwares report load averages. */
6743 sbuf_printf(sb, "not available");
6745 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
6746 (val >> 16) & 0xff);
6748 rc = sbuf_finish(sb);
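/*
 * Congestion control table.  Each entry shows the additive increments for
 * one congestion window across all 16 MTUs, followed by the window
 * parameter (a_wnd) and the multiplicative decrement factor (b_wnd).
 */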
6755 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
6757 struct adapter *sc = arg1;
6760 uint16_t incr[NMTUS][NCCTRL_WIN];
6761 static const char *dec_fac[] = {
6762 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
6766 rc = sysctl_wire_old_buffer(req, 0);
6770 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6774 t4_read_cong_tbl(sc, incr);
6776 for (i = 0; i < NCCTRL_WIN; ++i) {
6777 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
6778 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6779 incr[5][i], incr[6][i], incr[7][i]);
6780 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6781 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6782 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6783 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6786 rc = sbuf_finish(sb);
6792 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6793 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
6794 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
6795 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
6799 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6801 struct adapter *sc = arg1;
6803 int rc, i, n, qid = arg2;
6806 u_int cim_num_obq = sc->chip_params->cim_num_obq;
6808 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6809 ("%s: bad qid %d\n", __func__, qid));
6811 if (qid < CIM_NUM_IBQ) {
6814 n = 4 * CIM_IBQ_SIZE;
6815 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6816 rc = t4_read_cim_ibq(sc, qid, buf, n);
6818 /* outbound queue */
6821 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6822 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6823 rc = t4_read_cim_obq(sc, qid, buf, n);
6830 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
6832 rc = sysctl_wire_old_buffer(req, 0);
6836 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6842 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
6843 for (i = 0, p = buf; i < n; i += 16, p += 4)
6844 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6847 rc = sbuf_finish(sb);
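/*
 * CIM logic analyzer capture (T4/T5).  The decode of each 8-word sample
 * depends on F_UPDBGLACAPTPCONLY in the LA configuration: PC-only captures
 * print three rows per sample, full captures print one row that also
 * includes the LS0 load/store unit state.
 */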
6855 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6857 struct adapter *sc = arg1;
6863 MPASS(chip_id(sc) <= CHELSIO_T5);
6865 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6869 rc = sysctl_wire_old_buffer(req, 0);
6873 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6877 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6880 rc = -t4_cim_read_la(sc, buf, NULL);
6884 sbuf_printf(sb, "Status Data PC%s",
6885 cfg & F_UPDBGLACAPTPCONLY ? "" :
6886 " LS0Stat LS0Addr LS0Data");
6888 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
6889 if (cfg & F_UPDBGLACAPTPCONLY) {
6890 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
6892 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
6893 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6894 p[4] & 0xff, p[5] >> 8);
6895 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
6896 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6897 p[1] & 0xf, p[2] >> 4);
6900 "\n %02x %x%07x %x%07x %08x %08x "
6902 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6903 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6908 rc = sbuf_finish(sb);
6916 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
6918 struct adapter *sc = arg1;
6924 MPASS(chip_id(sc) > CHELSIO_T5);
6926 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6930 rc = sysctl_wire_old_buffer(req, 0);
6934 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6938 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6941 rc = -t4_cim_read_la(sc, buf, NULL);
6945 sbuf_printf(sb, "Status Inst Data PC%s",
6946 cfg & F_UPDBGLACAPTPCONLY ? "" :
6947 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
6949 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
6950 if (cfg & F_UPDBGLACAPTPCONLY) {
6951 sbuf_printf(sb, "\n %02x %08x %08x %08x",
6952 p[3] & 0xff, p[2], p[1], p[0]);
6953 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
6954 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
6955 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
6956 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
6957 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
6958 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
6961 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
6962 "%08x %08x %08x %08x %08x %08x",
6963 (p[9] >> 16) & 0xff,
6964 p[9] & 0xffff, p[8] >> 16,
6965 p[8] & 0xffff, p[7] >> 16,
6966 p[7] & 0xffff, p[6] >> 16,
6967 p[2], p[1], p[0], p[5], p[4], p[3]);
6971 rc = sbuf_finish(sb);
6979 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6981 struct adapter *sc = arg1;
6987 rc = sysctl_wire_old_buffer(req, 0);
6991 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6995 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6998 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
7001 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7002 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
7006 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
7007 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7008 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
7009 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
7010 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
7011 (p[1] >> 2) | ((p[2] & 3) << 30),
7012 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
7016 rc = sbuf_finish(sb);
7023 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
7025 struct adapter *sc = arg1;
7031 rc = sysctl_wire_old_buffer(req, 0);
7035 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7039 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
7042 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
7045 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
7046 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7047 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
7048 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
7049 p[4], p[3], p[2], p[1], p[0]);
7052 sbuf_printf(sb, "\n\nCntl ID Data");
7053 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7054 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
7055 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
7058 rc = sbuf_finish(sb);
7065 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
7067 struct adapter *sc = arg1;
7070 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7071 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7072 uint16_t thres[CIM_NUM_IBQ];
7073 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
7074 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
7075 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
7077 cim_num_obq = sc->chip_params->cim_num_obq;
7079 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
7080 obq_rdaddr = A_UP_OBQ_0_REALADDR;
7082 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
7083 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
7085 nq = CIM_NUM_IBQ + cim_num_obq;
7087 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
7089 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
7093 t4_read_cimq_cfg(sc, base, size, thres);
7095 rc = sysctl_wire_old_buffer(req, 0);
7099 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7104 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
7106 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
7107 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
7108 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
7109 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7110 G_QUEREMFLITS(p[2]) * 16);
7111 for ( ; i < nq; i++, p += 4, wr += 2)
7112 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
7113 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
7114 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7115 G_QUEREMFLITS(p[2]) * 16);
7117 rc = sbuf_finish(sb);
7124 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
7126 struct adapter *sc = arg1;
7129 struct tp_cpl_stats stats;
7131 rc = sysctl_wire_old_buffer(req, 0);
7135 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7139 mtx_lock(&sc->reg_lock);
7140 t4_tp_get_cpl_stats(sc, &stats, 0);
7141 mtx_unlock(&sc->reg_lock);
7143 if (sc->chip_params->nchan > 2) {
7144 sbuf_printf(sb, " channel 0 channel 1"
7145 " channel 2 channel 3");
7146 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
7147 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
7148 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
7149 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
7151 sbuf_printf(sb, " channel 0 channel 1");
7152 sbuf_printf(sb, "\nCPL requests: %10u %10u",
7153 stats.req[0], stats.req[1]);
7154 sbuf_printf(sb, "\nCPL responses: %10u %10u",
7155 stats.rsp[0], stats.rsp[1]);
7158 rc = sbuf_finish(sb);
7165 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
7167 struct adapter *sc = arg1;
7170 struct tp_usm_stats stats;
7172 rc = sysctl_wire_old_buffer(req, 0);
7176 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7180 t4_get_usm_stats(sc, &stats, 1);
7182 sbuf_printf(sb, "Frames: %u\n", stats.frames);
7183 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
7184 sbuf_printf(sb, "Drops: %u", stats.drops);
7186 rc = sbuf_finish(sb);
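/*
 * Firmware device log.  The tables below map the firmware's level and
 * facility enums to the names printed by sysctl_devlog.
 */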
7192 static const char * const devlog_level_strings[] = {
7193 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
7194 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
7195 [FW_DEVLOG_LEVEL_ERR] = "ERR",
7196 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
7197 [FW_DEVLOG_LEVEL_INFO] = "INFO",
7198 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
7201 static const char * const devlog_facility_strings[] = {
7202 [FW_DEVLOG_FACILITY_CORE] = "CORE",
7203 [FW_DEVLOG_FACILITY_CF] = "CF",
7204 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
7205 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
7206 [FW_DEVLOG_FACILITY_RES] = "RES",
7207 [FW_DEVLOG_FACILITY_HW] = "HW",
7208 [FW_DEVLOG_FACILITY_FLR] = "FLR",
7209 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
7210 [FW_DEVLOG_FACILITY_PHY] = "PHY",
7211 [FW_DEVLOG_FACILITY_MAC] = "MAC",
7212 [FW_DEVLOG_FACILITY_PORT] = "PORT",
7213 [FW_DEVLOG_FACILITY_VI] = "VI",
7214 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
7215 [FW_DEVLOG_FACILITY_ACL] = "ACL",
7216 [FW_DEVLOG_FACILITY_TM] = "TM",
7217 [FW_DEVLOG_FACILITY_QFC] = "QFC",
7218 [FW_DEVLOG_FACILITY_DCB] = "DCB",
7219 [FW_DEVLOG_FACILITY_ETH] = "ETH",
7220 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
7221 [FW_DEVLOG_FACILITY_RI] = "RI",
7222 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
7223 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
7224 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
7225 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
7226 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
7230 sysctl_devlog(SYSCTL_HANDLER_ARGS)
7232 struct adapter *sc = arg1;
7233 struct devlog_params *dparams = &sc->params.devlog;
7234 struct fw_devlog_e *buf, *e;
7235 int i, j, rc, nentries, first = 0;
7237 uint64_t ftstamp = UINT64_MAX;
7239 if (dparams->addr == 0)
7242 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
7246 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
7250 nentries = dparams->size / sizeof(struct fw_devlog_e);
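	/*
	 * The log is a circular buffer.  Convert each valid entry to host
	 * byte order and remember the one with the lowest timestamp: that
	 * is the oldest entry and printing starts there.
	 */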
7251 for (i = 0; i < nentries; i++) {
7254 if (e->timestamp == 0)
7257 e->timestamp = be64toh(e->timestamp);
7258 e->seqno = be32toh(e->seqno);
7259 for (j = 0; j < 8; j++)
7260 e->params[j] = be32toh(e->params[j]);
7262 if (e->timestamp < ftstamp) {
7263 ftstamp = e->timestamp;
7268 if (buf[first].timestamp == 0)
7269 goto done; /* nothing in the log */
7271 rc = sysctl_wire_old_buffer(req, 0);
7275 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7280 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
7281 "Seq#", "Tstamp", "Level", "Facility", "Message");
7286 if (e->timestamp == 0)
7289 sbuf_printf(sb, "%10d %15ju %8s %8s ",
7290 e->seqno, e->timestamp,
7291 (e->level < nitems(devlog_level_strings) ?
7292 devlog_level_strings[e->level] : "UNKNOWN"),
7293 (e->facility < nitems(devlog_facility_strings) ?
7294 devlog_facility_strings[e->facility] : "UNKNOWN"));
7295 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
7296 e->params[2], e->params[3], e->params[4],
7297 e->params[5], e->params[6], e->params[7]);
7299 if (++i == nentries)
7301 } while (i != first);
7303 rc = sbuf_finish(sb);
7311 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
7313 struct adapter *sc = arg1;
7316 struct tp_fcoe_stats stats[MAX_NCHAN];
7317 int i, nchan = sc->chip_params->nchan;
7319 rc = sysctl_wire_old_buffer(req, 0);
7323 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7327 for (i = 0; i < nchan; i++)
7328 t4_get_fcoe_stats(sc, i, &stats[i], 1);
7331 sbuf_printf(sb, " channel 0 channel 1"
7332 " channel 2 channel 3");
7333 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
7334 stats[0].octets_ddp, stats[1].octets_ddp,
7335 stats[2].octets_ddp, stats[3].octets_ddp);
7336 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
7337 stats[0].frames_ddp, stats[1].frames_ddp,
7338 stats[2].frames_ddp, stats[3].frames_ddp);
7339 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
7340 stats[0].frames_drop, stats[1].frames_drop,
7341 stats[2].frames_drop, stats[3].frames_drop);
7343 sbuf_printf(sb, " channel 0 channel 1");
7344 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
7345 stats[0].octets_ddp, stats[1].octets_ddp);
7346 sbuf_printf(sb, "\nframesDDP: %16u %16u",
7347 stats[0].frames_ddp, stats[1].frames_ddp);
7348 sbuf_printf(sb, "\nframesDrop: %16u %16u",
7349 stats[0].frames_drop, stats[1].frames_drop);
7352 rc = sbuf_finish(sb);
7359 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
7361 struct adapter *sc = arg1;
7364 unsigned int map, kbps, ipg, mode;
7365 unsigned int pace_tab[NTX_SCHED];
7367 rc = sysctl_wire_old_buffer(req, 0);
7371 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7375 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
7376 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
7377 t4_read_pace_tbl(sc, pace_tab);
7379 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
7380 "Class IPG (0.1 ns) Flow IPG (us)");
7382 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
7383 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
7384 sbuf_printf(sb, "\n %u %-5s %u ", i,
7385 (mode & (1 << i)) ? "flow" : "class", map & 3);
7387 sbuf_printf(sb, "%9u ", kbps);
7389 sbuf_printf(sb, " disabled ");
7392 sbuf_printf(sb, "%13u ", ipg);
7394 sbuf_printf(sb, " disabled ");
7397 sbuf_printf(sb, "%10u", pace_tab[i]);
7399 sbuf_printf(sb, " disabled");
7402 rc = sbuf_finish(sb);
7409 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
7411 struct adapter *sc = arg1;
7415 struct lb_port_stats s[2];
7416 static const char *stat_name[] = {
7417 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
7418 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
7419 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
7420 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
7421 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
7422 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
7423 "BG2FramesTrunc:", "BG3FramesTrunc:"
7426 rc = sysctl_wire_old_buffer(req, 0);
7430 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7434 memset(s, 0, sizeof(s));
7436 for (i = 0; i < sc->chip_params->nchan; i += 2) {
7437 t4_get_lb_stats(sc, i, &s[0]);
7438 t4_get_lb_stats(sc, i + 1, &s[1]);
7442 sbuf_printf(sb, "%s Loopback %u"
7443 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
7445 for (j = 0; j < nitems(stat_name); j++)
7446 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
7450 rc = sbuf_finish(sb);
7457 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
7460 struct port_info *pi = arg1;
7461 struct link_config *lc = &pi->link_cfg;
7464 rc = sysctl_wire_old_buffer(req, 0);
7467 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
7471 if (lc->link_ok || lc->link_down_rc == 255)
7472 sbuf_printf(sb, "n/a");
7474 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
7476 rc = sbuf_finish(sb);
7489 mem_desc_cmp(const void *a, const void *b)
7491 return ((const struct mem_desc *)a)->base -
7492 ((const struct mem_desc *)b)->base;
7496 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
7504 size = to - from + 1;
7508 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
7509 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
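/*
 * Adapter memory map.  Builds the list of populated memory ranges (EDC0/1,
 * MC0/1) from MA_TARGET_MEM_ENABLE, collects the base of every hardware
 * region, sorts both lists, and prints the regions, the holes between the
 * populated ranges, and the page and p-struct usage counters.
 */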
7513 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
7515 struct adapter *sc = arg1;
7518 uint32_t lo, hi, used, alloc;
7519 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
7520 static const char *region[] = {
7521 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
7522 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
7523 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
7524 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
7525 "RQUDP region:", "PBL region:", "TXPBL region:",
7526 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
7527 "On-chip queues:", "TLS keys:",
7529 struct mem_desc avail[4];
7530 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
7531 struct mem_desc *md = mem;
7533 rc = sysctl_wire_old_buffer(req, 0);
7537 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7541 for (i = 0; i < nitems(mem); i++) {
7546 /* Find and sort the populated memory ranges */
7548 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
7549 if (lo & F_EDRAM0_ENABLE) {
7550 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
7551 avail[i].base = G_EDRAM0_BASE(hi) << 20;
7552 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
7556 if (lo & F_EDRAM1_ENABLE) {
7557 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
7558 avail[i].base = G_EDRAM1_BASE(hi) << 20;
7559 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
7563 if (lo & F_EXT_MEM_ENABLE) {
7564 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
7565 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
7566 avail[i].limit = avail[i].base +
7567 (G_EXT_MEM_SIZE(hi) << 20);
7568 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
7571 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
7572 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
7573 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
7574 avail[i].limit = avail[i].base +
7575 (G_EXT_MEM1_SIZE(hi) << 20);
7579 if (!i) /* no memory available */
7581 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
7583 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
7584 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
7585 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
7586 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7587 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
7588 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
7589 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
7590 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
7591 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
7593 /* the next few have explicit upper bounds */
7594 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
7595 md->limit = md->base - 1 +
7596 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
7597 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
7600 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
7601 md->limit = md->base - 1 +
7602 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
7603 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
7606 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7607 if (chip_id(sc) <= CHELSIO_T5)
7608 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
7610 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
7614 md->idx = nitems(region); /* hide it */
7618 #define ulp_region(reg) \
7619 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
7620 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
7622 ulp_region(RX_ISCSI);
7623 ulp_region(RX_TDDP);
7625 ulp_region(RX_STAG);
7627 ulp_region(RX_RQUDP);
7633 md->idx = nitems(region);
7636 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
7637 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
7640 if (sge_ctrl & F_VFIFO_ENABLE)
7641 size = G_DBVFIFO_SIZE(fifo_size);
7643 size = G_T6_DBVFIFO_SIZE(fifo_size);
7646 md->base = G_BASEADDR(t4_read_reg(sc,
7647 A_SGE_DBVFIFO_BADDR));
7648 md->limit = md->base + (size << 2) - 1;
7653 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
7656 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
7660 md->base = sc->vres.ocq.start;
7661 if (sc->vres.ocq.size)
7662 md->limit = md->base + sc->vres.ocq.size - 1;
7664 md->idx = nitems(region); /* hide it */
7667 md->base = sc->vres.key.start;
7668 if (sc->vres.key.size)
7669 md->limit = md->base + sc->vres.key.size - 1;
7671 md->idx = nitems(region); /* hide it */
7674 /* add any address-space holes, there can be up to 3 */
7675 for (n = 0; n < i - 1; n++)
7676 if (avail[n].limit < avail[n + 1].base)
7677 (md++)->base = avail[n].limit;
7679 (md++)->base = avail[n].limit;
7682 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
7684 for (lo = 0; lo < i; lo++)
7685 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
7686 avail[lo].limit - 1);
7688 sbuf_printf(sb, "\n");
7689 for (i = 0; i < n; i++) {
7690 if (mem[i].idx >= nitems(region))
7691 continue; /* skip holes */
7693 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
7694 mem_region_show(sb, region[mem[i].idx], mem[i].base,
7698 sbuf_printf(sb, "\n");
7699 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
7700 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
7701 mem_region_show(sb, "uP RAM:", lo, hi);
7703 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
7704 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
7705 mem_region_show(sb, "uP Extmem2:", lo, hi);
7707 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
7708 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
7710 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
7711 (lo & F_PMRXNUMCHN) ? 2 : 1);
7713 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
7714 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
7715 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
7717 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
7718 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
7719 sbuf_printf(sb, "%u p-structs\n",
7720 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
7722 for (i = 0; i < 4; i++) {
7723 if (chip_id(sc) > CHELSIO_T5)
7724 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
7726 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
7728 used = G_T5_USED(lo);
7729 alloc = G_T5_ALLOC(lo);
7732 alloc = G_ALLOC(lo);
7734 /* For T6 these are MAC buffer groups */
7735 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
7738 for (i = 0; i < sc->chip_params->nchan; i++) {
7739 if (chip_id(sc) > CHELSIO_T5)
7740 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
7742 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
7744 used = G_T5_USED(lo);
7745 alloc = G_T5_ALLOC(lo);
7748 alloc = G_ALLOC(lo);
7750 /* For T6 these are MAC buffer groups */
7752 "\nLoopback %d using %u pages out of %u allocated",
7756 rc = sbuf_finish(sb);
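/*
 * Converts a TCAM (x, y) entry pair into a value/mask pair.  The Ethernet
 * address is recovered from the low six bytes of the big-endian y value.
 */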
7763 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
7767 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
7771 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
7773 struct adapter *sc = arg1;
7777 MPASS(chip_id(sc) <= CHELSIO_T5);
7779 rc = sysctl_wire_old_buffer(req, 0);
7783 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7788 "Idx Ethernet address Mask Vld Ports PF"
7789 " VF Replication P0 P1 P2 P3 ML");
7790 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7791 uint64_t tcamx, tcamy, mask;
7792 uint32_t cls_lo, cls_hi;
7793 uint8_t addr[ETHER_ADDR_LEN];
7795 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
7796 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
7799 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7800 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7801 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7802 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
7803 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
7804 addr[3], addr[4], addr[5], (uintmax_t)mask,
7805 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
7806 G_PORTMAP(cls_hi), G_PF(cls_lo),
7807 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
7809 if (cls_lo & F_REPLICATE) {
7810 struct fw_ldst_cmd ldst_cmd;
7812 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7813 ldst_cmd.op_to_addrspace =
7814 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7815 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7816 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7817 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7818 ldst_cmd.u.mps.rplc.fid_idx =
7819 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7820 V_FW_LDST_CMD_IDX(i));
7822 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7826 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7827 sizeof(ldst_cmd), &ldst_cmd);
7828 end_synchronized_op(sc, 0);
7831 sbuf_printf(sb, "%36d", rc);
7834 sbuf_printf(sb, " %08x %08x %08x %08x",
7835 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7836 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7837 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7838 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7841 sbuf_printf(sb, "%36s", "");
7843 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
7844 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
7845 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
7849 (void) sbuf_finish(sb);
7851 rc = sbuf_finish(sb);
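/*
 * T6 version of the MPS TCAM dump.  The TCAM is read through the indirect
 * A_MPS_CLS_TCAM_DATA2_CTL interface (y first, then x with CTLXYBITSEL
 * set), and entries with an inner-header lookup type display the VNI value
 * and mask instead of the IVLAN fields.
 */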
7858 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
7860 struct adapter *sc = arg1;
7864 MPASS(chip_id(sc) > CHELSIO_T5);
7866 rc = sysctl_wire_old_buffer(req, 0);
7870 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7874 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
7875 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
7877 " P0 P1 P2 P3 ML\n");
7879 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7880 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
7882 uint64_t tcamx, tcamy, val, mask;
7883 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
7884 uint8_t addr[ETHER_ADDR_LEN];
7886 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
7888 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
7890 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
7891 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7892 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7893 tcamy = G_DMACH(val) << 32;
7894 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7895 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7896 lookup_type = G_DATALKPTYPE(data2);
7897 port_num = G_DATAPORTNUM(data2);
7898 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7899 /* Inner header VNI */
7900 vniy = ((data2 & F_DATAVIDH2) << 23) |
7901 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7902 dip_hit = data2 & F_DATADIPHIT;
7907 vlan_vld = data2 & F_DATAVIDH2;
7908 ivlan = G_VIDL(val);
7911 ctl |= V_CTLXYBITSEL(1);
7912 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7913 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7914 tcamx = G_DMACH(val) << 32;
7915 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7916 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7917 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7918 /* Inner header VNI mask */
7919 vnix = ((data2 & F_DATAVIDH2) << 23) |
7920 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7926 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7928 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7929 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7931 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7932 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7933 "%012jx %06x %06x - - %3c"
7934 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
7935 addr[1], addr[2], addr[3], addr[4], addr[5],
7936 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
7937 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7938 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7939 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7941 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7942 "%012jx - - ", i, addr[0], addr[1],
7943 addr[2], addr[3], addr[4], addr[5],
7947 sbuf_printf(sb, "%4u Y ", ivlan);
7949 sbuf_printf(sb, " - N ");
7951 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
7952 lookup_type ? 'I' : 'O', port_num,
7953 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7954 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7955 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7959 if (cls_lo & F_T6_REPLICATE) {
7960 struct fw_ldst_cmd ldst_cmd;
7962 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7963 ldst_cmd.op_to_addrspace =
7964 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7965 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7966 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7967 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7968 ldst_cmd.u.mps.rplc.fid_idx =
7969 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7970 V_FW_LDST_CMD_IDX(i));
7972 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7976 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7977 sizeof(ldst_cmd), &ldst_cmd);
7978 end_synchronized_op(sc, 0);
7981 sbuf_printf(sb, "%72d", rc);
7984 sbuf_printf(sb, " %08x %08x %08x %08x"
7985 " %08x %08x %08x %08x",
7986 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
7987 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
7988 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
7989 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
7990 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7991 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7992 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7993 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7996 sbuf_printf(sb, "%72s", "");
7998 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
7999 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
8000 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
8001 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
8005 (void) sbuf_finish(sb);
8007 rc = sbuf_finish(sb);
8014 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
8016 struct adapter *sc = arg1;
8019 uint16_t mtus[NMTUS];
8021 rc = sysctl_wire_old_buffer(req, 0);
8025 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8029 t4_read_mtu_tbl(sc, mtus, NULL);
8031 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
8032 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
8033 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
8034 mtus[14], mtus[15]);
8036 rc = sbuf_finish(sb);
8043 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
8045 struct adapter *sc = arg1;
8048 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
8049 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
8050 static const char *tx_stats[MAX_PM_NSTATS] = {
8051 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
8052 "Tx FIFO wait", NULL, "Tx latency"
8054 static const char *rx_stats[MAX_PM_NSTATS] = {
8055 "Read:", "Write bypass:", "Write mem:", "Flush:",
8056 "Rx FIFO wait", NULL, "Rx latency"
8059 rc = sysctl_wire_old_buffer(req, 0);
8063 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8067 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
8068 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
8070 sbuf_printf(sb, " Tx pcmds Tx bytes");
8071 for (i = 0; i < 4; i++) {
8072 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8076 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
8077 for (i = 0; i < 4; i++) {
8078 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8082 if (chip_id(sc) > CHELSIO_T5) {
8084 "\n Total wait Total occupancy");
8085 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8087 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8091 MPASS(i < nitems(tx_stats));
8094 "\n Reads Total wait");
8095 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8097 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8101 rc = sbuf_finish(sb);
8108 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
8110 struct adapter *sc = arg1;
8113 struct tp_rdma_stats stats;
8115 rc = sysctl_wire_old_buffer(req, 0);
8119 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8123 mtx_lock(&sc->reg_lock);
8124 t4_tp_get_rdma_stats(sc, &stats, 0);
8125 mtx_unlock(&sc->reg_lock);
8127 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
8128 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
8130 rc = sbuf_finish(sb);
8137 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
8139 struct adapter *sc = arg1;
8142 struct tp_tcp_stats v4, v6;
8144 rc = sysctl_wire_old_buffer(req, 0);
8148 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8152 mtx_lock(&sc->reg_lock);
8153 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
8154 mtx_unlock(&sc->reg_lock);
8158 sbuf_printf(sb, "OutRsts: %20u %20u\n",
8159 v4.tcp_out_rsts, v6.tcp_out_rsts);
8160 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
8161 v4.tcp_in_segs, v6.tcp_in_segs);
8162 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
8163 v4.tcp_out_segs, v6.tcp_out_segs);
8164 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
8165 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
8167 rc = sbuf_finish(sb);
8174 sysctl_tids(SYSCTL_HANDLER_ARGS)
8176 struct adapter *sc = arg1;
8179 struct tid_info *t = &sc->tids;
8181 rc = sysctl_wire_old_buffer(req, 0);
8185 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8190 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
8195 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
8196 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
8200 sbuf_printf(sb, "TID range: ");
8201 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8204 if (chip_id(sc) <= CHELSIO_T5) {
8205 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
8206 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
8208 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
8209 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
8213 sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1);
8214 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
8216 sbuf_printf(sb, "%u-%u", t->tid_base, t->ntids - 1);
8217 sbuf_printf(sb, ", in use: %u\n",
8218 atomic_load_acq_int(&t->tids_in_use));
8222 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
8223 t->stid_base + t->nstids - 1, t->stids_in_use);
8227 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
8228 t->ftid_end, t->ftids_in_use);
8232 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
8233 t->etid_base + t->netids - 1, t->etids_in_use);
8236 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
8237 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
8238 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
8240 rc = sbuf_finish(sb);
8247 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
8249 struct adapter *sc = arg1;
8252 struct tp_err_stats stats;
8254 rc = sysctl_wire_old_buffer(req, 0);
8258 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8262 mtx_lock(&sc->reg_lock);
8263 t4_tp_get_err_stats(sc, &stats, 0);
8264 mtx_unlock(&sc->reg_lock);
8266 if (sc->chip_params->nchan > 2) {
8267 sbuf_printf(sb, " channel 0 channel 1"
8268 " channel 2 channel 3\n");
8269 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
8270 stats.mac_in_errs[0], stats.mac_in_errs[1],
8271 stats.mac_in_errs[2], stats.mac_in_errs[3]);
8272 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
8273 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
8274 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
8275 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
8276 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
8277 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
8278 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
8279 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
8280 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
8281 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
8282 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
8283 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
8284 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
8285 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
8286 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
8287 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
8288 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
8289 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
8290 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
8291 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
8292 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
8294 sbuf_printf(sb, " channel 0 channel 1\n");
8295 sbuf_printf(sb, "macInErrs: %10u %10u\n",
8296 stats.mac_in_errs[0], stats.mac_in_errs[1]);
8297 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
8298 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
8299 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
8300 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
8301 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
8302 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
8303 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
8304 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
8305 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
8306 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
8307 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
8308 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
8309 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
8310 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
8313 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
8314 stats.ofld_no_neigh, stats.ofld_cong_defer);
8316 rc = sbuf_finish(sb);
8323 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
8325 struct adapter *sc = arg1;
8326 struct tp_params *tpp = &sc->params.tp;
8330 mask = tpp->la_mask >> 16;
8331 rc = sysctl_handle_int(oidp, &mask, 0, req);
8332 if (rc != 0 || req->newptr == NULL)
8336 tpp->la_mask = mask << 16;
8337 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
8349 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
8355 uint64_t mask = (1ULL << f->width) - 1;
8356 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
8357 ((uintmax_t)v >> f->start) & mask);
8359 if (line_size + len >= 79) {
8361 sbuf_printf(sb, "\n ");
8363 sbuf_printf(sb, "%s ", buf);
8364 line_size += len + 1;
8367 sbuf_printf(sb, "\n");
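/*
 * Field layouts for the 64-bit TP logic analyzer samples.  Each entry names
 * a field, its starting bit, and its width; field_desc_show() above prints
 * one sample per table, wrapping the output at 79 columns.
 */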
8370 static const struct field_desc tp_la0[] = {
8371 { "RcfOpCodeOut", 60, 4 },
8373 { "WcfState", 52, 4 },
8374 { "RcfOpcSrcOut", 50, 2 },
8375 { "CRxError", 49, 1 },
8376 { "ERxError", 48, 1 },
8377 { "SanityFailed", 47, 1 },
8378 { "SpuriousMsg", 46, 1 },
8379 { "FlushInputMsg", 45, 1 },
8380 { "FlushInputCpl", 44, 1 },
8381 { "RssUpBit", 43, 1 },
8382 { "RssFilterHit", 42, 1 },
8384 { "InitTcb", 31, 1 },
8385 { "LineNumber", 24, 7 },
8387 { "EdataOut", 22, 1 },
8389 { "CdataOut", 20, 1 },
8390 { "EreadPdu", 19, 1 },
8391 { "CreadPdu", 18, 1 },
8392 { "TunnelPkt", 17, 1 },
8393 { "RcfPeerFin", 16, 1 },
8394 { "RcfReasonOut", 12, 4 },
8395 { "TxCchannel", 10, 2 },
8396 { "RcfTxChannel", 8, 2 },
8397 { "RxEchannel", 6, 2 },
8398 { "RcfRxChannel", 5, 1 },
8399 { "RcfDataOutSrdy", 4, 1 },
8401 { "RxOoDvld", 2, 1 },
8402 { "RxCongestion", 1, 1 },
8403 { "TxCongestion", 0, 1 },
8407 static const struct field_desc tp_la1[] = {
8408 { "CplCmdIn", 56, 8 },
8409 { "CplCmdOut", 48, 8 },
8410 { "ESynOut", 47, 1 },
8411 { "EAckOut", 46, 1 },
8412 { "EFinOut", 45, 1 },
8413 { "ERstOut", 44, 1 },
8418 { "DataIn", 39, 1 },
8419 { "DataInVld", 38, 1 },
8421 { "RxBufEmpty", 36, 1 },
8423 { "RxFbCongestion", 34, 1 },
8424 { "TxFbCongestion", 33, 1 },
8425 { "TxPktSumSrdy", 32, 1 },
8426 { "RcfUlpType", 28, 4 },
8428 { "Ebypass", 26, 1 },
8430 { "Static0", 24, 1 },
8432 { "Cbypass", 22, 1 },
8434 { "CPktOut", 20, 1 },
8435 { "RxPagePoolFull", 18, 2 },
8436 { "RxLpbkPkt", 17, 1 },
8437 { "TxLpbkPkt", 16, 1 },
8438 { "RxVfValid", 15, 1 },
8439 { "SynLearned", 14, 1 },
8440 { "SetDelEntry", 13, 1 },
8441 { "SetInvEntry", 12, 1 },
8442 { "CpcmdDvld", 11, 1 },
8443 { "CpcmdSave", 10, 1 },
8444 { "RxPstructsFull", 8, 2 },
8445 { "EpcmdDvld", 7, 1 },
8446 { "EpcmdFlush", 6, 1 },
8447 { "EpcmdTrimPrefix", 5, 1 },
8448 { "EpcmdTrimPostfix", 4, 1 },
8449 { "ERssIp4Pkt", 3, 1 },
8450 { "ERssIp6Pkt", 2, 1 },
8451 { "ERssTcpUdpPkt", 1, 1 },
8452 { "ERssFceFipPkt", 0, 1 },
8456 static const struct field_desc tp_la2[] = {
8457 { "CplCmdIn", 56, 8 },
8458 { "MpsVfVld", 55, 1 },
8465 { "DataIn", 39, 1 },
8466 { "DataInVld", 38, 1 },
8468 { "RxBufEmpty", 36, 1 },
8470 { "RxFbCongestion", 34, 1 },
8471 { "TxFbCongestion", 33, 1 },
8472 { "TxPktSumSrdy", 32, 1 },
8473 { "RcfUlpType", 28, 4 },
8475 { "Ebypass", 26, 1 },
8477 { "Static0", 24, 1 },
8479 { "Cbypass", 22, 1 },
8481 { "CPktOut", 20, 1 },
8482 { "RxPagePoolFull", 18, 2 },
8483 { "RxLpbkPkt", 17, 1 },
8484 { "TxLpbkPkt", 16, 1 },
8485 { "RxVfValid", 15, 1 },
8486 { "SynLearned", 14, 1 },
8487 { "SetDelEntry", 13, 1 },
8488 { "SetInvEntry", 12, 1 },
8489 { "CpcmdDvld", 11, 1 },
8490 { "CpcmdSave", 10, 1 },
8491 { "RxPstructsFull", 8, 2 },
8492 { "EpcmdDvld", 7, 1 },
8493 { "EpcmdFlush", 6, 1 },
8494 { "EpcmdTrimPrefix", 5, 1 },
8495 { "EpcmdTrimPostfix", 4, 1 },
8496 { "ERssIp4Pkt", 3, 1 },
8497 { "ERssIp6Pkt", 2, 1 },
8498 { "ERssTcpUdpPkt", 1, 1 },
8499 { "ERssFceFipPkt", 0, 1 },
8504 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
8507 field_desc_show(sb, *p, tp_la0);
8511 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
8515 sbuf_printf(sb, "\n");
8516 field_desc_show(sb, p[0], tp_la0);
8517 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8518 field_desc_show(sb, p[1], tp_la0);
8522 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
8526 sbuf_printf(sb, "\n");
8527 field_desc_show(sb, p[0], tp_la0);
8528 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8529 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
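/*
 * TP logic analyzer dump.  The DBGLAMODE field of A_TP_DBG_LA_CONFIG picks
 * the decoder: single tp_la0 words, pairs of tp_la0 words, or a tp_la0 word
 * followed by a tp_la1/tp_la2 word selected by bit 17 of the first word.
 */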
8533 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
8535 struct adapter *sc = arg1;
8540 void (*show_func)(struct sbuf *, uint64_t *, int);
8542 rc = sysctl_wire_old_buffer(req, 0);
8546 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8550 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
8552 t4_tp_read_la(sc, buf, NULL);
8555 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
8558 show_func = tp_la_show2;
8562 show_func = tp_la_show3;
8566 show_func = tp_la_show;
8569 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
8570 (*show_func)(sb, p, i);
8572 rc = sbuf_finish(sb);
8579 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
8581 struct adapter *sc = arg1;
8584 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
8586 rc = sysctl_wire_old_buffer(req, 0);
8590 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8594 t4_get_chan_txrate(sc, nrate, orate);
8596 if (sc->chip_params->nchan > 2) {
8597 sbuf_printf(sb, " channel 0 channel 1"
8598 " channel 2 channel 3\n");
8599 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
8600 nrate[0], nrate[1], nrate[2], nrate[3]);
8601 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
8602 orate[0], orate[1], orate[2], orate[3]);
8604 sbuf_printf(sb, " channel 0 channel 1\n");
8605 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
8606 nrate[0], nrate[1]);
8607 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
8608 orate[0], orate[1]);
8611 rc = sbuf_finish(sb);
8618 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
8620 struct adapter *sc = arg1;
8625 rc = sysctl_wire_old_buffer(req, 0);
8629 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8633 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
8636 t4_ulprx_read_la(sc, buf);
8639 sbuf_printf(sb, " Pcmd Type Message"
8641 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
8642 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
8643 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
8646 rc = sbuf_finish(sb);
8653 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
8655 struct adapter *sc = arg1;
8659 MPASS(chip_id(sc) >= CHELSIO_T5);
8661 rc = sysctl_wire_old_buffer(req, 0);
8665 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8669 v = t4_read_reg(sc, A_SGE_STAT_CFG);
8670 if (G_STATSOURCE_T5(v) == 7) {
8673 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
8675 sbuf_printf(sb, "total %d, incomplete %d",
8676 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8677 t4_read_reg(sc, A_SGE_STAT_MATCH));
8678 } else if (mode == 1) {
8679 sbuf_printf(sb, "total %d, data overflow %d",
8680 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8681 t4_read_reg(sc, A_SGE_STAT_MATCH));
8683 sbuf_printf(sb, "unknown mode %d", mode);
8686 rc = sbuf_finish(sb);
8693 sysctl_cpus(SYSCTL_HANDLER_ARGS)
8695 struct adapter *sc = arg1;
8696 enum cpu_sets op = arg2;
8701 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
8704 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
8708 rc = sysctl_wire_old_buffer(req, 0);
8712 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8717 sbuf_printf(sb, "%d ", i);
8718 rc = sbuf_finish(sb);
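/*
 * sc->tt.tls_rx_ports exposed as a variable-length array of ints.  Writing
 * a single -1 clears the list; any other write must be a list of valid TCP
 * port numbers, which replaces the old list under the t4tlsrx operation.
 */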
8726 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
8728 struct adapter *sc = arg1;
8729 int *old_ports, *new_ports;
8730 int i, new_count, rc;
8732 if (req->newptr == NULL && req->oldptr == NULL)
8733 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
8734 sizeof(sc->tt.tls_rx_ports[0])));
8736 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
8740 if (sc->tt.num_tls_rx_ports == 0) {
8742 rc = SYSCTL_OUT(req, &i, sizeof(i));
8744 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
8745 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
8746 if (rc == 0 && req->newptr != NULL) {
8747 new_count = req->newlen / sizeof(new_ports[0]);
8748 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
8750 rc = SYSCTL_IN(req, new_ports, new_count *
8751 sizeof(new_ports[0]));
8755 /* Allow setting to a single '-1' to clear the list. */
8756 if (new_count == 1 && new_ports[0] == -1) {
8758 old_ports = sc->tt.tls_rx_ports;
8759 sc->tt.tls_rx_ports = NULL;
8760 sc->tt.num_tls_rx_ports = 0;
8762 free(old_ports, M_CXGBE);
8764 for (i = 0; i < new_count; i++) {
8765 if (new_ports[i] < 1 ||
8766 new_ports[i] > IPPORT_MAX) {
8773 old_ports = sc->tt.tls_rx_ports;
8774 sc->tt.tls_rx_ports = new_ports;
8775 sc->tt.num_tls_rx_ports = new_count;
8777 free(old_ports, M_CXGBE);
8781 free(new_ports, M_CXGBE);
8783 end_synchronized_op(sc, 0);
8788 unit_conv(char *buf, size_t len, u_int val, u_int factor)
8790 u_int rem = val % factor;
8793 snprintf(buf, len, "%u", val / factor);
8795 while (rem % 10 == 0)
8797 snprintf(buf, len, "%u.%u", val / factor, rem);
8802 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
8804 struct adapter *sc = arg1;
8807 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8809 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8813 re = G_TIMERRESOLUTION(res);
8816 /* TCP timestamp tick */
8817 re = G_TIMESTAMPRESOLUTION(res);
8821 re = G_DELAYEDACKRESOLUTION(res);
8827 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
8829 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
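/*
 * Delayed ACK timer in microseconds: the core clock period (in ps) scaled
 * by the delayed-ACK resolution, multiplied by A_TP_DACK_TIMER.
 */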
8833 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
8835 struct adapter *sc = arg1;
8836 u_int res, dack_re, v;
8837 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8839 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8840 dack_re = G_DELAYEDACKRESOLUTION(res);
8841 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
8843 return (sysctl_handle_int(oidp, &v, 0, req));
8847 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
8849 struct adapter *sc = arg1;
8852 u_long tp_tick_us, v;
8853 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8855 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
8856 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
8857 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
8858 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
8860 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
8861 tp_tick_us = (cclk_ps << tre) / 1000000;
8863 if (reg == A_TP_INIT_SRTT)
8864 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
8866 v = tp_tick_us * t4_read_reg(sc, reg);
8868 return (sysctl_handle_long(oidp, &v, 0, req));
8872 * All fields in TP_SHIFT_CNT are 4 bits wide; the starting bit position of
8873 * the requested field is passed in via arg2 (idx 12 extracts bits 15:12).
8876 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
8878 struct adapter *sc = arg1;
8882 MPASS(idx >= 0 && idx <= 24);
8884 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
8886 return (sysctl_handle_int(oidp, &v, 0, req));
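/*
 * TCP retransmit backoff index idx (0-15).  Four indices are packed into
 * each 32-bit A_TP_TCP_BACKOFF_REG register, one per byte: idx & ~3 is the
 * register's byte offset and (idx & 3) << 3 the shift within it.
 */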
8890 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
8892 struct adapter *sc = arg1;
8896 MPASS(idx >= 0 && idx < 16);
8898 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
8899 shift = (idx & 3) << 3;
8900 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
8902 return (sysctl_handle_int(oidp, &v, 0, req));
8906 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
8908 struct vi_info *vi = arg1;
8909 struct adapter *sc = vi->pi->adapter;
8911 struct sge_ofld_rxq *ofld_rxq;
8914 idx = vi->ofld_tmr_idx;
8916 rc = sysctl_handle_int(oidp, &idx, 0, req);
8917 if (rc != 0 || req->newptr == NULL)
8920 if (idx < 0 || idx >= SGE_NTIMERS)
8923 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8928 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
8929 for_each_ofld_rxq(vi, i, ofld_rxq) {
8930 #ifdef atomic_store_rel_8
8931 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
8933 ofld_rxq->iq.intr_params = v;
8936 vi->ofld_tmr_idx = idx;
8938 end_synchronized_op(sc, LOCK_HELD);
8943 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
8945 struct vi_info *vi = arg1;
8946 struct adapter *sc = vi->pi->adapter;
8949 idx = vi->ofld_pktc_idx;
8951 rc = sysctl_handle_int(oidp, &idx, 0, req);
8952 if (rc != 0 || req->newptr == NULL)
8955 if (idx < -1 || idx >= SGE_NCOUNTERS)
8958 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8963 if (vi->flags & VI_INIT_DONE)
8964 rc = EBUSY; /* cannot be changed once the queues are created */
8966 vi->ofld_pktc_idx = idx;
8968 end_synchronized_op(sc, LOCK_HELD);
8974 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8978 if (cntxt->cid > M_CTXTQID)
8981 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8982 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8985 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8989 if (sc->flags & FW_OK) {
8990 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8997 * Read via firmware failed or wasn't even attempted. Read directly via
9000 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
9002 end_synchronized_op(sc, 0);
9007 load_fw(struct adapter *sc, struct t4_data *fw)
9012 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
9017 * The firmware, with the sole exception of the memory parity error
9018 * handler, runs from memory and not flash. It is almost always safe to
9019 * install a new firmware on a running system. Just set bit 1 in
9020 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
9022 if (sc->flags & FULL_INIT_DONE &&
9023 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
9028 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
9029 if (fw_data == NULL) {
9034 rc = copyin(fw->data, fw_data, fw->len);
9036 rc = -t4_load_fw(sc, fw_data, fw->len);
9038 free(fw_data, M_CXGBE);
9040 end_synchronized_op(sc, 0);
9045 load_cfg(struct adapter *sc, struct t4_data *cfg)
9048 uint8_t *cfg_data = NULL;
9050 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9054 if (cfg->len == 0) {
9056 rc = -t4_load_cfg(sc, NULL, 0);
9060 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
9061 if (cfg_data == NULL) {
9066 rc = copyin(cfg->data, cfg_data, cfg->len);
9068 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
9070 free(cfg_data, M_CXGBE);
9072 end_synchronized_op(sc, 0);
9077 load_boot(struct adapter *sc, struct t4_bootrom *br)
9080 uint8_t *br_data = NULL;
9083 if (br->len > 1024 * 1024)
9086 if (br->pf_offset == 0) {
9088 if (br->pfidx_addr > 7)
9090 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
9091 A_PCIE_PF_EXPROM_OFST)));
9092 } else if (br->pf_offset == 1) {
9094 offset = G_OFFSET(br->pfidx_addr);
9099 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
9105 rc = -t4_load_boot(sc, NULL, offset, 0);
9109 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
9110 if (br_data == NULL) {
9115 rc = copyin(br->data, br_data, br->len);
9117 rc = -t4_load_boot(sc, br_data, offset, br->len);
9119 free(br_data, M_CXGBE);
9121 end_synchronized_op(sc, 0);
9126 load_bootcfg(struct adapter *sc, struct t4_data *bc)
9129 uint8_t *bc_data = NULL;
9131 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9137 rc = -t4_load_bootcfg(sc, NULL, 0);
9141 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
9142 if (bc_data == NULL) {
9147 rc = copyin(bc->data, bc_data, bc->len);
9149 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
9151 free(bc_data, M_CXGBE);
9153 end_synchronized_op(sc, 0);
9158 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
9161 struct cudbg_init *cudbg;
9164 /* buf is large, don't block if no memory is available */
9165 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
9169 handle = cudbg_alloc_handle();
9170 if (handle == NULL) {
9175 cudbg = cudbg_get_init(handle);
9177 cudbg->print = (cudbg_print_cb)printf;
9180 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
9181 __func__, dump->wr_flash, dump->len, dump->data);
9185 cudbg->use_flash = 1;
9186 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
9187 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
9189 rc = cudbg_collect(handle, buf, &dump->len);
9193 rc = copyout(buf, dump->data, dump->len);
9195 cudbg_free_handle(handle);
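/*
 * Releases an offload policy: the BPF instructions attached to each rule
 * are freed first, then the rule array and the policy itself.
 */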
9201 free_offload_policy(struct t4_offload_policy *op)
9203 struct offload_rule *r;
9210 for (i = 0; i < op->nrules; i++, r++) {
9211 free(r->bpf_prog.bf_insns, M_CXGBE);
9213 free(op->rule, M_CXGBE);
9218 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
9221 struct t4_offload_policy *op, *old;
9222 struct bpf_program *bf;
9223 const struct offload_settings *s;
9224 struct offload_rule *r;
9227 if (!is_offload(sc))
9230 if (uop->nrules == 0) {
9231 /* Delete installed policies. */
9234 } else if (uop->nrules > 256) { /* arbitrary */
9238 /* Copy userspace offload policy to kernel */
9239 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
9240 op->nrules = uop->nrules;
9241 len = op->nrules * sizeof(struct offload_rule);
9242 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9243 rc = copyin(uop->rule, op->rule, len);
9245 free(op->rule, M_CXGBE);
9251 for (i = 0; i < op->nrules; i++, r++) {
9253 /* Validate open_type */
9254 if (r->open_type != OPEN_TYPE_LISTEN &&
9255 r->open_type != OPEN_TYPE_ACTIVE &&
9256 r->open_type != OPEN_TYPE_PASSIVE &&
9257 r->open_type != OPEN_TYPE_DONTCARE) {
9260 * Rules 0 to i have malloc'd filters that need to be
9261 * freed. Rules i+1 to nrules have userspace pointers
9262 * and should be left alone.
9265 free_offload_policy(op);
9269 /* Validate settings */
9271 if ((s->offload != 0 && s->offload != 1) ||
9272 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
9273 s->sched_class < -1 ||
9274 s->sched_class >= sc->chip_params->nsched_cls) {
9280 u = bf->bf_insns; /* userspace ptr */
9281 bf->bf_insns = NULL;
9282 if (bf->bf_len == 0) {
9283 /* legal, matches everything */
9286 len = bf->bf_len * sizeof(*bf->bf_insns);
9287 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9288 rc = copyin(u, bf->bf_insns, len);
9292 if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
9298 rw_wlock(&sc->policy_lock);
9301 rw_wunlock(&sc->policy_lock);
9302 free_offload_policy(old);
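/*
 * Reads a range of adapter memory into a userspace buffer, in chunks of at
 * most MAX_READ_BUF_SIZE bytes, through memory window 2.  The range is
 * validated against the adapter's memory map first.
 */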
9307 #define MAX_READ_BUF_SIZE (128 * 1024)
9309 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
9311 uint32_t addr, remaining, n;
9316 rc = validate_mem_range(sc, mr->addr, mr->len);
9320 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
9322 remaining = mr->len;
9323 dst = (void *)mr->data;
9326 n = min(remaining, MAX_READ_BUF_SIZE);
9327 read_via_memwin(sc, 2, addr, buf, n);
9329 rc = copyout(buf, dst, n);
9341 #undef MAX_READ_BUF_SIZE
static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > sizeof(i2cd->data))
		return (EFBIG);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}
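/*
 * Example (illustrative only): reading the SFP+ module identifier byte on
 * port 0 from userspace.  0xa0 is the standard I2C address of a module's
 * EEPROM and offset 0 holds the identifier; "fd" is assumed to be an open
 * /dev/t4nex* descriptor.
 */
#if 0
	struct t4_i2c_data i2cd;

	memset(&i2cd, 0, sizeof(i2cd));
	i2cd.port_id = 0;
	i2cd.dev_addr = 0xa0;
	i2cd.offset = 0;
	i2cd.len = 1;
	if (ioctl(fd, CHELSIO_T4_GET_I2C, &i2cd) == 0)
		printf("module id: %#x\n", i2cd.data[0]);
#endif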
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}
int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev = sc->dev;
	struct pci_devinfo *dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}
int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev = sc->dev;
	struct pci_devinfo *dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}
void
t4_os_portmod_changed(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	struct ifnet *ifp;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	MPASS((pi->flags & FIXED_IFMEDIA) == 0);

	vi = &pi->vi[0];
	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
		build_medialist(pi, &pi->media);
		end_synchronized_op(sc, LOCK_HELD);
	}

	ifp = vi->ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
		    port_top_speed(pi), mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}
void
t4_os_link_changed(struct port_info *pi)
{
	struct vi_info *vi;
	struct ifnet *ifp;
	struct link_config *lc = &pi->link_cfg;
	int v;

	PORT_LOCK_ASSERT_OWNED(pi);

	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)
			continue;

		if (lc->link_ok) {
			ifp->if_baudrate = IF_Mbps(lc->speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}
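/*
 * Example (illustrative only): a sleepable-context caller counting the
 * attached adapters with t4_iterate().  The callback runs with t4_list_lock
 * held shared, so it must not block on anything that might need that lock.
 */
#if 0
static void
count_adapters(struct adapter *sc, void *arg)
{
	(*(int *)arg)++;	/* sc is valid but in an unknown state */
}

	int n = 0;

	t4_iterate(count_adapters, &n);
#endif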
int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = t4_get_regs_len(sc);
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v, bg_map;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];
		if (pi == NULL)
			return (EIO);

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		pi->tnl_cong_drops = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		bg_map = pi->mps_bg_map;
		v = 0;	/* reuse */
		while (bg_map) {
			i = ffs(bg_map) - 1;
			t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
			bg_map &= ~(1 << i);
		}
		mtx_unlock(&sc->reg_lock);

		/*
		 * Since this command accepts a port, clear stats for
		 * all VIs on this port.
		 */
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE) {
				struct sge_rxq *rxq;
				struct sge_txq *txq;
				struct sge_wrq *wrq;

				for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
					rxq->lro.lro_queued = 0;
					rxq->lro.lro_flushed = 0;
#endif
					rxq->rxcsum = 0;
					rxq->vlan_extraction = 0;
				}

				for_each_txq(vi, i, txq) {
					txq->txcsum = 0;
					txq->tso_wrs = 0;
					txq->vlan_insertion = 0;
					txq->imm_wrs = 0;
					txq->sgl_wrs = 0;
					txq->txpkt_wrs = 0;
					txq->txpkts0_wrs = 0;
					txq->txpkts1_wrs = 0;
					txq->txpkts0_pkts = 0;
					txq->txpkts1_pkts = 0;
					mp_ring_reset_stats(txq->r);
				}
#ifdef TCP_OFFLOAD
				/* nothing to clear for each ofld_rxq */

				for_each_ofld_txq(vi, i, wrq) {
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
#endif
				if (IS_MAIN_VI(vi)) {
					wrq = &sc->sge.ctrlq[pi->port_id];
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
			}
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_LOAD_CFG:
		rc = load_cfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_LOAD_BOOT:
		rc = load_boot(sc, (struct t4_bootrom *)data);
		break;
	case CHELSIO_T4_LOAD_BOOTCFG:
		rc = load_bootcfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_CUDBG_DUMP:
		rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
		break;
	case CHELSIO_T4_SET_OFLD_POLICY:
		rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
		break;
	default:
		rc = ENOTTY;
	}

	return (rc);
}
void
t4_db_full(struct adapter *sc)
{
	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_db_dropped(struct adapter *sc)
{
	CXGBE_UNIMPLEMENTED(__func__);
}
#ifdef TCP_OFFLOAD
static int
toe_capability(struct vi_info *vi, int enable)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
			/* TOE is already enabled. */
			return (0);
		}

		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(vi->flags & VI_INIT_DONE)) {
			rc = vi_full_init(vi);
			if (rc)
				return (rc);
		}
		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
			rc = vi_full_init(&pi->vi[0]);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;

		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
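/*
 * Note: toe_capability() is reached via SIOCSIFCAP when the "toe" interface
 * capability is toggled (e.g. "ifconfig cxgbe0 toe" from userland), with the
 * synchronized-op protection asserted above already held by the ioctl path.
 */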
/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}
	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}
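/*
 * Example (illustrative, modeled on what a ULD such as t4_tom does): fill in
 * a struct uld_info and register it from the module's MOD_LOAD handler.  The
 * callback names below are assumptions for the sketch, not actual symbols.
 */
#if 0
static struct uld_info example_uld_info = {
	.uld_id = ULD_TOM,
	.activate = example_activate,		/* called by t4_activate_uld */
	.deactivate = example_deactivate,	/* called by t4_deactivate_uld */
};

	rc = t4_register_uld(&example_uld_info);
#endif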
int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}
			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}
int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);
	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}
			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}
	sx_sunlock(&t4_uld_list_lock);
	return (rc);
}
int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);
	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}
	sx_sunlock(&t4_uld_list_lock);
	return (rc);
}
int
uld_active(struct adapter *sc, int uld_id)
{
	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif /* TCP_OFFLOAD */
/*
 * t  = ptr to tunable.
 * nc = number of CPUs.
 * c  = compiled in default for that tunable.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
	int nq;

	if (*t > 0)
		return;
	nq = *t < 0 ? -*t : c;
	*t = min(nq, nc);
}
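/*
 * Worked example for the sketch above: with nc = 8 CPUs and c = NRXQ, an
 * unset tunable (*t == 0) becomes min(NRXQ, 8); a tunable set to -4 (e.g.
 * hw.cxgbe.nrxq="-4") yields min(4, 8) = 4; and any positive value is a
 * user decision that is left untouched.
 */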
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

#ifdef RSS
	if (t4_ntxq < 1)
		t4_ntxq = rss_getnumbuckets();
#endif
	calculate_nqueues(&t4_ntxq, nc, NTXQ);
	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
#ifdef RSS
	if (t4_nrxq < 1)
		t4_nrxq = rss_getnumbuckets();
#endif
	calculate_nqueues(&t4_nrxq, nc, NRXQ);
	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
#endif
#ifdef TCP_OFFLOAD
	calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;

	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}

	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
		t4_tmr_idx_ofld = TMR_IDX_OFLD;

	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;

	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

	if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
		t4_tmr_idx = TMR_IDX;

	if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
		t4_pktc_idx = PKTC_IDX;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The rest are additional virtual
	 * interfaces on the same physical port.  Note that the main VI does
	 * not have native netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis < 1)
		t4_num_vis = 1;
	if (t4_num_vis > nitems(vi_mac_funcs)) {
		t4_num_vis = nitems(vi_mac_funcs);
		printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
	}

	if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
		pcie_relaxed_ordering = 1;
#if defined(__i386__) || defined(__amd64__)
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			pcie_relaxed_ordering = 0;
#endif
	}
}
#ifdef DDB
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);

	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];

		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}
static void
t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)
		return;

	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			return;

		if (e.timestamp == 0)
			return;

		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d  %15ju  %8s  %8s  ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}
static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);

DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t;
	bool valid;

	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}
DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t, tid;
	bool valid;

	valid = false;
	tid = -1;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
		if (t == tNUMBER) {
			tid = db_tok_number;
			valid = true;
		}
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}
#endif /* DDB */
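/*
 * Example ddb usage, matching the usage strings above (the tid value is
 * illustrative):
 *	db> show t4 devlog t4nex0
 *	db> show t4 tcb t4nex0 17
 */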
/*
 * Borrowed from cesa_prep_aes_key().
 *
 * NB: The crypto engine wants the words in the decryption key in reverse
 * order.
 */
void
t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
{
	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
	uint32_t *dkey;
	int i;

	rijndaelKeySetupEnc(ek, enc_key, kbits);
	dkey = dec_key;
	dkey += (kbits / 8) / 4;

	switch (kbits) {
	case 128:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 10 + i]);
		break;
	case 192:
		for (i = 0; i < 2; i++)
			*--dkey = htobe32(ek[4 * 11 + 2 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 12 + i]);
		break;
	case 256:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 13 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 14 + i]);
		break;
	}
	MPASS(dkey == dec_key);
}
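/*
 * Example (illustrative only): deriving the reversed decryption schedule for
 * an AES-128 key.  The decryption copy occupies kbits/8 bytes, the same size
 * as the original key.
 */
#if 0
	uint8_t enc[16], dec[16];

	arc4rand(enc, sizeof(enc), 0);	/* stand-in for a real key */
	t4_aes_getdeckey(dec, enc, 128);
#endif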
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
			    t4_filter_rpl, CPL_COOKIE_FILTER);
			t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
			    do_l2t_write_rpl, CPL_COOKIE_FILTER);
			t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
			    t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
			    t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
			    t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
			t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
			    do_smt_write_rpl);
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}
static devclass_t t4_devclass, t5_devclass, t6_devclass;
static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
MODULE_VERSION(cc, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
MODULE_VERSION(vcc, 1);